diff --git a/owl-bot-staging/google-cloud-bigtable/google-cloud-bigtable/google-cloud-bigtable.txt b/owl-bot-staging/google-cloud-bigtable/google-cloud-bigtable/google-cloud-bigtable.txt
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/packages/google-cloud-bigtable/.coveragerc b/packages/google-cloud-bigtable/.coveragerc
new file mode 100644
index 000000000000..f12d4dc21a9f
--- /dev/null
+++ b/packages/google-cloud-bigtable/.coveragerc
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Generated by synthtool. DO NOT EDIT!
+[run]
+branch = True
+omit =
+ google/cloud/bigtable_admin/__init__.py
+ google/cloud/bigtable_admin/gapic_version.py
+
+[report]
+fail_under = 99
+show_missing = True
+exclude_lines =
+ # Re-enable the standard pragma
+ pragma: NO COVER
+ # Ignore debug-only repr
+ def __repr__
+ # Ignore abstract methods
+ raise NotImplementedError
+omit =
+ */site-packages/*.py
diff --git a/packages/google-cloud-bigtable/.cross_sync/README.md b/packages/google-cloud-bigtable/.cross_sync/README.md
new file mode 100644
index 000000000000..0d8a1cf8c2c1
--- /dev/null
+++ b/packages/google-cloud-bigtable/.cross_sync/README.md
@@ -0,0 +1,75 @@
+# CrossSync
+
+CrossSync provides a simple way to share logic between async and sync code.
+It is made up of a small library that provides:
+1. a set of shims that provide a shared sync/async API surface
+2. annotations that are used to guide generation of a sync version from an async class
+
+Using CrossSync, the async code is treated as the source of truth, and sync code is generated from it.
+
+## Usage
+
+### CrossSync Shims
+
+Many asyncio components have direct, 1:1 threaded counterparts for use in non-asyncio code. CrossSync
+provides a compatibility layer that works with both:
+
+| CrossSync | Asyncio Version | Sync Version |
+| --- | --- | --- |
+| CrossSync.Queue | asyncio.Queue | queue.Queue |
+| CrossSync.Condition | asyncio.Condition | threading.Condition |
+| CrossSync.Future | asyncio.Future | concurrent.futures.Future |
+| CrossSync.Task | asyncio.Task | concurrent.futures.Future |
+| CrossSync.Event | asyncio.Event | threading.Event |
+| CrossSync.Semaphore | asyncio.Semaphore | threading.Semaphore |
+| CrossSync.Awaitable | typing.Awaitable | typing.Union (no-op type) |
+| CrossSync.Iterable | typing.AsyncIterable | typing.Iterable |
+| CrossSync.Iterator | typing.AsyncIterator | typing.Iterator |
+| CrossSync.Generator | typing.AsyncGenerator | typing.Generator |
+| CrossSync.Retry | google.api_core.retry.AsyncRetry | google.api_core.retry.Retry |
+| CrossSync.StopIteration | StopAsyncIteration | StopIteration |
+| CrossSync.Mock | unittest.mock.AsyncMock | unittest.mock.Mock |
+
+Custom aliases can be added using `CrossSync.add_mapping(class, name)`.
+
+Additionally, CrossSync provides method implementations that work equivalently in async and sync code (see the sketch after this list):
+- `CrossSync.sleep()`
+- `CrossSync.gather_partials()`
+- `CrossSync.wait()`
+- `CrossSync.condition_wait()`
+- `CrossSync.event_wait()`
+- `CrossSync.create_task()`
+- `CrossSync.retry_target()`
+- `CrossSync.retry_target_stream()`
+
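+As a minimal sketch, shared logic can be written against these aliases rather than against
+`asyncio` or `threading` directly (the `produce` helper below is illustrative and not part of
+the library; the import path follows the Architecture section below):
+
+```python
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+
+async def produce(queue: CrossSync.Queue, items: list) -> None:
+    """Hypothetical helper: push items onto a CrossSync queue with a short pause."""
+    for item in items:
+        # CrossSync.Queue is asyncio.Queue here and queue.Queue in the generated sync code;
+        # CrossSync.rm_aio (see Annotations below) marks the await for removal in that output
+        CrossSync.rm_aio(await queue.put(item))
+        # CrossSync.sleep works equivalently in async and sync code
+        CrossSync.rm_aio(await CrossSync.sleep(0.1))
+```
+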
+### Annotations
+
+CrossSync provides a set of annotations for marking up async classes to guide the generation of sync code; a short example follows the list below.
+
+- `@CrossSync.convert_sync`
+ - marks classes for conversion. Unmarked classes will be copied as-is
+ - if add_mapping is included, the async and sync classes can be accessed using a shared CrossSync.X alias
+- `@CrossSync.convert`
+ - marks async functions for conversion. Unmarked methods will be copied as-is
+- `@CrossSync.drop`
+ - marks functions or classes that should not be included in sync output
+- `@CrossSync.pytest`
+ - marks test functions. Test functions automatically have all async keywords stripped (i.e., rm_aio is unneeded)
+- `CrossSync.add_mapping`
+ - manually registers a new CrossSync.X alias, for custom types
+- `CrossSync.rm_aio`
+ - Marks regions of the code that include asyncio keywords that should be stripped during generation
+
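+As a minimal sketch, an annotated async class might look like the following (the module path,
+class name, and method names are hypothetical; only the `CrossSync` annotations above and the
+`__CROSS_SYNC_OUTPUT__` marker, described under Code Generation below, come from this library):
+
+```python
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+# where the generated sync module is written (see Code Generation below)
+__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._example_sync"
+
+
+@CrossSync.convert_sync
+class ExampleWorkerAsync:
+    @CrossSync.convert
+    async def next_item(self, queue: CrossSync.Queue):
+        # the await is stripped in the generated sync version
+        return CrossSync.rm_aio(await queue.get())
+
+    @CrossSync.drop
+    async def async_only_helper(self):
+        # excluded from the sync output entirely
+        ...
+```
+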
+### Code Generation
+
+Generation can be initiated using `nox -s generate_sync`
+from the root of the project. This will find all files with the `__CROSS_SYNC_OUTPUT__ = "path/to/output"`
+module-level annotation, and generate a sync version of the classes marked with `@CrossSync.convert_sync` at the output path.
+
+There is a unit test at `tests/unit/data/test_sync_up_to_date.py` that verifies that the generated code is up to date.
+
+## Architecture
+
+CrossSync is made up of two parts:
+- the runtime shims and annotations live in `/google/cloud/bigtable/data/_cross_sync`
+- the code generation logic lives in `/.cross_sync/` in the repo root
diff --git a/packages/google-cloud-bigtable/.cross_sync/generate.py b/packages/google-cloud-bigtable/.cross_sync/generate.py
new file mode 100644
index 000000000000..5158d0f37338
--- /dev/null
+++ b/packages/google-cloud-bigtable/.cross_sync/generate.py
@@ -0,0 +1,107 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+from typing import Sequence
+import ast
+"""
+Entrypoint for initiating an async -> sync conversion using CrossSync
+
+Finds all python files rooted in a given directory, and uses
+transformers.CrossSyncFileProcessor to handle any files marked with
+__CROSS_SYNC_OUTPUT__
+"""
+
+
+def extract_header_comments(file_path) -> str:
+ """
+ Extract the file header. Header is defined as the top-level
+ comments before any code or imports
+ """
+ header = []
+ with open(file_path, "r") as f:
+ for line in f:
+ if line.startswith("#") or line.strip() == "":
+ header.append(line)
+ else:
+ break
+ header.append("\n# This file is automatically generated by CrossSync. Do not edit manually.\n\n")
+ return "".join(header)
+
+
+class CrossSyncOutputFile:
+
+ def __init__(self, output_path: str, ast_tree, header: str | None = None):
+ self.output_path = output_path
+ self.tree = ast_tree
+ self.header = header or ""
+
+ def render(self, with_formatter=True, save_to_disk: bool = True) -> str:
+ """
+ Render the file to a string, and optionally save to disk
+
+ Args:
+ with_formatter: whether to run the output through black before returning
+ save_to_disk: whether to write the output to the file path
+ """
+ full_str = self.header + ast.unparse(self.tree)
+ if with_formatter:
+ import black # type: ignore
+ import autoflake # type: ignore
+
+ full_str = black.format_str(
+ autoflake.fix_code(full_str, remove_all_unused_imports=True),
+ mode=black.FileMode(),
+ )
+ if save_to_disk:
+ import os
+ os.makedirs(os.path.dirname(self.output_path), exist_ok=True)
+ with open(self.output_path, "w") as f:
+ f.write(full_str)
+ return full_str
+
+
+def convert_files_in_dir(directory: str) -> set[CrossSyncOutputFile]:
+ import glob
+ from transformers import CrossSyncFileProcessor
+
+ # find all python files in the directory
+ files = glob.glob(directory + "/**/*.py", recursive=True)
+ # keep track of the output files pointed to by the annotated classes
+ artifacts: set[CrossSyncOutputFile] = set()
+ file_transformer = CrossSyncFileProcessor()
+ # run each file through ast transformation to find all annotated classes
+ for file_path in files:
+ ast_tree = ast.parse(open(file_path).read())
+ output_path = file_transformer.get_output_path(ast_tree)
+ if output_path is not None:
+ # contains __CROSS_SYNC_OUTPUT__ annotation
+ converted_tree = file_transformer.visit(ast_tree)
+ header = extract_header_comments(file_path)
+ artifacts.add(CrossSyncOutputFile(output_path, converted_tree, header))
+ # return set of output artifacts
+ return artifacts
+
+
+def save_artifacts(artifacts: Sequence[CrossSyncOutputFile]):
+ for a in artifacts:
+ a.render(save_to_disk=True)
+
+
+if __name__ == "__main__":
+ import sys
+
+ search_root = sys.argv[1]
+ outputs = convert_files_in_dir(search_root)
+ print(f"Generated {len(outputs)} artifacts: {[a.output_path for a in outputs]}")
+ save_artifacts(outputs)
diff --git a/packages/google-cloud-bigtable/.cross_sync/transformers.py b/packages/google-cloud-bigtable/.cross_sync/transformers.py
new file mode 100644
index 000000000000..9adadd0aa727
--- /dev/null
+++ b/packages/google-cloud-bigtable/.cross_sync/transformers.py
@@ -0,0 +1,338 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Provides a set of ast.NodeTransformer subclasses that are composed to convert
+async code into sync code.
+
+At a high level:
+- The main entrypoint is CrossSyncFileProcessor, which is used to find files in
+ the codebase that include __CROSS_SYNC_OUTPUT__, and transform them
+  according to the `CrossSync` annotations they contain
+- SymbolReplacer is used to swap out CrossSync.X with CrossSync._Sync_Impl.X
+- RmAioFunctions is used to strip out asyncio keywords marked with CrossSync.rm_aio
+ (deferring to AsyncToSync to handle the actual transformation)
+- StripAsyncConditionalBranches finds `if CrossSync.is_async:` conditionals, and strips out
+ the unneeded branch for the sync output
+"""
+from __future__ import annotations
+
+import ast
+
+import sys
+# add cross_sync to path
+sys.path.append("google/cloud/bigtable/data/_cross_sync")
+from _decorators import AstDecorator
+
+
+class SymbolReplacer(ast.NodeTransformer):
+ """
+ Replaces all instances of a symbol in an AST with a replacement
+
+ Works for function signatures, method calls, docstrings, and type annotations
+ """
+ def __init__(self, replacements: dict[str, str]):
+ self.replacements = replacements
+
+ def visit_Name(self, node):
+ if node.id in self.replacements:
+ node.id = self.replacements[node.id]
+ return node
+
+ def visit_Attribute(self, node):
+ return ast.copy_location(
+ ast.Attribute(
+ self.visit(node.value),
+ self.replacements.get(node.attr, node.attr),
+ node.ctx,
+ ),
+ node,
+ )
+
+ def visit_AsyncFunctionDef(self, node):
+ """
+ Replace async function docstrings
+ """
+ # use same logic as FunctionDef
+ return self.visit_FunctionDef(node)
+
+ def visit_FunctionDef(self, node):
+ """
+ Replace function docstrings
+ """
+ docstring = ast.get_docstring(node)
+ if docstring and isinstance(node.body[0], ast.Expr) \
+ and isinstance(node.body[0].value, ast.Constant) \
+ and isinstance(node.body[0].value.value, str) \
+ :
+ for key_word, replacement in self.replacements.items():
+ docstring = docstring.replace(key_word, replacement)
+ node.body[0].value.value = docstring
+ return self.generic_visit(node)
+
+ def visit_Constant(self, node):
+ """Replace string type annotations"""
+ try:
+ node.value = self.replacements.get(node.value, node.value)
+ except TypeError:
+ # ignore unhashable types (e.g. list)
+ pass
+ return node
+
+
+class AsyncToSync(ast.NodeTransformer):
+ """
+ Replaces or strips all async keywords from a given AST
+ """
+ def visit_Await(self, node):
+ """
+ Strips await keyword
+ """
+ return self.visit(node.value)
+
+ def visit_AsyncFor(self, node):
+ """
+ Replaces `async for` with `for`
+ """
+ return ast.copy_location(
+ ast.For(
+ self.visit(node.target),
+ self.visit(node.iter),
+ [self.visit(stmt) for stmt in node.body],
+ [self.visit(stmt) for stmt in node.orelse],
+ ),
+ node,
+ )
+
+ def visit_AsyncWith(self, node):
+ """
+ Replaces `async with` with `with`
+ """
+ return ast.copy_location(
+ ast.With(
+ [self.visit(item) for item in node.items],
+ [self.visit(stmt) for stmt in node.body],
+ ),
+ node,
+ )
+
+ def visit_AsyncFunctionDef(self, node):
+ """
+ Replaces `async def` with `def`
+ """
+ return ast.copy_location(
+ ast.FunctionDef(
+ node.name,
+ self.visit(node.args),
+ [self.visit(stmt) for stmt in node.body],
+ [self.visit(decorator) for decorator in node.decorator_list],
+ node.returns and self.visit(node.returns),
+ ),
+ node,
+ )
+
+ def visit_ListComp(self, node):
+ """
+ Replaces `async for` with `for` in list comprehensions
+ """
+ for generator in node.generators:
+ generator.is_async = False
+ return self.generic_visit(node)
+
+
+class RmAioFunctions(ast.NodeTransformer):
+ """
+ Visits all calls marked with CrossSync.rm_aio, and removes asyncio keywords
+ """
+ RM_AIO_FN_NAME = "rm_aio"
+ RM_AIO_CLASS_NAME = "CrossSync"
+
+ def __init__(self):
+ self.to_sync = AsyncToSync()
+
+ def _is_rm_aio_call(self, node) -> bool:
+ """
+ Check if a node is a CrossSync.rm_aio call
+ """
+ if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute) and isinstance(node.func.value, ast.Name):
+ if node.func.attr == self.RM_AIO_FN_NAME and node.func.value.id == self.RM_AIO_CLASS_NAME:
+ return True
+ return False
+
+ def visit_Call(self, node):
+ if self._is_rm_aio_call(node):
+ return self.visit(self.to_sync.visit(node.args[0]))
+ return self.generic_visit(node)
+
+ def visit_AsyncWith(self, node):
+ """
+ `async with` statements can contain multiple async context managers.
+
+ If any of them contains a CrossSync.rm_aio statement, convert into standard `with` statement
+ """
+ if any(self._is_rm_aio_call(item.context_expr) for item in node.items
+ ):
+ new_node = ast.copy_location(
+ ast.With(
+ [self.visit(item) for item in node.items],
+ [self.visit(stmt) for stmt in node.body],
+ ),
+ node,
+ )
+ return self.generic_visit(new_node)
+ return self.generic_visit(node)
+
+ def visit_AsyncFor(self, node):
+ """
+        `async for` statements cannot be wrapped in a call directly; instead, the
+        CrossSync.rm_aio marker is applied to the iterable, so convert to a plain `for` loop here
+ """
+ it = node.iter
+ if self._is_rm_aio_call(it):
+ return ast.copy_location(
+ ast.For(
+ self.visit(node.target),
+ self.visit(it),
+ [self.visit(stmt) for stmt in node.body],
+ [self.visit(stmt) for stmt in node.orelse],
+ ),
+ node,
+ )
+ return self.generic_visit(node)
+
+
+class StripAsyncConditionalBranches(ast.NodeTransformer):
+ """
+ Visits all if statements in an AST, and removes branches marked with CrossSync.is_async
+ """
+
+ def visit_If(self, node):
+ """
+ remove CrossSync.is_async branches from top-level if statements
+ """
+ kept_branch = None
+ # check for CrossSync.is_async
+ if self._is_async_check(node.test):
+ kept_branch = node.orelse
+ # check for not CrossSync.is_async
+ elif isinstance(node.test, ast.UnaryOp) and isinstance(node.test.op, ast.Not) and self._is_async_check(node.test.operand):
+ kept_branch = node.body
+ if kept_branch is not None:
+ # only keep the statements in the kept branch
+ return [self.visit(n) for n in kept_branch]
+ else:
+ # keep the entire if statement
+ return self.generic_visit(node)
+
+ def _is_async_check(self, node) -> bool:
+ """
+ Check for CrossSync.is_async or CrossSync.is_async == True checks
+ """
+ if isinstance(node, ast.Attribute):
+ # for CrossSync.is_async
+ return isinstance(node.value, ast.Name) and node.value.id == "CrossSync" and node.attr == "is_async"
+ elif isinstance(node, ast.Compare):
+ # for CrossSync.is_async == True
+ return self._is_async_check(node.left) and (isinstance(node.ops[0], ast.Eq) or isinstance(node.ops[0], ast.Is)) and len(node.comparators) == 1 and node.comparators[0].value == True
+ return False
+
+
+class CrossSyncFileProcessor(ast.NodeTransformer):
+ """
+ Visits a file, looking for __CROSS_SYNC_OUTPUT__ annotations
+
+ If found, the file is processed with the following steps:
+      - Strip out asyncio keywords within CrossSync.rm_aio calls
+      - Transform classes and methods annotated with CrossSync decorators
+      - Remove statements behind CrossSync.is_async conditional branches
+      - Replace remaining CrossSync references with corresponding CrossSync._Sync_Impl calls
+      - Save changes to an output file at the path specified by __CROSS_SYNC_OUTPUT__
+ """
+ FILE_ANNOTATION = "__CROSS_SYNC_OUTPUT__"
+
+ def get_output_path(self, node):
+ for n in node.body:
+ if isinstance(n, ast.Assign):
+ for target in n.targets:
+ if isinstance(target, ast.Name) and target.id == self.FILE_ANNOTATION:
+ # return the output path
+ return n.value.value.replace(".", "/") + ".py"
+
+ def visit_Module(self, node):
+ # look for __CROSS_SYNC_OUTPUT__ Assign statement
+ output_path = self.get_output_path(node)
+ if output_path:
+ # if found, process the file
+ converted = self.generic_visit(node)
+ # strip out CrossSync.rm_aio calls
+ converted = RmAioFunctions().visit(converted)
+ # strip out CrossSync.is_async branches
+ converted = StripAsyncConditionalBranches().visit(converted)
+ # replace CrossSync statements
+ converted = SymbolReplacer({"CrossSync": "CrossSync._Sync_Impl"}).visit(converted)
+ return converted
+ else:
+ # not cross_sync file. Return None
+ return None
+
+ def visit_ClassDef(self, node):
+ """
+        Called for each class in the file. If the class has a CrossSync decorator, it will be
+        transformed according to the decorator arguments. Otherwise, the class is returned unchanged
+ """
+ orig_decorators = node.decorator_list
+ for decorator in orig_decorators:
+ try:
+ handler = AstDecorator.get_for_node(decorator)
+ # transformation is handled in sync_ast_transform method of the decorator
+ node = handler.sync_ast_transform(node, globals())
+ except ValueError:
+ # not cross_sync decorator
+ continue
+ return self.generic_visit(node) if node else None
+
+ def visit_Assign(self, node):
+ """
+ strip out __CROSS_SYNC_OUTPUT__ assignments
+ """
+ if isinstance(node.targets[0], ast.Name) and node.targets[0].id == self.FILE_ANNOTATION:
+ return None
+ return self.generic_visit(node)
+
+ def visit_FunctionDef(self, node):
+ """
+ Visit any sync methods marked with CrossSync decorators
+ """
+ return self.visit_AsyncFunctionDef(node)
+
+ def visit_AsyncFunctionDef(self, node):
+ """
+ Visit and transform any async methods marked with CrossSync decorators
+ """
+ try:
+ if hasattr(node, "decorator_list"):
+ found_list, node.decorator_list = node.decorator_list, []
+ for decorator in found_list:
+ try:
+ handler = AstDecorator.get_for_node(decorator)
+ node = handler.sync_ast_transform(node, globals())
+ if node is None:
+ return None
+ # recurse to any nested functions
+ node = self.generic_visit(node)
+ except ValueError:
+ # keep unknown decorators
+ node.decorator_list.append(decorator)
+ continue
+ return self.generic_visit(node)
+ except ValueError as e:
+ raise ValueError(f"node {node.name} failed") from e
diff --git a/packages/google-cloud-bigtable/.flake8 b/packages/google-cloud-bigtable/.flake8
new file mode 100644
index 000000000000..32986c79287a
--- /dev/null
+++ b/packages/google-cloud-bigtable/.flake8
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Generated by synthtool. DO NOT EDIT!
+[flake8]
+ignore = E203, E231, E266, E501, W503
+exclude =
+ # Exclude generated code.
+ **/proto/**
+ **/gapic/**
+ **/services/**
+ **/types/**
+ *_pb2.py
+
+ # Standard linting exemptions.
+ **/.nox/**
+ __pycache__,
+ .git,
+ *.pyc,
+ conf.py
diff --git a/packages/google-cloud-bigtable/.gitignore b/packages/google-cloud-bigtable/.gitignore
new file mode 100644
index 000000000000..d083ea1ddc3e
--- /dev/null
+++ b/packages/google-cloud-bigtable/.gitignore
@@ -0,0 +1,64 @@
+*.py[cod]
+*.sw[op]
+
+# C extensions
+*.so
+
+# Packages
+*.egg
+*.egg-info
+dist
+build
+eggs
+.eggs
+parts
+bin
+var
+sdist
+develop-eggs
+.installed.cfg
+lib
+lib64
+__pycache__
+
+# Installer logs
+pip-log.txt
+
+# Unit test / coverage reports
+.coverage
+.nox
+.cache
+.pytest_cache
+
+
+# Mac
+.DS_Store
+
+# JetBrains
+.idea
+
+# VS Code
+.vscode
+
+# emacs
+*~
+
+# Built documentation
+docs/_build
+bigquery/docs/generated
+docs.metadata
+
+# Virtual environment
+env/
+venv/
+
+# Test logs
+coverage.xml
+*sponge_log.xml
+
+# System test environment variables.
+system_tests/local_test_setup
+
+# Make sure a generated file isn't accidentally committed.
+pylintrc
+pylintrc.test
diff --git a/packages/google-cloud-bigtable/.gitmodules b/packages/google-cloud-bigtable/.gitmodules
new file mode 100644
index 000000000000..5fa9b1ed5c25
--- /dev/null
+++ b/packages/google-cloud-bigtable/.gitmodules
@@ -0,0 +1,6 @@
+[submodule "python-api-core"]
+ path = python-api-core
+ url = git@github.com:googleapis/python-api-core.git
+[submodule "gapic-generator-fork"]
+ path = gapic-generator-fork
+ url = git@github.com:googleapis/gapic-generator-python.git
diff --git a/packages/google-cloud-bigtable/.librarian/generator-input/.repo-metadata.json b/packages/google-cloud-bigtable/.librarian/generator-input/.repo-metadata.json
new file mode 100644
index 000000000000..9de4b5f92bf5
--- /dev/null
+++ b/packages/google-cloud-bigtable/.librarian/generator-input/.repo-metadata.json
@@ -0,0 +1,80 @@
+{
+ "name": "bigtable",
+ "name_pretty": "Cloud Bigtable",
+ "product_documentation": "https://cloud.google.com/bigtable",
+ "client_documentation": "https://cloud.google.com/python/docs/reference/bigtable/latest",
+ "issue_tracker": "https://issuetracker.google.com/savedsearches/559777",
+ "release_level": "stable",
+ "language": "python",
+ "library_type": "GAPIC_COMBO",
+ "repo": "googleapis/python-bigtable",
+ "distribution_name": "google-cloud-bigtable",
+ "api_id": "bigtable.googleapis.com",
+ "requires_billing": true,
+ "samples": [
+ {
+ "name": "Hello World in Cloud Bigtable",
+ "description": "Demonstrates how to connect to Cloud Bigtable and run some basic operations. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello",
+ "file": "main.py",
+ "runnable": true,
+      "custom_content": "usage: main.py [-h] [--table TABLE] project_id instance_id Demonstrates how to connect to Cloud Bigtable and run some basic operations. Prerequisites: - Create a Cloud Bigtable cluster. https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google Application Default Credentials. https://developers.google.com/identity/protocols/application-default-credentials positional arguments: project_id Your Cloud Platform project ID. instance_id ID of the Cloud Bigtable instance to connect to. optional arguments: -h, --help show this help message and exit --table TABLE Table to create and destroy. (default: Hello-Bigtable)",
+ "override_path": "hello"
+ },
+ {
+ "name": "Hello World using HappyBase",
+ "description": "This sample demonstrates using the Google Cloud Client Library HappyBase package, an implementation of the HappyBase API to connect to and interact with Cloud Bigtable. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello-happybase",
+ "file": "main.py",
+ "runnable": true,
+      "custom_content": "usage: main.py [-h] [--table TABLE] project_id instance_id Demonstrates how to connect to Cloud Bigtable and run some basic operations. Prerequisites: - Create a Cloud Bigtable cluster. https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google Application Default Credentials. https://developers.google.com/identity/protocols/application-default-credentials positional arguments: project_id Your Cloud Platform project ID. instance_id ID of the Cloud Bigtable instance to connect to. optional arguments: -h, --help show this help message and exit --table TABLE Table to create and destroy. (default: Hello-Bigtable)",
+ "override_path": "hello_happybase"
+ },
+ {
+ "name": "cbt Command Demonstration",
+ "description": "This page explains how to use the cbt command to connect to a Cloud Bigtable instance, perform basic administrative tasks, and read and write data in a table. More information about this quickstart is available at https://cloud.google.com/bigtable/docs/quickstart-cbt",
+ "file": "instanceadmin.py",
+ "runnable": true,
+      "custom_content": "usage: instanceadmin.py [-h] [run] [dev-instance] [del-instance] [add-cluster] [del-cluster] project_id instance_id cluster_id Demonstrates how to connect to Cloud Bigtable and run some basic operations. Prerequisites: - Create a Cloud Bigtable cluster. https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google Application Default Credentials. https://developers.google.com/identity/protocols/application-default-credentials positional arguments: project_id Your Cloud Platform project ID. instance_id ID of the Cloud Bigtable instance to connect to. optional arguments: -h, --help show this help message and exit --table TABLE Table to create and destroy. (default: Hello-Bigtable)",
+ "override_path": "instanceadmin"
+ },
+ {
+ "name": "Metric Scaler",
+ "description": "This sample demonstrates how to use Stackdriver Monitoring to scale Cloud Bigtable based on CPU usage.",
+ "file": "metricscaler.py",
+ "runnable": true,
+      "custom_content": "Scales Cloud Bigtable clusters based on CPU usage. positional arguments: bigtable_instance ID of the Cloud Bigtable instance to connect to. bigtable_cluster ID of the Cloud Bigtable cluster to connect to. optional arguments: -h, --help show this help message and exit --high_cpu_threshold HIGH_CPU_THRESHOLD If Cloud Bigtable CPU usage is above this threshold, scale up --low_cpu_threshold LOW_CPU_THRESHOLD If Cloud Bigtable CPU usage is below this threshold, scale down --short_sleep SHORT_SLEEP How long to sleep in seconds between checking metrics after no scale operation --long_sleep LONG_SLEEP How long to sleep in seconds between checking metrics after a scaling operation",
+ "override_path": "metricscaler"
+ },
+ {
+ "name": "Quickstart",
+      "description": "Demonstrates use of Cloud Bigtable. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.",
+ "file": "main.py",
+ "runnable": true,
+      "custom_content": "positional arguments: project_id Your Cloud Platform project ID. instance_id ID of the Cloud Bigtable instance to connect to. optional arguments: -h, --help show this help message and exit --table TABLE Existing table used in the quickstart. (default: my-table)",
+ "override_path": "quickstart"
+ },
+ {
+ "name": "Quickstart using HappyBase",
+      "description": "Demonstrates use of Cloud Bigtable using HappyBase. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.",
+ "file": "main.py",
+ "runnable": true,
+      "custom_content": "positional arguments: project_id Your Cloud Platform project ID. instance_id ID of the Cloud Bigtable instance to connect to. optional arguments: -h, --help show this help message and exit --table TABLE Existing table used in the quickstart. (default: my-table) usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id Demonstrates how to connect to Cloud Bigtable and run some basic operations. Prerequisites: - Create a Cloud Bigtable cluster. https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google Application Default Credentials. https://developers.google.com/identity/protocols/application-default-credentials positional arguments: project_id Your Cloud Platform project ID. instance_id ID of the Cloud Bigtable instance to connect to. optional arguments: -h, --help show this help message and exit --table TABLE Table to create and destroy. (default: Hello-Bigtable)",
+ "override_path": "tableadmin"
+ }
+ ],
+ "default_version": "v2",
+ "codeowner_team": "@googleapis/api-bigtable @googleapis/api-bigtable-partners",
+ "api_shortname": "bigtable"
+}
diff --git a/packages/google-cloud-bigtable/.librarian/generator-input/librarian.py b/packages/google-cloud-bigtable/.librarian/generator-input/librarian.py
new file mode 100644
index 000000000000..5b943d24bd96
--- /dev/null
+++ b/packages/google-cloud-bigtable/.librarian/generator-input/librarian.py
@@ -0,0 +1,266 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This script is used to synthesize generated parts of this library."""
+
+from pathlib import Path
+import re
+import textwrap
+from typing import List, Optional
+
+import synthtool as s
+from synthtool import gcp, _tracked_paths
+from synthtool.languages import python
+from synthtool.sources import templates
+
+common = gcp.CommonTemplates()
+
+# These flags are needed because certain post-processing operations
+# append things after a certain line of text, and can infinitely loop
+# in a Github PR. We use these flags to only do those operations
+# on fresh copies of files found in googleapis-gen, and not on user-submitted
+# changes.
+is_fresh_admin_copy = False
+is_fresh_admin_v2_copy = False
+is_fresh_admin_docs_copy = False
+
+for library in s.get_staging_dirs("v2"):
+ s.move(library / "google/cloud/bigtable_v2")
+ is_fresh_admin_copy = \
+ s.move(library / "google/cloud/bigtable_admin")
+ is_fresh_admin_v2_copy = \
+ s.move(library / "google/cloud/bigtable_admin_v2")
+ s.move(library / "tests")
+ s.move(library / "samples")
+ s.move(library / "scripts")
+ is_fresh_admin_docs_copy = \
+ s.move(library / "docs/bigtable_admin_v2", destination="docs/admin_client")
+
+s.remove_staging_dirs()
+
+# ----------------------------------------------------------------------------
+# Add templated files
+# ----------------------------------------------------------------------------
+templated_files = common.py_library(
+ samples=True, # set to True only if there are samples
+ split_system_tests=True,
+ microgenerator=True,
+ cov_level=99,
+ system_test_external_dependencies=[
+ "pytest-asyncio==0.21.2",
+ ],
+ system_test_python_versions=["3.9"],
+ unit_test_python_versions=["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13", "3.14"],
+ default_python_version="3.13",
+)
+
+s.move(templated_files, excludes=[".coveragerc", "README.rst", ".github/**", ".kokoro/**", "noxfile.py", "renovate.json"])
+
+
+s.shell.run(["nox", "-s", "blacken"], hide_output=False)
+
+# ----------------------------------------------------------------------------
+# Always supply app_profile_id in routing headers: https://github.com/googleapis/python-bigtable/pull/1109
+# TODO: remove after backend no longer requires empty strings
+# ----------------------------------------------------------------------------
+for file in ["async_client.py", "client.py"]:
+ s.replace(
+ f"google/cloud/bigtable_v2/services/bigtable/{file}",
+ "if request.app_profile_id:",
+ "if True: # always attach app_profile_id, even if empty string"
+ )
+# fix tests
+s.replace(
+ "tests/unit/gapic/bigtable_v2/test_bigtable.py",
+ 'assert \(\n\s*gapic_v1\.routing_header\.to_grpc_metadata\(expected_headers\) in kw\["metadata"\]\n.*',
+ """# assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
+ )
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])"""
+)
+s.replace(
+ "tests/unit/gapic/bigtable_v2/test_bigtable.py",
+ 'expected_headers = {"name": "projects/sample1/instances/sample2"}',
+ """expected_headers = {
+ "name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
+ }"""
+)
+s.replace(
+ "tests/unit/gapic/bigtable_v2/test_bigtable.py",
+ """
+ expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3"
+ }
+""",
+ """
+ expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
+ }
+"""
+)
+
+# ----------------------------------------------------------------------------
+# Samples templates
+# ----------------------------------------------------------------------------
+
+python.py_samples(skip_readmes=True)
+
+# --------------------------------------------------------------------------
+# Admin Overlay work
+# --------------------------------------------------------------------------
+
+# Add overlay imports to top level __init__.py files in admin_v2 and admin at the end
+# of each file, after the __all__ definition. These changes should only be done on fresh
+# copies of the __init__.py files.
+def add_overlay_to_init_py(init_py_location, import_statements, should_add):
+ if should_add:
+ s.replace(
+ init_py_location,
+ r"(?s)(^__all__ = \(.*\)$)",
+ r"\1\n\n" + import_statements
+ )
+
+add_overlay_to_init_py(
+ "google/cloud/bigtable_admin_v2/__init__.py",
+ """from .overlay import * # noqa: F403\n
+__all__ += overlay.__all__ # noqa: F405""",
+ is_fresh_admin_v2_copy,
+)
+
+add_overlay_to_init_py(
+ "google/cloud/bigtable_admin/__init__.py",
+ """import google.cloud.bigtable_admin_v2.overlay # noqa: F401
+from google.cloud.bigtable_admin_v2.overlay import * # noqa: F401, F403
+
+__all__ += google.cloud.bigtable_admin_v2.overlay.__all__""",
+ is_fresh_admin_copy,
+)
+
+# Replace all instances of BaseBigtableTableAdminClient/BaseBigtableAdminAsyncClient
+# in samples and docstrings with BigtableTableAdminClient/BigtableTableAdminAsyncClient
+s.replace(
+ [
+ "google/cloud/bigtable_admin_v2/services/*/client.py",
+ "google/cloud/bigtable_admin_v2/services/*/async_client.py",
+ "samples/generated_samples/bigtableadmin_v2_*.py"
+ ],
+ r"client = bigtable_admin_v2\.Base(BigtableTableAdmin(Async)?Client\(\))",
+ r"client = bigtable_admin_v2.\1"
+)
+
+# Fix an improperly formatted table that breaks nox -s docs.
+s.replace(
+ "google/cloud/bigtable_admin_v2/types/table.py",
+ """ For example, if \\\\_key =
+ "some_id#2024-04-30#\\\\x00\\\\x13\\\\x00\\\\xf3" with the following
+ schema: \\{ fields \\{ field_name: "id" type \\{ string \\{
+ encoding: utf8_bytes \\{\\} \\} \\} \\} fields \\{ field_name: "date"
+ type \\{ string \\{ encoding: utf8_bytes \\{\\} \\} \\} \\} fields \\{
+ field_name: "product_code" type \\{ int64 \\{ encoding:
+ big_endian_bytes \\{\\} \\} \\} \\} encoding \\{ delimited_bytes \\{
+ delimiter: "#" \\} \\} \\}
+
+ \\| The decoded key parts would be: id = "some_id", date =
+ "2024-04-30", product_code = 1245427 The query "SELECT
+ \\\\_key, product_code FROM table" will return two columns:
+ /------------------------------------------------------
+ \\| \\\\\\| \\\\_key \\\\\\| product_code \\\\\\| \\\\\\|
+ --------------------------------------\\|--------------\\\\\\| \\\\\\|
+ "some_id#2024-04-30#\\\\x00\\\\x13\\\\x00\\\\xf3" \\\\\\| 1245427 \\\\\\|
+ ------------------------------------------------------/
+""",
+ textwrap.indent(
+ """For example, if \\\\_key =
+"some_id#2024-04-30#\\\\x00\\\\x13\\\\x00\\\\xf3" with the following
+schema:
+
+.. code-block::
+
+ {
+ fields {
+ field_name: "id"
+ type { string { encoding: utf8_bytes {} } }
+ }
+ fields {
+ field_name: "date"
+ type { string { encoding: utf8_bytes {} } }
+ }
+ fields {
+ field_name: "product_code"
+ type { int64 { encoding: big_endian_bytes {} } }
+ }
+ encoding { delimited_bytes { delimiter: "#" } }
+ }
+
+The decoded key parts would be:
+id = "some_id", date = "2024-04-30", product_code = 1245427
+The query "SELECT \\\\_key, product_code FROM table" will return
+two columns:
+
++========================================+==============+
+| \\\\_key | product_code |
++========================================+==============+
+| "some_id#2024-04-30#\\\\x00\\\\x13\\\\x00\\\\xf3" | 1245427 |
++----------------------------------------+--------------+
+""",
+ " " * 12,
+ ),
+)
+
+# These changes should only be done on fresh copies of the .rst files
+# from googleapis-gen.
+if is_fresh_admin_docs_copy:
+ # Change the subpackage for clients with overridden internal methods in them
+ # from service to overlay.service.
+ s.replace(
+ "docs/admin_client/bigtable_table_admin.rst",
+ r"^\.\. automodule:: google\.cloud\.bigtable_admin_v2\.services\.bigtable_table_admin$",
+ ".. automodule:: google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin"
+ )
+
+ # Add overlay types to types documentation
+ s.replace(
+ "docs/admin_client/types_.rst",
+ r"""(\.\. automodule:: google\.cloud\.bigtable_admin_v2\.types
+ :members:
+ :show-inheritance:)
+""",
+ r"""\1
+
+.. automodule:: google.cloud.bigtable_admin_v2.overlay.types
+ :members:
+ :show-inheritance:
+"""
+ )
+
+# These changes should only be done on a fresh copy of table.py
+# from googleapis-gen.
+if is_fresh_admin_v2_copy:
+ # Add the oneof_message import into table.py for GcRule
+ s.replace(
+ "google/cloud/bigtable_admin_v2/types/table.py",
+ r"^(from google\.cloud\.bigtable_admin_v2\.types import .+)$",
+ r"""\1
+from google.cloud.bigtable_admin_v2.utils import oneof_message""",
+ )
+
+ # Re-subclass GcRule in table.py
+ s.replace(
+ "google/cloud/bigtable_admin_v2/types/table.py",
+ r"class GcRule\(proto\.Message\)\:",
+ "class GcRule(oneof_message.OneofMessage):",
+ )
diff --git a/packages/google-cloud-bigtable/.librarian/generator-input/noxfile.py b/packages/google-cloud-bigtable/.librarian/generator-input/noxfile.py
new file mode 100644
index 000000000000..16c8a6327788
--- /dev/null
+++ b/packages/google-cloud-bigtable/.librarian/generator-input/noxfile.py
@@ -0,0 +1,569 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Generated by synthtool. DO NOT EDIT!
+
+from __future__ import absolute_import
+
+import os
+import pathlib
+import re
+import shutil
+from typing import Dict, List
+import warnings
+
+import nox
+
+FLAKE8_VERSION = "flake8==6.1.0"
+BLACK_VERSION = "black[jupyter]==23.3.0"
+ISORT_VERSION = "isort==5.11.0"
+LINT_PATHS = ["google", "tests", "noxfile.py", "setup.py"]
+
+DEFAULT_PYTHON_VERSION = "3.13"
+
+UNIT_TEST_PYTHON_VERSIONS: List[str] = [
+ "3.7",
+ "3.8",
+ "3.9",
+ "3.10",
+ "3.11",
+ "3.12",
+ "3.13",
+ "3.14",
+]
+UNIT_TEST_STANDARD_DEPENDENCIES = [
+ "mock",
+ "asyncmock",
+ "pytest",
+ "pytest-cov",
+ "pytest-asyncio",
+ BLACK_VERSION,
+ "autoflake",
+]
+UNIT_TEST_EXTERNAL_DEPENDENCIES: List[str] = []
+UNIT_TEST_LOCAL_DEPENDENCIES: List[str] = []
+UNIT_TEST_DEPENDENCIES: List[str] = []
+UNIT_TEST_EXTRAS: List[str] = []
+UNIT_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {}
+
+SYSTEM_TEST_PYTHON_VERSIONS: List[str] = ["3.9", "3.14"]
+SYSTEM_TEST_STANDARD_DEPENDENCIES: List[str] = [
+ "mock",
+ "pytest",
+ "google-cloud-testutils",
+]
+SYSTEM_TEST_EXTERNAL_DEPENDENCIES: List[str] = [
+ "pytest-asyncio==0.21.2",
+ BLACK_VERSION,
+ "pyyaml==6.0.2",
+]
+SYSTEM_TEST_LOCAL_DEPENDENCIES: List[str] = []
+SYSTEM_TEST_DEPENDENCIES: List[str] = []
+SYSTEM_TEST_EXTRAS: List[str] = []
+SYSTEM_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {}
+
+CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
+
+# 'docfx' is excluded since it only needs to run in 'docs-presubmit'
+nox.options.sessions = [
+ "unit-3.9",
+ "unit-3.10",
+ "unit-3.11",
+ "unit-3.12",
+ "unit-3.13",
+ "unit-3.14",
+ "system_emulated",
+ "system",
+ "mypy",
+ "cover",
+ "lint",
+ "lint_setup_py",
+ "blacken",
+ "docs",
+ "format",
+]
+
+# Error if a python version is missing
+nox.options.error_on_missing_interpreters = True
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def lint(session):
+ """Run linters.
+
+ Returns a failure if the linters find linting errors or sufficiently
+ serious code quality issues.
+ """
+ session.install(FLAKE8_VERSION, BLACK_VERSION)
+ session.run(
+ "black",
+ "--check",
+ *LINT_PATHS,
+ )
+ session.run("flake8", "google", "tests")
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def blacken(session):
+ """Run black. Format code to uniform standard."""
+ session.install(BLACK_VERSION)
+ session.run(
+ "black",
+ *LINT_PATHS,
+ )
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def format(session):
+ """
+ Run isort to sort imports. Then run black
+ to format code to uniform standard.
+ """
+ session.install(BLACK_VERSION, ISORT_VERSION)
+ # Use the --fss option to sort imports using strict alphabetical order.
+ # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections
+ session.run(
+ "isort",
+ "--fss",
+ *LINT_PATHS,
+ )
+ session.run(
+ "black",
+ *LINT_PATHS,
+ )
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def mypy(session):
+ """Verify type hints are mypy compatible."""
+ session.install("-e", ".")
+ session.install(
+ "mypy", "types-setuptools", "types-protobuf", "types-mock", "types-requests"
+ )
+ session.install("google-cloud-testutils")
+ session.run("mypy", "-p", "google.cloud.bigtable.data")
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def lint_setup_py(session):
+ """Verify that setup.py is valid (including RST check)."""
+ session.install("setuptools", "docutils", "pygments")
+ session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
+
+
+def install_unittest_dependencies(session, *constraints):
+ standard_deps = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_DEPENDENCIES
+ session.install(*standard_deps, *constraints)
+
+ if UNIT_TEST_EXTERNAL_DEPENDENCIES:
+ warnings.warn(
+ "'unit_test_external_dependencies' is deprecated. Instead, please "
+ "use 'unit_test_dependencies' or 'unit_test_local_dependencies'.",
+ DeprecationWarning,
+ )
+ session.install(*UNIT_TEST_EXTERNAL_DEPENDENCIES, *constraints)
+
+ if UNIT_TEST_LOCAL_DEPENDENCIES:
+ session.install(*UNIT_TEST_LOCAL_DEPENDENCIES, *constraints)
+
+ if UNIT_TEST_EXTRAS_BY_PYTHON:
+ extras = UNIT_TEST_EXTRAS_BY_PYTHON.get(session.python, [])
+ elif UNIT_TEST_EXTRAS:
+ extras = UNIT_TEST_EXTRAS
+ else:
+ extras = []
+
+ if extras:
+ session.install("-e", f".[{','.join(extras)}]", *constraints)
+ else:
+ session.install("-e", ".", *constraints)
+
+
+@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
+@nox.parametrize(
+ "protobuf_implementation",
+ ["python", "upb", "cpp"],
+)
+def unit(session, protobuf_implementation):
+ # Install all test dependencies, then install this package in-place.
+ py_version = tuple([int(v) for v in session.python.split(".")])
+ if protobuf_implementation == "cpp" and py_version >= (3, 11):
+ session.skip("cpp implementation is not supported in python 3.11+")
+
+ constraints_path = str(
+ CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
+ )
+ install_unittest_dependencies(session, "-c", constraints_path)
+
+ # TODO(https://github.com/googleapis/synthtool/issues/1976):
+ # Remove the 'cpp' implementation once support for Protobuf 3.x is dropped.
+ # The 'cpp' implementation requires Protobuf<4.
+ if protobuf_implementation == "cpp":
+ session.install("protobuf<4")
+
+ # Run py.test against the unit tests.
+ session.run(
+ "py.test",
+ "--quiet",
+ f"--junitxml=unit_{session.python}_sponge_log.xml",
+ "--cov=google",
+ "--cov=tests/unit",
+ "--cov-append",
+ "--cov-config=.coveragerc",
+ "--cov-report=",
+ "--cov-fail-under=0",
+ os.path.join("tests", "unit"),
+ *session.posargs,
+ env={
+ "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation,
+ },
+ )
+
+
+def install_systemtest_dependencies(session, *constraints):
+ # Use pre-release gRPC for system tests.
+ # Exclude version 1.52.0rc1 which has a known issue.
+ # See https://github.com/grpc/grpc/issues/32163
+ session.install("--pre", "grpcio!=1.52.0rc1")
+
+ session.install(*SYSTEM_TEST_STANDARD_DEPENDENCIES, *constraints)
+
+ if SYSTEM_TEST_EXTERNAL_DEPENDENCIES:
+ session.install(*SYSTEM_TEST_EXTERNAL_DEPENDENCIES, *constraints)
+
+ if SYSTEM_TEST_LOCAL_DEPENDENCIES:
+ session.install("-e", *SYSTEM_TEST_LOCAL_DEPENDENCIES, *constraints)
+
+ if SYSTEM_TEST_DEPENDENCIES:
+ session.install("-e", *SYSTEM_TEST_DEPENDENCIES, *constraints)
+
+ if SYSTEM_TEST_EXTRAS_BY_PYTHON:
+ extras = SYSTEM_TEST_EXTRAS_BY_PYTHON.get(session.python, [])
+ elif SYSTEM_TEST_EXTRAS:
+ extras = SYSTEM_TEST_EXTRAS
+ else:
+ extras = []
+
+ if extras:
+ session.install("-e", f".[{','.join(extras)}]", *constraints)
+ else:
+ session.install("-e", ".", *constraints)
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def system_emulated(session):
+ import subprocess
+ import signal
+
+ try:
+ subprocess.call(["gcloud", "--version"])
+ except OSError:
+ session.skip("gcloud not found but required for emulator support")
+
+    # Currently, CI/CD doesn't have the beta component of gcloud.
+ subprocess.call(["gcloud", "components", "install", "beta", "bigtable"])
+
+ hostport = "localhost:8789"
+ session.env["BIGTABLE_EMULATOR_HOST"] = hostport
+
+ p = subprocess.Popen(
+ ["gcloud", "beta", "emulators", "bigtable", "start", "--host-port", hostport]
+ )
+
+ try:
+ system(session)
+ finally:
+ # Stop Emulator
+ os.killpg(os.getpgid(p.pid), signal.SIGKILL)
+
+
+@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
+@nox.parametrize("client_type", ["async", "sync", "legacy"])
+def conformance(session, client_type):
+ # install dependencies
+ constraints_path = str(
+ CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
+ )
+ install_unittest_dependencies(session, "-c", constraints_path)
+ with session.chdir("test_proxy"):
+ # download the conformance test suite
+ session.run(
+ "bash",
+ "-e",
+ "run_tests.sh",
+ external=True,
+ env={"CLIENT_TYPE": client_type},
+ )
+
+
+@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
+def system(session):
+ """Run the system test suite."""
+ constraints_path = str(
+ CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
+ )
+ system_test_path = os.path.join("tests", "system.py")
+ system_test_folder_path = os.path.join("tests", "system")
+
+ # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
+ if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
+ session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
+ # Install pyopenssl for mTLS testing.
+ if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true":
+ session.install("pyopenssl")
+
+ system_test_exists = os.path.exists(system_test_path)
+ system_test_folder_exists = os.path.exists(system_test_folder_path)
+ # Sanity check: only run tests if found.
+ if not system_test_exists and not system_test_folder_exists:
+ session.skip("System tests were not found")
+
+ install_systemtest_dependencies(session, "-c", constraints_path)
+
+ # Run py.test against the system tests.
+ if system_test_exists:
+ session.run(
+ "py.test",
+ "--quiet",
+ f"--junitxml=system_{session.python}_sponge_log.xml",
+ system_test_path,
+ *session.posargs,
+ )
+ if system_test_folder_exists:
+ session.run(
+ "py.test",
+ "--quiet",
+ f"--junitxml=system_{session.python}_sponge_log.xml",
+ system_test_folder_path,
+ *session.posargs,
+ )
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def cover(session):
+ """Run the final coverage report.
+
+ This outputs the coverage report aggregating coverage from the unit
+ test runs (not system test runs), and then erases coverage data.
+ """
+ session.install("coverage", "pytest-cov")
+ session.run("coverage", "report", "--show-missing", "--fail-under=99")
+
+ session.run("coverage", "erase")
+
+
+@nox.session(python="3.10")
+def docs(session):
+ """Build the docs for this library."""
+
+ session.install("-e", ".")
+ session.install(
+ # We need to pin to specific versions of the `sphinxcontrib-*` packages
+ # which still support sphinx 4.x.
+ # See https://github.com/googleapis/sphinx-docfx-yaml/issues/344
+ # and https://github.com/googleapis/sphinx-docfx-yaml/issues/345.
+ "sphinxcontrib-applehelp==1.0.4",
+ "sphinxcontrib-devhelp==1.0.2",
+ "sphinxcontrib-htmlhelp==2.0.1",
+ "sphinxcontrib-qthelp==1.0.3",
+ "sphinxcontrib-serializinghtml==1.1.5",
+ "sphinx==4.5.0",
+ "alabaster",
+ "recommonmark",
+ )
+
+ shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
+ session.run(
+ "sphinx-build",
+ "-W", # warnings as errors
+ "-T", # show full traceback on exception
+ "-N", # no colors
+ "-b",
+ "html",
+ "-d",
+ os.path.join("docs", "_build", "doctrees", ""),
+ os.path.join("docs", ""),
+ os.path.join("docs", "_build", "html", ""),
+ )
+
+
+@nox.session(python="3.10")
+def docfx(session):
+ """Build the docfx yaml files for this library."""
+
+ session.install("-e", ".")
+ session.install(
+ # We need to pin to specific versions of the `sphinxcontrib-*` packages
+ # which still support sphinx 4.x.
+ # See https://github.com/googleapis/sphinx-docfx-yaml/issues/344
+ # and https://github.com/googleapis/sphinx-docfx-yaml/issues/345.
+ "sphinxcontrib-applehelp==1.0.4",
+ "sphinxcontrib-devhelp==1.0.2",
+ "sphinxcontrib-htmlhelp==2.0.1",
+ "sphinxcontrib-qthelp==1.0.3",
+ "sphinxcontrib-serializinghtml==1.1.5",
+ "gcp-sphinx-docfx-yaml",
+ "alabaster",
+ "recommonmark",
+ )
+
+ shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
+ session.run(
+ "sphinx-build",
+ "-T", # show full traceback on exception
+ "-N", # no colors
+ "-D",
+ (
+ "extensions=sphinx.ext.autodoc,"
+ "sphinx.ext.autosummary,"
+ "docfx_yaml.extension,"
+ "sphinx.ext.intersphinx,"
+ "sphinx.ext.coverage,"
+ "sphinx.ext.napoleon,"
+ "sphinx.ext.todo,"
+ "sphinx.ext.viewcode,"
+ "recommonmark"
+ ),
+ "-b",
+ "html",
+ "-d",
+ os.path.join("docs", "_build", "doctrees", ""),
+ os.path.join("docs", ""),
+ os.path.join("docs", "_build", "html", ""),
+ )
+ # Customization: Add extra sections to the table of contents for the Classic vs Async clients
+ session.install("pyyaml")
+ session.run("python", "docs/scripts/patch_devsite_toc.py")
+
+
+@nox.session(python="3.14")
+@nox.parametrize(
+ "protobuf_implementation",
+ ["python", "upb", "cpp"],
+)
+def prerelease_deps(session, protobuf_implementation):
+ """Run all tests with prerelease versions of dependencies installed."""
+
+ py_version = tuple([int(v) for v in session.python.split(".")])
+ if protobuf_implementation == "cpp" and py_version >= (3, 11):
+ session.skip("cpp implementation is not supported in python 3.11+")
+
+ # Install all dependencies
+ session.install("-e", ".[all, tests, tracing]")
+ unit_deps_all = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_EXTERNAL_DEPENDENCIES
+ session.install(*unit_deps_all)
+ system_deps_all = (
+ SYSTEM_TEST_STANDARD_DEPENDENCIES + SYSTEM_TEST_EXTERNAL_DEPENDENCIES
+ )
+ session.install(*system_deps_all)
+
+ # Because we test minimum dependency versions on the minimum Python
+    # version, the first version we test with in the unit test sessions has a
+ # constraints file containing all dependencies and extras.
+ with open(
+ CURRENT_DIRECTORY
+ / "testing"
+ / f"constraints-{UNIT_TEST_PYTHON_VERSIONS[0]}.txt",
+ encoding="utf-8",
+ ) as constraints_file:
+ constraints_text = constraints_file.read()
+
+ # Ignore leading whitespace and comment lines.
+ constraints_deps = [
+ match.group(1)
+ for match in re.finditer(
+ r"^\s*(\S+)(?===\S+)", constraints_text, flags=re.MULTILINE
+ )
+ ]
+
+ session.install(*constraints_deps)
+
+ prerel_deps = [
+ "protobuf",
+ # dependency of grpc
+ "six",
+ "grpc-google-iam-v1",
+ "googleapis-common-protos",
+ "grpcio",
+ "grpcio-status",
+ "google-api-core",
+ "google-auth",
+ "proto-plus",
+ "google-cloud-testutils",
+        # dependencies of google-cloud-testutils
+ "click",
+ ]
+
+ for dep in prerel_deps:
+ session.install("--pre", "--no-deps", "--upgrade", dep)
+
+ # Remaining dependencies
+ other_deps = [
+ "requests",
+ ]
+ session.install(*other_deps)
+
+ # Print out prerelease package versions
+ session.run(
+ "python", "-c", "import google.protobuf; print(google.protobuf.__version__)"
+ )
+ session.run("python", "-c", "import grpc; print(grpc.__version__)")
+ session.run("python", "-c", "import google.auth; print(google.auth.__version__)")
+
+ session.run(
+ "py.test",
+ "tests/unit",
+ env={
+ "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation,
+ },
+ )
+
+ system_test_path = os.path.join("tests", "system.py")
+ system_test_folder_path = os.path.join("tests", "system")
+
+ # Only run system tests if found.
+ if os.path.exists(system_test_path):
+ session.run(
+ "py.test",
+ "--verbose",
+ f"--junitxml=system_{session.python}_sponge_log.xml",
+ system_test_path,
+ *session.posargs,
+ env={
+ "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation,
+ },
+ )
+ if os.path.exists(system_test_folder_path):
+ session.run(
+ "py.test",
+ "--verbose",
+ f"--junitxml=system_{session.python}_sponge_log.xml",
+ system_test_folder_path,
+ *session.posargs,
+ env={
+ "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation,
+ },
+ )
+
+
+@nox.session(python="3.10")
+def generate_sync(session):
+ """
+ Re-generate sync files for the library from CrossSync-annotated async source
+ """
+ session.install(BLACK_VERSION)
+ session.install("autoflake")
+ session.run("python", ".cross_sync/generate.py", ".")
diff --git a/packages/google-cloud-bigtable/.librarian/generator-input/setup.py b/packages/google-cloud-bigtable/.librarian/generator-input/setup.py
new file mode 100644
index 000000000000..cac533db6427
--- /dev/null
+++ b/packages/google-cloud-bigtable/.librarian/generator-input/setup.py
@@ -0,0 +1,104 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import os
+
+import setuptools
+
+
+package_root = os.path.abspath(os.path.dirname(__file__))
+
+# Package metadata.
+
+name = "google-cloud-bigtable"
+description = "Google Cloud Bigtable API client library"
+
+version = {}
+with open(os.path.join(package_root, "google/cloud/bigtable/gapic_version.py")) as fp:
+ exec(fp.read(), version)
+version = version["__version__"]
+
+
+# Should be one of:
+# 'Development Status :: 3 - Alpha'
+# 'Development Status :: 4 - Beta'
+# 'Development Status :: 5 - Production/Stable'
+release_status = "Development Status :: 5 - Production/Stable"
+dependencies = [
+ "google-api-core[grpc] >= 2.17.0, <3.0.0",
+ "google-cloud-core >= 1.4.4, <3.0.0",
+ "google-auth >= 2.23.0, <3.0.0,!=2.24.0,!=2.25.0",
+ "grpc-google-iam-v1 >= 0.12.4, <1.0.0",
+ "proto-plus >= 1.22.3, <2.0.0",
+ "proto-plus >= 1.25.0, <2.0.0; python_version>='3.13'",
+ "protobuf>=3.20.2,<7.0.0,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5",
+ "google-crc32c>=1.5.0, <2.0.0dev",
+]
+extras = {"libcst": "libcst >= 0.2.5"}
+
+
+# Setup boilerplate below this line.
+
+package_root = os.path.abspath(os.path.dirname(__file__))
+
+readme_filename = os.path.join(package_root, "README.rst")
+with io.open(readme_filename, encoding="utf-8") as readme_file:
+ readme = readme_file.read()
+
+# Only include packages under the 'google' namespace. Do not include tests,
+# benchmarks, etc.
+packages = [
+ package
+ for package in setuptools.find_namespace_packages()
+ if package.startswith("google")
+]
+
+setuptools.setup(
+ name=name,
+ version=version,
+ description=description,
+ long_description=readme,
+ author="Google LLC",
+ author_email="googleapis-packages@google.com",
+ license="Apache 2.0",
+ url="https://github.com/googleapis/python-bigtable",
+ classifiers=[
+ release_status,
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: Apache Software License",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "Programming Language :: Python :: 3.13",
+ "Programming Language :: Python :: 3.14",
+ "Operating System :: OS Independent",
+ "Topic :: Internet",
+ ],
+ platforms="Posix; MacOS X; Windows",
+ packages=packages,
+ install_requires=dependencies,
+ extras_require=extras,
+ scripts=[
+ "scripts/fixup_bigtable_v2_keywords.py",
+ "scripts/fixup_admin_v2_keywords.py",
+ ],
+ python_requires=">=3.7",
+ include_package_data=True,
+ zip_safe=False,
+)
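For context on the version lookup near the top of this setup.py: it executes `google/cloud/bigtable/gapic_version.py` into a dict instead of importing the package. A minimal sketch of that mechanism, under the assumption that `gapic_version.py` contains only a `__version__` string:

```python
# Illustrative sketch, not part of the patch. Assumes
# google/cloud/bigtable/gapic_version.py holds a single assignment such as:
#     __version__ = "2.34.0"
version_globals = {}
with open("google/cloud/bigtable/gapic_version.py") as fp:
    # Execute the file's source so __version__ lands in version_globals,
    # without importing google.cloud.bigtable (or its dependencies).
    exec(fp.read(), version_globals)

print(version_globals["__version__"])  # e.g. "2.34.0"
```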
diff --git a/packages/google-cloud-bigtable/.librarian/state.yaml b/packages/google-cloud-bigtable/.librarian/state.yaml
new file mode 100644
index 000000000000..049e7b1cf7b6
--- /dev/null
+++ b/packages/google-cloud-bigtable/.librarian/state.yaml
@@ -0,0 +1,40 @@
+image: us-central1-docker.pkg.dev/cloud-sdk-librarian-prod/images-prod/python-librarian-generator@sha256:8e2c32496077054105bd06c54a59d6a6694287bc053588e24debe6da6920ad91
+libraries:
+ - id: google-cloud-bigtable
+ version: 2.34.0
+ last_generated_commit: a17b84add8318f780fcc8a027815d5fee644b9f7
+ apis:
+ - path: google/bigtable/v2
+ service_config: bigtable_v2.yaml
+ - path: google/bigtable/admin/v2
+ service_config: bigtableadmin_v2.yaml
+ source_roots:
+ - .
+ preserve_regex: []
+ remove_regex:
+ - ^.pre-commit-config.yaml
+ - ^.repo-metadata.json
+ - ^.trampolinerc
+ - ^docs/admin_client/bigtable
+ - ^docs/admin_client/services_.rst
+ - ^docs/admin_client/types_.rst
+ - ^docs/summary_overview.md
+ - ^google/cloud/bigtable_v2
+ - ^google/cloud/bigtable_admin/
+ - ^google/cloud/bigtable_admin_v2/services
+ - ^google/cloud/bigtable_admin_v2/types
+ - ^google/cloud/bigtable_admin_v2/__init__.py
+ - ^google/cloud/bigtable_admin_v2/gapic
+ - ^google/cloud/bigtable_admin_v2/py.typed
+ - ^samples/AUTHORING_GUIDE.md
+ - ^samples/CONTRIBUTING.md
+ - ^samples/generated_samples
+ - ^tests/unit/gapic
+ - ^noxfile.py
+ - ^scripts/fixup_bigtable
+ - ^setup.py
+ - ^SECURITY.md
+ - ^tests/__init__.py
+ - ^tests/unit/__init__.py
+ - ^tests/unit/gapic
+ tag_format: v{version}
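The `remove_regex` entries above are anchored patterns. Read as Python-style regexes matched against repo-relative paths (an assumption; the librarian tool's matching code is not part of this patch), they would filter generated files roughly like this:

```python
# Hypothetical illustration only: how anchored remove_regex patterns could be
# applied to repo-relative paths. The actual librarian matching logic is not
# shown in this patch.
import re

remove_regex = ["^google/cloud/bigtable_v2", "^tests/unit/gapic", "^setup.py"]

def is_removed(path: str) -> bool:
    """Return True if any pattern matches at the start of the path."""
    return any(re.match(pattern, path) for pattern in remove_regex)

print(is_removed("google/cloud/bigtable_v2/types/data.py"))  # True
print(is_removed("google/cloud/bigtable/gapic_version.py"))  # False
```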
diff --git a/packages/google-cloud-bigtable/.pre-commit-config.yaml b/packages/google-cloud-bigtable/.pre-commit-config.yaml
new file mode 100644
index 000000000000..1d74695f70b6
--- /dev/null
+++ b/packages/google-cloud-bigtable/.pre-commit-config.yaml
@@ -0,0 +1,31 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.0.1
+ hooks:
+ - id: trailing-whitespace
+ - id: end-of-file-fixer
+ - id: check-yaml
+- repo: https://github.com/psf/black
+ rev: 23.7.0
+ hooks:
+ - id: black
+- repo: https://github.com/pycqa/flake8
+ rev: 6.1.0
+ hooks:
+ - id: flake8
diff --git a/packages/google-cloud-bigtable/.repo-metadata.json b/packages/google-cloud-bigtable/.repo-metadata.json
new file mode 100644
index 000000000000..9de4b5f92bf5
--- /dev/null
+++ b/packages/google-cloud-bigtable/.repo-metadata.json
@@ -0,0 +1,80 @@
+{
+ "name": "bigtable",
+ "name_pretty": "Cloud Bigtable",
+ "product_documentation": "https://cloud.google.com/bigtable",
+ "client_documentation": "https://cloud.google.com/python/docs/reference/bigtable/latest",
+ "issue_tracker": "https://issuetracker.google.com/savedsearches/559777",
+ "release_level": "stable",
+ "language": "python",
+ "library_type": "GAPIC_COMBO",
+ "repo": "googleapis/python-bigtable",
+ "distribution_name": "google-cloud-bigtable",
+ "api_id": "bigtable.googleapis.com",
+ "requires_billing": true,
+ "samples": [
+ {
+ "name": "Hello World in Cloud Bigtable",
+ "description": "Demonstrates how to connect to Cloud Bigtable and run some basic operations. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello",
+ "file": "main.py",
+ "runnable": true,
+ "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id Demonstrates how to connect to Cloud Bigtable and run some basic operations. Prerequisites: - Create a Cloud Bigtable cluster. https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google Application Default Credentials. https://developers.google.com/identity/protocols/application-default- credentials
positional arguments: project_id Your Cloud Platform project ID. instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments: -h, --help show this help message and exit --table TABLE Table to create and destroy. (default: Hello-Bigtable)
",
+ "override_path": "hello"
+ },
+ {
+ "name": "Hello World using HappyBase",
+ "description": "This sample demonstrates using the Google Cloud Client Library HappyBase package, an implementation of the HappyBase API to connect to and interact with Cloud Bigtable. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello-happybase",
+ "file": "main.py",
+ "runnable": true,
+ "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id Demonstrates how to connect to Cloud Bigtable and run some basic operations. Prerequisites: - Create a Cloud Bigtable cluster. https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google Application Default Credentials. https://developers.google.com/identity/protocols/application-default- credentials
positional arguments: project_id Your Cloud Platform project ID. instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments: -h, --help show this help message and exit --table TABLE Table to create and destroy. (default: Hello-Bigtable)
",
+ "override_path": "hello_happybase"
+ },
+ {
+ "name": "cbt Command Demonstration",
+ "description": "This page explains how to use the cbt command to connect to a Cloud Bigtable instance, perform basic administrative tasks, and read and write data in a table. More information about this quickstart is available at https://cloud.google.com/bigtable/docs/quickstart-cbt",
+ "file": "instanceadmin.py",
+ "runnable": true,
+ "custom_content": "
usage: instanceadmin.py [-h] [run] [dev-instance] [del-instance] [add-cluster] [del-cluster] project_id instance_id cluster_id Demonstrates how to connect to Cloud Bigtable and run some basic operations. Prerequisites: - Create a Cloud Bigtable cluster. https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google Application Default Credentials. https://developers.google.com/identity/protocols/application-default- credentials
positional arguments: project_id Your Cloud Platform project ID. instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments: -h, --help show this help message and exit --table TABLE Table to create and destroy. (default: Hello-Bigtable)
",
+ "override_path": "instanceadmin"
+ },
+ {
+ "name": "Metric Scaler",
+ "description": "This sample demonstrates how to use Stackdriver Monitoring to scale Cloud Bigtable based on CPU usage.",
+ "file": "metricscaler.py",
+ "runnable": true,
+ "custom_content": "
Scales Cloud Bigtable clusters based on CPU usage.
positional arguments: bigtable_instance ID of the Cloud Bigtable instance to connect to. bigtable_cluster ID of the Cloud Bigtable cluster to connect to.
optional arguments: -h, --help show this help message and exit --high_cpu_threshold HIGH_CPU_THRESHOLD If Cloud Bigtable CPU usage is above this threshold, scale up --low_cpu_threshold LOW_CPU_THRESHOLD If Cloud Bigtable CPU usage is below this threshold, scale down --short_sleep SHORT_SLEEP How long to sleep in seconds between checking metrics after no scale operation --long_sleep LONG_SLEEP How long to sleep in seconds between checking metrics after a scaling operation
",
+ "override_path": "metricscaler"
+ },
+ {
+ "name": "Quickstart",
+ "description": "Demonstrates of Cloud Bigtable. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.",
+ "file": "main.py",
+ "runnable": true,
+ "custom_content": "
positional arguments: project_id Your Cloud Platform project ID. instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments: -h, --help show this help message and exit --table TABLE Existing table used in the quickstart. (default: my-table)
",
+ "override_path": "quickstart"
+ },
+ {
+ "name": "Quickstart using HappyBase",
+ "description": "Demonstrates of Cloud Bigtable using HappyBase. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.",
+ "file": "main.py",
+ "runnable": true,
+ "custom_content": "
positional arguments: project_id Your Cloud Platform project ID. instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments: -h, --help show this help message and exit --table TABLE Existing table used in the quickstart. (default: my-table)usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations. Prerequisites: - Create a Cloud Bigtable cluster. https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google Application Default Credentials. https://developers.google.com/identity/protocols/application-default- credentials
positional arguments: project_id Your Cloud Platform project ID. instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments: -h, --help show this help message and exit --table TABLE Table to create and destroy. (default: Hello-Bigtable)
",
+ "override_path": "tableadmin"
+ }
+ ],
+ "default_version": "v2",
+ "codeowner_team": "@googleapis/api-bigtable @googleapis/api-bigtable-partners",
+ "api_shortname": "bigtable"
+}
diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md
new file mode 100644
index 000000000000..2a0251dc158f
--- /dev/null
+++ b/packages/google-cloud-bigtable/CHANGELOG.md
@@ -0,0 +1,1134 @@
+# Changelog
+
+[PyPI History][1]
+
+[1]: https://pypi.org/project/google-cloud-bigtable/#history
+
+## [2.34.0](https://github.com/googleapis/python-bigtable/compare/v2.33.0...v2.34.0) (2025-10-16)
+
+
+### Features
+
+* Add support for Python 3.14 ([#1217](https://github.com/googleapis/python-bigtable/issues/1217)) ([263332a](https://github.com/googleapis/python-bigtable/commit/263332af71a229cb4fa598008a708137086a6f67))
+
+## [2.33.0](https://github.com/googleapis/python-bigtable/compare/v2.32.0...v2.33.0) (2025-10-06)
+
+
+### Features
+
+* Add support for Proto and Enum types ([#1202](https://github.com/googleapis/python-bigtable/issues/1202)) ([34ceb86](https://github.com/googleapis/python-bigtable/commit/34ceb86007db08d453fa25cca4968d5b498ffcd6))
+* Expose universe_domain for tpc ([#1150](https://github.com/googleapis/python-bigtable/issues/1150)) ([451fd97](https://github.com/googleapis/python-bigtable/commit/451fd97e435218ffed47d39423680ffc4feccac4))
+
+
+### Bug Fixes
+
+* Fix instance registration cleanup on early iterator termination ([#1216](https://github.com/googleapis/python-bigtable/issues/1216)) ([bbfd746](https://github.com/googleapis/python-bigtable/commit/bbfd746c61a6362efa42c7899ec3e34ceb541c83))
+* Refactor channel refresh ([#1174](https://github.com/googleapis/python-bigtable/issues/1174)) ([6fa3008](https://github.com/googleapis/python-bigtable/commit/6fa30084058bc34d4487d1fee5c87d7795ff167a))
+
+## [2.32.0](https://github.com/googleapis/python-bigtable/compare/v2.31.0...v2.32.0) (2025-08-01)
+
+
+### Features
+
+* Add Idempotency to Cloud Bigtable MutateRowsRequest API ([#1143](https://github.com/googleapis/python-bigtable/issues/1143)) ([c3e3eb0](https://github.com/googleapis/python-bigtable/commit/c3e3eb0e4ce44ece72b150dc5822846627074fba))
+* Add support for AddToCell in Data Client ([#1147](https://github.com/googleapis/python-bigtable/issues/1147)) ([1a5b4b5](https://github.com/googleapis/python-bigtable/commit/1a5b4b514cadae5c83d61296314285d3774992c5))
+* Implement SQL support in test proxy ([#1106](https://github.com/googleapis/python-bigtable/issues/1106)) ([7a91bbf](https://github.com/googleapis/python-bigtable/commit/7a91bbfb9df23f7e93c40b88648840342af6f16f))
+* Modernized Bigtable Admin Client featuring selective GAPIC generation ([#1177](https://github.com/googleapis/python-bigtable/issues/1177)) ([58e7d37](https://github.com/googleapis/python-bigtable/commit/58e7d3782df6b13a42af053263afc575222a6b83))
+
+## [2.31.0](https://github.com/googleapis/python-bigtable/compare/v2.30.1...v2.31.0) (2025-05-22)
+
+
+### Features
+
+* Add deletion_protection support for LVs ([#1108](https://github.com/googleapis/python-bigtable/issues/1108)) ([c6d384d](https://github.com/googleapis/python-bigtable/commit/c6d384d4a104c182326e22dc3f10b7b905780dee))
+* Support authorized views ([#1034](https://github.com/googleapis/python-bigtable/issues/1034)) ([97a0198](https://github.com/googleapis/python-bigtable/commit/97a019833d82e617769c56761aa5548d3ab896b9))
+* Throw better error on invalid metadata response ([#1107](https://github.com/googleapis/python-bigtable/issues/1107)) ([2642317](https://github.com/googleapis/python-bigtable/commit/2642317077b723ca8fd62aa86322b524868c2c4d))
+
+
+### Bug Fixes
+
+* Re-add py-typed file for bigtable package ([#1085](https://github.com/googleapis/python-bigtable/issues/1085)) ([0c322c7](https://github.com/googleapis/python-bigtable/commit/0c322c79ecbe4cde3e79d8e83ac655a978d07877))
+
+## [2.30.1](https://github.com/googleapis/python-bigtable/compare/v2.30.0...v2.30.1) (2025-04-17)
+
+
+### Bug Fixes
+
+* Populate SQL app_profile_id header even when it is unset ([#1109](https://github.com/googleapis/python-bigtable/issues/1109)) ([17b75bd](https://github.com/googleapis/python-bigtable/commit/17b75bd746cb0a616f64a05eb0ed72b46de28a17))
+
+## [2.30.0](https://github.com/googleapis/python-bigtable/compare/v2.29.0...v2.30.0) (2025-03-18)
+
+
+### Features
+
+* Update ExecuteQuery to use Prepare ([#1100](https://github.com/googleapis/python-bigtable/issues/1100)) ([8a7abc1](https://github.com/googleapis/python-bigtable/commit/8a7abc1e9c34a9122b2d648e8a358a7097ed3a5d))
+
+
+### Bug Fixes
+
+* Allow protobuf 6.x ([#1092](https://github.com/googleapis/python-bigtable/issues/1092)) ([1015fa8](https://github.com/googleapis/python-bigtable/commit/1015fa83c505487f09820e3a37f76690bd00ab5d))
+* Remove setup.cfg configuration for creating universal wheels ([#1097](https://github.com/googleapis/python-bigtable/issues/1097)) ([95f4b82](https://github.com/googleapis/python-bigtable/commit/95f4b8233cba2a18633e64c5e0bc177e23767a83))
+
+## [2.29.0](https://github.com/googleapis/python-bigtable/compare/v2.28.1...v2.29.0) (2025-02-26)
+
+
+### Features
+
+* Add support for array and float32 SQL query params ([#1078](https://github.com/googleapis/python-bigtable/issues/1078)) ([89b8da8](https://github.com/googleapis/python-bigtable/commit/89b8da8a445aeb08854d9fa77cbc0e4fc042c87f))
+
+
+### Bug Fixes
+
+* Grpc channel refresh ([#1087](https://github.com/googleapis/python-bigtable/issues/1087)) ([f44b36b](https://github.com/googleapis/python-bigtable/commit/f44b36bf51e3e4e3b8a774f96e682d3f1f8d4b16))
+
+## [2.28.1](https://github.com/googleapis/python-bigtable/compare/v2.28.0...v2.28.1) (2025-01-17)
+
+
+### Bug Fixes
+
+* Allow empty headers for btql routing ([#1072](https://github.com/googleapis/python-bigtable/issues/1072)) ([e7ecfeb](https://github.com/googleapis/python-bigtable/commit/e7ecfeb8984a45c880d9483305964fff347eb4b8))
+
+## [2.28.0](https://github.com/googleapis/python-bigtable/compare/v2.27.0...v2.28.0) (2025-01-08)
+
+
+### Features
+
+* Add generated sync client ([#1017](https://github.com/googleapis/python-bigtable/issues/1017)) ([f974823](https://github.com/googleapis/python-bigtable/commit/f974823bf8a74c2f8b1bc69997b13bc1acaf8bef))
+
+## [2.27.0](https://github.com/googleapis/python-bigtable/compare/v2.26.0...v2.27.0) (2024-11-12)
+
+
+### Features
+
+* Add support for Cloud Bigtable Node Scaling Factor for CBT Clusters ([#1023](https://github.com/googleapis/python-bigtable/issues/1023)) ([0809c6a](https://github.com/googleapis/python-bigtable/commit/0809c6ac274e909103ad160a8bcab95f8bb46f31))
+* Surface `retry` param to `Table.read_row` api ([#982](https://github.com/googleapis/python-bigtable/issues/982)) ([a8286d2](https://github.com/googleapis/python-bigtable/commit/a8286d2a510f654f9c270c3c761c02e4ab3817d4))
+
+
+### Bug Fixes
+
+* Registering duplicate instance ([#1033](https://github.com/googleapis/python-bigtable/issues/1033)) ([2bca8fb](https://github.com/googleapis/python-bigtable/commit/2bca8fb220eeb1906fc6a3cf1f879f3d41fbbff8))
+
+## [2.26.0](https://github.com/googleapis/python-bigtable/compare/v2.25.0...v2.26.0) (2024-08-12)
+
+
+### Features
+
+* Add fields and the BackupType proto for Hot Backups ([#1010](https://github.com/googleapis/python-bigtable/issues/1010)) ([b95801f](https://github.com/googleapis/python-bigtable/commit/b95801ffa8081e0072232247fbc5879105c109a6))
+* Add MergeToCell to Mutation APIs ([f029a24](https://github.com/googleapis/python-bigtable/commit/f029a242e2b0e6020d0b87ef256a414194321fad))
+* Add min, max, hll aggregators and more types ([f029a24](https://github.com/googleapis/python-bigtable/commit/f029a242e2b0e6020d0b87ef256a414194321fad))
+* Async execute query client ([#1011](https://github.com/googleapis/python-bigtable/issues/1011)) ([45bc8c4](https://github.com/googleapis/python-bigtable/commit/45bc8c4a0fe567ce5e0126a1a70e7eb3dca93e92))
+
+
+### Bug Fixes
+
+* Use single routing metadata header ([#1005](https://github.com/googleapis/python-bigtable/issues/1005)) ([20eeb0a](https://github.com/googleapis/python-bigtable/commit/20eeb0a68d7b44d07a6d84bc7a7e040ad63bb96d))
+
+
+### Documentation
+
+* Add clarification around SQL timestamps ([#1012](https://github.com/googleapis/python-bigtable/issues/1012)) ([6e80190](https://github.com/googleapis/python-bigtable/commit/6e801900bbe9385d3b579b8c3327c87c3617d92f))
+* Corrected various type documentation ([f029a24](https://github.com/googleapis/python-bigtable/commit/f029a242e2b0e6020d0b87ef256a414194321fad))
+
+## [2.25.0](https://github.com/googleapis/python-bigtable/compare/v2.24.0...v2.25.0) (2024-07-18)
+
+
+### Features
+
+* Publish ProtoRows Message ([7ac8e14](https://github.com/googleapis/python-bigtable/commit/7ac8e142f99a6891b6bc286858f764def503e89a))
+* Publish the Cloud Bigtable ExecuteQuery API ([7ac8e14](https://github.com/googleapis/python-bigtable/commit/7ac8e142f99a6891b6bc286858f764def503e89a))
+
+
+### Bug Fixes
+
+* Allow protobuf 5.x ([7ac8e14](https://github.com/googleapis/python-bigtable/commit/7ac8e142f99a6891b6bc286858f764def503e89a))
+
+## [2.24.0](https://github.com/googleapis/python-bigtable/compare/v2.23.1...v2.24.0) (2024-06-11)
+
+
+### Features
+
+* Add String type with Utf8Raw encoding to Bigtable API ([#968](https://github.com/googleapis/python-bigtable/issues/968)) ([2a2bbfd](https://github.com/googleapis/python-bigtable/commit/2a2bbfdba6737c508ab1073d37fef680ca2a8c2f))
+* Improve async sharding ([#977](https://github.com/googleapis/python-bigtable/issues/977)) ([fd1f7da](https://github.com/googleapis/python-bigtable/commit/fd1f7dafd38f7f0e714a3384a27176f485523682))
+
+
+### Bug Fixes
+
+* **backup:** Backup name regex ([#970](https://github.com/googleapis/python-bigtable/issues/970)) ([6ef122a](https://github.com/googleapis/python-bigtable/commit/6ef122ad49f43e3a22cde5cb6fdaefd947670136))
+* Improve rowset revision ([#979](https://github.com/googleapis/python-bigtable/issues/979)) ([da27527](https://github.com/googleapis/python-bigtable/commit/da275279a7e619e4cd3e72b10ac629d6e0e1fe47))
+
+## [2.23.1](https://github.com/googleapis/python-bigtable/compare/v2.23.0...v2.23.1) (2024-04-15)
+
+
+### Bug Fixes
+
+* Use insecure grpc channel with emulator ([#946](https://github.com/googleapis/python-bigtable/issues/946)) ([aa31706](https://github.com/googleapis/python-bigtable/commit/aa3170663f9bd09d70c99d4e76c07f7f293ad935))
+
+## [2.23.0](https://github.com/googleapis/python-bigtable/compare/v2.22.0...v2.23.0) (2024-02-07)
+
+
+### Features
+
+* Add async data client preview ([7088e39](https://github.com/googleapis/python-bigtable/commit/7088e39c6bac10e5f830e8fa68e181412910ec5a))
+* Adding feature flags for routing cookie and retry info ([#905](https://github.com/googleapis/python-bigtable/issues/905)) ([1859e67](https://github.com/googleapis/python-bigtable/commit/1859e67961629663a8749eea849b5b005fcbc09f))
+
+
+### Bug Fixes
+
+* Fix `ValueError` in `test__validate_universe_domain` ([#929](https://github.com/googleapis/python-bigtable/issues/929)) ([aa76a5a](https://github.com/googleapis/python-bigtable/commit/aa76a5aaa349386d5972d96e1255389e30df8764))
+
+## [2.22.0](https://github.com/googleapis/python-bigtable/compare/v2.21.0...v2.22.0) (2023-12-12)
+
+
+### Features
+
+* Add support for Cloud Bigtable Request Priorities in App Profiles ([#871](https://github.com/googleapis/python-bigtable/issues/871)) ([a4d551e](https://github.com/googleapis/python-bigtable/commit/a4d551e34006202ee96a395a2107d7acdc5881de))
+* Add support for Python 3.12 ([#888](https://github.com/googleapis/python-bigtable/issues/888)) ([4f050aa](https://github.com/googleapis/python-bigtable/commit/4f050aa5aed9a9dcf209779d5c10e5de8e2ff19e))
+* Introduce compatibility with native namespace packages ([#893](https://github.com/googleapis/python-bigtable/issues/893)) ([d218f4e](https://github.com/googleapis/python-bigtable/commit/d218f4ebd4ed6705721dca9318df955b40b0d0ac))
+* Publish CopyBackup protos to external customers ([#855](https://github.com/googleapis/python-bigtable/issues/855)) ([4105df7](https://github.com/googleapis/python-bigtable/commit/4105df762f1318c49bba030063897f0c50e4daee))
+
+
+### Bug Fixes
+
+* Add feature flag for improved mutate rows throttling ([e5af359](https://github.com/googleapis/python-bigtable/commit/e5af3597f45fc4c094c59abca876374f5a866c1b))
+* Add lock to flow control ([#899](https://github.com/googleapis/python-bigtable/issues/899)) ([e4e63c7](https://github.com/googleapis/python-bigtable/commit/e4e63c7b5b91273b3aae04fda59cc5a21c848de2))
+* Mutations batcher race condition ([#896](https://github.com/googleapis/python-bigtable/issues/896)) ([fe58f61](https://github.com/googleapis/python-bigtable/commit/fe58f617c7364d7e99e2ec50abd5f080852bf033))
+* Require google-cloud-core 1.4.4 ([#866](https://github.com/googleapis/python-bigtable/issues/866)) ([09f8a46](https://github.com/googleapis/python-bigtable/commit/09f8a4667d8b68a9f2048ba1aa57db4f775a2c03))
+* Use `retry_async` instead of `retry` in async client ([597efd1](https://github.com/googleapis/python-bigtable/commit/597efd11d15f20549010b4301be4d9768326e6a2))
+
+
+### Documentation
+
+* Minor formatting ([e5af359](https://github.com/googleapis/python-bigtable/commit/e5af3597f45fc4c094c59abca876374f5a866c1b))
+
+## [2.21.0](https://github.com/googleapis/python-bigtable/compare/v2.20.0...v2.21.0) (2023-08-02)
+
+
+### Features
+
+* Add last_scanned_row_responses to FeatureFlags ([#845](https://github.com/googleapis/python-bigtable/issues/845)) ([14a6739](https://github.com/googleapis/python-bigtable/commit/14a673901f82fa247c8027730a0bba41e0ec4757))
+
+
+### Documentation
+
+* Minor formatting ([#851](https://github.com/googleapis/python-bigtable/issues/851)) ([5ebe231](https://github.com/googleapis/python-bigtable/commit/5ebe2312dab70210811fca68c6625d2546442afd))
+
+## [2.20.0](https://github.com/googleapis/python-bigtable/compare/v2.19.0...v2.20.0) (2023-07-17)
+
+
+### Features
+
+* Add experimental reverse scan for public preview ([d5720f8](https://github.com/googleapis/python-bigtable/commit/d5720f8f5b5a81572f31d40051b3ec0f1d104304))
+* Increase the maximum retention period for a Cloud Bigtable backup from 30 days to 90 days ([d5720f8](https://github.com/googleapis/python-bigtable/commit/d5720f8f5b5a81572f31d40051b3ec0f1d104304))
+
+
+### Bug Fixes
+
+* Add async context manager return types ([#828](https://github.com/googleapis/python-bigtable/issues/828)) ([475a160](https://github.com/googleapis/python-bigtable/commit/475a16072f3ad41357bdb765fff608a39141ec00))
+
+
+### Documentation
+
+* Fix formatting for reversed order field example ([#831](https://github.com/googleapis/python-bigtable/issues/831)) ([fddd0ba](https://github.com/googleapis/python-bigtable/commit/fddd0ba97155e112af92a98fd8f20e59b139d177))
+
+## [2.19.0](https://github.com/googleapis/python-bigtable/compare/v2.18.1...v2.19.0) (2023-06-08)
+
+
+### Features
+
+* Add ChangeStreamConfig to CreateTable and UpdateTable ([#786](https://github.com/googleapis/python-bigtable/issues/786)) ([cef70f2](https://github.com/googleapis/python-bigtable/commit/cef70f243541820225f86a520e0b2abd3a7354f7))
+
+
+### Bug Fixes
+
+* Add a callback function on flush_rows ([#796](https://github.com/googleapis/python-bigtable/issues/796)) ([589aa5d](https://github.com/googleapis/python-bigtable/commit/589aa5d04f6b5a2bd310d0bf06aeb7058fb6fcd2))
+
+
+### Documentation
+
+* **samples:** Add region tags ([#788](https://github.com/googleapis/python-bigtable/issues/788)) ([ecf539c](https://github.com/googleapis/python-bigtable/commit/ecf539c4c976fd9e5505b8abf0b697b218f09fef))
+
+## [2.18.1](https://github.com/googleapis/python-bigtable/compare/v2.18.0...v2.18.1) (2023-05-11)
+
+
+### Bug Fixes
+
+* Revert "Feat: Threaded MutationsBatcher" ([#773](https://github.com/googleapis/python-bigtable/issues/773)) ([a767cff](https://github.com/googleapis/python-bigtable/commit/a767cff95d990994f85f5fd05cc10f952087b49d))
+
+## [2.18.0](https://github.com/googleapis/python-bigtable/compare/v2.17.0...v2.18.0) (2023-05-10)
+
+
+### Features
+
+* Publish RateLimitInfo and FeatureFlag protos ([#768](https://github.com/googleapis/python-bigtable/issues/768)) ([171fea6](https://github.com/googleapis/python-bigtable/commit/171fea6de57a47f92a2a56050f8bfe7518144df7))
+* Threaded MutationsBatcher ([#722](https://github.com/googleapis/python-bigtable/issues/722)) ([7521a61](https://github.com/googleapis/python-bigtable/commit/7521a617c121ead96a21ca47959a53b2db2da090))
+
+
+### Bug Fixes
+
+* Pass the "retry" when calling read_rows. ([#759](https://github.com/googleapis/python-bigtable/issues/759)) ([505273b](https://github.com/googleapis/python-bigtable/commit/505273b72bf83d8f92d0e0a92d62f22bce96cc3d))
+
+
+### Documentation
+
+* Fix delete from column family example ([#764](https://github.com/googleapis/python-bigtable/issues/764)) ([128b4e1](https://github.com/googleapis/python-bigtable/commit/128b4e1f3eea2dad903d84c8f2933b17a5f0d226))
+* Fix formatting of request arg in docstring ([#756](https://github.com/googleapis/python-bigtable/issues/756)) ([45d3e43](https://github.com/googleapis/python-bigtable/commit/45d3e4308c4f494228c2e6e18a36285c557cb0c3))
+
+## [2.17.0](https://github.com/googleapis/python-bigtable/compare/v2.16.0...v2.17.0) (2023-03-01)
+
+
+### Features
+
+* Add new_partitions field for CloseStream for Cloud Bigtable ChangeStream ([#740](https://github.com/googleapis/python-bigtable/issues/740)) ([1adcad4](https://github.com/googleapis/python-bigtable/commit/1adcad440368f6d7df6710a013e7fab076461aed))
+
+## [2.16.0](https://github.com/googleapis/python-bigtable/compare/v2.15.0...v2.16.0) (2023-02-27)
+
+
+### Features
+
+* Enable "rest" transport in Python for services supporting numeric enums ([c5116e0](https://github.com/googleapis/python-bigtable/commit/c5116e097aacf9ddae249de57fab1849aff10d86))
+* Publish the Cloud Bigtable Change Streams ([c5116e0](https://github.com/googleapis/python-bigtable/commit/c5116e097aacf9ddae249de57fab1849aff10d86))
+
+
+### Bug Fixes
+
+* Add context manager return types ([beb5bf3](https://github.com/googleapis/python-bigtable/commit/beb5bf3bca4b517d095de3faa17d20e4d89fb295))
+* **deps:** Require google-api-core>=1.34.0,>=2.11.0 ([c5116e0](https://github.com/googleapis/python-bigtable/commit/c5116e097aacf9ddae249de57fab1849aff10d86))
+
+
+### Documentation
+
+* Add documentation for enums ([beb5bf3](https://github.com/googleapis/python-bigtable/commit/beb5bf3bca4b517d095de3faa17d20e4d89fb295))
+
+## [2.15.0](https://github.com/googleapis/python-bigtable/compare/v2.14.1...v2.15.0) (2023-01-10)
+
+
+### Features
+
+* Add support for python 3.11 ([#718](https://github.com/googleapis/python-bigtable/issues/718)) ([803a15e](https://github.com/googleapis/python-bigtable/commit/803a15ef0cd3713411eeb5d21258c12bbe1dcab6))
+
+## [2.14.1](https://github.com/googleapis/python-bigtable/compare/v2.14.0...v2.14.1) (2022-12-06)
+
+
+### Bug Fixes
+
+* **deps:** Require google-api-core >=1.34.0, >=2.11.0 ([e5875cb](https://github.com/googleapis/python-bigtable/commit/e5875cbe8551329fbb64f273ca21d6b7ada641ec))
+* Drop usage of pkg_resources ([e5875cb](https://github.com/googleapis/python-bigtable/commit/e5875cbe8551329fbb64f273ca21d6b7ada641ec))
+* Fix timeout default values ([e5875cb](https://github.com/googleapis/python-bigtable/commit/e5875cbe8551329fbb64f273ca21d6b7ada641ec))
+
+
+### Documentation
+
+* **samples:** Snippetgen should call await on the operation coroutine before calling result ([e5875cb](https://github.com/googleapis/python-bigtable/commit/e5875cbe8551329fbb64f273ca21d6b7ada641ec))
+
+## [2.14.0](https://github.com/googleapis/python-bigtable/compare/v2.13.2...v2.14.0) (2022-11-30)
+
+
+### Features
+
+* Add typing to proto.Message based class attributes ([c1538d5](https://github.com/googleapis/python-bigtable/commit/c1538d5c5a001a9febb4b466d3d09fd1fd167f66))
+* remove enum value ReadRowsRequest.RequestStatsView.REQUEST_STATS_EFFICIENCY ([c1538d5](https://github.com/googleapis/python-bigtable/commit/c1538d5c5a001a9febb4b466d3d09fd1fd167f66))
+* remove field ReadIterationStats.deletes_seen ([c1538d5](https://github.com/googleapis/python-bigtable/commit/c1538d5c5a001a9febb4b466d3d09fd1fd167f66))
+* remove field RequestStats.read_efficiency_stats ([c1538d5](https://github.com/googleapis/python-bigtable/commit/c1538d5c5a001a9febb4b466d3d09fd1fd167f66))
+* remove proto ReadEfficiencyStats ([c1538d5](https://github.com/googleapis/python-bigtable/commit/c1538d5c5a001a9febb4b466d3d09fd1fd167f66))
+* rename field RequestStats.all_read_stats to full_read_stats_view ([c1538d5](https://github.com/googleapis/python-bigtable/commit/c1538d5c5a001a9febb4b466d3d09fd1fd167f66))
+* rename proto AllReadStats to FullReadStatsView ([c1538d5](https://github.com/googleapis/python-bigtable/commit/c1538d5c5a001a9febb4b466d3d09fd1fd167f66))
+* rename proto ReadIteratorStats to ReadIterationStats ([c1538d5](https://github.com/googleapis/python-bigtable/commit/c1538d5c5a001a9febb4b466d3d09fd1fd167f66))
+
+
+### Bug Fixes
+
+* Add dict typing for client_options ([c1538d5](https://github.com/googleapis/python-bigtable/commit/c1538d5c5a001a9febb4b466d3d09fd1fd167f66))
+
+## [2.13.2](https://github.com/googleapis/python-bigtable/compare/v2.13.1...v2.13.2) (2022-10-20)
+
+
+### Bug Fixes
+
+* Respect deadlines for column family operations ([#687](https://github.com/googleapis/python-bigtable/issues/687)) ([df2e64a](https://github.com/googleapis/python-bigtable/commit/df2e64a79bbd8b28d0991706607af99d539320d1))
+
+## [2.13.1](https://github.com/googleapis/python-bigtable/compare/v2.13.0...v2.13.1) (2022-10-10)
+
+
+### Bug Fixes
+
+* **deps:** Allow protobuf 3.19.5 ([#682](https://github.com/googleapis/python-bigtable/issues/682)) ([0bb3420](https://github.com/googleapis/python-bigtable/commit/0bb3420decac74058ee099d72f8932556409f2aa))
+
+## [2.13.0](https://github.com/googleapis/python-bigtable/compare/v2.12.0...v2.13.0) (2022-09-29)
+
+
+### Features
+
+* Publish the RequestStats proto ([#676](https://github.com/googleapis/python-bigtable/issues/676)) ([199949b](https://github.com/googleapis/python-bigtable/commit/199949b2a930706654680b91a93f2a903bf112bf))
+
+
+### Bug Fixes
+
+* **deps:** Require protobuf >= 3.20.2 ([#679](https://github.com/googleapis/python-bigtable/issues/679)) ([030ef38](https://github.com/googleapis/python-bigtable/commit/030ef3868c442a8a21c4b4d6217b99cab09a1be7))
+
+## [2.12.0](https://github.com/googleapis/python-bigtable/compare/v2.11.3...v2.12.0) (2022-09-19)
+
+
+### Features
+
+* Publish CBT deletion_protection field in Table, UpdateTableRequest, and UpdateTable API ([#670](https://github.com/googleapis/python-bigtable/issues/670)) ([c57289c](https://github.com/googleapis/python-bigtable/commit/c57289c03335380694580202d746ca4f679dce9b))
+
+
+### Documentation
+
+* Remove unnecessary comment ([#674](https://github.com/googleapis/python-bigtable/issues/674)) ([9c62655](https://github.com/googleapis/python-bigtable/commit/9c62655de7fecd93ee7a1bb95b208d94798727cd))
+
+## [2.11.3](https://github.com/googleapis/python-bigtable/compare/v2.11.2...v2.11.3) (2022-08-17)
+
+
+### Performance Improvements
+
+* optimize row merging ([#628](https://github.com/googleapis/python-bigtable/issues/628)) ([c71ec70](https://github.com/googleapis/python-bigtable/commit/c71ec70e55f6e236e46127870a9ed4717eee5da5))
+
+## [2.11.2](https://github.com/googleapis/python-bigtable/compare/v2.11.1...v2.11.2) (2022-08-11)
+
+
+### Bug Fixes
+
+* **deps:** allow protobuf < 5.0.0 ([#631](https://github.com/googleapis/python-bigtable/issues/631)) ([fd54fc6](https://github.com/googleapis/python-bigtable/commit/fd54fc63340a3e01fae1ccc4c648dd90900f8a94))
+* **deps:** require proto-plus >= 1.22.0 ([fd54fc6](https://github.com/googleapis/python-bigtable/commit/fd54fc63340a3e01fae1ccc4c648dd90900f8a94))
+
+## [2.11.1](https://github.com/googleapis/python-bigtable/compare/v2.11.0...v2.11.1) (2022-08-08)
+
+
+### Bug Fixes
+
+* Retry the RST Stream error in mutate rows and read rows([#624](https://github.com/googleapis/python-bigtable/issues/624)) ([d24574a](https://github.com/googleapis/python-bigtable/commit/d24574a722de61bdeffa6588bcb08f56e62ba3bd))
+
+## [2.11.0](https://github.com/googleapis/python-bigtable/compare/v2.10.1...v2.11.0) (2022-08-04)
+
+
+### Features
+
+* add audience parameter ([a7a7699](https://github.com/googleapis/python-bigtable/commit/a7a76998fad3c12215527e4ebb517a1526cc152e))
+* add satisfies_pzs output only field ([#614](https://github.com/googleapis/python-bigtable/issues/614)) ([7dc1469](https://github.com/googleapis/python-bigtable/commit/7dc1469fef2dc38f1509b35a37e9c97381ab7601))
+* Add storage_utilization_gib_per_node to Autoscaling target ([a7a7699](https://github.com/googleapis/python-bigtable/commit/a7a76998fad3c12215527e4ebb517a1526cc152e))
+* Cloud Bigtable Undelete Table service and message proto files ([a7a7699](https://github.com/googleapis/python-bigtable/commit/a7a76998fad3c12215527e4ebb517a1526cc152e))
+
+
+### Bug Fixes
+
+* **deps:** require google-api-core>=1.32.0,>=2.8.0 ([a7a7699](https://github.com/googleapis/python-bigtable/commit/a7a76998fad3c12215527e4ebb517a1526cc152e))
+* require python 3.7+ ([#610](https://github.com/googleapis/python-bigtable/issues/610)) ([10d00f5](https://github.com/googleapis/python-bigtable/commit/10d00f5af5d5878c26529f5e48a5fb8d8385696d))
+
+
+### Performance Improvements
+
+* improve row merging ([#619](https://github.com/googleapis/python-bigtable/issues/619)) ([b4853e5](https://github.com/googleapis/python-bigtable/commit/b4853e59d0efd8a7b37f3fcb06b14dbd9f5d20a4))
+
+## [2.10.1](https://github.com/googleapis/python-bigtable/compare/v2.10.0...v2.10.1) (2022-06-03)
+
+
+### Bug Fixes
+
+* **deps:** require protobuf <4.0.0dev ([#595](https://github.com/googleapis/python-bigtable/issues/595)) ([a4deaf7](https://github.com/googleapis/python-bigtable/commit/a4deaf7b1b5c4b7ce8f6dc5bb96d32ea8ff55c2d))
+
+
+### Documentation
+
+* fix changelog header to consistent size ([#596](https://github.com/googleapis/python-bigtable/issues/596)) ([51961c3](https://github.com/googleapis/python-bigtable/commit/51961c32686fe5851e957581b85adbe92a073e03))
+
+## [2.10.0](https://github.com/googleapis/python-bigtable/compare/v2.9.0...v2.10.0) (2022-05-30)
+
+
+### Features
+
+* refreshes Bigtable Admin API(s) protos ([#589](https://github.com/googleapis/python-bigtable/issues/589)) ([b508e33](https://github.com/googleapis/python-bigtable/commit/b508e3321937850d65242283e82f5413feb6081a))
+
+
+### Documentation
+
+* Add EncryptionInfo documentation ([#588](https://github.com/googleapis/python-bigtable/issues/588)) ([bedbf1b](https://github.com/googleapis/python-bigtable/commit/bedbf1b1bb304ff45f31ad20004ff96041ce716c))
+
+## [2.9.0](https://github.com/googleapis/python-bigtable/compare/v2.8.1...v2.9.0) (2022-04-14)
+
+
+### Features
+
+* App Profile multi cluster routing support with specified cluster ids ([#549](https://github.com/googleapis/python-bigtable/issues/549)) ([a0ed5b5](https://github.com/googleapis/python-bigtable/commit/a0ed5b5dfda1f3980b1a8eb349b2b5d8ab428a4b))
+* AuditConfig for IAM v1 ([4e50278](https://github.com/googleapis/python-bigtable/commit/4e50278c73f608a7c493692d8d17e7dd2aa7ba44))
+
+
+### Bug Fixes
+
+* **deps:** require grpc-google-iam-v1 >=0.12.4 ([4e50278](https://github.com/googleapis/python-bigtable/commit/4e50278c73f608a7c493692d8d17e7dd2aa7ba44))
+
+
+### Documentation
+
+* fix type in docstring for map fields ([4e50278](https://github.com/googleapis/python-bigtable/commit/4e50278c73f608a7c493692d8d17e7dd2aa7ba44))
+
+## [2.8.1](https://github.com/googleapis/python-bigtable/compare/v2.8.0...v2.8.1) (2022-04-07)
+
+
+### Bug Fixes
+
+* Prevent sending full table scan when retrying ([#554](https://github.com/googleapis/python-bigtable/issues/554)) ([56f5357](https://github.com/googleapis/python-bigtable/commit/56f5357c09ac867491b934f6029776dcd74c6eac))
+
+## [2.8.0](https://github.com/googleapis/python-bigtable/compare/v2.7.1...v2.8.0) (2022-04-04)
+
+
+### Features
+
+* Add ListHotTablets API method and protobufs ([#542](https://github.com/googleapis/python-bigtable/issues/542)) ([483f139](https://github.com/googleapis/python-bigtable/commit/483f139f5065d55378bd850c33e89db460119fc1))
+
+
+### Documentation
+
+* explain mutate vs mutate_rows ([#543](https://github.com/googleapis/python-bigtable/issues/543)) ([84cfb0a](https://github.com/googleapis/python-bigtable/commit/84cfb0abdfabd8aa2f292fc0bb7e6deab50f87f1))
+* Remove the limitation that all clusters in a CMEK instance must use the same key ([f008eea](https://github.com/googleapis/python-bigtable/commit/f008eea69a6c7c1a027cefc7f16d46042b524db1))
+* Update `cpu_utilization_percent` limit ([#547](https://github.com/googleapis/python-bigtable/issues/547)) ([f008eea](https://github.com/googleapis/python-bigtable/commit/f008eea69a6c7c1a027cefc7f16d46042b524db1))
+
+## [2.7.1](https://github.com/googleapis/python-bigtable/compare/v2.7.0...v2.7.1) (2022-03-17)
+
+
+### Bug Fixes
+
+* Ensure message fields are copied when building retry request ([#533](https://github.com/googleapis/python-bigtable/issues/533)) ([ff7f190](https://github.com/googleapis/python-bigtable/commit/ff7f1901b6420e66e1388e757eeec20d30484ad9))
+
+## [2.7.0](https://github.com/googleapis/python-bigtable/compare/v2.6.0...v2.7.0) (2022-03-06)
+
+
+### Features
+
+* Add support for autoscaling ([#509](https://github.com/googleapis/python-bigtable/issues/509)) ([8f4e197](https://github.com/googleapis/python-bigtable/commit/8f4e197148644ded934190814ff44fa132a2dda6))
+
+
+### Bug Fixes
+
+* **deps:** require google-api-core>=1.31.5, >=2.3.2 ([#526](https://github.com/googleapis/python-bigtable/issues/526)) ([a8a92ee](https://github.com/googleapis/python-bigtable/commit/a8a92ee1b6bd284055fee3e1029a9a6aacbc5f1c))
+* **deps:** require proto-plus>=1.15.0 ([a8a92ee](https://github.com/googleapis/python-bigtable/commit/a8a92ee1b6bd284055fee3e1029a9a6aacbc5f1c))
+
+## [2.6.0](https://github.com/googleapis/python-bigtable/compare/v2.5.2...v2.6.0) (2022-02-26)
+
+
+### Features
+
+* add WarmAndPing request for channel priming ([#504](https://github.com/googleapis/python-bigtable/issues/504)) ([df5fc1f](https://github.com/googleapis/python-bigtable/commit/df5fc1f7d6ded88d9bce67f7cc6989981745931f))
+
+## [2.5.2](https://github.com/googleapis/python-bigtable/compare/v2.5.1...v2.5.2) (2022-02-24)
+
+
+### Bug Fixes
+
+* Pass app_profile_id when building updated request ([#512](https://github.com/googleapis/python-bigtable/issues/512)) ([2f8ba7a](https://github.com/googleapis/python-bigtable/commit/2f8ba7a4801b17b5afb6180a7ace1327a2d05a52))
+
+## [2.5.1](https://github.com/googleapis/python-bigtable/compare/v2.5.0...v2.5.1) (2022-02-17)
+
+
+### Bug Fixes
+
+* **deps:** move libcst to extras ([#508](https://github.com/googleapis/python-bigtable/issues/508)) ([4b4d7e2](https://github.com/googleapis/python-bigtable/commit/4b4d7e2796788b2cd3764f54ff532a9c9d092aec))
+
+## [2.5.0](https://github.com/googleapis/python-bigtable/compare/v2.4.0...v2.5.0) (2022-02-07)
+
+
+### Features
+
+* add 'Instance.create_time' field ([#449](https://github.com/googleapis/python-bigtable/issues/449)) ([b9ecfa9](https://github.com/googleapis/python-bigtable/commit/b9ecfa97281ae21dcf233e60c70cacc701f12c32))
+* add api key support ([#497](https://github.com/googleapis/python-bigtable/issues/497)) ([ee3a6c4](https://github.com/googleapis/python-bigtable/commit/ee3a6c4c5f810fab08671db3407195864ecc1972))
+* add Autoscaling API ([#475](https://github.com/googleapis/python-bigtable/issues/475)) ([97b3cdd](https://github.com/googleapis/python-bigtable/commit/97b3cddb908098e255e7a1209cdb985087b95a26))
+* add context manager support in client ([#440](https://github.com/googleapis/python-bigtable/issues/440)) ([a3d2cf1](https://github.com/googleapis/python-bigtable/commit/a3d2cf18b49cddc91e5e6448c46d6b936d86954d))
+* add support for Python 3.10 ([#437](https://github.com/googleapis/python-bigtable/issues/437)) ([3cf0814](https://github.com/googleapis/python-bigtable/commit/3cf08149411f3f4df41e9b5a9894dbfb101bd86f))
+
+
+### Bug Fixes
+
+* **deps:** drop packaging dependency ([a535f99](https://github.com/googleapis/python-bigtable/commit/a535f99e9f0bb16488a5d372a0a6efc3c4b69186))
+* **deps:** require google-api-core >= 1.28.0 ([a535f99](https://github.com/googleapis/python-bigtable/commit/a535f99e9f0bb16488a5d372a0a6efc3c4b69186))
+* improper types in pagers generation ([f9c7699](https://github.com/googleapis/python-bigtable/commit/f9c7699eb6d4071314abbb0477ba47370059e041))
+* improve type hints, mypy checks ([#448](https://github.com/googleapis/python-bigtable/issues/448)) ([a99bf88](https://github.com/googleapis/python-bigtable/commit/a99bf88417d6aec03923447c70c2752f6bb5c459))
+* resolve DuplicateCredentialArgs error when using credentials_file ([d6bff70](https://github.com/googleapis/python-bigtable/commit/d6bff70654b41e31d2ac83d307bdc6bbd111201e))
+
+
+### Documentation
+
+* clarify comments in ReadRowsRequest and RowFilter ([#494](https://github.com/googleapis/python-bigtable/issues/494)) ([1efd9b5](https://github.com/googleapis/python-bigtable/commit/1efd9b598802f766a3c4c8c78ec7b0ca208d3325))
+* list oneofs in docstring ([a535f99](https://github.com/googleapis/python-bigtable/commit/a535f99e9f0bb16488a5d372a0a6efc3c4b69186))
+
+## [2.4.0](https://www.github.com/googleapis/python-bigtable/compare/v2.3.3...v2.4.0) (2021-09-24)
+
+
+### Features
+
+* Publish new fields to support cluster group routing for Cloud Bigtable ([#407](https://www.github.com/googleapis/python-bigtable/issues/407)) ([66af554](https://www.github.com/googleapis/python-bigtable/commit/66af554a103eea0139cb313691d69f4c88a9e87f))
+
+
+### Bug Fixes
+
+* add 'dict' annotation type to 'request' ([160bfd3](https://www.github.com/googleapis/python-bigtable/commit/160bfd317a83561821acc0212d3514701a031ac6))
+
+## [2.3.3](https://www.github.com/googleapis/python-bigtable/compare/v2.3.2...v2.3.3) (2021-07-24)
+
+
+### Bug Fixes
+
+* enable self signed jwt for grpc ([#397](https://www.github.com/googleapis/python-bigtable/issues/397)) ([9d43a38](https://www.github.com/googleapis/python-bigtable/commit/9d43a388470746608d324ca8d72f41bb3a4492b7))
+
+## [2.3.2](https://www.github.com/googleapis/python-bigtable/compare/v2.3.1...v2.3.2) (2021-07-20)
+
+
+### Bug Fixes
+
+* **deps:** pin 'google-{api,cloud}-core', 'google-auth' to allow 2.x versions ([#379](https://www.github.com/googleapis/python-bigtable/issues/379)) ([95b2e13](https://www.github.com/googleapis/python-bigtable/commit/95b2e13b776dca4a6998313c41aa960ffe2e47e9))
+* directly append to pb for better read row performance ([#382](https://www.github.com/googleapis/python-bigtable/issues/382)) ([7040e11](https://www.github.com/googleapis/python-bigtable/commit/7040e113b93bb2e0625c054486305235d8f14c2a))
+
+## [2.3.1](https://www.github.com/googleapis/python-bigtable/compare/v2.3.0...v2.3.1) (2021-07-13)
+
+
+### Bug Fixes
+
+* use public 'table_admin_client' property in backups methods ([#359](https://www.github.com/googleapis/python-bigtable/issues/359)) ([bc57c79](https://www.github.com/googleapis/python-bigtable/commit/bc57c79640b270ff89fd10ec243dd04559168c5c))
+
+## [2.3.0](https://www.github.com/googleapis/python-bigtable/compare/v2.2.0...v2.3.0) (2021-07-01)
+
+
+### Features
+
+* add always_use_jwt_access ([#333](https://www.github.com/googleapis/python-bigtable/issues/333)) ([f1fce5b](https://www.github.com/googleapis/python-bigtable/commit/f1fce5b0694d965202fc2a4fcf8bc6e09e78deae))
+
+
+### Bug Fixes
+
+* **deps:** add packaging requirement ([#326](https://www.github.com/googleapis/python-bigtable/issues/326)) ([d31c27b](https://www.github.com/googleapis/python-bigtable/commit/d31c27b01d1f7c351effc2856a8d4777a1a10690))
+* **deps:** require google-api-core >= 1.26.0 ([#344](https://www.github.com/googleapis/python-bigtable/issues/344)) ([ce4ceb6](https://www.github.com/googleapis/python-bigtable/commit/ce4ceb6d8fe74eff16cf9ca151e0b98502256a2f))
+* disable always_use_jwt_access ([#348](https://www.github.com/googleapis/python-bigtable/issues/348)) ([4623248](https://www.github.com/googleapis/python-bigtable/commit/4623248376deccf4651d4badf8966311ebe3c16a))
+
+
+### Documentation
+
+* add parameter mutation_timeout to instance.table docs ([#305](https://www.github.com/googleapis/python-bigtable/issues/305)) ([5bbd06e](https://www.github.com/googleapis/python-bigtable/commit/5bbd06e5413e8b7597ba128174b10fe45fd38380))
+* fix broken links in multiprocessing.rst ([#317](https://www.github.com/googleapis/python-bigtable/issues/317)) ([e329352](https://www.github.com/googleapis/python-bigtable/commit/e329352d7e6d81de1d1d770c73406a60d29d01bb))
+* omit mention of Python 2.7 in 'CONTRIBUTING.rst' ([#1127](https://www.github.com/googleapis/python-bigtable/issues/1127)) ([#329](https://www.github.com/googleapis/python-bigtable/issues/329)) ([6bf0c64](https://www.github.com/googleapis/python-bigtable/commit/6bf0c647bcebed641b4cbdc5eb70528c88b26a01)), closes [#1126](https://www.github.com/googleapis/python-bigtable/issues/1126)
+
+## [2.2.0](https://www.github.com/googleapis/python-bigtable/compare/v2.1.0...v2.2.0) (2021-04-30)
+
+
+### Features
+
+* backup restore to different instance ([#300](https://www.github.com/googleapis/python-bigtable/issues/300)) ([049a25f](https://www.github.com/googleapis/python-bigtable/commit/049a25f903bb6b062e41430b6e7ce6d7b164f22c))
+
+## [2.1.0](https://www.github.com/googleapis/python-bigtable/compare/v2.0.0...v2.1.0) (2021-04-21)
+
+
+### Features
+
+* customer managed keys (CMEK) ([#249](https://www.github.com/googleapis/python-bigtable/issues/249)) ([93df829](https://www.github.com/googleapis/python-bigtable/commit/93df82998cc0218cbc4a1bc2ab41a48b7478758d))
+
+## [2.0.0](https://www.github.com/googleapis/python-bigtable/compare/v1.7.0...v2.0.0) (2021-04-06)
+
+
+### ⚠ BREAKING CHANGES
+
+* microgenerator changes (#203)
+
+### Features
+
+* microgenerator changes ([#203](https://www.github.com/googleapis/python-bigtable/issues/203)) ([b31bd87](https://www.github.com/googleapis/python-bigtable/commit/b31bd87c3fa8cad32768611a52d5effcc7d9b3e2))
+* publish new fields for CMEK ([#222](https://www.github.com/googleapis/python-bigtable/issues/222)) ([0fe5b63](https://www.github.com/googleapis/python-bigtable/commit/0fe5b638e45e711d25f55664689a9baf4d12dc57))
+
+
+### Bug Fixes
+
+* address issue in establishing an emulator connection ([#246](https://www.github.com/googleapis/python-bigtable/issues/246)) ([1a31826](https://www.github.com/googleapis/python-bigtable/commit/1a31826e2e378468e057160c07d850ebca1c5879))
+* fix unit test that could be broken by user's environment ([#239](https://www.github.com/googleapis/python-bigtable/issues/239)) ([cbd712e](https://www.github.com/googleapis/python-bigtable/commit/cbd712e6d3aded0c025525f97da1d667fbe2f061))
+* guard assignments of certain values against None ([#220](https://www.github.com/googleapis/python-bigtable/issues/220)) ([341f448](https://www.github.com/googleapis/python-bigtable/commit/341f448ce378375ab79bfc82f864fb6c88ed71a0))
+* **retry:** restore grpc_service_config for CreateBackup and {Restore,Snapshot}Table ([#240](https://www.github.com/googleapis/python-bigtable/issues/240)) ([79f1734](https://www.github.com/googleapis/python-bigtable/commit/79f1734c897e5e1b2fd02d043185c44b7ee34dc9))
+
+
+### Documentation
+
+* add backup docs ([#251](https://www.github.com/googleapis/python-bigtable/issues/251)) ([7d5c7aa](https://www.github.com/googleapis/python-bigtable/commit/7d5c7aa92cb476b07ac9efb5d231888c4c417783))
+
+
+### Dependencies
+
+* update gapic-generator-python to 0.40.11 ([#230](https://www.github.com/googleapis/python-bigtable/issues/230)) ([47d5dc1](https://www.github.com/googleapis/python-bigtable/commit/47d5dc1853f0be609e666e8a8fad0146f2905482))
+* upgrade gapic-generator-python to 0.43.1 ([#276](https://www.github.com/googleapis/python-bigtable/issues/276)) ([0e9fe54](https://www.github.com/googleapis/python-bigtable/commit/0e9fe5410e1b5d16ae0735ba1f606f7d1befafb9))
+
+## [2.0.0-dev1](https://www.github.com/googleapis/python-bigtable/compare/v1.7.0...v2.0.0-dev1) (2021-02-24)
+
+
+### ⚠ BREAKING CHANGES
+
+* microgenerator changes (#203)
+
+### Features
+
+* microgenerator changes ([#203](https://www.github.com/googleapis/python-bigtable/issues/203)) ([b31bd87](https://www.github.com/googleapis/python-bigtable/commit/b31bd87c3fa8cad32768611a52d5effcc7d9b3e2))
+
+
+### Bug Fixes
+
+* guard assignments of certain values against None ([#220](https://www.github.com/googleapis/python-bigtable/issues/220)) ([341f448](https://www.github.com/googleapis/python-bigtable/commit/341f448ce378375ab79bfc82f864fb6c88ed71a0))
+
+## [1.7.0](https://www.github.com/googleapis/python-bigtable/compare/v1.6.1...v1.7.0) (2021-02-09)
+
+
+### Features
+
+* add keep alive timeout ([#182](https://www.github.com/googleapis/python-bigtable/issues/182)) ([e9637cb](https://www.github.com/googleapis/python-bigtable/commit/e9637cbd4461dcca509dca43ef116d6ff41b80c7))
+* support filtering on incrementable values ([#178](https://www.github.com/googleapis/python-bigtable/issues/178)) ([e221352](https://www.github.com/googleapis/python-bigtable/commit/e2213520951d3da97019a1d784e5bf31d94e3353))
+
+
+### Bug Fixes
+
+* Renaming region tags to not conflict with documentation snippets ([#190](https://www.github.com/googleapis/python-bigtable/issues/190)) ([dd0cdc5](https://www.github.com/googleapis/python-bigtable/commit/dd0cdc5bcfd92e18ab9a7255684a9f5b21198867))
+
+
+### Documentation
+
+* update python contributing guide ([#206](https://www.github.com/googleapis/python-bigtable/issues/206)) ([e301ac3](https://www.github.com/googleapis/python-bigtable/commit/e301ac3b61364d779fdb50a57ae8e2cb9952df9e))
+
+## [1.6.1](https://www.github.com/googleapis/python-bigtable/compare/v1.6.0...v1.6.1) (2020-12-01)
+
+
+### Documentation
+
+* update intersphinx mappings ([#172](https://www.github.com/googleapis/python-bigtable/issues/172)) ([7b09368](https://www.github.com/googleapis/python-bigtable/commit/7b09368d5121782c7f271b3575c838e8a2284c05))
+
+## [1.6.0](https://www.github.com/googleapis/python-bigtable/compare/v1.5.1...v1.6.0) (2020-11-16)
+
+
+### Features
+
+* add 'timeout' arg to 'Table.mutate_rows' ([#157](https://www.github.com/googleapis/python-bigtable/issues/157)) ([6d597a1](https://www.github.com/googleapis/python-bigtable/commit/6d597a1e5be05c993c9f86beca4c1486342caf94)), closes [/github.com/googleapis/python-bigtable/issues/7#issuecomment-715538708](https://www.github.com/googleapis//github.com/googleapis/python-bigtable/issues/7/issues/issuecomment-715538708) [#7](https://www.github.com/googleapis/python-bigtable/issues/7)
+* Backup Level IAM ([#160](https://www.github.com/googleapis/python-bigtable/issues/160)) ([44932cb](https://www.github.com/googleapis/python-bigtable/commit/44932cb8710e12279dbd4e9271577f8bee238980))
+
+## [1.5.1](https://www.github.com/googleapis/python-bigtable/compare/v1.5.0...v1.5.1) (2020-10-06)
+
+
+### Bug Fixes
+
+* harden version data gathering against DistributionNotFound ([#150](https://www.github.com/googleapis/python-bigtable/issues/150)) ([c815421](https://www.github.com/googleapis/python-bigtable/commit/c815421422f1c845983e174651a5292767cfe2e7))
+
+## [1.5.0](https://www.github.com/googleapis/python-bigtable/compare/v1.4.0...v1.5.0) (2020-09-22)
+
+
+### Features
+
+* add 'Rowset.add_row_range_with_prefix' ([#30](https://www.github.com/googleapis/python-bigtable/issues/30)) ([4796ac8](https://www.github.com/googleapis/python-bigtable/commit/4796ac85c877d75ed596cde7628dae31918ef726))
+* add response status to DirectRow.commit() ([#128](https://www.github.com/googleapis/python-bigtable/issues/128)) ([2478bb8](https://www.github.com/googleapis/python-bigtable/commit/2478bb864adbc71ef606e2b10b3bdfe3a7d44717)), closes [#127](https://www.github.com/googleapis/python-bigtable/issues/127)
+* pass 'client_options' to base class ctor ([#104](https://www.github.com/googleapis/python-bigtable/issues/104)) ([e55ca07](https://www.github.com/googleapis/python-bigtable/commit/e55ca07561f9c946276f3bde599e69947769f560)), closes [#69](https://www.github.com/googleapis/python-bigtable/issues/69)
+
+
+### Bug Fixes
+
+* pass timeout to 'PartialRowsData.response_iterator' ([#16](https://www.github.com/googleapis/python-bigtable/issues/16)) ([8f76434](https://www.github.com/googleapis/python-bigtable/commit/8f764343e01d50ad880363f5a4e5630122cbdb25))
+* retry if failure occurs on initial call in MutateRows ([#123](https://www.github.com/googleapis/python-bigtable/issues/123)) ([0c9cde8](https://www.github.com/googleapis/python-bigtable/commit/0c9cde8ade0e4f50d06bbbd1b4169ae5c545b2c0))
+* **python_samples:** README link fix, enforce samples=True ([#114](https://www.github.com/googleapis/python-bigtable/issues/114)) ([dfe658a](https://www.github.com/googleapis/python-bigtable/commit/dfe658a2b1270eda7a8a084aca28d65b3297a04f))
+
+
+### Documentation
+
+* add sample for writing data with Beam ([#80](https://www.github.com/googleapis/python-bigtable/issues/80)) ([6900290](https://www.github.com/googleapis/python-bigtable/commit/6900290e00daf04ca545284b3f0a591a2de11136))
+* clarify 'Table.read_rows' snippet ([#50](https://www.github.com/googleapis/python-bigtable/issues/50)) ([5ca8bbd](https://www.github.com/googleapis/python-bigtable/commit/5ca8bbd0fb9c4a7cef7b4cbb67d1ba9f2382f2d8))
+* document 'row_set' module explicitly ([#29](https://www.github.com/googleapis/python-bigtable/issues/29)) ([0e0291e](https://www.github.com/googleapis/python-bigtable/commit/0e0291e56cbaeec00ede5275e17af2968a12251c))
+* Pysamples new readme gen ([#112](https://www.github.com/googleapis/python-bigtable/issues/112)) ([3ecca7a](https://www.github.com/googleapis/python-bigtable/commit/3ecca7a7b52b0f4fc38db5c5016622b994c1a8aa))
+* remove indent from snippet code blocks ([#49](https://www.github.com/googleapis/python-bigtable/issues/49)) ([1fbadf9](https://www.github.com/googleapis/python-bigtable/commit/1fbadf906204c622b9cff3fa073d8fc43d3597f7))
+* switch links to client documentation ([#93](https://www.github.com/googleapis/python-bigtable/issues/93)) ([2c973e6](https://www.github.com/googleapis/python-bigtable/commit/2c973e6cce969e7003be0b3d7a164bdc61b91ef1))
+* update docs build (via synth) ([#99](https://www.github.com/googleapis/python-bigtable/issues/99)) ([c301b53](https://www.github.com/googleapis/python-bigtable/commit/c301b53db4f7d48fd76548a5cd3a01cc46ff1522)), closes [#700](https://www.github.com/googleapis/python-bigtable/issues/700)
+* update links to reflect new Github org ([#48](https://www.github.com/googleapis/python-bigtable/issues/48)) ([9bb11ed](https://www.github.com/googleapis/python-bigtable/commit/9bb11edc885958286b5b31fa18cfd0db95338cb4))
+* use correct storage type constant in docstrings ([#110](https://www.github.com/googleapis/python-bigtable/issues/110)) ([bc6db77](https://www.github.com/googleapis/python-bigtable/commit/bc6db77809a89fd6f3b2095cfe9b84d2da1bf304))
+* **samples:** filter cpu query to get metrics for the correct resources ([#4238](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4238)) ([#81](https://www.github.com/googleapis/python-bigtable/issues/81)) ([2c8c386](https://www.github.com/googleapis/python-bigtable/commit/2c8c3864c43a7ac9c85a0cd7c9cd4eec7434b42d))
+
+## [1.4.0](https://www.github.com/googleapis/python-bigtable/compare/v1.3.0...v1.4.0) (2020-07-21)
+
+
+### Features
+
+* **bigtable:** Managed Backups wrappers ([#57](https://www.github.com/googleapis/python-bigtable/issues/57)) ([a351734](https://www.github.com/googleapis/python-bigtable/commit/a351734ae16b4a689b89e6a42f63ea3ea5ad84ca))
+
+## [1.3.0](https://www.github.com/googleapis/python-bigtable/compare/v1.2.1...v1.3.0) (2020-07-16)
+
+
+### Features
+
+* **api_core:** support version 3 policy bindings ([#9869](https://www.github.com/googleapis/python-bigtable/issues/9869)) ([a9dee32](https://www.github.com/googleapis/python-bigtable/commit/a9dee327ab39e22a014b3c4126f1c9d1beebe2d1))
+* **bigtable:** add py2 deprecation warnings; standardize use of 'required' in docstrings (via synth) ([#10064](https://www.github.com/googleapis/python-bigtable/issues/10064)) ([5460de0](https://www.github.com/googleapis/python-bigtable/commit/5460de0f7e0d936a23289f679c2b1a3040a21247))
+* Create CODEOWNERS ([#27](https://www.github.com/googleapis/python-bigtable/issues/27)) ([2b63746](https://www.github.com/googleapis/python-bigtable/commit/2b6374600d911b3dfd567eafd964260eb00a2bc0))
+* **bigtable:** skip system tests failing with emulator ([#18](https://www.github.com/googleapis/python-bigtable/issues/18)) ([399d3d3](https://www.github.com/googleapis/python-bigtable/commit/399d3d3f960786f616ab6085f142a9703b0391e0))
+* **bigtable:** support requested_policy_version for Instance IAM ([#10001](https://www.github.com/googleapis/python-bigtable/issues/10001)) ([7e5d963](https://www.github.com/googleapis/python-bigtable/commit/7e5d963857fd8f7547778d5247b53c24de7a43f6)), closes [#3](https://www.github.com/googleapis/python-bigtable/issues/3)
+* update gapic-generator and go microgen, backups generated api ([#55](https://www.github.com/googleapis/python-bigtable/issues/55)) ([c38888d](https://www.github.com/googleapis/python-bigtable/commit/c38888de3d0b1c49c438a7d350f42bc1805809f2))
+
+
+### Bug Fixes
+
+* localdeps ([5d799b2](https://www.github.com/googleapis/python-bigtable/commit/5d799b2d99e79ee9d20ae6cf2663d670493a8db3))
+* test_utils ([43481a9](https://www.github.com/googleapis/python-bigtable/commit/43481a91275e93fadd22eaa7cba3891a00cb97f8))
+* **python:** change autodoc_default_flags to autodoc_default_options ([#58](https://www.github.com/googleapis/python-bigtable/issues/58)) ([5c1d618](https://www.github.com/googleapis/python-bigtable/commit/5c1d61827618d254c453b3871c0022a8d35bfbb2))
+
+
+### Documentation
+
+* add note about multiprocessing usage ([#26](https://www.github.com/googleapis/python-bigtable/issues/26)) ([1449589](https://www.github.com/googleapis/python-bigtable/commit/1449589e8b5b9037dae4e9b071ff7e7662992e18))
+* **bigtable:** clean up ([#32](https://www.github.com/googleapis/python-bigtable/issues/32)) ([9f4068c](https://www.github.com/googleapis/python-bigtable/commit/9f4068cf8eb4351c02a4862380547ecf2564d838))
+* add samples from bigtable ([#38](https://www.github.com/googleapis/python-bigtable/issues/38)) ([1121f0d](https://www.github.com/googleapis/python-bigtable/commit/1121f0d647dbfc6c70a459b0979465803fdfad7b)), closes [#371](https://www.github.com/googleapis/python-bigtable/issues/371) [#383](https://www.github.com/googleapis/python-bigtable/issues/383) [#383](https://www.github.com/googleapis/python-bigtable/issues/383) [#456](https://www.github.com/googleapis/python-bigtable/issues/456) [#456](https://www.github.com/googleapis/python-bigtable/issues/456) [#540](https://www.github.com/googleapis/python-bigtable/issues/540) [#540](https://www.github.com/googleapis/python-bigtable/issues/540) [#542](https://www.github.com/googleapis/python-bigtable/issues/542) [#542](https://www.github.com/googleapis/python-bigtable/issues/542) [#544](https://www.github.com/googleapis/python-bigtable/issues/544) [#544](https://www.github.com/googleapis/python-bigtable/issues/544) [#576](https://www.github.com/googleapis/python-bigtable/issues/576) [#599](https://www.github.com/googleapis/python-bigtable/issues/599) [#599](https://www.github.com/googleapis/python-bigtable/issues/599) [#656](https://www.github.com/googleapis/python-bigtable/issues/656) [#715](https://www.github.com/googleapis/python-bigtable/issues/715) [#715](https://www.github.com/googleapis/python-bigtable/issues/715) [#781](https://www.github.com/googleapis/python-bigtable/issues/781) [#781](https://www.github.com/googleapis/python-bigtable/issues/781) [#887](https://www.github.com/googleapis/python-bigtable/issues/887) [#887](https://www.github.com/googleapis/python-bigtable/issues/887) [#914](https://www.github.com/googleapis/python-bigtable/issues/914) [#914](https://www.github.com/googleapis/python-bigtable/issues/914) [#922](https://www.github.com/googleapis/python-bigtable/issues/922) [#922](https://www.github.com/googleapis/python-bigtable/issues/922) [#962](https://www.github.com/googleapis/python-bigtable/issues/962) [#962](https://www.github.com/googleapis/python-bigtable/issues/962) [#1004](https://www.github.com/googleapis/python-bigtable/issues/1004) [#1004](https://www.github.com/googleapis/python-bigtable/issues/1004) [#1003](https://www.github.com/googleapis/python-bigtable/issues/1003) [#1005](https://www.github.com/googleapis/python-bigtable/issues/1005) [#1005](https://www.github.com/googleapis/python-bigtable/issues/1005) [#1028](https://www.github.com/googleapis/python-bigtable/issues/1028) [#1055](https://www.github.com/googleapis/python-bigtable/issues/1055) [#1055](https://www.github.com/googleapis/python-bigtable/issues/1055) [#1055](https://www.github.com/googleapis/python-bigtable/issues/1055) [#1057](https://www.github.com/googleapis/python-bigtable/issues/1057) [#1093](https://www.github.com/googleapis/python-bigtable/issues/1093) [#1093](https://www.github.com/googleapis/python-bigtable/issues/1093) [#1093](https://www.github.com/googleapis/python-bigtable/issues/1093) [#1094](https://www.github.com/googleapis/python-bigtable/issues/1094) [#1094](https://www.github.com/googleapis/python-bigtable/issues/1094) [#1121](https://www.github.com/googleapis/python-bigtable/issues/1121) [#1121](https://www.github.com/googleapis/python-bigtable/issues/1121) [#1121](https://www.github.com/googleapis/python-bigtable/issues/1121) [#1156](https://www.github.com/googleapis/python-bigtable/issues/1156) [#1158](https://www.github.com/googleapis/python-bigtable/issues/1158) [#1158](https://www.github.com/googleapis/python-bigtable/issues/1158) [#1158](https://www.github.com/googleapis/python-bigtable/issues/1158) [#1186](https://www.github.com/googleapis/python-bigtable/issues/1186) [#1186](https://www.github.com/googleapis/python-bigtable/issues/1186) [#1186](https://www.github.com/googleapis/python-bigtable/issues/1186) [#1199](https://www.github.com/googleapis/python-bigtable/issues/1199) [#1199](https://www.github.com/googleapis/python-bigtable/issues/1199) [#1199](https://www.github.com/googleapis/python-bigtable/issues/1199) [#1254](https://www.github.com/googleapis/python-bigtable/issues/1254) [#1254](https://www.github.com/googleapis/python-bigtable/issues/1254) [#1254](https://www.github.com/googleapis/python-bigtable/issues/1254) [#1377](https://www.github.com/googleapis/python-bigtable/issues/1377) [#1377](https://www.github.com/googleapis/python-bigtable/issues/1377) [#1377](https://www.github.com/googleapis/python-bigtable/issues/1377) [#1441](https://www.github.com/googleapis/python-bigtable/issues/1441) [#1441](https://www.github.com/googleapis/python-bigtable/issues/1441) [#1441](https://www.github.com/googleapis/python-bigtable/issues/1441) [#1464](https://www.github.com/googleapis/python-bigtable/issues/1464) [#1464](https://www.github.com/googleapis/python-bigtable/issues/1464) [#1464](https://www.github.com/googleapis/python-bigtable/issues/1464) [#1549](https://www.github.com/googleapis/python-bigtable/issues/1549) [#1562](https://www.github.com/googleapis/python-bigtable/issues/1562) [#1555](https://www.github.com/googleapis/python-bigtable/issues/1555) [#1616](https://www.github.com/googleapis/python-bigtable/issues/1616) [#1616](https://www.github.com/googleapis/python-bigtable/issues/1616) [#1665](https://www.github.com/googleapis/python-bigtable/issues/1665) [#1670](https://www.github.com/googleapis/python-bigtable/issues/1670) [#1664](https://www.github.com/googleapis/python-bigtable/issues/1664) [#1674](https://www.github.com/googleapis/python-bigtable/issues/1674) [#1755](https://www.github.com/googleapis/python-bigtable/issues/1755) [#1755](https://www.github.com/googleapis/python-bigtable/issues/1755) [#1755](https://www.github.com/googleapis/python-bigtable/issues/1755) [#1764](https://www.github.com/googleapis/python-bigtable/issues/1764) [#1764](https://www.github.com/googleapis/python-bigtable/issues/1764) [#1770](https://www.github.com/googleapis/python-bigtable/issues/1770) [#1794](https://www.github.com/googleapis/python-bigtable/issues/1794) [#1846](https://www.github.com/googleapis/python-bigtable/issues/1846) [#1846](https://www.github.com/googleapis/python-bigtable/issues/1846) [#1846](https://www.github.com/googleapis/python-bigtable/issues/1846) [#1846](https://www.github.com/googleapis/python-bigtable/issues/1846) [#1846](https://www.github.com/googleapis/python-bigtable/issues/1846) [#1846](https://www.github.com/googleapis/python-bigtable/issues/1846) [#1878](https://www.github.com/googleapis/python-bigtable/issues/1878) [#1890](https://www.github.com/googleapis/python-bigtable/issues/1890) [#1980](https://www.github.com/googleapis/python-bigtable/issues/1980) [#1980](https://www.github.com/googleapis/python-bigtable/issues/1980) [#1980](https://www.github.com/googleapis/python-bigtable/issues/1980) [#1980](https://www.github.com/googleapis/python-bigtable/issues/1980) [#1980](https://www.github.com/googleapis/python-bigtable/issues/1980) [#1980](https://www.github.com/googleapis/python-bigtable/issues/1980) [#1980](https://www.github.com/googleapis/python-bigtable/issues/1980) [#2057](https://www.github.com/googleapis/python-bigtable/issues/2057) [#2057](https://www.github.com/googleapis/python-bigtable/issues/2057) [#2054](https://www.github.com/googleapis/python-bigtable/issues/2054) [#2054](https://www.github.com/googleapis/python-bigtable/issues/2054) [#2018](https://www.github.com/googleapis/python-bigtable/issues/2018) [#2018](https://www.github.com/googleapis/python-bigtable/issues/2018) [#2224](https://www.github.com/googleapis/python-bigtable/issues/2224) [#2201](https://www.github.com/googleapis/python-bigtable/issues/2201) [#2436](https://www.github.com/googleapis/python-bigtable/issues/2436) [#2436](https://www.github.com/googleapis/python-bigtable/issues/2436) [#2436](https://www.github.com/googleapis/python-bigtable/issues/2436) [#2436](https://www.github.com/googleapis/python-bigtable/issues/2436) [#2436](https://www.github.com/googleapis/python-bigtable/issues/2436) [#2436](https://www.github.com/googleapis/python-bigtable/issues/2436) [#2436](https://www.github.com/googleapis/python-bigtable/issues/2436) [#2005](https://www.github.com/googleapis/python-bigtable/issues/2005) [#2005](https://www.github.com/googleapis/python-bigtable/issues/2005) [#2005](https://www.github.com/googleapis/python-bigtable/issues/2005) [#2005](https://www.github.com/googleapis/python-bigtable/issues/2005) [#2005](https://www.github.com/googleapis/python-bigtable/issues/2005) [#2692](https://www.github.com/googleapis/python-bigtable/issues/2692) [#2692](https://www.github.com/googleapis/python-bigtable/issues/2692) [#2692](https://www.github.com/googleapis/python-bigtable/issues/2692) [#2692](https://www.github.com/googleapis/python-bigtable/issues/2692) [#2692](https://www.github.com/googleapis/python-bigtable/issues/2692) [#2692](https://www.github.com/googleapis/python-bigtable/issues/2692) [#2692](https://www.github.com/googleapis/python-bigtable/issues/2692) [#2692](https://www.github.com/googleapis/python-bigtable/issues/2692) [#3066](https://www.github.com/googleapis/python-bigtable/issues/3066) [#2707](https://www.github.com/googleapis/python-bigtable/issues/2707) [#3103](https://www.github.com/googleapis/python-bigtable/issues/3103) [#2806](https://www.github.com/googleapis/python-bigtable/issues/2806) [#2806](https://www.github.com/googleapis/python-bigtable/issues/2806) [#2806](https://www.github.com/googleapis/python-bigtable/issues/2806) [#2806](https://www.github.com/googleapis/python-bigtable/issues/2806) [#2806](https://www.github.com/googleapis/python-bigtable/issues/2806) [#2806](https://www.github.com/googleapis/python-bigtable/issues/2806) [#2806](https://www.github.com/googleapis/python-bigtable/issues/2806) [#2806](https://www.github.com/googleapis/python-bigtable/issues/2806) [#3459](https://www.github.com/googleapis/python-bigtable/issues/3459) [#3494](https://www.github.com/googleapis/python-bigtable/issues/3494) [#3070](https://www.github.com/googleapis/python-bigtable/issues/3070) [#3119](https://www.github.com/googleapis/python-bigtable/issues/3119) [#3738](https://www.github.com/googleapis/python-bigtable/issues/3738) [#3738](https://www.github.com/googleapis/python-bigtable/issues/3738) [#3738](https://www.github.com/googleapis/python-bigtable/issues/3738) [#3739](https://www.github.com/googleapis/python-bigtable/issues/3739) [#3739](https://www.github.com/googleapis/python-bigtable/issues/3739) [#3740](https://www.github.com/googleapis/python-bigtable/issues/3740) [#3783](https://www.github.com/googleapis/python-bigtable/issues/3783) [#3877](https://www.github.com/googleapis/python-bigtable/issues/3877)
+* **bigtable:** fix incorrect display_name update ([#46](https://www.github.com/googleapis/python-bigtable/issues/46)) ([1ac60be](https://www.github.com/googleapis/python-bigtable/commit/1ac60be05521b69c924118d40f88e07728a2f75e))
+* **bigtable:** remove missing argument from instance declaration ([#47](https://www.github.com/googleapis/python-bigtable/issues/47)) ([c966647](https://www.github.com/googleapis/python-bigtable/commit/c9666475dc31d581fdac0fc1c65e75ee9e27d832)), closes [#42](https://www.github.com/googleapis/python-bigtable/issues/42)
+
+## 1.2.1
+
+01-03-2020 10:05 PST
+
+
+### Implementation Changes
+- Add ability to use single-row transactions ([#10021](https://github.com/googleapis/google-cloud-python/pull/10021))
+
+## 1.2.0
+
+12-04-2019 12:21 PST
+
+
+### New Features
+- add table level IAM policy controls ([#9877](https://github.com/googleapis/google-cloud-python/pull/9877))
+- add 'client_options' / 'admin_client_options' to Client ([#9517](https://github.com/googleapis/google-cloud-python/pull/9517))
+
+### Documentation
+- change spacing in docs templates (via synth) ([#9739](https://github.com/googleapis/google-cloud-python/pull/9739))
+- add python 2 sunset banner to documentation ([#9036](https://github.com/googleapis/google-cloud-python/pull/9036))
+
+### Internal
+- add trailing commas (via synth) ([#9557](https://github.com/googleapis/google-cloud-python/pull/9557))
+
+## 1.1.0
+
+10-15-2019 06:40 PDT
+
+
+### New Features
+- Add IAM Policy methods to table admin client (via synth). ([#9172](https://github.com/googleapis/google-cloud-python/pull/9172))
+
+### Dependencies
+- Pin 'google-cloud-core >= 1.0.3, < 2.0.0dev'. ([#9445](https://github.com/googleapis/google-cloud-python/pull/9445))
+
+### Documentation
+- Fix intersphinx reference to requests ([#9294](https://github.com/googleapis/google-cloud-python/pull/9294))
+- Fix misspelling in docs. ([#9184](https://github.com/googleapis/google-cloud-python/pull/9184))
+
+## 1.0.0
+
+08-28-2019 12:49 PDT
+
+### Implementation Changes
+- Remove send/recv msg size limit (via synth). ([#8979](https://github.com/googleapis/google-cloud-python/pull/8979))
+
+### Documentation
+- Avoid creating table in 'list_tables' snippet; harden 'delete_instance' snippet. ([#8879](https://github.com/googleapis/google-cloud-python/pull/8879))
+- Add retry for DeadlineExceeded to 'test_bigtable_create_table' snippet. ([#8889](https://github.com/googleapis/google-cloud-python/pull/8889))
+- Remove compatibility badges from READMEs. ([#9035](https://github.com/googleapis/google-cloud-python/pull/9035))
+
+### Internal / Testing Changes
+- Docs: Remove CI for gh-pages, use googleapis.dev for api_core refs. ([#9085](https://github.com/googleapis/google-cloud-python/pull/9085))
+
+## 0.34.0
+
+07-30-2019 10:05 PDT
+
+
+### Implementation Changes
+- Pick up changes to GAPIC client configuration (via synth). ([#8724](https://github.com/googleapis/google-cloud-python/pull/8724))
+- Add `Cell.__repr__`. ([#8683](https://github.com/googleapis/google-cloud-python/pull/8683))
+- Increase timeout for app profile update operation. ([#8417](https://github.com/googleapis/google-cloud-python/pull/8417))
+
+### New Features
+- Add methods returning separate row types to remove confusion around return types of `row.commit`. ([#8662](https://github.com/googleapis/google-cloud-python/pull/8662))
+- Add `options_` argument to clients' `get_iam_policy` (via synth). ([#8652](https://github.com/googleapis/google-cloud-python/pull/8652))
+- Add `client_options` support, update list method docstrings (via synth). ([#8500](https://github.com/googleapis/google-cloud-python/pull/8500))
+
+### Dependencies
+- Bump minimum version for google-api-core to 1.14.0. ([#8709](https://github.com/googleapis/google-cloud-python/pull/8709))
+- Update pin for `grpc-google-iam-v1` to 0.12.3+. ([#8647](https://github.com/googleapis/google-cloud-python/pull/8647))
+- Allow kwargs to be passed to `create_channel` (via synth). ([#8458](https://github.com/googleapis/google-cloud-python/pull/8458))
+- Add `PartialRowsData.cancel`. ([#8176](https://github.com/googleapis/google-cloud-python/pull/8176))
+
+### Documentation
+- Update intersphinx mapping for requests. ([#8805](https://github.com/googleapis/google-cloud-python/pull/8805))
+- Link to googleapis.dev documentation in READMEs. ([#8705](https://github.com/googleapis/google-cloud-python/pull/8705))
+- Add compatibility check badges to READMEs. ([#8288](https://github.com/googleapis/google-cloud-python/pull/8288))
+- Add snippets illustrating use of application profiles. ([#7033](https://github.com/googleapis/google-cloud-python/pull/7033))
+
+### Internal / Testing Changes
+- Add nox session `docs` to remaining manual clients. ([#8478](https://github.com/googleapis/google-cloud-python/pull/8478))
+- All: Add docs job to publish to googleapis.dev. ([#8464](https://github.com/googleapis/google-cloud-python/pull/8464))
+- Force timeout for table creation to 90 seconds (in systests). ([#8450](https://github.com/googleapis/google-cloud-python/pull/8450))
+- Plug systest / snippet instance leaks. ([#8416](https://github.com/googleapis/google-cloud-python/pull/8416))
+- Declare encoding as utf-8 in pb2 files (via synth). ([#8346](https://github.com/googleapis/google-cloud-python/pull/8346))
+- Add disclaimer to auto-generated template files (via synth). ([#8308](https://github.com/googleapis/google-cloud-python/pull/8308))
+- Fix coverage in `types.py` (via synth). ([#8149](https://github.com/googleapis/google-cloud-python/pull/8149))
+- Integrate docstring / formatting tweaks (via synth). ([#8138](https://github.com/googleapis/google-cloud-python/pull/8138))
+- Use alabaster theme everywhere. ([#8021](https://github.com/googleapis/google-cloud-python/pull/8021))
+
+## 0.33.0
+
+05-16-2019 11:51 PDT
+
+
+### Implementation Changes
+- Fix typos in deprecation warnings. ([#7858](https://github.com/googleapis/google-cloud-python/pull/7858))
+- Add deprecation warnings for to-be-removed features. ([#7532](https://github.com/googleapis/google-cloud-python/pull/7532))
+- Remove classifier for Python 3.4 for end-of-life. ([#7535](https://github.com/googleapis/google-cloud-python/pull/7535))
+- Improve `Policy` interchange w/ JSON, gRPC payloads. ([#7378](https://github.com/googleapis/google-cloud-python/pull/7378))
+
+### New Features
+- Add support for passing `client_info` to client. ([#7876](https://github.com/googleapis/google-cloud-python/pull/7876)) and ([#7898](https://github.com/googleapis/google-cloud-python/pull/7898))
+- Add `Table.mutation_timeout`, allowing override of config timeouts. ([#7424](https://github.com/googleapis/google-cloud-python/pull/7424))
+
+### Dependencies
+- Pin `google-cloud-core >= 1.0.0, < 2.0dev`. ([#7993](https://github.com/googleapis/google-cloud-python/pull/7993))
+
+### Documentation
+- Remove duplicate snippet tags for Delete cluster. ([#7860](https://github.com/googleapis/google-cloud-python/pull/7860))
+- Fix rendering of instance admin snippets. ([#7797](https://github.com/googleapis/google-cloud-python/pull/7797))
+- Avoid leaking instances from snippets. ([#7800](https://github.com/googleapis/google-cloud-python/pull/7800))
+- Fix enum reference in documentation. ([#7724](https://github.com/googleapis/google-cloud-python/pull/7724))
+- Remove duplicate snippets. ([#7528](https://github.com/googleapis/google-cloud-python/pull/7528))
+- Add snippets for Batcher, RowData, Row Operations, AppendRow. ([#7019](https://github.com/googleapis/google-cloud-python/pull/7019))
+- Add column family snippets. ([#7014](https://github.com/googleapis/google-cloud-python/pull/7014))
+- Add Row Set snippets. ([#7016](https://github.com/googleapis/google-cloud-python/pull/7016))
+- Update client library documentation URLs. ([#7307](https://github.com/googleapis/google-cloud-python/pull/7307))
+- Fix typos in Table docstrings. ([#7261](https://github.com/googleapis/google-cloud-python/pull/7261))
+- Update copyright headers (via synth). ([#7139](https://github.com/googleapis/google-cloud-python/pull/7139))
+- Fix linked classes in generated docstrings (via synth). ([#7060](https://github.com/googleapis/google-cloud-python/pull/7060))
+
+### Internal / Testing Changes
+- Run `instance_admin` system tests on a separate instance from `table_admin` and `data` system tests. ([#6579](https://github.com/googleapis/google-cloud-python/pull/6579))
+- Re-blacken. ([#7462](https://github.com/googleapis/google-cloud-python/pull/7462))
+- Copy lintified proto files (via synth). ([#7445](https://github.com/googleapis/google-cloud-python/pull/7445))
+- Remove unused message exports (via synth). ([#7264](https://github.com/googleapis/google-cloud-python/pull/7264))
+- Compare 0 using '!=', rather than 'is not'. ([#7312](https://github.com/googleapis/google-cloud-python/pull/7312))
+- Add protos as an artifact to library ([#7205](https://github.com/googleapis/google-cloud-python/pull/7205))
+- Protoc-generated serialization update. ([#7077](https://github.com/googleapis/google-cloud-python/pull/7077))
+- Blacken snippets. ([#7048](https://github.com/googleapis/google-cloud-python/pull/7048))
+- Bigtable client snippets ([#7020](https://github.com/googleapis/google-cloud-python/pull/7020))
+- Pick up order-of-enum fix from GAPIC generator. ([#6879](https://github.com/googleapis/google-cloud-python/pull/6879))
+- Plug systest instance leaks ([#7004](https://github.com/googleapis/google-cloud-python/pull/7004))
+
+## 0.32.1
+
+12-17-2018 16:38 PST
+
+
+### Documentation
+- Document Python 2 deprecation ([#6910](https://github.com/googleapis/google-cloud-python/pull/6910))
+- Add snippets for table operations. ([#6484](https://github.com/googleapis/google-cloud-python/pull/6484))
+
+## 0.32.0
+
+12-10-2018 12:47 PST
+
+
+### Implementation Changes
+- Import `iam.policy` from `google.api_core`. ([#6741](https://github.com/googleapis/google-cloud-python/pull/6741))
+- Remove `deepcopy` from `PartialRowData.cells` property. ([#6648](https://github.com/googleapis/google-cloud-python/pull/6648))
+- Pick up fixes to GAPIC generator. ([#6630](https://github.com/googleapis/google-cloud-python/pull/6630))
+
+### Dependencies
+- Update dependency to google-cloud-core ([#6835](https://github.com/googleapis/google-cloud-python/pull/6835))
+
+### Internal / Testing Changes
+- Blacken all gen'd libs ([#6792](https://github.com/googleapis/google-cloud-python/pull/6792))
+- Omit local deps ([#6701](https://github.com/googleapis/google-cloud-python/pull/6701))
+- Run black at end of synth.py ([#6698](https://github.com/googleapis/google-cloud-python/pull/6698))
+- Blackening Continued... ([#6667](https://github.com/googleapis/google-cloud-python/pull/6667))
+- Add templates for flake8, coveragerc, noxfile, and black. ([#6642](https://github.com/googleapis/google-cloud-python/pull/6642))
+
+## 0.31.1
+
+11-02-2018 08:13 PDT
+
+### Implementation Changes
+- Fix anonymous usage under Bigtable emulator ([#6385](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6385))
+- Support `DirectRow` without a `Table` ([#6336](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6336))
+- Add retry parameter to `Table.read_rows()`. ([#6281](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6281))
+- Fix `ConditionalRow` interaction with `check_and_mutate_row` ([#6296](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6296))
+- Deprecate `channel` arg to `Client` ([#6279](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6279))
+
+### Dependencies
+- Update dependency: `google-api-core >= 1.4.1` ([#6391](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6391))
+- Update IAM version in dependencies ([#6362](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6362))
+
+### Documentation
+- Add `docs/snippets.py` and test ([#6012](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6012))
+- Normalize use of support level badges ([#6159](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6159))
+
+### Internal / Testing Changes
+- Fix client_info bug, update docstrings and timeouts. ([#6406](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6406))
+- Remove now-spurious fixup from 'synth.py'. ([#6400](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6400))
+- Fix flaky systests / snippets ([#6367](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6367))
+- Add explicit coverage for `row_data._retry_read_rows_exception`. ([#6364](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6364))
+- Fix instance IAM test methods ([#6343](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6343))
+- Fix error from new flake8 version. ([#6309](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6309))
+- Use new Nox ([#6175](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6175))
+
+## 0.31.0
+
+### New Features
+- Upgrade support level from `alpha` to `beta`. ([#6129](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6129))
+
+### Implementation Changes
+- Improve admin operation timeouts. ([#6010](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6010))
+
+### Documentation
+- Prepare docs for repo split. ([#6014](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6014))
+
+### Internal / Testing Changes
+- Refactor `read_row` to call `read_rows` ([#6137](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6137))
+- Harden instance teardown against '429 Too Many Requests'. ([#6102](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6102))
+- Add `{RowSet,RowRange}.{__eq__,__ne__}` ([#6025](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6025))
+- Regenerate low-level GAPIC code ([#6036](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6036))
+
+## 0.30.2
+
+### New Features
+- Add iam policy implementation for an instance. (#5838)
+
+### Implementation Changes
+- Fix smart retries for 'read_rows()' when reading the full table (#5966)
+
+### Documentation
+- Replace links to `/stable/` with `/latest/`. (#5901)
+
+### Internal / Testing Changes
+- Re-generate library using bigtable/synth.py (#5974)
+- Refactor `read_rows` infrastructure (#5963)
+
+## 0.30.1
+
+### Implementation changes
+
+- Fix non-admin access to table data. (#5875)
+- Synth bigtable and bigtable admin GAPIC clients. (#5867)
+
+### Testing and internal changes
+
+- Nox: use in-place installs for local packages. (#5865)
+
+## 0.30.0
+
+### New Features
+
+- Improve performance and capabilities of reads: `read_rows` now returns a generator, retries automatically, and can read an arbitrary set of keys and ranges
+ - Consolidate read_rows and yield_rows (#5840)
+ - Implement row set for yield_rows (#5506)
+ - Improve read rows validation performance (#5390)
+ - Add retry for yield_rows (#4882)
+ - Require TimestampRanges to be milliseconds granularity (#5002)
+ - Provide better access to cell values (#4908)
+ - Add data app profile id (#5369)
+
+- Improve writes: Writes are usable in Beam
+ - Create MutationBatcher for bigtable (#5651)
+ - Allow DirectRow to be created without a table (#5567)
+ - Add data app profile id (#5369)
+
+- Improve table admin: table creation can now also create column families in a single RPC. Add an `exists()` method. Add `get_cluster_states` for information about replication
+ - Add 'Table.get_cluster_states' method (#5790)
+ - Optimize 'Table.exists' performance (#5749)
+ - Add column creation in 'Table.create()'. (#5576)
+ - Add 'Table.exists' method (#5545)
+ - Add split keys on create table - v2 (#5513)
+ - Avoid sharing table names across unrelated systests. (#5421)
+ - Add truncate table and drop by prefix on top of GAPIC integration (#5360)
+
+- Improve instance admin: Instance creation allows for the creation of multiple clusters. Instance label management is now enabled.
+ - Create app_profile_object (#5782)
+ - Add 'Instance.exists' method (#5802)
+ - Add 'InstanceAdminClient.list_clusters' method (#5715)
+ - Add 'Instance._state' property (#5736)
+ - Convert 'instance.labels' to return a dictionary (#5728)
+ - Reshape cluster.py, adding cluster() factory to instance.py (#5663)
+ - Convert 'Instance.update' to use 'instance.partial_instance_update' API (#5643)
+ - Refactor 'InstanceAdminClient.update_app_profile' to remove update_mask argument (#5684)
+ - Add the ability to create an instance with multiple clusters (#5622)
+ - Add 'instance_type', 'labels' to 'Instance' ctor (#5614)
+ - Add optional app profile to 'Instance.table' (#5605)
+ - Clean up Instance creation. (#5542)
+ - Make 'InstanceAdminClient.list_instances' return actual instance objects, not protos. (#5420)
+ - Add admin app profile methods on Instance (#5315)
+
+### Internal / Testing Changes
+- Rename releases to changelog and include from CHANGELOG.md (#5191)
+- Fix bad trove classifier
+- Integrate new generated low-level client (#5178)
+- Override gRPC max message lengths. (#5498)
+- Use client properties rather than private attrs (#5398)
+- Fix the broken Bigtable system test. (#5607)
+- Fix Py3 breakage in new system test. (#5474)
+- Modify system test for new GAPIC code (#5302)
+- Add Test runs for Python 3.7 and remove 3.4 (#5295)
+- Disable Bigtable system tests (#5381)
+- Modify system tests to use prerelease versions of grpcio (#5304)
+- Pass through 'session.posargs' when running Bigtable system tests. (#5418)
+- Harden 'test_list_instances' against simultaneous test runs. (#5476)
+- Shorten instance / cluster name to fix CI breakage. (#5641)
+- Fix failing systest: 'test_create_instance_w_two_clusters'. (#5836)
+- Add labels {'python-system': ISO-timestamp} to systest instances (#5729)
+- Shorten cluster ID in system test (#5719)
+- Harden 'test_list_instances' further. (#5696)
+- Improve testing of create instance (#5544)
+
+## 0.29.0
+
+### New features
+
+- Use `api_core.retry` for `mutate_row` (#4665, #4341)
+- Added a row generator on a table. (#4679)
+
+### Implementation changes
+
+- Remove gax usage from BigTable (#4873)
+- BigTable: Cell.from_pb() performance improvement (#4745)
+
+### Dependencies
+
+- Update dependency range for api-core to include v1.0.0 releases (#4944)
+
+### Documentation
+
+- Minor typo (#4758)
+- Row filter end points documentation error (#4667)
+- Removing "rename" from bigtable table.py comments (#4526)
+- Small docs/hygiene tweaks after #4256. (#4333)
+
+### Testing and internal changes
+
+- Install local dependencies when running lint (#4936)
+- Re-enable lint for tests, remove usage of pylint (#4921)
+- Normalize all setup.py files (#4909)
+- Timestamp system test fix (#4765)
+
+## 0.28.1
+
+### Implementation Changes
+
+- Bugfix: Distinguish between an unset column qualifier and an empty string
+ column qualifier while parsing a `ReadRows` response (#4252)
+
+### Features added
+
+- Add a ``retry`` strategy that will be used for retry-able errors
+ in ``Table.mutate_rows``. This will be used for gRPC errors of type
+ ``ABORTED``, ``DEADLINE_EXCEEDED`` and ``SERVICE_UNAVAILABLE``. (#4256)
+
+PyPI: https://pypi.org/project/google-cloud-bigtable/0.28.1/
+
+## 0.28.0
+
+### Documentation
+
+- Fixed referenced types in `Table.row` docstring (#3934, h/t to
+ @MichaelTamm)
+- Added link to "Python Development Environment Setup Guide" in
+ project README (#4187, h/t to @michaelawyu)
+
+### Dependencies
+
+- Upgrading to `google-cloud-core >= 0.28.0` and adding dependency
+ on `google-api-core` (#4221, #4280)
+
+PyPI: https://pypi.org/project/google-cloud-bigtable/0.28.0/
diff --git a/packages/google-cloud-bigtable/CODE_OF_CONDUCT.md b/packages/google-cloud-bigtable/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000000..039f43681204
--- /dev/null
+++ b/packages/google-cloud-bigtable/CODE_OF_CONDUCT.md
@@ -0,0 +1,95 @@
+
+# Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of
+experience, education, socio-economic status, nationality, personal appearance,
+race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, or to ban temporarily or permanently any
+contributor for other behaviors that they deem inappropriate, threatening,
+offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+This Code of Conduct also applies outside the project spaces when the Project
+Steward has a reasonable belief that an individual's behavior may have a
+negative impact on the project or its community.
+
+## Conflict Resolution
+
+We do not believe that all conflict is bad; healthy debate and disagreement
+often yield positive results. However, it is never okay to be disrespectful or
+to engage in behavior that violates the project’s code of conduct.
+
+If you see someone violating the code of conduct, you are encouraged to address
+the behavior directly with those involved. Many issues can be resolved quickly
+and easily, and this gives people more control over the outcome of their
+dispute. If you are unable to resolve the matter for any reason, or if the
+behavior is threatening or harassing, report it. We are dedicated to providing
+an environment where participants feel welcome and safe.
+
+
+Reports should be directed to *googleapis-stewards@google.com*, the
+Project Steward(s) for *Google Cloud Client Libraries*. It is the Project Steward’s duty to
+receive and address reported violations of the code of conduct. They will then
+work with a committee consisting of representatives from the Open Source
+Programs Office and the Google Open Source Strategy team. If for any reason you
+are uncomfortable reaching out to the Project Steward, please email
+opensource@google.com.
+
+We will investigate every complaint, but you may not receive a direct response.
+We will use our discretion in determining when and how to follow up on reported
+incidents, which may range from not taking action to permanent expulsion from
+the project and project-sponsored spaces. We will notify the accused of the
+report and provide them an opportunity to discuss it before any action is taken.
+The identity of the reporter will be omitted from the details of the report
+supplied to the accused. In potentially harmful situations, such as ongoing
+harassment or threats to anyone's safety, we may take action without notice.
+
+## Attribution
+
+This Code of Conduct is adapted from the Contributor Covenant, version 1.4,
+available at
+https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
\ No newline at end of file
diff --git a/packages/google-cloud-bigtable/CONTRIBUTING.rst b/packages/google-cloud-bigtable/CONTRIBUTING.rst
new file mode 100644
index 000000000000..07ac8f2187fc
--- /dev/null
+++ b/packages/google-cloud-bigtable/CONTRIBUTING.rst
@@ -0,0 +1,287 @@
+.. Generated by synthtool. DO NOT EDIT!
+############
+Contributing
+############
+
+#. **Please sign one of the contributor license agreements below.**
+#. Fork the repo, develop and test your code changes, add docs.
+#. Make sure that your commit messages clearly describe the changes.
+#. Send a pull request. (Please Read: `Faster Pull Request Reviews`_)
+
+.. _Faster Pull Request Reviews: https://github.com/kubernetes/community/blob/master/contributors/guide/pull-requests.md#best-practices-for-faster-reviews
+
+.. contents:: Here are some guidelines for hacking on the Google Cloud Client libraries.
+
+***************
+Adding Features
+***************
+
+In order to add a feature:
+
+- The feature must be documented in both the API and narrative
+ documentation.
+
+- The feature must work fully on the following CPython versions:
+ 3.7, 3.8, 3.9, 3.10, 3.11, 3.12, 3.13 and 3.14 on both UNIX and Windows.
+
+- The feature must not add unnecessary dependencies (where
+ "unnecessary" is of course subjective, but new dependencies should
+ be discussed).
+
+****************************
+Using a Development Checkout
+****************************
+
+You'll have to create a development environment using a Git checkout:
+
+- While logged into your GitHub account, navigate to the
+ ``python-bigtable`` `repo`_ on GitHub.
+
+- Fork and clone the ``python-bigtable`` repository to your GitHub account by
+ clicking the "Fork" button.
+
+- Clone your fork of ``python-bigtable`` from your GitHub account to your local
+ computer, substituting your account username and specifying the destination
+ as ``hack-on-python-bigtable``. E.g.::
+
+ $ cd ${HOME}
+ $ git clone git@github.com:USERNAME/python-bigtable.git hack-on-python-bigtable
+ $ cd hack-on-python-bigtable
+ # Configure remotes such that you can pull changes from the googleapis/python-bigtable
+ # repository into your local repository.
+ $ git remote add upstream git@github.com:googleapis/python-bigtable.git
+ # fetch and merge changes from upstream into main
+ $ git fetch upstream
+ $ git merge upstream/main
+
+Now your local repo is set up such that you will push changes to your GitHub
+repo, from which you can submit a pull request.
+
+To work on the codebase and run the tests, we recommend using ``nox``,
+but you can also use a ``virtualenv`` of your own creation.
+
+.. _repo: https://github.com/googleapis/python-bigtable
+
+Using ``nox``
+=============
+
+We use `nox <https://pypi.org/project/nox/>`__ to instrument our tests.
+
+- To test your changes, run unit tests with ``nox``::
+
+    $ nox -s unit
+
+- To run a single unit test::
+
+    $ nox -s unit-3.14 -- -k <name of test>
+
+
+ .. note::
+
+ The unit tests and system tests are described in the
+ ``noxfile.py`` files in each directory.
+
+.. nox: https://pypi.org/project/nox/
+
+*****************************************
+I'm getting weird errors... Can you help?
+*****************************************
+
+If the error mentions ``Python.h`` not being found,
+install ``python-dev`` and try again.
+On Debian/Ubuntu::
+
+ $ sudo apt-get install python-dev
+
+************
+Coding Style
+************
+
+- We use the automatic code formatter ``black``. You can run it using
+ the nox session ``blacken``. This will eliminate many lint errors. Run via::
+
+ $ nox -s blacken
+
+- PEP8 compliance is required, with exceptions defined in the linter configuration.
+ If you have ``nox`` installed, you can test that you have not introduced
+ any non-compliant code via::
+
+ $ nox -s lint
+
+- In order to make ``nox -s lint`` run faster, you can set some environment
+ variables::
+
+ export GOOGLE_CLOUD_TESTING_REMOTE="upstream"
+ export GOOGLE_CLOUD_TESTING_BRANCH="main"
+
+ By doing this, you are specifying the location of the most up-to-date
+ version of ``python-bigtable``. The
+ remote name ``upstream`` should point to the official ``googleapis``
+ checkout and the branch should be the default branch on that remote (``main``).
+
+- This repository contains configuration for the
+  `pre-commit <https://pre-commit.com/>`__ tool, which automates checking
+ our linters during a commit. If you have it installed on your ``$PATH``,
+ you can enable enforcing those checks via:
+
+.. code-block:: bash
+
+ $ pre-commit install
+ pre-commit installed at .git/hooks/pre-commit
+
+Exceptions to PEP8:
+
+- Many unit tests use a helper method, ``_call_fut`` ("FUT" is short for
+ "Function-Under-Test"), which is PEP8-incompliant, but more readable.
+ Some also use a local variable, ``MUT`` (short for "Module-Under-Test").
+
+********************
+Running System Tests
+********************
+
+- To run system tests, you can execute::
+
+ # Run all system tests
+ $ nox -s system
+
+ # Run a single system test
+   $ nox -s system-3.9 -- -k <name of test>
+
+
+ .. note::
+
+ System tests are only configured to run under Python 3.9.
+ For expediency, we do not run them in older versions of Python 3.
+
+ This alone will not run the tests. You'll need to change some local
+ auth settings and change some configuration in your project to
+ run all the tests.
+
+- System tests will be run against an actual project. You should use local credentials from gcloud when possible. See `Best practices for application authentication <https://cloud.google.com/docs/authentication/best-practices-applications>`__. Some tests require a service account. For those tests see `Authenticating as a service account <https://cloud.google.com/docs/authentication/production>`__.
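+
+  As a quick sanity check that your local gcloud credentials and project are
+  wired up correctly, you can run a short snippet first (a minimal sketch only;
+  ``my-project`` and ``my-instance`` are placeholders, not values used by the
+  test suite)::
+
+      from google.cloud import bigtable
+
+      # Application Default Credentials (e.g. from gcloud) are picked up here.
+      client = bigtable.Client(project="my-project", admin=True)
+      instance = client.instance("my-instance")
+      print(instance.exists())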
+
+*************
+Test Coverage
+*************
+
+- The codebase *must* have 100% test statement coverage after each commit.
+ You can test coverage via ``nox -s cover``.
+
+******************************************************
+Documentation Coverage and Building HTML Documentation
+******************************************************
+
+If you fix a bug, and the bug requires an API or behavior modification, all
+documentation in this package which references that API or behavior must be
+changed to reflect the bug fix, ideally in the same commit that fixes the bug
+or adds the feature.
+
+Build the docs via::
+
+ $ nox -s docs
+
+*************************
+Samples and code snippets
+*************************
+
+Code samples and snippets live in the `samples/` directory. Feel free to
+provide more examples, but make sure to write tests for those examples.
+Each folder containing example code requires its own `noxfile.py` script
+which automates testing. If you decide to create a new folder, you can
+base it on the `samples/snippets` folder (providing `noxfile.py` and
+the requirements files).
+
+The tests will run against a real Google Cloud Project, so you should
+configure them just like the System Tests.
+
+- To run sample tests, you can execute::
+
+ # Run all tests in a folder
+ $ cd samples/snippets
+ $ nox -s py-3.8
+
+ # Run a single sample test
+ $ cd samples/snippets
+   $ nox -s py-3.8 -- -k <name of test>
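+
+For orientation, the snippets in these folders boil down to short read and
+write calls against a table, along the lines of the following sketch
+(``my-instance``, ``my-table``, and the ``cf1`` column family are placeholders
+that must already exist in your test project)::
+
+    from google.cloud import bigtable
+
+    # The project is taken from your environment / Application Default Credentials.
+    client = bigtable.Client()
+    table = client.instance("my-instance").table("my-table")
+
+    # Write a single cell, then read it back.
+    row = table.direct_row(b"row-key-1")
+    row.set_cell("cf1", b"greeting", b"hello world")
+    row.commit()
+
+    print(table.read_row(b"row-key-1").cell_value("cf1", b"greeting"))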
+
+********************************************
+Note About ``README`` as it pertains to PyPI
+********************************************
+
+The `description on PyPI`_ for the project comes directly from the
+``README``. Due to the reStructuredText (``rst``) parser used by
+PyPI, relative links which will work on GitHub (e.g. ``CONTRIBUTING.rst``
+instead of
+``https://github.com/googleapis/python-bigtable/blob/main/CONTRIBUTING.rst``)
+may cause problems creating links or rendering the description.
+
+.. _description on PyPI: https://pypi.org/project/google-cloud-bigtable
+
+
+*************************
+Supported Python Versions
+*************************
+
+We support:
+
+- `Python 3.7`_
+- `Python 3.8`_
+- `Python 3.9`_
+- `Python 3.10`_
+- `Python 3.11`_
+- `Python 3.12`_
+- `Python 3.13`_
+- `Python 3.14`_
+
+.. _Python 3.7: https://docs.python.org/3.7/
+.. _Python 3.8: https://docs.python.org/3.8/
+.. _Python 3.9: https://docs.python.org/3.9/
+.. _Python 3.10: https://docs.python.org/3.10/
+.. _Python 3.11: https://docs.python.org/3.11/
+.. _Python 3.12: https://docs.python.org/3.12/
+.. _Python 3.13: https://docs.python.org/3.13/
+.. _Python 3.14: https://docs.python.org/3.14/
+
+
+Supported versions can be found in our ``noxfile.py`` `config`_.
+
+.. _config: https://github.com/googleapis/python-bigtable/blob/main/noxfile.py
+
+
+We also explicitly decided to support Python 3 beginning with version 3.7.
+Reasons for this include:
+
+- Encouraging use of newest versions of Python 3
+- Taking the lead of `prominent`_ open-source `projects`_
+- `Unicode literal support`_ which allows for a cleaner codebase that
+ works in both Python 2 and Python 3
+
+.. _prominent: https://docs.djangoproject.com/en/1.9/faq/install/#what-python-version-can-i-use-with-django
+.. _projects: http://flask.pocoo.org/docs/0.10/python3/
+.. _Unicode literal support: https://www.python.org/dev/peps/pep-0414/
+
+**********
+Versioning
+**********
+
+This library follows `Semantic Versioning`_.
+
+.. _Semantic Versioning: http://semver.org/
+
+Some packages are currently in major version zero (``0.y.z``), which means that
+anything may change at any time and the public API should not be considered
+stable.
+
+******************************
+Contributor License Agreements
+******************************
+
+Before we can accept your pull requests you'll need to sign a Contributor
+License Agreement (CLA):
+
+- **If you are an individual writing original source code** and **you own the
+ intellectual property**, then you'll need to sign an
+ `individual CLA `__.
+- **If you work for a company that wants to allow you to contribute your work**,
+ then you'll need to sign a
+ `corporate CLA `__.
+
+You can sign these electronically (just scroll to the bottom). After that,
+we'll be able to accept your pull requests.
diff --git a/packages/google-cloud-bigtable/LICENSE b/packages/google-cloud-bigtable/LICENSE
new file mode 100644
index 000000000000..d64569567334
--- /dev/null
+++ b/packages/google-cloud-bigtable/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/packages/google-cloud-bigtable/MANIFEST.in b/packages/google-cloud-bigtable/MANIFEST.in
new file mode 100644
index 000000000000..d6814cd60037
--- /dev/null
+++ b/packages/google-cloud-bigtable/MANIFEST.in
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Generated by synthtool. DO NOT EDIT!
+include README.rst LICENSE
+recursive-include google *.json *.proto py.typed
+recursive-include tests *
+global-exclude *.py[co]
+global-exclude __pycache__
+
+# Exclude scripts for samples readmegen
+prune scripts/readme-gen
diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst
new file mode 100644
index 000000000000..2ecbd0185ae7
--- /dev/null
+++ b/packages/google-cloud-bigtable/README.rst
@@ -0,0 +1,120 @@
+Python Client for Google Cloud Bigtable
+=======================================
+
+|GA| |pypi| |versions|
+
+`Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the
+same database that powers many core Google services, including Search,
+Analytics, Maps, and Gmail.
+
+- `Client Library Documentation`_
+- `Product Documentation`_
+
+.. |GA| image:: https://img.shields.io/badge/support-GA-gold.svg
+ :target: https://github.com/googleapis/google-cloud-python/blob/main/README.rst#general-availability
+.. |pypi| image:: https://img.shields.io/pypi/v/google-cloud-bigtable.svg
+ :target: https://pypi.org/project/google-cloud-bigtable/
+.. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-bigtable.svg
+ :target: https://pypi.org/project/google-cloud-bigtable/
+.. _Google Cloud Bigtable: https://cloud.google.com/bigtable
+.. _Client Library Documentation: https://googleapis.dev/python/bigtable/latest
+.. _Product Documentation: https://cloud.google.com/bigtable/docs
+
+
+Async Data Client
+-------------------------
+
+:code:`v2.23.0` includes a release of the new :code:`BigtableDataClientAsync` client, accessible at the import path
+:code:`google.cloud.bigtable.data`.
+
+The new client brings a simplified API and increased performance using asyncio.
+The new client is focused on the data API (i.e. reading and writing Bigtable data), with admin operations
+remaining exclusively in the existing synchronous client.
+
+Feedback and bug reports are welcome at cbt-python-client-v3-feedback@google.com,
+or through the GitHub `issue tracker`_.
+
+
+ .. note::
+
+ It is generally not recommended to use the async client in an otherwise synchronous codebase. To make use of asyncio's
+ performance benefits, the codebase should be designed to be async from the ground up.
+
+
+.. _issue tracker: https://github.com/googleapis/python-bigtable/issues
+
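+A minimal usage sketch (the project, instance, table, and row key below are
+placeholder values):
+
+.. code-block:: python
+
+ import asyncio
+
+ from google.cloud.bigtable.data import BigtableDataClientAsync
+
+ async def main():
+     # The client is an async context manager; exiting it closes the
+     # underlying gRPC channels.
+     async with BigtableDataClientAsync(project="my-project") as client:
+         table = client.get_table("my-instance", "my-table")
+         # Returns a Row (a sequence of cells), or None if the key is absent.
+         row = await table.read_row(b"row-key")
+         if row is not None:
+             for cell in row:
+                 print(cell.family, cell.qualifier, cell.value)
+
+ asyncio.run(main())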
+
+Quick Start
+-----------
+
+In order to use this library, you first need to go through the following steps:
+
+1. `Select or create a Cloud Platform project.`_
+2. `Enable billing for your project.`_
+3. `Enable the Cloud Bigtable API.`_
+4. `Setup Authentication.`_
+
+.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project
+.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project
+.. _Enable the Cloud Bigtable API.: https://cloud.google.com/bigtable
+.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html
+
+Installation
+~~~~~~~~~~~~
+
+Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
+create isolated Python environments. The basic problem it addresses is one of
+dependencies and versions, and indirectly permissions.
+
+With `virtualenv`_, it's possible to install this library without needing system
+install permissions, and without clashing with the installed system
+dependencies.
+
+.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/
+
+
+Supported Python Versions
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Python >= 3.7
+
+Deprecated Python Versions
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Python 2.7: the last released version which supported Python 2.7 was
+ version 1.7.0, released 2021-02-09.
+
+- Python 3.5: the last released version which supported Python 3.5 was
+ version 1.7.0, released 2021-02-09.
+
+- Python 3.6: the last released version which supported Python 3.6 was
+ version v2.10.1, released 2022-06-03.
+
+Mac/Linux
+^^^^^^^^^
+
+.. code-block:: console
+
+ pip install virtualenv
+ virtualenv <your-env>
+ source <your-env>/bin/activate
+ <your-env>/bin/pip install google-cloud-bigtable
+
+
+Windows
+^^^^^^^
+
+.. code-block:: console
+
+ pip install virtualenv
+ virtualenv <your-env>
+ <your-env>\Scripts\activate
+ <your-env>\Scripts\pip.exe install google-cloud-bigtable
+
+Next Steps
+~~~~~~~~~~
+
+- Read the `Client Library Documentation`_ for Cloud Bigtable API
+ to see other available methods on the client.
+- Read the `Product documentation`_ to learn
+ more about the product and see How-to Guides.
diff --git a/packages/google-cloud-bigtable/SECURITY.md b/packages/google-cloud-bigtable/SECURITY.md
new file mode 100644
index 000000000000..8b58ae9c01ae
--- /dev/null
+++ b/packages/google-cloud-bigtable/SECURITY.md
@@ -0,0 +1,7 @@
+# Security Policy
+
+To report a security issue, please use [g.co/vulnz](https://g.co/vulnz).
+
+The Google Security Team will respond within 5 working days of your report on g.co/vulnz.
+
+We use g.co/vulnz for intake, and we coordinate and disclose issues here on GitHub using GitHub Security Advisories to privately discuss and fix them.
diff --git a/packages/google-cloud-bigtable/docs/README.rst b/packages/google-cloud-bigtable/docs/README.rst
new file mode 120000
index 000000000000..89a0106941ff
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/README.rst
@@ -0,0 +1 @@
+../README.rst
\ No newline at end of file
diff --git a/packages/google-cloud-bigtable/docs/_static/custom.css b/packages/google-cloud-bigtable/docs/_static/custom.css
new file mode 100644
index 000000000000..b0a295464b23
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/_static/custom.css
@@ -0,0 +1,20 @@
+div#python2-eol {
+ border-color: red;
+ border-width: medium;
+}
+
+/* Ensure minimum width for 'Parameters' / 'Returns' column */
+dl.field-list > dt {
+ min-width: 100px
+}
+
+/* Insert space between methods for readability */
+dl.method {
+ padding-top: 10px;
+ padding-bottom: 10px
+}
+
+/* Insert empty space between classes */
+dl.class {
+ padding-bottom: 50px
+}
diff --git a/packages/google-cloud-bigtable/docs/_templates/layout.html b/packages/google-cloud-bigtable/docs/_templates/layout.html
new file mode 100644
index 000000000000..6316a537f72b
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/_templates/layout.html
@@ -0,0 +1,50 @@
+
+{% extends "!layout.html" %}
+{%- block content %}
+{%- if theme_fixed_sidebar|lower == 'true' %}
+
+ As of January 1, 2020 this library no longer supports Python 2 on the latest released version.
+ Library versions released prior to that date will continue to be available. For more information please
+ visit Python 2 support on Google Cloud.
+
+{%- else %}
+{{ super() }}
+{%- endif %}
+{%- endblock %}
diff --git a/packages/google-cloud-bigtable/docs/admin_client/admin_client_usage.rst b/packages/google-cloud-bigtable/docs/admin_client/admin_client_usage.rst
new file mode 100644
index 000000000000..8c6f4a5dc508
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/admin_client/admin_client_usage.rst
@@ -0,0 +1,11 @@
+Admin Client
+============
+.. toctree::
+ :maxdepth: 2
+
+ services_
+ types_
+
+..
+ This should be the only handwritten RST file in this directory.
+ Everything else should be autogenerated.
diff --git a/packages/google-cloud-bigtable/docs/admin_client/bigtable_instance_admin.rst b/packages/google-cloud-bigtable/docs/admin_client/bigtable_instance_admin.rst
new file mode 100644
index 000000000000..42f7caad7cb1
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/admin_client/bigtable_instance_admin.rst
@@ -0,0 +1,10 @@
+BigtableInstanceAdmin
+---------------------------------------
+
+.. automodule:: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin
+ :members:
+ :inherited-members:
+
+.. automodule:: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers
+ :members:
+ :inherited-members:
diff --git a/packages/google-cloud-bigtable/docs/admin_client/bigtable_table_admin.rst b/packages/google-cloud-bigtable/docs/admin_client/bigtable_table_admin.rst
new file mode 100644
index 000000000000..0fa4b276a616
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/admin_client/bigtable_table_admin.rst
@@ -0,0 +1,10 @@
+BigtableTableAdmin
+------------------------------------
+
+.. automodule:: google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin
+ :members:
+ :inherited-members:
+
+.. automodule:: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers
+ :members:
+ :inherited-members:
diff --git a/packages/google-cloud-bigtable/docs/admin_client/services_.rst b/packages/google-cloud-bigtable/docs/admin_client/services_.rst
new file mode 100644
index 000000000000..ea55c7da14a3
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/admin_client/services_.rst
@@ -0,0 +1,7 @@
+Services for Google Cloud Bigtable Admin v2 API
+===============================================
+.. toctree::
+ :maxdepth: 2
+
+ bigtable_instance_admin
+ bigtable_table_admin
diff --git a/packages/google-cloud-bigtable/docs/admin_client/types_.rst b/packages/google-cloud-bigtable/docs/admin_client/types_.rst
new file mode 100644
index 000000000000..ef32b9684bd4
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/admin_client/types_.rst
@@ -0,0 +1,10 @@
+Types for Google Cloud Bigtable Admin v2 API
+============================================
+
+.. automodule:: google.cloud.bigtable_admin_v2.types
+ :members:
+ :show-inheritance:
+
+.. automodule:: google.cloud.bigtable_admin_v2.overlay.types
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/classic_client/app-profile.rst b/packages/google-cloud-bigtable/docs/classic_client/app-profile.rst
new file mode 100644
index 000000000000..5c9d426c2062
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/classic_client/app-profile.rst
@@ -0,0 +1,6 @@
+App Profile
+~~~~~~~~~~~
+
+.. automodule:: google.cloud.bigtable.app_profile
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/classic_client/backup.rst b/packages/google-cloud-bigtable/docs/classic_client/backup.rst
new file mode 100644
index 000000000000..e75abd43143c
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/classic_client/backup.rst
@@ -0,0 +1,6 @@
+Backup
+~~~~~~~~
+
+.. automodule:: google.cloud.bigtable.backup
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/classic_client/batcher.rst b/packages/google-cloud-bigtable/docs/classic_client/batcher.rst
new file mode 100644
index 000000000000..9ac335be1841
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/classic_client/batcher.rst
@@ -0,0 +1,6 @@
+Mutations Batching
+~~~~~~~~~~~~~~~~~~
+
+.. automodule:: google.cloud.bigtable.batcher
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/classic_client/client-intro.rst b/packages/google-cloud-bigtable/docs/classic_client/client-intro.rst
new file mode 100644
index 000000000000..2420684996a5
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/classic_client/client-intro.rst
@@ -0,0 +1,90 @@
+Base for Everything
+===================
+
+To use the API, the :class:`Client `
+class defines a high-level interface which handles authorization
+and creating other objects:
+
+.. code:: python
+
+ from google.cloud.bigtable.client import Client
+ client = Client()
+
+Long-lived Defaults
+-------------------
+
+When creating a :class:`Client `, the
+``user_agent`` argument has a sensible default
+(:data:`DEFAULT_USER_AGENT `).
+However, you may override it, and the value will be used throughout all API
+requests made with the ``client`` you create.
+
+Configuration
+-------------
+
+- For an overview of authentication in ``google-cloud-python``,
+ see `Authentication `_.
+
+- In addition to any authentication configuration, you can also set the
+ :envvar:`GOOGLE_CLOUD_PROJECT` environment variable for the Google Cloud Console
+ project you'd like to interact with. If your code is running in Google App
+ Engine or Google Compute Engine the project will be detected automatically.
+ (Setting this environment variable is not required, you may instead pass the
+ ``project`` explicitly when constructing a
+ :class:`Client `).
+
+- After configuring your environment, create a
+ :class:`Client `
+
+ .. code::
+
+ >>> from google.cloud import bigtable
+ >>> client = bigtable.Client()
+
+ or pass in ``credentials`` and ``project`` explicitly
+
+ .. code::
+
+ >>> from google.cloud import bigtable
+ >>> client = bigtable.Client(project='my-project', credentials=creds)
+
+.. tip::
+
+ Be sure to use the **Project ID**, not the **Project Number**.
+
+Admin API Access
+----------------
+
+If you'll be using your client to make `Instance Admin`_ and `Table Admin`_
+API requests, you'll need to pass the ``admin`` argument:
+
+.. code:: python
+
+ client = bigtable.Client(admin=True)
+
+Read-Only Mode
+--------------
+
+If, on the other hand, you only have (or want) read access to the data,
+you can pass the ``read_only`` argument:
+
+.. code:: python
+
+ client = bigtable.Client(read_only=True)
+
+This will ensure that the
+:data:`READ_ONLY_SCOPE ` is used
+for API requests (so any accidental requests that would modify data will
+fail).
+
+Next Step
+---------
+
+After a :class:`Client `, the next highest-level
+object is an :class:`Instance `. You'll need
+one before you can interact with tables or data.
+
+Head next to learn about the :doc:`instance-api`.
+
+.. _Instance Admin: https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto
+.. _Table Admin: https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto
diff --git a/packages/google-cloud-bigtable/docs/classic_client/client.rst b/packages/google-cloud-bigtable/docs/classic_client/client.rst
new file mode 100644
index 000000000000..c48595c8ac0b
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/classic_client/client.rst
@@ -0,0 +1,6 @@
+Client
+~~~~~~
+
+.. automodule:: google.cloud.bigtable.client
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/classic_client/cluster.rst b/packages/google-cloud-bigtable/docs/classic_client/cluster.rst
new file mode 100644
index 000000000000..ad33aae5e0b8
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/classic_client/cluster.rst
@@ -0,0 +1,6 @@
+Cluster
+~~~~~~~
+
+.. automodule:: google.cloud.bigtable.cluster
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/classic_client/column-family.rst b/packages/google-cloud-bigtable/docs/classic_client/column-family.rst
new file mode 100644
index 000000000000..de6c1eb1f5df
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/classic_client/column-family.rst
@@ -0,0 +1,49 @@
+Column Families
+===============
+
+When creating a
+:class:`ColumnFamily `, it is
+possible to set garbage collection rules for expired data.
+
+By setting a rule, cells in the table matching the rule will be deleted
+during periodic garbage collection (which executes opportunistically in the
+background).
+
+The types
+:class:`MaxAgeGCRule `,
+:class:`MaxVersionsGCRule `,
+:class:`GarbageCollectionRuleUnion ` and
+:class:`GarbageCollectionRuleIntersection `
+can all be used as the optional ``gc_rule`` argument in the
+:class:`ColumnFamily `
+constructor. This value is then used in the
+:meth:`create() ` and
+:meth:`update() ` methods.
+
+These rules can be nested arbitrarily, with a
+:class:`MaxAgeGCRule ` or
+:class:`MaxVersionsGCRule `
+at the lowest level of the nesting:
+
+.. code:: python
+
+ import datetime
+
+ max_age = datetime.timedelta(days=3)
+ rule1 = MaxAgeGCRule(max_age)
+ rule2 = MaxVersionsGCRule(1)
+
+ # Make a composite that matches anything older than 3 days **AND**
+ # with more than 1 version.
+ rule3 = GarbageCollectionRuleIntersection(rules=[rule1, rule2])
+
+ # Make another composite that matches our previous intersection
+ # **OR** anything that has more than 3 versions.
+ rule4 = MaxVersionsGCRule(3)
+ rule5 = GarbageCollectionRuleUnion(rules=[rule3, rule4])
+
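+As a usage sketch (assuming an existing ``table`` object and the composite
+``rule5`` built above; the family name ``cf1`` is illustrative), the rule is
+attached as the ``gc_rule`` argument when the column family is created:
+
+.. code:: python
+
+ # Create the column family on the server with the composite rule;
+ # update() can later replace the rule in the same way.
+ column_family = table.column_family('cf1', gc_rule=rule5)
+ column_family.create()
+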
+----
+
+.. automodule:: google.cloud.bigtable.column_family
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/classic_client/data-api.rst b/packages/google-cloud-bigtable/docs/classic_client/data-api.rst
new file mode 100644
index 000000000000..9b50e9ec9a8e
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/classic_client/data-api.rst
@@ -0,0 +1,351 @@
+Data API
+========
+
+.. note::
+ This page describes how to use the Data API with the synchronous Bigtable client.
+ Examples for using the Data API with the async client can be found in the
+ `Getting Started Guide`_.
+
+.. _Getting Started Guide: https://cloud.google.com/bigtable/docs/samples-python-hello
+
+After creating a :class:`Table ` and some
+column families, you are ready to store and retrieve data.
+
+Cells vs. Columns vs. Column Families
++++++++++++++++++++++++++++++++++++++
+
+* As explained in the :doc:`table overview `, tables can
+ have many column families.
+* As described below, a table can also have many rows which are
+ specified by row keys.
+* Within a row, data is stored in a cell. A cell simply has a value (as
+ bytes) and a timestamp. The number of cells in each row can be
+ different, depending on what was stored in each row.
+* Each cell lies in a column (**not** a column family). A column is really
+ just a more **specific** modifier within a column family. A column
+ can be present in every column family, in only one or anywhere in between.
+* Within a column family there can be many columns. For example, within
+ the column family ``foo`` we could have columns ``bar`` and ``baz``.
+ These would typically be represented as ``foo:bar`` and ``foo:baz``.
+
+Modifying Data
+++++++++++++++
+
+Since data is stored in cells, which are stored in rows, we
+use the metaphor of a **row** in classes that are used to modify
+(write, update, delete) data in a
+:class:`Table `.
+
+Direct vs. Conditional vs. Append
+---------------------------------
+
+There are three ways to modify data in a table, described by the
+`MutateRow`_, `CheckAndMutateRow`_ and `ReadModifyWriteRow`_ API
+methods.
+
+* The **direct** way is via `MutateRow`_ which involves simply
+ adding, overwriting or deleting cells. The
+ :class:`DirectRow ` class
+ handles direct mutations.
+* The **conditional** way is via `CheckAndMutateRow`_. This method
+ first checks if some filter is matched in a given row, then
+ applies one of two sets of mutations, depending on if a match
+ occurred or not. (These mutation sets are called the "true
+ mutations" and "false mutations".) The
+ :class:`ConditionalRow ` class
+ handles conditional mutations.
+* The **append** way is via `ReadModifyWriteRow`_. This simply
+ appends (as bytes) or increments (as an integer) data in a presumed
+ existing cell in a row. The
+ :class:`AppendRow ` class
+ handles append mutations.
+
+Row Factory
+-----------
+
+A single factory can be used to create any of the three row types.
+To create a :class:`DirectRow `:
+
+.. code:: python
+
+ row = table.row(row_key)
+
+Unlike the string values we've used before, the row key must
+be ``bytes``.
+
+To create a :class:`ConditionalRow `,
+first create a :class:`RowFilter ` and
+then
+
+.. code:: python
+
+ cond_row = table.row(row_key, filter_=filter_)
+
+To create an :class:`AppendRow `
+
+.. code:: python
+
+ append_row = table.row(row_key, append=True)
+
+Building Up Mutations
+---------------------
+
+In all three cases, a set of mutations (or two sets) are built up
+on a row before they are sent off in a batch via
+
+.. code:: python
+
+ row.commit()
+
+Direct Mutations
+----------------
+
+Direct mutations can be added via one of four methods
+
+* :meth:`set_cell() ` allows a
+ single value to be written to a column
+
+ .. code:: python
+
+ row.set_cell(column_family_id, column, value,
+ timestamp=timestamp)
+
+ If the ``timestamp`` is omitted, the current time on the Google Cloud
+ Bigtable server will be used when the cell is stored.
+
+ The value can either be bytes or an integer, which will be converted to
+ bytes as a signed 64-bit integer.
+
+* :meth:`delete_cell() ` deletes
+ all cells (i.e. for all timestamps) in a given column
+
+ .. code:: python
+
+ row.delete_cell(column_family_id, column)
+
+ Remember, this only happens in the ``row`` we are using.
+
+ If we only want to delete cells from a limited range of time, a
+ :class:`TimestampRange ` can
+ be used
+
+ .. code:: python
+
+ row.delete_cell(column_family_id, column,
+ time_range=time_range)
+
+* :meth:`delete_cells() ` does
+ the same thing as
+ :meth:`delete_cell() `,
+ but accepts a list of columns in a column family rather than a single one.
+
+ .. code:: python
+
+ row.delete_cells(column_family_id, [column1, column2],
+ time_range=time_range)
+
+ In addition, if we want to delete cells from every column in a column family,
+ the special :attr:`ALL_COLUMNS `
+ value can be used
+
+ .. code:: python
+
+ row.delete_cells(column_family_id, row.ALL_COLUMNS,
+ time_range=time_range)
+
+* :meth:`delete() ` will delete the
+ entire row
+
+ .. code:: python
+
+ row.delete()
+
+Conditional Mutations
+---------------------
+
+Making **conditional** modifications is essentially identical
+to **direct** modifications: it uses the exact same methods
+to accumulate mutations.
+
+However, each mutation added must specify a ``state``: will the mutation be
+applied if the filter matches or if it fails to match.
+
+For example:
+
+.. code:: python
+
+ cond_row.set_cell(column_family_id, column, value,
+ timestamp=timestamp, state=True)
+
+will add to the set of true mutations.
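+
+Putting it together, a minimal sketch (the filter, family, column, and values
+below are illustrative) that writes one value when the filter matches and a
+different one when it does not:
+
+.. code:: python
+
+ from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter
+
+ # Only rows that already have a cell in a ``status`` column will match.
+ filter_ = ColumnQualifierRegexFilter(b'status')
+ cond_row = table.row(row_key, filter_=filter_)
+
+ # Applied when the filter matches (a "true mutation").
+ cond_row.set_cell(column_family_id, b'status', b'updated', state=True)
+ # Applied when the filter does not match (a "false mutation").
+ cond_row.set_cell(column_family_id, b'status', b'created', state=False)
+
+ # Sends a single CheckAndMutateRow request with both sets of mutations.
+ cond_row.commit()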
+
+Append Mutations
+----------------
+
+Append mutations can be added via one of two methods
+
+* :meth:`append_cell_value() `
+ appends a bytes value to an existing cell:
+
+ .. code:: python
+
+ append_row.append_cell_value(column_family_id, column, bytes_value)
+
+* :meth:`increment_cell_value() `
+ increments an integer value in an existing cell:
+
+ .. code:: python
+
+ append_row.increment_cell_value(column_family_id, column, int_value)
+
+ Since only bytes are stored in a cell, the cell value is decoded as
+ a signed 64-bit integer before being incremented. (This happens on
+ the Google Cloud Bigtable server, not in the library.)
+
+Notice that no timestamp was specified. This is because **append** mutations
+operate on the latest value of the specified column.
+
+If there are no cells in the specified column, then the empty string (bytes
+case) or zero (integer case) are the assumed values.
+
+Starting Fresh
+--------------
+
+If accumulated mutations need to be dropped, use
+
+.. code:: python
+
+ row.clear()
+
+Reading Data
+++++++++++++
+
+Read Single Row from a Table
+----------------------------
+
+To make a `ReadRows`_ API request for a single row key, use
+:meth:`Table.read_row() `:
+
+.. code:: python
+
+ >>> row_data = table.read_row(row_key)
+ >>> row_data.cells
+ {
+     u'fam1': {
+         b'col1': [
+             <google.cloud.bigtable.row_data.Cell at 0x...>,
+             <google.cloud.bigtable.row_data.Cell at 0x...>,
+         ],
+         b'col2': [
+             <google.cloud.bigtable.row_data.Cell at 0x...>,
+         ],
+     },
+     u'fam2': {
+         b'col3': [
+             <google.cloud.bigtable.row_data.Cell at 0x...>,
+             <google.cloud.bigtable.row_data.Cell at 0x...>,
+             <google.cloud.bigtable.row_data.Cell at 0x...>,
+         ],
+     },
+ }
+ >>> cell = row_data.cells[u'fam1'][b'col1'][0]
+ >>> cell
+ <google.cloud.bigtable.row_data.Cell at 0x...>
+ >>> cell.value
+ b'val1'
+ >>> cell.timestamp
+ datetime.datetime(2016, 2, 27, 3, 41, 18, 122823, tzinfo=<UTC>)
+
+Rather than returning a :class:`DirectRow `
+or similar class, this method returns a
+:class:`PartialRowData `
+instance. This class is used for reading and parsing data rather than for
+modifying data (as :class:`DirectRow ` is).
+
+A filter can also be applied to the results:
+
+.. code:: python
+
+ row_data = table.read_row(row_key, filter_=filter_val)
+
+The allowable ``filter_`` values are the same as those used for a
+:class:`ConditionalRow `. For
+more information, see the
+:meth:`Table.read_row() ` documentation.
+
+Stream Many Rows from a Table
+-----------------------------
+
+To make a `ReadRows`_ API request for a stream of rows, use
+:meth:`Table.read_rows() `:
+
+.. code:: python
+
+ row_data = table.read_rows()
+
+Using gRPC over HTTP/2, a continual stream of responses will be delivered.
+In particular
+
+* :meth:`consume_next() `
+ pulls the next result from the stream, parses it and stores it on the
+ :class:`PartialRowsData ` instance
+* :meth:`consume_all() `
+ pulls results from the stream until there are no more
+* :meth:`cancel() ` closes
+ the stream
+
+See the :class:`PartialRowsData `
+documentation for more information.
+
+As with
+:meth:`Table.read_row() `, an optional
+``filter_`` can be applied. In addition, a ``start_key`` and/or ``end_key``
+can be supplied for the stream, a ``limit`` can be set, and a boolean
+``allow_row_interleaving`` can be specified to allow faster streamed results
+at the potential cost of non-sequential reads.
+
+See the :meth:`Table.read_rows() `
+documentation for more information on the optional arguments.
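+
+For example, a short sketch that streams a bounded key range and handles each
+row as it arrives (the row keys, the limit, and the ``process`` callable are
+placeholders):
+
+.. code:: python
+
+ row_data = table.read_rows(start_key=b'row-0000', end_key=b'row-9999',
+                            limit=100)
+
+ # PartialRowsData may also be iterated directly; each item is a
+ # PartialRowData for one row in the requested range.
+ for row in row_data:
+     process(row.row_key, row.cells)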
+
+Sample Keys in a Table
+----------------------
+
+Make a `SampleRowKeys`_ API request with
+:meth:`Table.sample_row_keys() `:
+
+.. code:: python
+
+ keys_iterator = table.sample_row_keys()
+
+The returned row keys will delimit contiguous sections of the table of
+approximately equal size, which can be used to break up the data for
+distributed tasks like mapreduces.
+
+As with
+:meth:`Table.read_rows() `, the
+returned ``keys_iterator`` is connected to a cancellable HTTP/2 stream.
+
+The next key in the result can be accessed via
+
+.. code:: python
+
+ next_key = next(keys_iterator)
+
+or all keys can be iterated over via
+
+.. code:: python
+
+ for curr_key in keys_iterator:
+ do_something(curr_key)
+
+Just as with reading, the stream can be canceled:
+
+.. code:: python
+
+ keys_iterator.cancel()
+
+.. _ReadRows: https://github.com/googleapis/python-bigtable/blob/d6bff70654b41e31d2ac83d307bdc6bbd111201e/google/cloud/bigtable_v2/types/bigtable.py#L42-L72
+.. _SampleRowKeys: https://github.com/googleapis/python-bigtable/blob/d6bff70654b41e31d2ac83d307bdc6bbd111201e/google/cloud/bigtable_v2/types/bigtable.py#L184-L199
+.. _MutateRow: https://github.com/googleapis/python-bigtable/blob/d6bff70654b41e31d2ac83d307bdc6bbd111201e/google/cloud/bigtable_v2/types/bigtable.py#L230-L256
+.. _CheckAndMutateRow: https://github.com/googleapis/python-bigtable/blob/d6bff70654b41e31d2ac83d307bdc6bbd111201e/google/cloud/bigtable_v2/types/bigtable.py#L339-L386
+.. _ReadModifyWriteRow: https://github.com/googleapis/python-bigtable/blob/d6bff70654b41e31d2ac83d307bdc6bbd111201e/google/cloud/bigtable_v2/types/bigtable.py#L401-L430
diff --git a/packages/google-cloud-bigtable/docs/classic_client/encryption-info.rst b/packages/google-cloud-bigtable/docs/classic_client/encryption-info.rst
new file mode 100644
index 000000000000..46f19880fcac
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/classic_client/encryption-info.rst
@@ -0,0 +1,6 @@
+Encryption Info
+~~~~~~~~~~~~~~~
+
+.. automodule:: google.cloud.bigtable.encryption_info
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/classic_client/instance-api.rst b/packages/google-cloud-bigtable/docs/classic_client/instance-api.rst
new file mode 100644
index 000000000000..88b4eb4dc914
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/classic_client/instance-api.rst
@@ -0,0 +1,130 @@
+Instance Admin API
+==================
+
+After creating a :class:`Client `, you can
+interact with individual instances for a project.
+
+List Instances
+--------------
+
+If you want a comprehensive list of all existing instances, make a
+`ListInstances`_ API request with
+:meth:`Client.list_instances() `:
+
+.. code:: python
+
+ instances = client.list_instances()
+
+Instance Factory
+----------------
+
+To create an :class:`Instance ` object:
+
+.. code:: python
+
+ instance = client.instance(instance_id, display_name=display_name)
+
+- ``display_name`` is optional. When not provided, ``display_name`` defaults
+ to the ``instance_id`` value.
+
+You can also use :meth:`Client.instance` to create a local wrapper for
+instances that have already been created with the API, or through the web
+console:
+
+.. code:: python
+
+ instance = client.instance(existing_instance_id)
+ instance.reload()
+
+Create a new Instance
+---------------------
+
+After creating the instance object, make a `CreateInstance`_ API request
+with :meth:`create() `:
+
+.. code:: python
+
+ instance.display_name = 'My very own instance'
+ instance.create()
+
+Check on Current Operation
+--------------------------
+
+.. note::
+
+ When modifying an instance (via a `CreateInstance`_ request), the Bigtable
+ API will return a `long-running operation`_ and a corresponding
+ :class:`Operation ` object
+ will be returned by
+ :meth:`create() `.
+
+You can check if a long-running operation (for a
+:meth:`create() `) has finished
+by making a `GetOperation`_ request with
+:meth:`Operation.finished() `:
+
+.. code:: python
+
+ >>> operation = instance.create()
+ >>> operation.finished()
+ True
+
+.. note::
+
+ Once an :class:`Operation ` object
+ has returned :data:`True` from
+ :meth:`finished() `, the
+ object should not be re-used. Subsequent calls to
+ :meth:`finished() `
+ will result in a :class:`ValueError `.
+
+Get metadata for an existing Instance
+-------------------------------------
+
+After creating the instance object, make a `GetInstance`_ API request
+with :meth:`reload() `:
+
+.. code:: python
+
+ instance.reload()
+
+This will load ``display_name`` for the existing ``instance`` object.
+
+Update an existing Instance
+---------------------------
+
+After creating the instance object, make an `UpdateInstance`_ API request
+with :meth:`update() `:
+
+.. code:: python
+
+ instance.display_name = 'New display_name'
+ instance.update()
+
+Delete an existing Instance
+---------------------------
+
+Make a `DeleteInstance`_ API request with
+:meth:`delete() `:
+
+.. code:: python
+
+ instance.delete()
+
+Next Step
+---------
+
+Now we go down the hierarchy from
+:class:`Instance ` to a
+:class:`Table `.
+
+Head next to learn about the :doc:`table-api`.
+
+.. _Instance Admin API: https://cloud.google.com/bigtable/docs/creating-instance
+.. _CreateInstance: https://googleapis.dev/python/bigtable/latest/instance-api.html#create-a-new-instance
+.. _GetInstance: https://googleapis.dev/python/bigtable/latest/instance-api.html#get-metadata-for-an-existing-instance
+.. _UpdateInstance: https://googleapis.dev/python/bigtable/latest/instance-api.html#update-an-existing-instance
+.. _DeleteInstance: https://googleapis.dev/python/bigtable/latest/instance-api.html#delete-an-existing-instance
+.. _ListInstances: https://googleapis.dev/python/bigtable/latest/instance-api.html#list-instances
+.. _GetOperation: https://googleapis.dev/python/bigtable/latest/instance-api.html#check-on-current-operation
+.. _long-running operation: https://github.com/googleapis/googleapis/blob/main/google/longrunning/operations.proto#L128-L162
diff --git a/packages/google-cloud-bigtable/docs/classic_client/instance.rst b/packages/google-cloud-bigtable/docs/classic_client/instance.rst
new file mode 100644
index 000000000000..f9be9672fc64
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/classic_client/instance.rst
@@ -0,0 +1,6 @@
+Instance
+~~~~~~~~
+
+.. automodule:: google.cloud.bigtable.instance
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/classic_client/row-data.rst b/packages/google-cloud-bigtable/docs/classic_client/row-data.rst
new file mode 100644
index 000000000000..503f9b1cbdfd
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/classic_client/row-data.rst
@@ -0,0 +1,6 @@
+Row Data
+~~~~~~~~
+
+.. automodule:: google.cloud.bigtable.row_data
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/classic_client/row-filters.rst b/packages/google-cloud-bigtable/docs/classic_client/row-filters.rst
new file mode 100644
index 000000000000..9884ce400d52
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/classic_client/row-filters.rst
@@ -0,0 +1,67 @@
+Bigtable Row Filters
+====================
+
+It is possible to use a
+:class:`RowFilter `
+when adding mutations to a
+:class:`ConditionalRow ` and when
+reading row data with :meth:`read_row() `
+or :meth:`read_rows() `.
+
+As laid out in the `RowFilter definition`_, the following basic filters
+are provided:
+
+* :class:`SinkFilter <.row_filters.SinkFilter>`
+* :class:`PassAllFilter <.row_filters.PassAllFilter>`
+* :class:`BlockAllFilter <.row_filters.BlockAllFilter>`
+* :class:`RowKeyRegexFilter <.row_filters.RowKeyRegexFilter>`
+* :class:`RowSampleFilter <.row_filters.RowSampleFilter>`
+* :class:`FamilyNameRegexFilter <.row_filters.FamilyNameRegexFilter>`
+* :class:`ColumnQualifierRegexFilter <.row_filters.ColumnQualifierRegexFilter>`
+* :class:`TimestampRangeFilter <.row_filters.TimestampRangeFilter>`
+* :class:`ColumnRangeFilter <.row_filters.ColumnRangeFilter>`
+* :class:`ValueRegexFilter <.row_filters.ValueRegexFilter>`
+* :class:`ValueRangeFilter <.row_filters.ValueRangeFilter>`
+* :class:`CellsRowOffsetFilter <.row_filters.CellsRowOffsetFilter>`
+* :class:`CellsRowLimitFilter <.row_filters.CellsRowLimitFilter>`
+* :class:`CellsColumnLimitFilter <.row_filters.CellsColumnLimitFilter>`
+* :class:`StripValueTransformerFilter <.row_filters.StripValueTransformerFilter>`
+* :class:`ApplyLabelFilter <.row_filters.ApplyLabelFilter>`
+
+In addition, these filters can be combined into composite filters with
+
+* :class:`RowFilterChain <.row_filters.RowFilterChain>`
+* :class:`RowFilterUnion <.row_filters.RowFilterUnion>`
+* :class:`ConditionalRowFilter <.row_filters.ConditionalRowFilter>`
+
+These rules can be nested arbitrarily, with a basic filter at the lowest
+level. For example:
+
+.. code:: python
+
+ # Filter in a specified column (matching any column family).
+ col1_filter = ColumnQualifierRegexFilter(b'columnbia')
+
+ # Create a filter to label results.
+ label1 = u'label-red'
+ label1_filter = ApplyLabelFilter(label1)
+
+ # Combine the filters to label all the cells in columnbia.
+ chain1 = RowFilterChain(filters=[col1_filter, label1_filter])
+
+ # Create a similar filter to label cells blue.
+ col2_filter = ColumnQualifierRegexFilter(b'columnseeya')
+ label2 = u'label-blue'
+ label2_filter = ApplyLabelFilter(label2)
+ chain2 = RowFilterChain(filters=[col2_filter, label2_filter])
+
+ # Bring our two labeled columns together.
+ row_filter = RowFilterUnion(filters=[chain1, chain2])
+
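+The composite ``row_filter`` can then be applied when reading, for example
+(assuming an existing ``table`` object from the classic client):
+
+.. code:: python
+
+ # Each matching cell comes back carrying the label applied by its chain.
+ labeled_row = table.read_row(row_key, filter_=row_filter)
+ labeled_rows = table.read_rows(filter_=row_filter)
+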
+----
+
+.. automodule:: google.cloud.bigtable.row_filters
+ :members:
+ :show-inheritance:
+
+.. _RowFilter definition: https://googleapis.dev/python/bigtable/latest/row-filters.html?highlight=rowfilter#google.cloud.bigtable.row_filters.RowFilter
diff --git a/packages/google-cloud-bigtable/docs/classic_client/row-set.rst b/packages/google-cloud-bigtable/docs/classic_client/row-set.rst
new file mode 100644
index 000000000000..5f7a16a029ed
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/classic_client/row-set.rst
@@ -0,0 +1,6 @@
+Row Set
+~~~~~~~~
+
+.. automodule:: google.cloud.bigtable.row_set
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/classic_client/row.rst b/packages/google-cloud-bigtable/docs/classic_client/row.rst
new file mode 100644
index 000000000000..33686608b363
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/classic_client/row.rst
@@ -0,0 +1,7 @@
+Bigtable Row
+============
+
+.. automodule:: google.cloud.bigtable.row
+ :members:
+ :show-inheritance:
+ :inherited-members:
diff --git a/packages/google-cloud-bigtable/docs/classic_client/snippets.py b/packages/google-cloud-bigtable/docs/classic_client/snippets.py
new file mode 100644
index 000000000000..fa3aa3627970
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/classic_client/snippets.py
@@ -0,0 +1,793 @@
+#!/usr/bin/env python
+
+# Copyright 2018, Google LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Testable usage examples for Google Cloud Bigtable API wrapper
+
+Each example function takes a ``client`` argument (which must be an instance
+of :class:`google.cloud.bigtable.client.Client`) and uses it to perform a task
+with the API.
+
+To facilitate running the examples as system tests, each example is also passed
+a ``to_delete`` list; the function adds to the list any objects created which
+need to be deleted during teardown.
+
+.. note::
+ This file is a work in progress and will be updated with more guidance from
+ the team. Unit tests will be added with guidance from the team.
+
+"""
+
+import datetime
+import pytest
+
+from google.api_core.exceptions import DeadlineExceeded
+from google.api_core.exceptions import NotFound
+from google.api_core.exceptions import TooManyRequests
+from google.api_core.exceptions import ServiceUnavailable
+from test_utils.system import unique_resource_id
+from test_utils.retry import RetryErrors
+
+from google.cloud._helpers import UTC
+from google.cloud.bigtable import Client
+from google.cloud.bigtable import enums
+
+
+UNIQUE_SUFFIX = unique_resource_id("-")
+INSTANCE_ID = "snippet-tests" + UNIQUE_SUFFIX
+CLUSTER_ID = "clus-1-" + UNIQUE_SUFFIX
+APP_PROFILE_ID = "app-prof" + UNIQUE_SUFFIX
+TABLE_ID = "tabl-1" + UNIQUE_SUFFIX
+ROUTING_POLICY_TYPE = enums.RoutingPolicyType.ANY
+LOCATION_ID = "us-central1-f"
+ALT_LOCATION_ID = "us-central1-a"
+PRODUCTION = enums.Instance.Type.PRODUCTION
+SERVER_NODES = 3
+STORAGE_TYPE = enums.StorageType.SSD
+LABEL_KEY = "python-snippet"
+LABEL_STAMP = (
+ datetime.datetime.utcnow()
+ .replace(microsecond=0, tzinfo=UTC)
+ .strftime("%Y-%m-%dt%H-%M-%S")
+)
+LABELS = {LABEL_KEY: str(LABEL_STAMP)}
+INSTANCES_TO_DELETE = []
+
+retry_429_503 = RetryErrors((ServiceUnavailable, TooManyRequests), max_tries=9)
+retry_504 = RetryErrors(DeadlineExceeded, max_tries=4)
+
+
+class Config(object):
+ """Run-time configuration to be modified at set-up.
+
+ This is a mutable stand-in to allow test set-up to modify
+ global state.
+ """
+
+ CLIENT = None
+ INSTANCE = None
+ TABLE = None
+
+
+def setup_module():
+ client = Config.CLIENT = Client(admin=True)
+ Config.INSTANCE = client.instance(
+ INSTANCE_ID, instance_type=PRODUCTION, labels=LABELS
+ )
+ cluster = Config.INSTANCE.cluster(
+ CLUSTER_ID,
+ location_id=LOCATION_ID,
+ serve_nodes=SERVER_NODES,
+ default_storage_type=STORAGE_TYPE,
+ )
+ operation = Config.INSTANCE.create(clusters=[cluster])
+ # We want to make sure the operation completes.
+ operation.result(timeout=100)
+ Config.TABLE = Config.INSTANCE.table(TABLE_ID)
+ retry_504(Config.TABLE.create)()
+
+
+def teardown_module():
+ retry_429_503(Config.INSTANCE.delete)()
+
+ for instance in INSTANCES_TO_DELETE:
+ try:
+ retry_429_503(instance.delete)()
+ except NotFound:
+ pass
+
+
+def test_bigtable_create_instance():
+ # [START bigtable_api_create_prod_instance]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable import enums
+
+ my_instance_id = "inst-my-" + UNIQUE_SUFFIX
+ my_cluster_id = "clus-my-" + UNIQUE_SUFFIX
+ location_id = "us-central1-f"
+ serve_nodes = 1
+ storage_type = enums.StorageType.SSD
+ production = enums.Instance.Type.PRODUCTION
+ labels = {"prod-label": "prod-label"}
+
+ client = Client(admin=True)
+ instance = client.instance(my_instance_id, instance_type=production, labels=labels)
+ cluster = instance.cluster(
+ my_cluster_id,
+ location_id=location_id,
+ serve_nodes=serve_nodes,
+ default_storage_type=storage_type,
+ )
+ operation = instance.create(clusters=[cluster])
+
+ # We want to make sure the operation completes.
+ operation.result(timeout=100)
+
+ # [END bigtable_api_create_prod_instance]
+
+ try:
+ assert instance.exists()
+ finally:
+ retry_429_503(instance.delete)()
+
+
+def test_bigtable_create_additional_cluster():
+ # [START bigtable_api_create_cluster]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable import enums
+
+ # Assuming that there is an existing instance with `INSTANCE_ID`
+ # on the server already.
+ # to create an instance see
+ # 'https://cloud.google.com/bigtable/docs/creating-instance'
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+
+ cluster_id = "clus-my-" + UNIQUE_SUFFIX
+ location_id = "us-central1-a"
+ serve_nodes = 1
+ storage_type = enums.StorageType.SSD
+
+ cluster = instance.cluster(
+ cluster_id,
+ location_id=location_id,
+ serve_nodes=serve_nodes,
+ default_storage_type=storage_type,
+ )
+ operation = cluster.create()
+ # We want to make sure the operation completes.
+ operation.result(timeout=100)
+ # [END bigtable_api_create_cluster]
+
+ try:
+ assert cluster.exists()
+ finally:
+ retry_429_503(cluster.delete)()
+
+
+def test_bigtable_create_reload_delete_app_profile():
+ import re
+
+ # [START bigtable_api_create_app_profile]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable import enums
+
+ routing_policy_type = enums.RoutingPolicyType.ANY
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+
+ description = "routing policy-multy"
+
+ app_profile = instance.app_profile(
+ app_profile_id=APP_PROFILE_ID,
+ routing_policy_type=routing_policy_type,
+ description=description,
+ cluster_id=CLUSTER_ID,
+ )
+
+ app_profile = app_profile.create(ignore_warnings=True)
+ # [END bigtable_api_create_app_profile]
+
+ # [START bigtable_api_app_profile_name]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ app_profile = instance.app_profile(APP_PROFILE_ID)
+
+ app_profile_name = app_profile.name
+ # [END bigtable_api_app_profile_name]
+ _profile_name_re = re.compile(
+ r"^projects/(?P[^/]+)/"
+ r"instances/(?P[^/]+)/"
+ r"appProfiles/(?P"
+ r"[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$"
+ )
+ assert _profile_name_re.match(app_profile_name)
+
+ # [START bigtable_api_app_profile_exists]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ app_profile = instance.app_profile(APP_PROFILE_ID)
+
+ app_profile_exists = app_profile.exists()
+ # [END bigtable_api_app_profile_exists]
+ assert app_profile_exists
+
+ # [START bigtable_api_reload_app_profile]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ app_profile = instance.app_profile(APP_PROFILE_ID)
+
+ app_profile.reload()
+ # [END bigtable_api_reload_app_profile]
+ assert app_profile.routing_policy_type == ROUTING_POLICY_TYPE
+
+ # [START bigtable_api_update_app_profile]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ app_profile = instance.app_profile(APP_PROFILE_ID)
+ app_profile.reload()
+
+ description = "My new app profile"
+ app_profile.description = description
+ app_profile.update()
+ # [END bigtable_api_update_app_profile]
+ assert app_profile.description == description
+
+ # [START bigtable_api_delete_app_profile]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ app_profile = instance.app_profile(APP_PROFILE_ID)
+ app_profile.reload()
+
+ app_profile.delete(ignore_warnings=True)
+ # [END bigtable_api_delete_app_profile]
+ assert not app_profile.exists()
+
+
+def test_bigtable_list_instances():
+ # [START bigtable_api_list_instances]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ (instances_list, failed_locations_list) = client.list_instances()
+ # [END bigtable_api_list_instances]
+
+ assert len(instances_list) > 0
+
+
+def test_bigtable_list_clusters_on_instance():
+ # [START bigtable_api_list_clusters_on_instance]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ (clusters_list, failed_locations_list) = instance.list_clusters()
+ # [END bigtable_api_list_clusters_on_instance]
+
+ assert len(clusters_list) > 0
+
+
+def test_bigtable_list_clusters_in_project():
+ # [START bigtable_api_list_clusters_in_project]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ (clusters_list, failed_locations_list) = client.list_clusters()
+ # [END bigtable_api_list_clusters_in_project]
+
+ assert len(clusters_list) > 0
+
+
+def test_bigtable_list_app_profiles():
+ app_profile = Config.INSTANCE.app_profile(
+ app_profile_id="app-prof-" + UNIQUE_SUFFIX,
+ routing_policy_type=enums.RoutingPolicyType.ANY,
+ )
+ app_profile = app_profile.create(ignore_warnings=True)
+
+ # [START bigtable_api_list_app_profiles]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+
+ app_profiles_list = instance.list_app_profiles()
+ # [END bigtable_api_list_app_profiles]
+
+ try:
+ assert len(app_profiles_list) > 0
+ finally:
+ retry_429_503(app_profile.delete)(ignore_warnings=True)
+
+
+def test_bigtable_instance_exists():
+ # [START bigtable_api_check_instance_exists]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ instance_exists = instance.exists()
+ # [END bigtable_api_check_instance_exists]
+
+ assert instance_exists
+
+
+def test_bigtable_cluster_exists():
+ # [START bigtable_api_check_cluster_exists]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ cluster = instance.cluster(CLUSTER_ID)
+ cluster_exists = cluster.exists()
+ # [END bigtable_api_check_cluster_exists]
+
+ assert cluster_exists
+
+
+def test_bigtable_reload_instance():
+ # [START bigtable_api_reload_instance]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ instance.reload()
+ # [END bigtable_api_reload_instance]
+
+ assert instance.type_ == PRODUCTION.value
+
+
+def test_bigtable_reload_cluster():
+ # [START bigtable_api_reload_cluster]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ cluster = instance.cluster(CLUSTER_ID)
+ cluster.reload()
+ # [END bigtable_api_reload_cluster]
+
+ assert cluster.serve_nodes == SERVER_NODES
+
+
+def test_bigtable_update_instance():
+ # [START bigtable_api_update_instance]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ display_name = "My new instance"
+ instance.display_name = display_name
+ instance.update()
+ # [END bigtable_api_update_instance]
+
+ assert instance.display_name == display_name
+
+
+def test_bigtable_update_cluster():
+ # [START bigtable_api_update_cluster]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ cluster = instance.cluster(CLUSTER_ID)
+ cluster.serve_nodes = 4
+ cluster.update()
+ # [END bigtable_api_update_cluster]
+
+ assert cluster.serve_nodes == 4
+
+
+def test_bigtable_cluster_disable_autoscaling():
+ # [START bigtable_api_cluster_disable_autoscaling]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ # Create a cluster with autoscaling enabled
+ cluster = instance.cluster(
+ CLUSTER_ID, min_serve_nodes=1, max_serve_nodes=2, cpu_utilization_percent=10
+ )
+ instance.create(clusters=[cluster])
+
+ # Disable autoscaling
+ cluster.disable_autoscaling(serve_nodes=4)
+ # [END bigtable_api_cluster_disable_autoscaling]
+
+ assert cluster.serve_nodes == 4
+
+
+def test_bigtable_create_table():
+ # [START bigtable_api_create_table]
+ from google.api_core import exceptions
+ from google.api_core import retry
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable import column_family
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table("table_my")
+ # Define the GC policy to retain only the most recent 2 versions.
+ max_versions_rule = column_family.MaxVersionsGCRule(2)
+
+ # Could include other retriable exception types
+ # Could configure deadline, etc.
+ predicate_504 = retry.if_exception_type(exceptions.DeadlineExceeded)
+ retry_504 = retry.Retry(predicate_504)
+
+ retry_504(table.create)(column_families={"cf1": max_versions_rule})
+ # [END bigtable_api_create_table]
+
+ try:
+ assert table.exists()
+ finally:
+ retry_429_503(table.delete)()
+
+
+def test_bigtable_list_tables():
+ # [START bigtable_api_list_tables]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ tables_list = instance.list_tables()
+ # [END bigtable_api_list_tables]
+
+ # Check if returned list has expected table
+ table_names = [table.name for table in tables_list]
+ assert Config.TABLE.name in table_names
+
+
+def test_bigtable_delete_cluster():
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ cluster_id = "clus-my-" + UNIQUE_SUFFIX
+ serve_nodes = 1
+ cluster = instance.cluster(
+ cluster_id,
+ location_id=ALT_LOCATION_ID,
+ serve_nodes=serve_nodes,
+ default_storage_type=STORAGE_TYPE,
+ )
+ operation = cluster.create()
+ # We want to make sure the operation completes.
+ operation.result(timeout=1000)
+
+ # [START bigtable_api_delete_cluster]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ cluster_to_delete = instance.cluster(cluster_id)
+
+ cluster_to_delete.delete()
+ # [END bigtable_api_delete_cluster]
+
+ assert not cluster_to_delete.exists()
+
+
+def test_bigtable_delete_instance():
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+
+ instance_id = "snipt-inst-del" + UNIQUE_SUFFIX
+ instance = client.instance(instance_id, instance_type=PRODUCTION, labels=LABELS)
+ serve_nodes = 1
+ cluster = instance.cluster(
+ "clus-to-delete" + UNIQUE_SUFFIX,
+ location_id=ALT_LOCATION_ID,
+ serve_nodes=serve_nodes,
+ default_storage_type=STORAGE_TYPE,
+ )
+ operation = instance.create(clusters=[cluster])
+
+ # We want to make sure the operation completes.
+ operation.result(timeout=100)
+
+ # Make sure this instance gets deleted after the test case.
+ INSTANCES_TO_DELETE.append(instance)
+
+ # [START bigtable_api_delete_instance]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+
+ instance_to_delete = client.instance(instance_id)
+ instance_to_delete.delete()
+ # [END bigtable_api_delete_instance]
+
+ assert not instance_to_delete.exists()
+
+ # Skip deleting it during module teardown if the assertion succeeds.
+ INSTANCES_TO_DELETE.remove(instance)
+
+
+def test_bigtable_test_iam_permissions():
+ # [START bigtable_api_test_iam_permissions]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ instance.reload()
+ permissions = ["bigtable.clusters.create", "bigtable.tables.create"]
+ permissions_allowed = instance.test_iam_permissions(permissions)
+ # [END bigtable_api_test_iam_permissions]
+
+ assert permissions_allowed == permissions
+
+
+def test_bigtable_set_iam_policy_then_get_iam_policy():
+ service_account_email = Config.CLIENT._credentials.service_account_email
+
+ # [START bigtable_api_set_iam_policy]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable.policy import Policy
+ from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ instance.reload()
+ new_policy = Policy()
+ new_policy[BIGTABLE_ADMIN_ROLE] = [Policy.service_account(service_account_email)]
+
+ policy_latest = instance.set_iam_policy(new_policy)
+ # [END bigtable_api_set_iam_policy]
+
+ assert len(policy_latest.bigtable_admins) > 0
+
+ # [START bigtable_api_get_iam_policy]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ policy = instance.get_iam_policy()
+ # [END bigtable_api_get_iam_policy]
+
+ assert len(policy.bigtable_admins) > 0
+
+
+def test_bigtable_project_path():
+ import re
+
+ # [START bigtable_api_project_path]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ project_path = client.project_path
+ # [END bigtable_api_project_path]
+
+
+def test_bigtable_table_data_client():
+ # [START bigtable_api_table_data_client]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ table_data_client = client.table_data_client
+ # [END bigtable_api_table_data_client]
+
+
+def test_bigtable_table_admin_client():
+ # [START bigtable_api_table_admin_client]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ table_admin_client = client.table_admin_client
+ # [END bigtable_api_table_admin_client]
+
+
+def test_bigtable_instance_admin_client():
+ # [START bigtable_api_instance_admin_client]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance_admin_client = client.instance_admin_client
+ # [END bigtable_api_instance_admin_client]
+
+
+def test_bigtable_admins_policy():
+ service_account_email = Config.CLIENT._credentials.service_account_email
+
+ # [START bigtable_api_admins_policy]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable.policy import Policy
+ from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ instance.reload()
+ new_policy = Policy()
+ new_policy[BIGTABLE_ADMIN_ROLE] = [Policy.service_account(service_account_email)]
+
+ policy_latest = instance.set_iam_policy(new_policy)
+ policy = policy_latest.bigtable_admins
+ # [END bigtable_api_admins_policy]
+
+ assert len(policy) > 0
+
+
+def test_bigtable_readers_policy():
+ service_account_email = Config.CLIENT._credentials.service_account_email
+
+ # [START bigtable_api_readers_policy]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable.policy import Policy
+ from google.cloud.bigtable.policy import BIGTABLE_READER_ROLE
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ instance.reload()
+ new_policy = Policy()
+ new_policy[BIGTABLE_READER_ROLE] = [Policy.service_account(service_account_email)]
+
+ policy_latest = instance.set_iam_policy(new_policy)
+ policy = policy_latest.bigtable_readers
+ # [END bigtable_api_readers_policy]
+
+ assert len(policy) > 0
+
+
+def test_bigtable_users_policy():
+ service_account_email = Config.CLIENT._credentials.service_account_email
+
+ # [START bigtable_api_users_policy]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable.policy import Policy
+ from google.cloud.bigtable.policy import BIGTABLE_USER_ROLE
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ instance.reload()
+ new_policy = Policy()
+ new_policy[BIGTABLE_USER_ROLE] = [Policy.service_account(service_account_email)]
+
+ policy_latest = instance.set_iam_policy(new_policy)
+ policy = policy_latest.bigtable_users
+ # [END bigtable_api_users_policy]
+
+ assert len(policy) > 0
+
+
+def test_bigtable_viewers_policy():
+ service_account_email = Config.CLIENT._credentials.service_account_email
+
+ # [START bigtable_api_viewers_policy]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable.policy import Policy
+ from google.cloud.bigtable.policy import BIGTABLE_VIEWER_ROLE
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ instance.reload()
+ new_policy = Policy()
+ new_policy[BIGTABLE_VIEWER_ROLE] = [Policy.service_account(service_account_email)]
+
+ policy_latest = instance.set_iam_policy(new_policy)
+ policy = policy_latest.bigtable_viewers
+ # [END bigtable_api_viewers_policy]
+
+ assert len(policy) > 0
+
+
+def test_bigtable_instance_name():
+ import re
+
+ # [START bigtable_api_instance_name]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ instance_name = instance.name
+ # [END bigtable_api_instance_name]
+
+
+def test_bigtable_cluster_name():
+ import re
+
+ # [START bigtable_api_cluster_name]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ cluster = instance.cluster(CLUSTER_ID)
+ cluster_name = cluster.name
+ # [END bigtable_api_cluster_name]
+
+
+def test_bigtable_instance_from_pb():
+ # [START bigtable_api_instance_from_pb]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+
+ name = instance.name
+ instance_pb = data_v2_pb2.Instance(
+ name=name, display_name=INSTANCE_ID, type=PRODUCTION, labels=LABELS
+ )
+
+ instance2 = instance.from_pb(instance_pb, client)
+ # [END bigtable_api_instance_from_pb]
+
+ assert instance2.name == instance.name
+
+
+def test_bigtable_cluster_from_pb():
+ # [START bigtable_api_cluster_from_pb]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ cluster = instance.cluster(CLUSTER_ID)
+
+ name = cluster.name
+ cluster_state = cluster.state
+ serve_nodes = 1
+ cluster_pb = data_v2_pb2.Cluster(
+ name=name,
+ location=LOCATION_ID,
+ state=cluster_state,
+ serve_nodes=serve_nodes,
+ default_storage_type=STORAGE_TYPE,
+ )
+
+ cluster2 = cluster.from_pb(cluster_pb, instance)
+ # [END bigtable_api_cluster_from_pb]
+
+ assert cluster2.name == cluster.name
+
+
+def test_bigtable_instance_state():
+ # [START bigtable_api_instance_state]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ instance_state = instance.state
+ # [END bigtable_api_instance_state]
+
+ assert not instance_state
+
+
+def test_bigtable_cluster_state():
+ # [START bigtable_api_cluster_state]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ cluster = instance.cluster(CLUSTER_ID)
+ cluster_state = cluster.state
+ # [END bigtable_api_cluster_state]
+
+ assert not cluster_state
+
+
+if __name__ == "__main__":
+ pytest.main()
diff --git a/packages/google-cloud-bigtable/docs/classic_client/snippets_table.py b/packages/google-cloud-bigtable/docs/classic_client/snippets_table.py
new file mode 100644
index 000000000000..893135275f6d
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/classic_client/snippets_table.py
@@ -0,0 +1,1340 @@
+#!/usr/bin/env python
+
+# Copyright 2018, Google LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Testable usage examples for Google Cloud Bigtable API wrapper
+
+Each example function takes a ``client`` argument (which must be an instance
+of :class:`google.cloud.bigtable.client.Client`) and uses it to perform a task
+with the API.
+
+To facilitate running the examples as system tests, each example is also passed
+a ``to_delete`` list; the function adds to the list any objects created which
+need to be deleted during teardown.
+
+.. note::
+ This file is a work in progress and will be updated, and unit tests
+ added, as further guidance comes from the team.
+
+"""
+
+import datetime
+import pytest
+
+from google.api_core.exceptions import TooManyRequests
+from google.api_core.exceptions import ServiceUnavailable
+from test_utils.system import unique_resource_id
+from test_utils.retry import RetryErrors
+
+from google.cloud._helpers import UTC
+from google.cloud.bigtable import Client
+from google.cloud.bigtable import enums
+from google.cloud.bigtable import column_family
+
+
+INSTANCE_ID = "snippet" + unique_resource_id("-")
+CLUSTER_ID = "clus-1" + unique_resource_id("-")
+TABLE_ID = "tabl-1" + unique_resource_id("-")
+COLUMN_FAMILY_ID = "col_fam_id-" + unique_resource_id("-")
+LOCATION_ID = "us-central1-f"
+ALT_LOCATION_ID = "us-central1-a"
+PRODUCTION = enums.Instance.Type.PRODUCTION
+SERVER_NODES = 3
+STORAGE_TYPE = enums.StorageType.SSD
+LABEL_KEY = "python-snippet"
+LABEL_STAMP = (
+ datetime.datetime.utcnow()
+ .replace(microsecond=0, tzinfo=UTC)
+ .strftime("%Y-%m-%dt%H-%M-%S")
+)
+LABELS = {LABEL_KEY: str(LABEL_STAMP)}
+COLUMN_FAMILY_ID = "col_fam_id1"
+COL_NAME1 = b"col-name1"
+CELL_VAL1 = b"cell-val"
+ROW_KEY1 = b"row_key_id1"
+COLUMN_FAMILY_ID2 = "col_fam_id2"
+COL_NAME2 = b"col-name2"
+CELL_VAL2 = b"cell-val2"
+ROW_KEY2 = b"row_key_id2"
+
+retry_429_503 = RetryErrors((ServiceUnavailable, TooManyRequests), max_tries=9)
+
+
+class Config(object):
+ """Run-time configuration to be modified at set-up.
+
+ This is a mutable stand-in to allow test set-up to modify
+ global state.
+ """
+
+ CLIENT = None
+ INSTANCE = None
+ TABLE = None
+
+
+def setup_module():
+ client = Config.CLIENT = Client(admin=True)
+ Config.INSTANCE = client.instance(
+ INSTANCE_ID, instance_type=PRODUCTION, labels=LABELS
+ )
+ cluster = Config.INSTANCE.cluster(
+ CLUSTER_ID,
+ location_id=LOCATION_ID,
+ serve_nodes=SERVER_NODES,
+ default_storage_type=STORAGE_TYPE,
+ )
+ operation = Config.INSTANCE.create(clusters=[cluster])
+ # We want to make sure the operation completes.
+ operation.result(timeout=100)
+ Config.TABLE = Config.INSTANCE.table(TABLE_ID)
+ Config.TABLE.create()
+ gc_rule = column_family.MaxVersionsGCRule(2)
+ column_family1 = Config.TABLE.column_family(COLUMN_FAMILY_ID, gc_rule=gc_rule)
+ column_family1.create()
+ gc_rule2 = column_family.MaxVersionsGCRule(4)
+ column_family2 = Config.TABLE.column_family(COLUMN_FAMILY_ID2, gc_rule=gc_rule2)
+ column_family2.create()
+
+
+def teardown_module():
+ retry_429_503(Config.INSTANCE.delete)()
+
+
+def test_bigtable_create_table():
+ # [START bigtable_api_create_table]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable import column_family
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+
+ # Create table without Column families.
+ table1 = instance.table("table_id1")
+ table1.create()
+
+ # Create table with Column families.
+ table2 = instance.table("table_id2")
+ # Define the GC policy to retain only the most recent 2 versions.
+ max_versions_rule = column_family.MaxVersionsGCRule(2)
+ table2.create(column_families={"cf1": max_versions_rule})
+
+ # [END bigtable_api_create_table]
+ assert table1.exists()
+ assert table2.exists()
+ table1.delete()
+ table2.delete()
+
+
+def test_bigtable_sample_row_keys():
+ table_sample = Config.INSTANCE.table("table_id1_samplerow")
+ initial_split_keys = [b"split_key_1", b"split_key_10", b"split_key_20"]
+ table_sample.create(initial_split_keys=initial_split_keys)
+ assert table_sample.exists()
+
+ # [START bigtable_api_sample_row_keys]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+
+ table = instance.table("table_id1_samplerow")
+ data = table.sample_row_keys()
+ actual_keys, offset = zip(*[(rk.row_key, rk.offset_bytes) for rk in data])
+ # [END bigtable_api_sample_row_keys]
+ initial_split_keys.append(b"")
+ assert list(actual_keys) == initial_split_keys
+ table.delete()
+
+
+def test_bigtable_write_read_drop_truncate():
+ # [START bigtable_api_mutate_rows]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+ row_keys = [
+ b"row_key_1",
+ b"row_key_2",
+ b"row_key_3",
+ b"row_key_4",
+ b"row_key_20",
+ b"row_key_22",
+ b"row_key_200",
+ ]
+ col_name = b"col-name1"
+ rows = []
+ for i, row_key in enumerate(row_keys):
+ value = "value_{}".format(i).encode()
+ row = table.row(row_key)
+ row.set_cell(
+ COLUMN_FAMILY_ID, col_name, value, timestamp=datetime.datetime.utcnow()
+ )
+ rows.append(row)
+ response = table.mutate_rows(rows)
+ # validate that all rows were written successfully
+ for i, status in enumerate(response):
+ if status.code != 0:
+ print("Row number {} failed to write".format(i))
+ # [END bigtable_api_mutate_rows]
+ assert len(response) == len(rows)
+ # [START bigtable_api_read_row]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+ row_key = "row_key_1"
+ row = table.read_row(row_key)
+ # [END bigtable_api_read_row]
+ assert row.row_key.decode("utf-8") == row_key
+ # [START bigtable_api_read_rows]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ # Read full table
+ partial_rows = table.read_rows()
+
+ # Read row's value
+ total_rows = []
+ for row in partial_rows:
+ cell = row.cells[COLUMN_FAMILY_ID][col_name][0]
+ print(cell.value.decode("utf-8"))
+ total_rows.append(cell)
+ # [END bigtable_api_read_rows]
+ assert len(total_rows) == len(rows)
+ # [START bigtable_api_drop_by_prefix]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+ row_key_prefix = b"row_key_2"
+ table.drop_by_prefix(row_key_prefix, timeout=200)
+ # [END bigtable_api_drop_by_prefix]
+ dropped_row_keys = [b"row_key_2", b"row_key_20", b"row_key_22", b"row_key_200"]
+ for row in table.read_rows():
+ assert row.row_key.decode("utf-8") not in dropped_row_keys
+
+ # [START bigtable_api_truncate_table]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+ table.truncate(timeout=200)
+ # [END bigtable_api_truncate_table]
+ rows_data_after_truncate = []
+ for row in table.read_rows():
+ rows_data_after_truncate.append(row.row_key)
+ assert rows_data_after_truncate == []
+
+
+def test_bigtable_mutations_batcher():
+ # [START bigtable_api_mutations_batcher]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+ batcher = table.mutations_batcher()
+ # [END bigtable_api_mutations_batcher]
+
+ # The code below will be reused when creating the batcher.py snippets,
+ # so it is kept here for now.
+ row_keys = [
+ b"row_key_1",
+ b"row_key_2",
+ b"row_key_3",
+ b"row_key_4",
+ b"row_key_20",
+ b"row_key_22",
+ b"row_key_200",
+ ]
+ column_name = "column_name".encode()
+ # Add a single row
+ row_key = row_keys[0]
+ row = table.row(row_key)
+ row.set_cell(
+ COLUMN_FAMILY_ID, column_name, "value-0", timestamp=datetime.datetime.utcnow()
+ )
+ batcher.mutate(row)
+ # Add a collections of rows
+ rows = []
+ for i in range(1, len(row_keys)):
+ row = table.row(row_keys[i])
+ value = "value_{}".format(i).encode()
+ row.set_cell(
+ COLUMN_FAMILY_ID, column_name, value, timestamp=datetime.datetime.utcnow()
+ )
+ rows.append(row)
+ batcher.mutate_rows(rows)
+ # batcher will flush current batch if it
+ # reaches the max flush_count
+
+ # Manually send the current batch to Cloud Bigtable
+ batcher.flush()
+ rows_on_table = []
+ for row in table.read_rows():
+ rows_on_table.append(row.row_key)
+ assert len(rows_on_table) == len(row_keys)
+ table.truncate(timeout=200)
+
+
+def test_bigtable_table_column_family():
+ # [START bigtable_api_table_column_family]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+
+ table = instance.table(TABLE_ID)
+ column_family_obj = table.column_family(COLUMN_FAMILY_ID)
+ # [END bigtable_api_table_column_family]
+
+ assert column_family_obj.column_family_id == COLUMN_FAMILY_ID
+
+
+def test_bigtable_list_tables():
+ # [START bigtable_api_list_tables]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ tables_list = instance.list_tables()
+ # [END bigtable_api_list_tables]
+ assert len(tables_list) != 0
+
+
+def test_bigtable_table_name():
+ import re
+
+ # [START bigtable_api_table_name]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+
+ table = instance.table(TABLE_ID)
+ table_name = table.name
+ # [END bigtable_api_table_name]
+ _table_name_re = re.compile(
+ r"^projects/(?P<project>[^/]+)/"
+ r"instances/(?P<instance>[^/]+)/tables/"
+ r"(?P<table_id>[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$"
+ )
+ assert _table_name_re.match(table_name)
+
+
+def test_bigtable_list_column_families():
+ # [START bigtable_api_list_column_families]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ column_family_list = table.list_column_families()
+ # [END bigtable_api_list_column_families]
+
+ assert len(column_family_list) > 0
+
+
+def test_bigtable_get_cluster_states():
+ # [START bigtable_api_get_cluster_states]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ get_cluster_states = table.get_cluster_states()
+ # [END bigtable_api_get_cluster_states]
+
+ assert CLUSTER_ID in get_cluster_states
+
+
+def test_bigtable_table_test_iam_permissions():
+ table_policy = Config.INSTANCE.table("table_id_iam_policy")
+ table_policy.create()
+ assert table_policy.exists()
+
+ # [START bigtable_api_table_test_iam_permissions]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table("table_id_iam_policy")
+
+ permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"]
+ permissions_allowed = table.test_iam_permissions(permissions)
+ # [END bigtable_api_table_test_iam_permissions]
+ assert permissions_allowed == permissions
+
+
+def test_bigtable_table_set_iam_policy_then_get_iam_policy():
+ table_policy = Config.INSTANCE.table("table_id_iam_policy")
+ assert table_policy.exists()
+ service_account_email = Config.CLIENT._credentials.service_account_email
+
+ # [START bigtable_api_table_set_iam_policy]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable.policy import Policy
+ from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table("table_id_iam_policy")
+ new_policy = Policy()
+ new_policy[BIGTABLE_ADMIN_ROLE] = [Policy.service_account(service_account_email)]
+
+ policy_latest = table.set_iam_policy(new_policy)
+ # [END bigtable_api_table_set_iam_policy]
+ assert len(policy_latest.bigtable_admins) > 0
+
+ # [START bigtable_api_table_get_iam_policy]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table("table_id_iam_policy")
+ policy = table.get_iam_policy()
+ # [END bigtable_api_table_get_iam_policy]
+ assert len(policy.bigtable_admins) > 0
+
+
+def test_bigtable_table_exists():
+ # [START bigtable_api_check_table_exists]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+ table_exists = table.exists()
+ # [END bigtable_api_check_table_exists]
+ assert table_exists
+
+
+def test_bigtable_delete_table():
+ table_del = Config.INSTANCE.table("table_id_del")
+ table_del.create()
+ assert table_del.exists()
+
+ # [START bigtable_api_delete_table]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table("table_id_del")
+
+ table.delete()
+ # [END bigtable_api_delete_table]
+ assert not table.exists()
+
+
+def test_bigtable_table_row():
+ # [START bigtable_api_table_row]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ row_keys = [b"row_key_1", b"row_key_2"]
+ row1_obj = table.row(row_keys[0])
+ row2_obj = table.row(row_keys[1])
+ # [END bigtable_api_table_row]
+
+ row1_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1)
+ row1_obj.commit()
+ row2_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1)
+ row2_obj.commit()
+
+ written_row_keys = []
+ for row in table.read_rows():
+ written_row_keys.append(row.row_key)
+
+ assert written_row_keys == row_keys
+
+ table.truncate(timeout=300)
+
+
+def test_bigtable_table_append_row():
+ # [START bigtable_api_table_append_row]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ row_keys = [b"row_key_1", b"row_key_2"]
+ row1_obj = table.append_row(row_keys[0])
+ row2_obj = table.append_row(row_keys[1])
+ # [END bigtable_api_table_append_row]
+
+ row1_obj.append_cell_value(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1)
+ row1_obj.commit()
+ row2_obj.append_cell_value(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1)
+ row2_obj.commit()
+
+ written_row_keys = []
+ for row in table.read_rows():
+ written_row_keys.append(row.row_key)
+
+ assert written_row_keys == row_keys
+
+ table.truncate(timeout=300)
+
+
+def test_bigtable_table_direct_row():
+ # [START bigtable_api_table_direct_row]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ row_keys = [b"row_key_1", b"row_key_2"]
+ row1_obj = table.direct_row(row_keys[0])
+ row2_obj = table.direct_row(row_keys[1])
+ # [END bigtable_api_table_direct_row]
+
+ row1_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1)
+ row1_obj.commit()
+ row2_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1)
+ row2_obj.commit()
+
+ written_row_keys = []
+ for row in table.read_rows():
+ written_row_keys.append(row.row_key)
+
+ assert written_row_keys == row_keys
+
+ table.truncate(timeout=300)
+
+
+def test_bigtable_table_conditional_row():
+ # [START bigtable_api_table_conditional_row]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable.row_filters import PassAllFilter
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ row_keys = [b"row_key_1", b"row_key_2"]
+ filter_ = PassAllFilter(True)
+ row1_obj = table.conditional_row(row_keys[0], filter_=filter_)
+ row2_obj = table.conditional_row(row_keys[1], filter_=filter_)
+ # [END bigtable_api_table_conditional_row]
+
+ row1_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1, state=False)
+ row1_obj.commit()
+ row2_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1, state=False)
+ row2_obj.commit()
+
+ written_row_keys = []
+ for row in table.read_rows():
+ written_row_keys.append(row.row_key)
+
+ assert written_row_keys == row_keys
+
+ table.truncate(timeout=300)
+
+
+def test_bigtable_column_family_name():
+ # [START bigtable_api_column_family_name]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ column_families = table.list_column_families()
+ column_family_obj = column_families[COLUMN_FAMILY_ID]
+ column_family_name = column_family_obj.name
+ # [END bigtable_api_column_family_name]
+ import re
+
+ _cf_name_re = re.compile(
+ r"^projects/(?P<project>[^/]+)/"
+ r"instances/(?P<instance>[^/]+)/tables/"
+ r"(?P<table>[^/]+)/columnFamilies/"
+ r"(?P<cf_id>[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$"
+ )
+ assert _cf_name_re.match(column_family_name)
+
+
+def test_bigtable_create_update_delete_column_family():
+ # [START bigtable_api_create_column_family]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable import column_family
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ column_family_id = "column_family_id1"
+ gc_rule = column_family.MaxVersionsGCRule(2)
+ column_family_obj = table.column_family(column_family_id, gc_rule=gc_rule)
+ column_family_obj.create()
+
+ # [END bigtable_api_create_column_family]
+ column_families = table.list_column_families()
+ assert column_families[column_family_id].gc_rule == gc_rule
+
+ # [START bigtable_api_update_column_family]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable import column_family
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ # Already existing column family id
+ column_family_id = "column_family_id1"
+ # Define the GC rule to retain data with max age of 5 days
+ max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5))
+ column_family_obj = table.column_family(column_family_id, gc_rule=max_age_rule)
+ column_family_obj.update()
+ # [END bigtable_api_update_column_family]
+
+ updated_families = table.list_column_families()
+ assert updated_families[column_family_id].gc_rule == max_age_rule
+
+ # [START bigtable_api_delete_column_family]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable import column_family
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ column_family_id = "column_family_id1"
+ column_family_obj = table.column_family(column_family_id)
+ column_family_obj.delete()
+ # [END bigtable_api_delete_column_family]
+ column_families = table.list_column_families()
+ assert column_family_id not in column_families
+
+
+def test_bigtable_add_row_add_row_range_add_row_range_from_keys():
+ row_keys = [
+ b"row_key_1",
+ b"row_key_2",
+ b"row_key_3",
+ b"row_key_4",
+ b"row_key_5",
+ b"row_key_6",
+ b"row_key_7",
+ b"row_key_8",
+ b"row_key_9",
+ ]
+
+ rows = []
+ for row_key in row_keys:
+ row = Config.TABLE.row(row_key)
+ row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1)
+ rows.append(row)
+ Config.TABLE.mutate_rows(rows)
+
+ # [START bigtable_api_add_row_key]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable.row_set import RowSet
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ row_set = RowSet()
+ row_set.add_row_key(b"row_key_5")
+ # [END bigtable_api_add_row_key]
+
+ read_rows = table.read_rows(row_set=row_set)
+ expected_row_keys = [b"row_key_5"]
+ found_row_keys = [row.row_key for row in read_rows]
+ assert found_row_keys == expected_row_keys
+
+ # [START bigtable_api_add_row_range]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable.row_set import RowSet
+ from google.cloud.bigtable.row_set import RowRange
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ row_set = RowSet()
+ row_set.add_row_range(RowRange(start_key=b"row_key_3", end_key=b"row_key_7"))
+ # [END bigtable_api_add_row_range]
+
+ read_rows = table.read_rows(row_set=row_set)
+ expected_row_keys = [b"row_key_3", b"row_key_4", b"row_key_5", b"row_key_6"]
+ found_row_keys = [row.row_key for row in read_rows]
+ assert found_row_keys == expected_row_keys
+
+ # [START bigtable_api_row_range_from_keys]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable.row_set import RowSet
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ row_set = RowSet()
+ row_set.add_row_range_from_keys(start_key=b"row_key_3", end_key=b"row_key_7")
+ # [END bigtable_api_row_range_from_keys]
+
+ read_rows = table.read_rows(row_set=row_set)
+ expected_row_keys = [b"row_key_3", b"row_key_4", b"row_key_5", b"row_key_6"]
+ found_row_keys = [row.row_key for row in read_rows]
+ assert found_row_keys == expected_row_keys
+ table.truncate(timeout=200)
+
+
+def test_bigtable_add_row_range_with_prefix():
+ row_keys = [
+ b"row_key_1",
+ b"row_key_2",
+ b"row_key_3",
+ b"sample_row_key_1",
+ b"sample_row_key_2",
+ ]
+
+ rows = []
+ for row_key in row_keys:
+ row = Config.TABLE.row(row_key)
+ row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1)
+ rows.append(row)
+ Config.TABLE.mutate_rows(rows)
+
+ # [START bigtable_api_add_row_range_with_prefix]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable.row_set import RowSet
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ row_set = RowSet()
+ row_set.add_row_range_with_prefix("row")
+ # [END bigtable_api_add_row_range_with_prefix]
+
+ read_rows = table.read_rows(row_set=row_set)
+ expected_row_keys = [
+ b"row_key_1",
+ b"row_key_2",
+ b"row_key_3",
+ ]
+ found_row_keys = [row.row_key for row in read_rows]
+ assert found_row_keys == expected_row_keys
+ table.truncate(timeout=200)
+
+
+def test_bigtable_batcher_mutate_flush_mutate_rows():
+ # [START bigtable_api_batcher_mutate]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+ # Batcher for max row bytes, max_row_bytes=1024 is optional.
+ batcher = table.mutations_batcher(max_row_bytes=1024)
+
+ # Add a single row
+ row_key = b"row_key_1"
+ row = table.row(row_key)
+ row.set_cell(
+ COLUMN_FAMILY_ID, COL_NAME1, "value-0", timestamp=datetime.datetime.utcnow()
+ )
+
+ # In batcher, mutate will flush current batch if it
+ # reaches the max_row_bytes
+ batcher.mutate(row)
+ batcher.flush()
+ # [END bigtable_api_batcher_mutate]
+
+ # [START bigtable_api_batcher_flush]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+ # Batcher for max row bytes, max_row_bytes=1024 is optional.
+ batcher = table.mutations_batcher(max_row_bytes=1024)
+
+ # Add a single row
+ row_key = b"row_key"
+ row = table.row(row_key)
+ row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, "value-0")
+
+ # In batcher, mutate will flush current batch if it
+ # reaches the max_row_bytes
+ batcher.mutate(row)
+ batcher.flush()
+ # [END bigtable_api_batcher_flush]
+
+ rows_on_table = []
+ for row in table.read_rows():
+ rows_on_table.append(row.row_key)
+ assert len(rows_on_table) == 2
+ table.truncate(timeout=200)
+
+ # [START bigtable_api_batcher_mutate_rows]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+ batcher = table.mutations_batcher()
+
+ row1 = table.row(b"row_key_1")
+ row2 = table.row(b"row_key_2")
+ row3 = table.row(b"row_key_3")
+ row4 = table.row(b"row_key_4")
+
+ row1.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val1")
+ row2.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val2")
+ row3.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val3")
+ row4.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val4")
+
+ batcher.mutate_rows([row1, row2, row3, row4])
+
+ # batcher will flush current batch if it
+ # reaches the max flush_count
+ # Manually send the current batch to Cloud Bigtable
+ batcher.flush()
+ # [END bigtable_api_batcher_mutate_rows]
+
+ rows_on_table = []
+ for row in table.read_rows():
+ rows_on_table.append(row.row_key)
+ assert len(rows_on_table) == 4
+ table.truncate(timeout=200)
+
+
+def test_bigtable_create_family_gc_max_age():
+ # [START bigtable_api_create_family_gc_max_age]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable import column_family
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ # Define the GC rule to retain data with max age of 5 days
+ max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5))
+
+ column_family_obj = table.column_family("cf1", max_age_rule)
+ column_family_obj.create()
+
+ # [END bigtable_api_create_family_gc_max_age]
+ rule = str(column_family_obj.to_pb())
+ assert "max_age" in rule
+ assert "seconds: 432000" in rule
+ column_family_obj.delete()
+
+
+def test_bigtable_create_family_gc_max_versions():
+ # [START bigtable_api_create_family_gc_max_versions]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable import column_family
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ # Define the GC policy to retain only the most recent 2 versions
+ max_versions_rule = column_family.MaxVersionsGCRule(2)
+
+ column_family_obj = table.column_family("cf2", max_versions_rule)
+ column_family_obj.create()
+
+ # [END bigtable_api_create_family_gc_max_versions]
+ rule = str(column_family_obj.to_pb())
+ assert "max_num_versions: 2" in rule
+ column_family_obj.delete()
+
+
+def test_bigtable_create_family_gc_union():
+ # [START bigtable_api_create_family_gc_union]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable import column_family
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ max_versions_rule = column_family.MaxVersionsGCRule(2)
+ max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5))
+
+ union_rule = column_family.GCRuleUnion([max_versions_rule, max_age_rule])
+
+ column_family_obj = table.column_family("cf3", union_rule)
+ column_family_obj.create()
+
+ # [END bigtable_api_create_family_gc_union]
+ rule = str(column_family_obj.to_pb())
+ assert "union" in rule
+ assert "max_age" in rule
+ assert "seconds: 432000" in rule
+ assert "max_num_versions: 2" in rule
+ column_family_obj.delete()
+
+
+def test_bigtable_create_family_gc_intersection():
+ # [START bigtable_api_create_family_gc_intersection]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable import column_family
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ max_versions_rule = column_family.MaxVersionsGCRule(2)
+ max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5))
+
+ intersection_rule = column_family.GCRuleIntersection(
+ [max_versions_rule, max_age_rule]
+ )
+
+ column_family_obj = table.column_family("cf4", intersection_rule)
+ column_family_obj.create()
+
+ # [END bigtable_api_create_family_gc_intersection]
+
+ rule = str(column_family_obj.to_pb())
+ assert "intersection" in rule
+ assert "max_num_versions: 2" in rule
+ assert "max_age" in rule
+ assert "seconds: 432000" in rule
+ column_family_obj.delete()
+
+
+def test_bigtable_create_family_gc_nested():
+ # [START bigtable_api_create_family_gc_nested]
+ from google.cloud.bigtable import Client
+ from google.cloud.bigtable import column_family
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ # Create a column family with nested GC policies.
+ # Create a nested GC rule:
+ # Drop cells that are beyond the 10 most recent versions
+ # OR
+ # Drop cells that are both older than 5 days AND beyond the
+ # 2 most recent versions
+ rule1 = column_family.MaxVersionsGCRule(10)
+ rule2 = column_family.GCRuleIntersection(
+ [
+ column_family.MaxAgeGCRule(datetime.timedelta(days=5)),
+ column_family.MaxVersionsGCRule(2),
+ ]
+ )
+
+ nested_rule = column_family.GCRuleUnion([rule1, rule2])
+
+ column_family_obj = table.column_family("cf5", nested_rule)
+ column_family_obj.create()
+
+ # [END bigtable_api_create_family_gc_nested]
+
+ rule = str(column_family_obj.to_pb())
+ assert "intersection" in rule
+ assert "max_num_versions: 2" in rule
+ assert "max_age" in rule
+ assert "seconds: 432000" in rule
+ column_family_obj.delete()
+
+
+def test_bigtable_row_data_cells_cell_value_cell_values():
+ value = b"value_in_col1"
+ row = Config.TABLE.row(b"row_key_1")
+ row.set_cell(
+ COLUMN_FAMILY_ID, COL_NAME1, value, timestamp=datetime.datetime.utcnow()
+ )
+ row.commit()
+
+ row.set_cell(
+ COLUMN_FAMILY_ID, COL_NAME1, value, timestamp=datetime.datetime.utcnow()
+ )
+ row.commit()
+
+ # [START bigtable_api_row_data_cells]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+ row_key = "row_key_1"
+ row_data = table.read_row(row_key)
+
+ cells = row_data.cells
+ # [END bigtable_api_row_data_cells]
+
+ actual_cell_value = cells[COLUMN_FAMILY_ID][COL_NAME1][0].value
+ assert actual_cell_value == value
+
+ # [START bigtable_api_row_cell_value]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+ row_key = "row_key_1"
+ row_data = table.read_row(row_key)
+
+ cell_value = row_data.cell_value(COLUMN_FAMILY_ID, COL_NAME1)
+ # [END bigtable_api_row_cell_value]
+ assert cell_value == value
+
+ # [START bigtable_api_row_cell_values]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+ row_key = "row_key_1"
+ row_data = table.read_row(row_key)
+
+ cell_values = row_data.cell_values(COLUMN_FAMILY_ID, COL_NAME1)
+ # [END bigtable_api_row_cell_values]
+
+ for actual_value, timestamp in cell_values:
+ assert actual_value == value
+
+ value2 = b"value_in_col2"
+ row.set_cell(COLUMN_FAMILY_ID, COL_NAME2, value2)
+ row.commit()
+
+ # [START bigtable_api_row_find_cells]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+ row_key = "row_key_1"
+ row = table.read_row(row_key)
+
+ cells = row.find_cells(COLUMN_FAMILY_ID, COL_NAME2)
+ # [END bigtable_api_row_find_cells]
+
+ assert cells[0].value == value2
+ table.truncate(timeout=200)
+
+
+def test_bigtable_row_setcell_rowkey():
+ # [START bigtable_api_row_set_cell]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+ row = table.row(ROW_KEY1)
+
+ cell_val = b"cell-val"
+ row.set_cell(
+ COLUMN_FAMILY_ID, COL_NAME1, cell_val, timestamp=datetime.datetime.utcnow()
+ )
+ # [END bigtable_api_row_set_cell]
+
+ response = table.mutate_rows([row])
+ # validate that all rows were written successfully
+ for i, status in enumerate(response):
+ assert status.code == 0
+
+ # [START bigtable_api_row_row_key]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ row = table.row(ROW_KEY1)
+ row_key = row.row_key
+ # [END bigtable_api_row_row_key]
+ assert row_key == ROW_KEY1
+
+ # [START bigtable_api_row_table]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ row = table.row(ROW_KEY1)
+ table1 = row.table
+ # [END bigtable_api_row_table]
+
+ assert table1 == table
+ table.truncate(timeout=200)
+
+
+def test_bigtable_row_delete():
+ table_row_del = Config.INSTANCE.table(TABLE_ID)
+ row_obj = table_row_del.row(b"row_key_1")
+ row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val")
+ row_obj.commit()
+ written_row_keys = []
+ for row in table_row_del.read_rows():
+ written_row_keys.append(row.row_key)
+ assert written_row_keys == [b"row_key_1"]
+
+ # [START bigtable_api_row_delete]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ row_key = b"row_key_1"
+ row_obj = table.row(row_key)
+
+ row_obj.delete()
+ row_obj.commit()
+ # [END bigtable_api_row_delete]
+
+ written_row_keys = []
+ for row in table.read_rows():
+ written_row_keys.append(row.row_key)
+ assert len(written_row_keys) == 0
+
+
+def test_bigtable_row_delete_cell():
+ table_row_del_cell = Config.INSTANCE.table(TABLE_ID)
+ row_key1 = b"row_key_1"
+ row_obj = table_row_del_cell.row(row_key1)
+ row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1)
+ row_obj.commit()
+
+ written_row_keys = []
+ for row in table_row_del_cell.read_rows():
+ written_row_keys.append(row.row_key)
+ assert written_row_keys == [row_key1]
+
+ # [START bigtable_api_row_delete_cell]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ row_key = b"row_key_1"
+ row_obj = table.row(row_key)
+
+ row_obj.delete_cell(COLUMN_FAMILY_ID, COL_NAME1)
+ row_obj.commit()
+ # [END bigtable_api_row_delete_cell]
+
+ for row in table.read_rows():
+ assert not row.row_key
+
+
+def test_bigtable_row_delete_cells():
+ table_row_del_cells = Config.INSTANCE.table(TABLE_ID)
+ row_key1 = b"row_key_1"
+ row_obj = table_row_del_cells.row(row_key1)
+
+ row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1)
+ row_obj.commit()
+ row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME2, CELL_VAL2)
+ row_obj.commit()
+
+ written_row_keys = []
+ for row in table_row_del_cells.read_rows():
+ written_row_keys.append(row.row_key)
+ assert written_row_keys == [row_key1]
+
+ # [START bigtable_api_row_delete_cells]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ row_key = b"row_key_1"
+ row_obj = table.row(row_key)
+
+ row_obj.delete_cells(COLUMN_FAMILY_ID, [COL_NAME1, COL_NAME2])
+ row_obj.commit()
+ # [END bigtable_api_row_delete_cells]
+
+ for row in table.read_rows():
+ assert not row.row_key
+
+
+def test_bigtable_row_clear():
+ table_row_clear = Config.INSTANCE.table(TABLE_ID)
+ row_obj = table_row_clear.row(b"row_key_1")
+ row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val")
+
+ mutation_size = row_obj.get_mutations_size()
+ assert mutation_size > 0
+
+ # [START bigtable_api_row_clear]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ row_key = b"row_key_1"
+ row_obj = table.row(row_key)
+ row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val")
+
+ row_obj.clear()
+ # [END bigtable_api_row_clear]
+
+ mutation_size = row_obj.get_mutations_size()
+ assert mutation_size == 0
+
+
+def test_bigtable_row_clear_get_mutations_size():
+ # [START bigtable_api_row_get_mutations_size]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ row_key_id = b"row_key_1"
+ row_obj = table.row(row_key_id)
+
+ mutation_size = row_obj.get_mutations_size()
+ # [END bigtable_api_row_get_mutations_size]
+ row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val")
+ mutation_size = row_obj.get_mutations_size()
+ assert mutation_size > 0
+
+ row_obj.clear()
+ mutation_size = row_obj.get_mutations_size()
+ assert mutation_size == 0
+
+
+def test_bigtable_row_setcell_commit_rowkey():
+ # [START bigtable_api_row_set_cell]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ row_key = b"row_key_1"
+ cell_val = b"cell-val"
+ row_obj = table.row(row_key)
+ row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, cell_val)
+ # [END bigtable_api_row_set_cell]
+ row_obj.commit()
+
+ # [START bigtable_api_row_commit]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ row_key = b"row_key_2"
+ cell_val = b"cell-val"
+ row_obj = table.row(row_key)
+ row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, cell_val)
+ row_obj.commit()
+ # [END bigtable_api_row_commit]
+
+ written_row_keys = []
+ for row in table.read_rows():
+ written_row_keys.append(row.row_key)
+
+ assert written_row_keys == [b"row_key_1", b"row_key_2"]
+
+ # [START bigtable_api_row_row_key]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+
+ row_key_id = b"row_key_2"
+ row_obj = table.row(row_key_id)
+ row_key = row_obj.row_key
+ # [END bigtable_api_row_row_key]
+ assert row_key == row_key_id
+ table.truncate(timeout=300)
+
+
+def test_bigtable_row_append_cell_value():
+ row = Config.TABLE.row(ROW_KEY1)
+
+ cell_val1 = b"1"
+ row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, cell_val1)
+ row.commit()
+
+ # [START bigtable_api_row_append_cell_value]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+ row = table.row(ROW_KEY1, append=True)
+
+ cell_val2 = b"2"
+ row.append_cell_value(COLUMN_FAMILY_ID, COL_NAME1, cell_val2)
+ # [END bigtable_api_row_append_cell_value]
+ row.commit()
+
+ row_data = table.read_row(ROW_KEY1)
+ actual_value = row_data.cell_value(COLUMN_FAMILY_ID, COL_NAME1)
+ assert actual_value == cell_val1 + cell_val2
+
+ # [START bigtable_api_row_commit]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+ row = Config.TABLE.row(ROW_KEY2)
+ cell_val = 1
+ row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, cell_val)
+ row.commit()
+ # [END bigtable_api_row_commit]
+
+ # [START bigtable_api_row_increment_cell_value]
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+ row = table.row(ROW_KEY2, append=True)
+
+ int_val = 3
+ row.increment_cell_value(COLUMN_FAMILY_ID, COL_NAME1, int_val)
+ # [END bigtable_api_row_increment_cell_value]
+ row.commit()
+
+ row_data = table.read_row(ROW_KEY2)
+ actual_value = row_data.cell_value(COLUMN_FAMILY_ID, COL_NAME1)
+
+ import struct
+
+ _PACK_I64 = struct.Struct(">q").pack
+ assert actual_value == _PACK_I64(cell_val + int_val)
+ table.truncate(timeout=200)
+
+
+if __name__ == "__main__":
+ pytest.main()
diff --git a/packages/google-cloud-bigtable/docs/classic_client/table-api.rst b/packages/google-cloud-bigtable/docs/classic_client/table-api.rst
new file mode 100644
index 000000000000..1bbf851462bb
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/classic_client/table-api.rst
@@ -0,0 +1,153 @@
+Table Admin API
+===============
+
+After creating an :class:`Instance <google.cloud.bigtable.instance.Instance>`, you can
+interact with individual tables, groups of tables or column families within
+a table.
+
+List Tables
+-----------
+
+If you want a comprehensive list of all existing tables in an instance, make a
+`ListTables`_ API request with
+:meth:`Instance.list_tables() <google.cloud.bigtable.instance.Instance.list_tables>`:
+
+.. code:: python
+
+ >>> instance.list_tables()
+ [<google.cloud.bigtable.table.Table object at 0x...>,
+ <google.cloud.bigtable.table.Table object at 0x...>]
+
+Table Factory
+-------------
+
+To create a :class:`Table <google.cloud.bigtable.table.Table>` object:
+
+.. code:: python
+
+ table = instance.table(table_id)
+
+Even if this :class:`Table <google.cloud.bigtable.table.Table>` has already
+been created with the API, you'll still want this object to use as the
+parent of a :class:`ColumnFamily <google.cloud.bigtable.column_family.ColumnFamily>`
+or :class:`Row <google.cloud.bigtable.row.Row>`.
+
+Create a new Table
+------------------
+
+After creating the table object, make a `CreateTable`_ API request
+with :meth:`create() <google.cloud.bigtable.table.Table.create>`:
+
+.. code:: python
+
+ table.create()
+
+If you would like to initially split the table into several tablets (tablets are
+similar to HBase regions):
+
+.. code:: python
+
+ table.create(initial_split_keys=['s1', 's2'])
+
+Delete an existing Table
+------------------------
+
+Make a `DeleteTable`_ API request with
+:meth:`delete() <google.cloud.bigtable.table.Table.delete>`:
+
+.. code:: python
+
+ table.delete()
+
+List Column Families in a Table
+-------------------------------
+
+Though there is no **official** method for retrieving `column families`_
+associated with a table, the `GetTable`_ API method returns a
+table object with the names of the column families.
+
+To retrieve the list of column families use
+:meth:`list_column_families() <google.cloud.bigtable.table.Table.list_column_families>`:
+
+.. code:: python
+
+ column_families = table.list_column_families()
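+
+The return value is a dictionary keyed by column family ID. As a minimal
+sketch continuing from the call above (and assuming a ``column_family_id``
+that already exists in the table), the garbage collection rule of a single
+family can then be read back:
+
+.. code:: python
+
+ gc_rule = column_families[column_family_id].gc_rule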
+
+Column Family Factory
+---------------------
+
+To create a
+:class:`ColumnFamily <google.cloud.bigtable.column_family.ColumnFamily>` object:
+
+.. code:: python
+
+ column_family = table.column_family(column_family_id)
+
+There is no real reason to use this factory unless you intend to
+create or delete a column family.
+
+In addition, you can specify an optional ``gc_rule`` (a
+:class:`GarbageCollectionRule <google.cloud.bigtable.column_family.GarbageCollectionRule>`
+or similar):
+
+.. code:: python
+
+ column_family = table.column_family(column_family_id,
+ gc_rule=gc_rule)
+
+This rule helps the backend determine when and how to clean up old cells
+in the column family.
+
+See :doc:`column-family` for more information about
+:class:`GarbageCollectionRule <google.cloud.bigtable.column_family.GarbageCollectionRule>`
+and related classes.
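+
+For instance (a minimal sketch that reuses the ``table`` and
+``column_family_id`` from the factory example above), a union rule can
+combine a version limit with an age limit, mirroring the rules used in the
+snippets shipped with this package:
+
+.. code:: python
+
+ import datetime
+
+ from google.cloud.bigtable import column_family
+
+ # Union rule: drop a cell if it is beyond the 2 most recent versions
+ # OR older than 5 days.
+ max_versions_rule = column_family.MaxVersionsGCRule(2)
+ max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5))
+ union_rule = column_family.GCRuleUnion([max_versions_rule, max_age_rule])
+
+ gc_column_family = table.column_family(column_family_id, gc_rule=union_rule)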
+
+Create a new Column Family
+--------------------------
+
+After creating the column family object, make a `CreateColumnFamily`_ API
+request with
+:meth:`ColumnFamily.create() <google.cloud.bigtable.column_family.ColumnFamily.create>`
+
+.. code:: python
+
+ column_family.create()
+
+Delete an existing Column Family
+--------------------------------
+
+Make a `DeleteColumnFamily`_ API request with
+:meth:`ColumnFamily.delete() <google.cloud.bigtable.column_family.ColumnFamily.delete>`
+
+.. code:: python
+
+ column_family.delete()
+
+Update an existing Column Family
+--------------------------------
+
+Make an `UpdateColumnFamily`_ API request with
+:meth:`ColumnFamily.update() <google.cloud.bigtable.column_family.ColumnFamily.update>`
+
+.. code:: python
+
+ column_family.update()
+
+Next Step
+---------
+
+Now we go down to the final level of the hierarchy, from
+:class:`Table <google.cloud.bigtable.table.Table>` to
+:class:`Row <google.cloud.bigtable.row.Row>`, as well as streaming
+data directly via a :class:`Table <google.cloud.bigtable.table.Table>`.
+
+Head next to learn about the :doc:`data-api`.
+
+.. _ListTables: https://googleapis.dev/python/bigtable/latest/table-api.html#list-tables
+.. _CreateTable: https://googleapis.dev/python/bigtable/latest/table-api.html#create-a-new-table
+.. _DeleteTable: https://googleapis.dev/python/bigtable/latest/table-api.html#delete-an-existing-table
+.. _GetTable: https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto#L97-L102
+.. _CreateColumnFamily: https://googleapis.dev/python/bigtable/latest/table-api.html?highlight=gettable#create-a-new-column-family
+.. _UpdateColumnFamily: https://googleapis.dev/python/bigtable/latest/table-api.html?highlight=gettable#update-an-existing-column-family
+.. _DeleteColumnFamily: https://googleapis.dev/python/bigtable/latest/table-api.html?highlight=gettable#delete-an-existing-column-family
+.. _column families: https://cloud.google.com/bigtable/docs/schema-design#column_families_and_column_qualifiers
diff --git a/packages/google-cloud-bigtable/docs/classic_client/table.rst b/packages/google-cloud-bigtable/docs/classic_client/table.rst
new file mode 100644
index 000000000000..c230725d1351
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/classic_client/table.rst
@@ -0,0 +1,6 @@
+Table
+~~~~~
+
+.. automodule:: google.cloud.bigtable.table
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/classic_client/usage.rst b/packages/google-cloud-bigtable/docs/classic_client/usage.rst
new file mode 100644
index 000000000000..7a47f4d4a418
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/classic_client/usage.rst
@@ -0,0 +1,38 @@
+Classic Client
+==============
+
+.. toctree::
+ :maxdepth: 2
+
+ client-intro
+
+ instance-api
+ table-api
+ data-api
+
+ client
+ cluster
+ instance
+ table
+ app-profile
+ backup
+ column-family
+ encryption-info
+ row
+ row-data
+ row-filters
+ row-set
+ batcher
+
+
+In the hierarchy of API concepts (see the sketch after this list):
+
+* a :class:`Client <google.cloud.bigtable.client.Client>` owns an
+ :class:`Instance <google.cloud.bigtable.instance.Instance>`
+* an :class:`Instance <google.cloud.bigtable.instance.Instance>` owns a
+ :class:`Table <google.cloud.bigtable.table.Table>`
+* a :class:`Table <google.cloud.bigtable.table.Table>` owns a
+ :class:`ColumnFamily <google.cloud.bigtable.column_family.ColumnFamily>`
+* a :class:`Table <google.cloud.bigtable.table.Table>` owns a
+ :class:`Row <google.cloud.bigtable.row.Row>`
+ (and all the cells in the row)
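+
+As a minimal sketch of that hierarchy (assuming an existing instance and
+table whose IDs are stored in ``INSTANCE_ID`` and ``TABLE_ID``), each object
+is obtained from its parent rather than constructed directly:
+
+.. code:: python
+
+ from google.cloud.bigtable import Client
+
+ client = Client(admin=True)
+ instance = client.instance(INSTANCE_ID)
+ table = instance.table(TABLE_ID)
+ column_family = table.column_family("cf1")
+ row = table.row(b"row-key")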
diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py
new file mode 100644
index 000000000000..d8f0352cdd1c
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/conf.py
@@ -0,0 +1,384 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# google-cloud-bigtable documentation build configuration file
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath(".."))
+
+# For plugins that can not read conf.py.
+# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
+sys.path.insert(0, os.path.abspath("."))
+
+__version__ = ""
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+needs_sphinx = "1.5.5"
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ "sphinx.ext.autodoc",
+ "sphinx.ext.autosummary",
+ "sphinx.ext.intersphinx",
+ "sphinx.ext.coverage",
+ "sphinx.ext.doctest",
+ "sphinx.ext.napoleon",
+ "sphinx.ext.todo",
+ "sphinx.ext.viewcode",
+ "recommonmark",
+]
+
+# autodoc/autosummary flags
+autoclass_content = "both"
+autodoc_default_options = {"members": True}
+autosummary_generate = True
+
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ["_templates"]
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffixes as a list of strings:
+# source_suffix = ['.rst', '.md']
+source_suffix = [".rst", ".md"]
+
+# The encoding of source files.
+# source_encoding = 'utf-8-sig'
+
+# The root toctree document.
+root_doc = "index"
+
+# General information about the project.
+project = "google-cloud-bigtable"
+copyright = "2019, Google"
+author = "Google APIs"
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The full version, including alpha/beta/rc tags.
+release = __version__
+# The short X.Y version.
+version = ".".join(release.split(".")[0:2])
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# today = ''
+# Else, today_fmt is used as the format for a strftime call.
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = [
+ "_build",
+ "**/.nox/**/*",
+ "samples/AUTHORING_GUIDE.md",
+ "samples/CONTRIBUTING.md",
+ "samples/snippets/README.rst",
+]
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = "sphinx"
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = True
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = "alabaster"
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+html_theme_options = {
+ "description": "Google Cloud Client Libraries for google-cloud-bigtable",
+ "github_user": "googleapis",
+ "github_repo": "python-bigtable",
+ "github_banner": True,
+ "font_family": "'Roboto', Georgia, sans",
+ "head_font_family": "'Roboto', Georgia, serif",
+ "code_font_family": "'Roboto Mono', 'Consolas', monospace",
+}
+
+# Add any paths that contain custom themes here, relative to this directory.
+# html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# " v documentation".
+# html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+# html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ["_static"]
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+# html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+# html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+# html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+# html_domain_indices = True
+
+# If false, no index is generated.
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+# html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+# html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+# html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+# html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = "google-cloud-bigtable-doc"
+
+# -- Options for warnings ------------------------------------------------------
+
+
+suppress_warnings = [
+ # Temporarily suppress this to avoid "more than one target found for
+ # cross-reference" warning, which are intractable for us to avoid while in
+ # a mono-repo.
+ # See https://github.com/sphinx-doc/sphinx/blob
+ # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
+ "ref.python"
+]
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ #'papersize': 'letterpaper',
+ # The font size ('10pt', '11pt' or '12pt').
+ #'pointsize': '10pt',
+ # Additional stuff for the LaTeX preamble.
+ #'preamble': '',
+ # Latex figure (float) alignment
+ #'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (
+ root_doc,
+ "google-cloud-bigtable.tex",
+ "google-cloud-bigtable Documentation",
+ author,
+ "manual",
+ )
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+# latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+# latex_use_parts = False
+
+# If true, show page references after internal links.
+# latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+# latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+# latex_appendices = []
+
+# If false, no module index is generated.
+# latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ (
+ root_doc,
+ "google-cloud-bigtable",
+ "google-cloud-bigtable Documentation",
+ [author],
+ 1,
+ )
+]
+
+# If true, show URL addresses after external links.
+# man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (
+ root_doc,
+ "google-cloud-bigtable",
+ "google-cloud-bigtable Documentation",
+ author,
+ "google-cloud-bigtable",
+ "google-cloud-bigtable Library",
+ "APIs",
+ )
+]
+
+# Documents to append as an appendix to all manuals.
+# texinfo_appendices = []
+
+# If false, no module index is generated.
+# texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+# texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+# texinfo_no_detailmenu = False
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {
+ "python": ("https://python.readthedocs.org/en/latest/", None),
+ "google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
+ "google.api_core": (
+ "https://googleapis.dev/python/google-api-core/latest/",
+ None,
+ ),
+ "grpc": ("https://grpc.github.io/grpc/python/", None),
+ "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
+ "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
+}
+
+
+# Napoleon settings
+napoleon_google_docstring = True
+napoleon_numpy_docstring = True
+napoleon_include_private_with_doc = False
+napoleon_include_special_with_doc = True
+napoleon_use_admonition_for_examples = False
+napoleon_use_admonition_for_notes = False
+napoleon_use_admonition_for_references = False
+napoleon_use_ivar = False
+napoleon_use_param = True
+napoleon_use_rtype = True
diff --git a/packages/google-cloud-bigtable/docs/data_client/async_data_authorized_view.rst b/packages/google-cloud-bigtable/docs/data_client/async_data_authorized_view.rst
new file mode 100644
index 000000000000..7d731297049f
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/data_client/async_data_authorized_view.rst
@@ -0,0 +1,11 @@
+Authorized View Async
+~~~~~~~~~~~~~~~~~~~~~
+
+ .. note::
+
+ It is generally not recommended to use the async client in an otherwise synchronous codebase. To make use of asyncio's
+ performance benefits, the codebase should be designed to be async from the ground up.
+
+.. autoclass:: google.cloud.bigtable.data._async.client.AuthorizedViewAsync
+ :members:
+ :inherited-members:
diff --git a/packages/google-cloud-bigtable/docs/data_client/async_data_client.rst b/packages/google-cloud-bigtable/docs/data_client/async_data_client.rst
new file mode 100644
index 000000000000..2ddcc090cbda
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/data_client/async_data_client.rst
@@ -0,0 +1,12 @@
+Bigtable Data Client Async
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ .. note::
+
+ It is generally not recommended to use the async client in an otherwise synchronous codebase. To make use of asyncio's
+ performance benefits, the codebase should be designed to be async from the ground up.
+
+
+.. autoclass:: google.cloud.bigtable.data.BigtableDataClientAsync
+ :members:
+ :show-inheritance:
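+
+For orientation, a minimal usage sketch follows. The project, instance, table,
+and row key values are placeholders, and error handling is omitted:
+
+.. code:: python
+
+    import asyncio
+
+    from google.cloud.bigtable.data import BigtableDataClientAsync
+
+
+    async def main():
+        # The client is an async context manager; exiting the block closes
+        # the underlying gRPC channel.
+        async with BigtableDataClientAsync(project="my-project") as client:
+            table = client.get_table("my-instance", "my-table")
+            # Read a single row by key.
+            row = await table.read_row(b"row-key-1")
+            print(row)
+
+
+    asyncio.run(main())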
diff --git a/packages/google-cloud-bigtable/docs/data_client/async_data_execute_query_iterator.rst b/packages/google-cloud-bigtable/docs/data_client/async_data_execute_query_iterator.rst
new file mode 100644
index 000000000000..b911fab7fc6f
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/data_client/async_data_execute_query_iterator.rst
@@ -0,0 +1,6 @@
+Execute Query Iterator Async
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: google.cloud.bigtable.data.execute_query.ExecuteQueryIteratorAsync
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/data_client/async_data_mutations_batcher.rst b/packages/google-cloud-bigtable/docs/data_client/async_data_mutations_batcher.rst
new file mode 100644
index 000000000000..3e81f885a338
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/data_client/async_data_mutations_batcher.rst
@@ -0,0 +1,6 @@
+Mutations Batcher Async
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. automodule:: google.cloud.bigtable.data._async.mutations_batcher
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/data_client/async_data_table.rst b/packages/google-cloud-bigtable/docs/data_client/async_data_table.rst
new file mode 100644
index 000000000000..37c396570fba
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/data_client/async_data_table.rst
@@ -0,0 +1,11 @@
+Table Async
+~~~~~~~~~~~
+
+ .. note::
+
+ It is generally not recommended to use the async client in an otherwise synchronous codebase. To make use of asyncio's
+ performance benefits, the codebase should be designed to be async from the ground up.
+
+.. autoclass:: google.cloud.bigtable.data._async.client.TableAsync
+ :members:
+ :inherited-members:
diff --git a/packages/google-cloud-bigtable/docs/data_client/common_data_exceptions.rst b/packages/google-cloud-bigtable/docs/data_client/common_data_exceptions.rst
new file mode 100644
index 000000000000..6180ef222f37
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/data_client/common_data_exceptions.rst
@@ -0,0 +1,6 @@
+Custom Exceptions
+~~~~~~~~~~~~~~~~~
+
+.. automodule:: google.cloud.bigtable.data.exceptions
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/data_client/common_data_execute_query_metadata.rst b/packages/google-cloud-bigtable/docs/data_client/common_data_execute_query_metadata.rst
new file mode 100644
index 000000000000..69add630de3f
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/data_client/common_data_execute_query_metadata.rst
@@ -0,0 +1,6 @@
+Execute Query Metadata
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. automodule:: google.cloud.bigtable.data.execute_query.metadata
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/data_client/common_data_execute_query_values.rst b/packages/google-cloud-bigtable/docs/data_client/common_data_execute_query_values.rst
new file mode 100644
index 000000000000..6c4fb71c1337
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/data_client/common_data_execute_query_values.rst
@@ -0,0 +1,6 @@
+Execute Query Values
+~~~~~~~~~~~~~~~~~~~~
+
+.. automodule:: google.cloud.bigtable.data.execute_query.values
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/data_client/common_data_mutations.rst b/packages/google-cloud-bigtable/docs/data_client/common_data_mutations.rst
new file mode 100644
index 000000000000..9d7a9eab2e3f
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/data_client/common_data_mutations.rst
@@ -0,0 +1,6 @@
+Mutations
+~~~~~~~~~
+
+.. automodule:: google.cloud.bigtable.data.mutations
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/data_client/common_data_read_modify_write_rules.rst b/packages/google-cloud-bigtable/docs/data_client/common_data_read_modify_write_rules.rst
new file mode 100644
index 000000000000..2f28ddf3f723
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/data_client/common_data_read_modify_write_rules.rst
@@ -0,0 +1,6 @@
+Read Modify Write Rules
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. automodule:: google.cloud.bigtable.data.read_modify_write_rules
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/data_client/common_data_read_rows_query.rst b/packages/google-cloud-bigtable/docs/data_client/common_data_read_rows_query.rst
new file mode 100644
index 000000000000..4e3e796d9fd8
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/data_client/common_data_read_rows_query.rst
@@ -0,0 +1,6 @@
+Read Rows Query
+~~~~~~~~~~~~~~~
+
+.. automodule:: google.cloud.bigtable.data.read_rows_query
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/data_client/common_data_row.rst b/packages/google-cloud-bigtable/docs/data_client/common_data_row.rst
new file mode 100644
index 000000000000..63bc711434f4
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/data_client/common_data_row.rst
@@ -0,0 +1,6 @@
+Rows and Cells
+~~~~~~~~~~~~~~
+
+.. automodule:: google.cloud.bigtable.data.row
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/data_client/common_data_row_filters.rst b/packages/google-cloud-bigtable/docs/data_client/common_data_row_filters.rst
new file mode 100644
index 000000000000..22bda8a26131
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/data_client/common_data_row_filters.rst
@@ -0,0 +1,62 @@
+Bigtable Row Filters
+====================
+
+It is possible to use a
+:class:`RowFilter <.data.row_filters.RowFilter>`
+when constructing a :class:`ReadRowsQuery <.data.read_rows_query.ReadRowsQuery>`.
+
+The following basic filters
+are provided:
+
+* :class:`SinkFilter <.data.row_filters.SinkFilter>`
+* :class:`PassAllFilter <.data.row_filters.PassAllFilter>`
+* :class:`BlockAllFilter <.data.row_filters.BlockAllFilter>`
+* :class:`RowKeyRegexFilter <.data.row_filters.RowKeyRegexFilter>`
+* :class:`RowSampleFilter <.data.row_filters.RowSampleFilter>`
+* :class:`FamilyNameRegexFilter <.data.row_filters.FamilyNameRegexFilter>`
+* :class:`ColumnQualifierRegexFilter <.data.row_filters.ColumnQualifierRegexFilter>`
+* :class:`TimestampRangeFilter <.data.row_filters.TimestampRangeFilter>`
+* :class:`ColumnRangeFilter <.data.row_filters.ColumnRangeFilter>`
+* :class:`ValueRegexFilter <.data.row_filters.ValueRegexFilter>`
+* :class:`ValueRangeFilter <.data.row_filters.ValueRangeFilter>`
+* :class:`CellsRowOffsetFilter <.data.row_filters.CellsRowOffsetFilter>`
+* :class:`CellsRowLimitFilter <.data.row_filters.CellsRowLimitFilter>`
+* :class:`CellsColumnLimitFilter <.data.row_filters.CellsColumnLimitFilter>`
+* :class:`StripValueTransformerFilter <.data.row_filters.StripValueTransformerFilter>`
+* :class:`ApplyLabelFilter <.data.row_filters.ApplyLabelFilter>`
+
+In addition, these filters can be combined into composite filters with
+
+* :class:`RowFilterChain <.data.row_filters.RowFilterChain>`
+* :class:`RowFilterUnion <.data.row_filters.RowFilterUnion>`
+* :class:`ConditionalRowFilter <.data.row_filters.ConditionalRowFilter>`
+
+These rules can be nested arbitrarily, with a basic filter at the lowest
+level. For example:
+
+.. code:: python
+
+ from google.cloud.bigtable.data.row_filters import (
+ ApplyLabelFilter,
+ ColumnQualifierRegexFilter,
+ RowFilterChain,
+ RowFilterUnion,
+ )
+
+ # Filter in a specified column (matching any column family).
+ col1_filter = ColumnQualifierRegexFilter(b'columnbia')
+
+ # Create a filter to label results.
+ label1 = u'label-red'
+ label1_filter = ApplyLabelFilter(label1)
+
+ # Combine the filters to label all the cells in columnbia.
+ chain1 = RowFilterChain(filters=[col1_filter, label1_filter])
+
+ # Create a similar filter to label cells blue.
+ col2_filter = ColumnQualifierRegexFilter(b'columnseeya')
+ label2 = u'label-blue'
+ label2_filter = ApplyLabelFilter(label2)
+ chain2 = RowFilterChain(filters=[col2_filter, label2_filter])
+
+ # Bring our two labeled columns together.
+ row_filter = RowFilterUnion(filters=[chain1, chain2])
+
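+The composed filter can then be attached to a query. A short sketch, reusing
+``row_filter`` from above and assuming a data-client ``table`` object already
+exists:
+
+.. code:: python
+
+ from google.cloud.bigtable.data import ReadRowsQuery
+
+ # Only cells that pass one of the two labeled chains are returned.
+ query = ReadRowsQuery(row_filter=row_filter)
+ rows = table.read_rows(query)
+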
+----
+
+.. automodule:: google.cloud.bigtable.data.row_filters
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/data_client/data_client_usage.rst b/packages/google-cloud-bigtable/docs/data_client/data_client_usage.rst
new file mode 100644
index 000000000000..708dafc621cd
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/data_client/data_client_usage.rst
@@ -0,0 +1,41 @@
+Data Client
+===========
+
+Sync Surface
+------------
+
+.. toctree::
+ :maxdepth: 3
+
+ sync_data_client
+ sync_data_table
+ sync_data_authorized_view
+ sync_data_mutations_batcher
+ sync_data_execute_query_iterator
+
+Async Surface
+-------------
+
+.. toctree::
+ :maxdepth: 3
+
+ async_data_client
+ async_data_table
+ async_data_authorized_view
+ async_data_mutations_batcher
+ async_data_execute_query_iterator
+
+Common Classes
+--------------
+
+.. toctree::
+ :maxdepth: 3
+
+ common_data_read_rows_query
+ common_data_row
+ common_data_row_filters
+ common_data_mutations
+ common_data_read_modify_write_rules
+ common_data_exceptions
+ common_data_execute_query_values
+ common_data_execute_query_metadata
diff --git a/packages/google-cloud-bigtable/docs/data_client/sync_data_authorized_view.rst b/packages/google-cloud-bigtable/docs/data_client/sync_data_authorized_view.rst
new file mode 100644
index 000000000000..c0ac29721d5d
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/data_client/sync_data_authorized_view.rst
@@ -0,0 +1,6 @@
+Authorized View
+~~~~~~~~~~~~~~~
+
+.. autoclass:: google.cloud.bigtable.data._sync_autogen.client.AuthorizedView
+ :members:
+ :inherited-members:
diff --git a/packages/google-cloud-bigtable/docs/data_client/sync_data_client.rst b/packages/google-cloud-bigtable/docs/data_client/sync_data_client.rst
new file mode 100644
index 000000000000..cf7c00dad5b2
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/data_client/sync_data_client.rst
@@ -0,0 +1,6 @@
+Bigtable Data Client
+~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: google.cloud.bigtable.data.BigtableDataClient
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/data_client/sync_data_execute_query_iterator.rst b/packages/google-cloud-bigtable/docs/data_client/sync_data_execute_query_iterator.rst
new file mode 100644
index 000000000000..6eb9f84db6b6
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/data_client/sync_data_execute_query_iterator.rst
@@ -0,0 +1,6 @@
+Execute Query Iterator
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: google.cloud.bigtable.data.execute_query.ExecuteQueryIterator
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/data_client/sync_data_mutations_batcher.rst b/packages/google-cloud-bigtable/docs/data_client/sync_data_mutations_batcher.rst
new file mode 100644
index 000000000000..2b7d1bfe094d
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/data_client/sync_data_mutations_batcher.rst
@@ -0,0 +1,6 @@
+Mutations Batcher
+~~~~~~~~~~~~~~~~~
+
+.. automodule:: google.cloud.bigtable.data._sync_autogen.mutations_batcher
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/data_client/sync_data_table.rst b/packages/google-cloud-bigtable/docs/data_client/sync_data_table.rst
new file mode 100644
index 000000000000..95c91eb27981
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/data_client/sync_data_table.rst
@@ -0,0 +1,6 @@
+Table
+~~~~~
+
+.. autoclass:: google.cloud.bigtable.data.Table
+ :members:
+ :show-inheritance:
diff --git a/packages/google-cloud-bigtable/docs/index.rst b/packages/google-cloud-bigtable/docs/index.rst
new file mode 100644
index 000000000000..0694c8bb00e0
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/index.rst
@@ -0,0 +1,27 @@
+.. include:: README.rst
+
+.. include:: multiprocessing.rst
+
+Client Types
+-------------
+.. toctree::
+ :maxdepth: 3
+
+ data_client/data_client_usage
+ classic_client/usage
+ admin_client/admin_client_usage
+
+Changelog
+---------
+
+For a list of all ``google-cloud-bigtable`` releases:
+
+.. toctree::
+ :maxdepth: 2
+
+ changelog
+
+.. toctree::
+ :hidden:
+
+ summary_overview.md
diff --git a/packages/google-cloud-bigtable/docs/multiprocessing.rst b/packages/google-cloud-bigtable/docs/multiprocessing.rst
new file mode 100644
index 000000000000..536d17b2ea65
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/multiprocessing.rst
@@ -0,0 +1,7 @@
+.. note::
+
+ Because this client uses the :mod:`grpc` library, it is safe to
+ share instances across threads. In multiprocessing scenarios, the best
+ practice is to create client instances *after* the invocation of
+ :func:`os.fork` by :class:`multiprocessing.pool.Pool` or
+ :class:`multiprocessing.Process`.
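+
+A sketch of that pattern with the classic client (all identifiers are
+placeholders); each worker builds its own client only after the pool has
+forked:
+
+.. code:: python
+
+    import multiprocessing
+
+    from google.cloud.bigtable import Client
+
+
+    def check_row(row_key):
+        # Created inside the worker process, i.e. after os.fork(), so each
+        # process owns its own gRPC channel.
+        client = Client(project="my-project")
+        table = client.instance("my-instance").table("my-table")
+        # True if the row exists.
+        return table.read_row(row_key) is not None
+
+
+    if __name__ == "__main__":
+        with multiprocessing.Pool(processes=4) as pool:
+            results = pool.map(check_row, [b"row-1", b"row-2"])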
diff --git a/packages/google-cloud-bigtable/docs/scripts/patch_devsite_toc.py b/packages/google-cloud-bigtable/docs/scripts/patch_devsite_toc.py
new file mode 100644
index 000000000000..fbb753daf6ac
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/scripts/patch_devsite_toc.py
@@ -0,0 +1,277 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This script runs after ``nox -s docfx``. docfx is the API doc format used by
+Google Cloud. It is described here: https://github.com/googleapis/docuploader?tab=readme-ov-file#requirements-for-docfx-yaml-tarballs.
+
+One of the files used by docfx is toc.yml, which is used to generate the table of contents sidebar.
+This script patches that file to create subfolders for each of the clients.
+"""
+
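+# For reference, the generated toc.yml patched below is expected to look
+# roughly like the sketch here. The layout is inferred from how the helpers
+# in this script read the file (a top-level entry whose "items" list holds
+# dicts with "name" plus either "href", "uid", or nested "items"); it is not
+# copied from a real build:
+#
+#   - name: google-cloud-bigtable
+#     items:
+#       - name: Overview
+#         href: index.md
+#       - name: Data Client
+#         items:
+#           - name: Bigtable Data Client
+#             href: sync_data_client.md
+#       - name: Bigtable Admin V2
+#         items:
+#           - name: SomeAdminClient
+#             uid: google.cloud.bigtable_admin_v2.SomeAdminClient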
+
+import glob
+import yaml
+import os
+import shutil
+
+# set working directory to /docs
+os.chdir(f"{os.path.dirname(os.path.abspath(__file__))}/{os.pardir}")
+
+
+def add_sections(toc_file_path, section_list, output_file_path=None):
+ """
+ Add new sections to the autogenerated docfx table of contents file
+
+ Takes in a list of TocSection objects, which should point to a directory of rst files
+ within the main /docs directory, which represents a self-contained section of content
+
+ :param toc_file_path: path to the autogenerated toc file
+ :param section_list: list of TocSection objects to add
+ :param output_file_path: path to save the updated toc file. If None, save to the input file
+ """
+ # remove any sections that are already in the toc
+ remove_sections(toc_file_path, [section.title for section in section_list])
+ # add new sections
+ current_toc = yaml.safe_load(open(toc_file_path, "r"))
+ for section in section_list:
+ print(f"Adding section {section.title}...")
+ current_toc[0]["items"].insert(-1, section.to_dict())
+ section.copy_markdown()
+ # save file
+ if output_file_path is None:
+ output_file_path = toc_file_path
+ with open(output_file_path, "w") as f:
+ yaml.dump(current_toc, f)
+
+
+def remove_sections(toc_file_path, section_list, output_file_path=None):
+ """
+ Remove sections from the autogenerated docfx table of contents file
+
+ Takes in a list of string section names to remove from the toc file
+
+ :param toc_file_path: path to the autogenerated toc file
+ :param section_list: list of section names to remove
+ :param output_file_path: path to save the updated toc file. If None, save to the input file
+ """
+ current_toc = yaml.safe_load(open(toc_file_path, "r"))
+ print(f"Removing sections {section_list}...")
+ new_items = [d for d in current_toc[0]["items"] if d["name"] not in section_list]
+ current_toc[0]["items"] = new_items
+ # save file
+ if output_file_path is None:
+ output_file_path = toc_file_path
+ with open(output_file_path, "w") as f:
+ yaml.dump(current_toc, f)
+
+
+class TocSection:
+ def __init__(self, dir_name, index_file_name):
+ """
+ :param dir_name: name of the directory containing the rst files
+ :param index_file_name: name of an index file within dir_name. This file
+ will not be included in the table of contents, but provides an ordered
+ list of the other files which should be included
+ """
+ self.dir_name = dir_name
+ self.index_file_name = index_file_name
+ index_file_path = os.path.join(dir_name, index_file_name)
+ # find set of files referenced by the index file
+ with open(index_file_path, "r") as f:
+ self.title = None
+ in_toc = False
+ self.items = []
+ for line in f:
+ # ignore empty lines
+ if not line.strip():
+ continue
+ # add files explicitly included in the toc
+ if line.startswith(".. include::"):
+ file_base = os.path.splitext(line.split("::")[1].strip())[0]
+ self.items.append(
+ self.extract_toc_entry(
+ file_base, file_title=file_base.capitalize()
+ )
+ )
+ continue
+ if line.startswith(".. toctree::"):
+ in_toc = True
+ continue
+ # ignore directives
+ if ":" in line:
+ continue
+ # set title as the first line with no directive
+ if self.title is None:
+ self.title = line.strip()
+ if not in_toc:
+ continue
+ # bail when toc indented block is done
+ if not line.startswith(" ") and not line.startswith("\t"):
+ in_toc = False
+ continue
+ # extract entries
+ self.items.append(self.extract_toc_entry(line.strip()))
+
+ def extract_toc_entry(self, file_name, file_title=None):
+ """
+ Given the name of a file, extract the title and href for the toc entry,
+ and return as a dictionary
+ """
+ # load the file to get the title
+ with open(f"{self.dir_name}/{file_name}.rst", "r") as f2:
+ if file_title is None:
+ # use first line as title if not provided
+ file_title = f2.readline().strip()
+ return {"name": file_title, "href": f"{file_name}.md"}
+
+ def to_dict(self):
+ """
+ Convert the TocSection object to a dictionary that can be written to a yaml file
+ """
+ return {"name": self.title, "items": self.items}
+
+ def copy_markdown(self):
+ """
+ Copy markdown files from _build/markdown/dir_name to _build/html/docfx_yaml
+
+ This is necessary because the markdown files in sub-directories
+ are not copied over by the docfx build by default
+ """
+ for file in os.listdir("_build/markdown/" + self.dir_name):
+ shutil.copy(
+ f"_build/markdown/{self.dir_name}/{file}",
+ f"_build/html/docfx_yaml",
+ )
+
+ def validate_section(self, toc):
+ # Make sure each rst file is listed in the toc.
+ items_in_toc = [
+ d["items"] for d in toc[0]["items"] if d["name"] == self.title and ".rst"
+ ][0]
+ items_in_dir = [f for f in os.listdir(self.dir_name) if f.endswith(".rst")]
+ # subtract 1 for index
+ assert len(items_in_toc) == len(items_in_dir) - 1
+ for file in items_in_dir:
+ if file != self.index_file_name:
+ base_name, _ = os.path.splitext(file)
+ assert any(d["href"] == f"{base_name}.md" for d in items_in_toc)
+ # make sure the markdown files are present in the docfx_yaml directory
+ md_files = [d["href"] for d in items_in_toc]
+ for file in md_files:
+ assert os.path.exists(f"_build/html/docfx_yaml/{file}")
+
+
+class UIDFilteredTocSection(TocSection):
+ def __init__(self, toc_file_path, section_name, title, uid_prefix):
+ """Creates a filtered section denoted by section_name in the toc_file_path to items with the given UID prefix.
+
+ The section is then renamed to the title.
+ """
+ current_toc = yaml.safe_load(open(toc_file_path, "r"))
+ self.uid_prefix = uid_prefix
+
+ # Since we are looking for a specific section_name there should only
+ # be one match.
+ section_items = [
+ d for d in current_toc[0]["items"] if d["name"] == section_name
+ ][0]["items"]
+ filtered_items = [d for d in section_items if d["uid"].startswith(uid_prefix)]
+ self.items = filtered_items
+ self.title = title
+
+ def copy_markdown(self):
+ """
+ No-op because we are filtering on UIDs, not markdown files.
+ """
+ pass
+
+ def validate_section(self, toc):
+ uids_in_toc = set()
+
+ # A UID-filtered TOC tree looks like the following:
+ # - items:
+ # items:
+ # name:
+ # uid:
+ #
+ # Walk through the TOC tree to find all UIDs recursively.
+ def find_uids_in_items(items):
+ uids_in_toc.add(items["uid"])
+ for subitem in items.get("items", []):
+ find_uids_in_items(subitem)
+
+ items_in_toc = [d["items"] for d in toc[0]["items"] if d["name"] == self.title][
+ 0
+ ]
+ for item in items_in_toc:
+ find_uids_in_items(item)
+
+ # Now that we have all the UIDs, first match all of them
+ # with corresponding .yml files.
+ for uid in uids_in_toc:
+ assert os.path.exists(f"_build/html/docfx_yaml/{uid}.yml")
+
+ # Also validate that every uid yml file that starts with the uid_prefix
+ # exists in the section.
+ for filename in glob.glob(
+ f"{self.uid_prefix}*.yml", root_dir="_build/html/docfx_yaml"
+ ):
+ assert filename[:-4] in uids_in_toc
+
+
+def validate_toc(toc_file_path, expected_section_list, added_sections):
+ current_toc = yaml.safe_load(open(toc_file_path, "r"))
+ # make sure the set of sections matches what we expect
+ found_sections = [d["name"] for d in current_toc[0]["items"]]
+ assert (
+ found_sections == expected_section_list
+ ), f"Expected {expected_section_list}, found {found_sections}"
+ # make sure each custom section is in the toc
+ for section in added_sections:
+ assert section.title in found_sections
+ section.validate_section(current_toc)
+ print("Toc validation passed")
+
+
+if __name__ == "__main__":
+ # Add sections for the data_client, admin client, and classic_client docs
+ toc_path = "_build/html/docfx_yaml/toc.yml"
+
+ custom_sections = [
+ TocSection(dir_name="data_client", index_file_name="data_client_usage.rst"),
+ UIDFilteredTocSection(
+ toc_file_path=toc_path,
+ section_name="Bigtable Admin V2",
+ title="Admin Client",
+ uid_prefix="google.cloud.bigtable_admin_v2",
+ ),
+ TocSection(dir_name="classic_client", index_file_name="usage.rst"),
+ ]
+ add_sections(toc_path, custom_sections)
+ # Remove the Bigtable section, since it has duplicated data
+ remove_sections(toc_path, ["Bigtable", "Bigtable Admin V2"])
+ # run validation to make sure yaml is structured as we expect
+ validate_toc(
+ toc_file_path=toc_path,
+ expected_section_list=[
+ "Overview",
+ "bigtable APIs",
+ "Changelog",
+ "Multiprocessing",
+ "Data Client",
+ "Admin Client",
+ "Classic Client",
+ ],
+ added_sections=custom_sections,
+ )
diff --git a/packages/google-cloud-bigtable/docs/summary_overview.md b/packages/google-cloud-bigtable/docs/summary_overview.md
new file mode 100644
index 000000000000..2379e8b6bc1f
--- /dev/null
+++ b/packages/google-cloud-bigtable/docs/summary_overview.md
@@ -0,0 +1,22 @@
+[
+This is a templated file. Adding content to this file may result in it being
+reverted. Instead, if you want to place additional content, create an
+"overview_content.md" file in `docs/` directory. The Sphinx tool will
+pick up on the content and merge the content.
+]: #
+
+# Cloud Bigtable API
+
+Overview of the APIs available for Cloud Bigtable API.
+
+## All entries
+
+Classes, methods and properties & attributes for
+Cloud Bigtable API.
+
+[classes](https://cloud.google.com/python/docs/reference/bigtable/latest/summary_class.html)
+
+[methods](https://cloud.google.com/python/docs/reference/bigtable/latest/summary_method.html)
+
+[properties and
+attributes](https://cloud.google.com/python/docs/reference/bigtable/latest/summary_property.html)
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py
new file mode 100644
index 000000000000..7331ff24150c
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py
@@ -0,0 +1,25 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google Cloud Bigtable API package."""
+
+from google.cloud.bigtable.client import Client
+
+from google.cloud.bigtable import gapic_version as package_version
+
+__version__: str
+
+__version__ = package_version.__version__
+
+__all__ = ["__version__", "Client"]
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py b/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py
new file mode 100644
index 000000000000..8cde66146f9a
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py
@@ -0,0 +1,377 @@
+# Copyright 2018 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""User-friendly container for Google Cloud Bigtable AppProfile."""
+
+
+import re
+
+from google.cloud.bigtable.enums import RoutingPolicyType
+from google.cloud.bigtable_admin_v2.types import instance
+from google.protobuf import field_mask_pb2
+from google.api_core.exceptions import NotFound
+
+_APP_PROFILE_NAME_RE = re.compile(
+ r"^projects/(?P[^/]+)/"
+ r"instances/(?P[^/]+)/"
+ r"appProfiles/(?P[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$"
+)
+
+
+class AppProfile(object):
+ """Representation of a Google Cloud Bigtable AppProfile.
+
+ We can use a :class:`AppProfile` to:
+
+ * :meth:`reload` itself
+ * :meth:`create` itself
+ * :meth:`update` itself
+ * :meth:`delete` itself
+
+ :type app_profile_id: str
+ :param app_profile_id: The ID of the AppProfile. Must be of the form
+ ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+ :type instance: :class:`~google.cloud.bigtable.instance.Instance`
+ :param instance: The instance that owns this AppProfile.
+
+ :type routing_policy_type: int
+ :param routing_policy_type: (Optional) The type of the routing policy.
+ Possible values are represented
+ by the following constants:
+ :data:`google.cloud.bigtable.enums.RoutingPolicyType.ANY`
+ :data:`google.cloud.bigtable.enums.RoutingPolicyType.SINGLE`
+
+ :type description: str
+ :param description: (Optional) Long form description of the use
+ case for this AppProfile.
+
+ :type cluster_id: str
+ :param cluster_id: (Optional) Unique cluster_id which is only required
+ when routing_policy_type is
+ ROUTING_POLICY_TYPE_SINGLE.
+
+ :type multi_cluster_ids: list
+ :param multi_cluster_ids: (Optional) The set of clusters to route to.
+ The order is ignored; clusters will be tried in order of distance.
+ If left empty, all clusters are eligible.
+
+ :type allow_transactional_writes: bool
+ :param allow_transactional_writes: (Optional) If true, allow
+ transactional writes for
+ ROUTING_POLICY_TYPE_SINGLE.
+ """
+
+ def __init__(
+ self,
+ app_profile_id,
+ instance,
+ routing_policy_type=None,
+ description=None,
+ cluster_id=None,
+ multi_cluster_ids=None,
+ allow_transactional_writes=None,
+ ):
+ self.app_profile_id = app_profile_id
+ self._instance = instance
+ self.routing_policy_type = routing_policy_type
+ self.description = description
+ self.cluster_id = cluster_id
+ self.multi_cluster_ids = multi_cluster_ids
+ self.allow_transactional_writes = allow_transactional_writes
+
+ @property
+ def name(self):
+ """AppProfile name used in requests.
+
+ .. note::
+
+ This property will not change if ``app_profile_id`` does not, but
+ the return value is not cached.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_app_profile_name]
+ :end-before: [END bigtable_api_app_profile_name]
+ :dedent: 4
+
+ The AppProfile name is of the form
+ ``"projects/../instances/../app_profile/{app_profile_id}"``
+
+ :rtype: str
+ :returns: The AppProfile name.
+ """
+ return self.instance_admin_client.app_profile_path(
+ self._instance._client.project,
+ self._instance.instance_id,
+ self.app_profile_id,
+ )
+
+ @property
+ def instance_admin_client(self):
+ """Shortcut to instance_admin_client
+
+ :rtype: :class:`.bigtable_admin_pb2.BigtableInstanceAdmin`
+ :returns: A BigtableInstanceAdmin instance.
+ """
+ return self._instance._client.instance_admin_client
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return False
+ # NOTE: This does not compare the configuration values, such as
+ # the routing_policy_type. Instead, it only compares
+ # identifying values instance, AppProfile ID and client. This is
+ # intentional, since the same AppProfile can be in different
+ # states if not synchronized.
+ return (
+ other.app_profile_id == self.app_profile_id
+ and other._instance == self._instance
+ )
+
+ def __ne__(self, other):
+ return not self == other
+
+ @classmethod
+ def from_pb(cls, app_profile_pb, instance):
+ """Creates an instance app_profile from a protobuf.
+
+ :type app_profile_pb: :class:`instance.app_profile_pb`
+ :param app_profile_pb: An instance protobuf object.
+
+ :type instance: :class:`google.cloud.bigtable.instance.Instance`
+ :param instance: The instance that owns the cluster.
+
+ :rtype: :class:`AppProfile`
+ :returns: The AppProfile parsed from the protobuf response.
+
+ :raises: :class:`ValueError` if the AppProfile
+ name does not match
+ ``projects/{project}/instances/{instance_id}/appProfiles/{app_profile_id}``,
+ or if the parsed instance ID does not match the instance ID
+ on the client,
+ or if the parsed project ID does not match the project ID
+ on the client.
+ """
+ match_app_profile_name = _APP_PROFILE_NAME_RE.match(app_profile_pb.name)
+ if match_app_profile_name is None:
+ raise ValueError(
+ "AppProfile protobuf name was not in the " "expected format.",
+ app_profile_pb.name,
+ )
+ if match_app_profile_name.group("instance") != instance.instance_id:
+ raise ValueError(
+ "Instance ID on app_profile does not match the "
+ "instance ID on the client"
+ )
+ if match_app_profile_name.group("project") != instance._client.project:
+ raise ValueError(
+ "Project ID on app_profile does not match the "
+ "project ID on the client"
+ )
+ app_profile_id = match_app_profile_name.group("app_profile_id")
+
+ result = cls(app_profile_id, instance)
+ result._update_from_pb(app_profile_pb)
+ return result
+
+ def _update_from_pb(self, app_profile_pb):
+ """Refresh self from the server-provided protobuf.
+ Helper for :meth:`from_pb` and :meth:`reload`.
+ """
+ self.routing_policy_type = None
+ self.allow_transactional_writes = None
+ self.cluster_id = None
+ self.multi_cluster_ids = None
+ self.description = app_profile_pb.description
+
+ routing_policy_type = None
+ if app_profile_pb._pb.HasField("multi_cluster_routing_use_any"):
+ routing_policy_type = RoutingPolicyType.ANY
+ self.allow_transactional_writes = False
+ if app_profile_pb.multi_cluster_routing_use_any.cluster_ids:
+ self.multi_cluster_ids = (
+ app_profile_pb.multi_cluster_routing_use_any.cluster_ids
+ )
+ else:
+ routing_policy_type = RoutingPolicyType.SINGLE
+ self.cluster_id = app_profile_pb.single_cluster_routing.cluster_id
+ self.allow_transactional_writes = (
+ app_profile_pb.single_cluster_routing.allow_transactional_writes
+ )
+ self.routing_policy_type = routing_policy_type
+
+ def _to_pb(self):
+ """Create an AppProfile proto buff message for API calls
+ :rtype: :class:`.instance.AppProfile`
+ :returns: The converted current object.
+
+ :raises: :class:`ValueError` if the AppProfile
+ routing_policy_type is not set
+ """
+ if not self.routing_policy_type:
+ raise ValueError("AppProfile required routing policy.")
+
+ single_cluster_routing = None
+ multi_cluster_routing_use_any = None
+
+ if self.routing_policy_type == RoutingPolicyType.ANY:
+ multi_cluster_routing_use_any = (
+ instance.AppProfile.MultiClusterRoutingUseAny(
+ cluster_ids=self.multi_cluster_ids
+ )
+ )
+ else:
+ single_cluster_routing = instance.AppProfile.SingleClusterRouting(
+ cluster_id=self.cluster_id,
+ allow_transactional_writes=self.allow_transactional_writes,
+ )
+
+ app_profile_pb = instance.AppProfile(
+ name=self.name,
+ description=self.description,
+ multi_cluster_routing_use_any=multi_cluster_routing_use_any,
+ single_cluster_routing=single_cluster_routing,
+ )
+ return app_profile_pb
+
+ def reload(self):
+ """Reload the metadata for this cluster
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_reload_app_profile]
+ :end-before: [END bigtable_api_reload_app_profile]
+ :dedent: 4
+ """
+
+ app_profile_pb = self.instance_admin_client.get_app_profile(
+ request={"name": self.name}
+ )
+
+ # NOTE: _update_from_pb does not check that the project and
+ # app_profile ID on the response match the request.
+ self._update_from_pb(app_profile_pb)
+
+ def exists(self):
+ """Check whether the AppProfile already exists.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_app_profile_exists]
+ :end-before: [END bigtable_api_app_profile_exists]
+ :dedent: 4
+
+ :rtype: bool
+ :returns: True if the AppProfile exists, else False.
+ """
+ try:
+ self.instance_admin_client.get_app_profile(request={"name": self.name})
+ return True
+ # NOTE: There could be other exceptions that are returned to the user.
+ except NotFound:
+ return False
+
+ def create(self, ignore_warnings=None):
+ """Create this AppProfile.
+
+ .. note::
+
+ Uses the ``instance`` and ``app_profile_id`` on the current
+ :class:`AppProfile` in addition to the ``routing_policy_type``,
+ ``description``, ``cluster_id`` and ``allow_transactional_writes``.
+ To change them before creating, set the corresponding attributes on
+ this :class:`AppProfile` first.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_create_app_profile]
+ :end-before: [END bigtable_api_create_app_profile]
+ :dedent: 4
+
+ :type ignore_warnings: bool
+ :param ignore_warnings: (Optional) If true, ignore safety checks when
+ creating the AppProfile.
+ """
+ return self.from_pb(
+ self.instance_admin_client.create_app_profile(
+ request={
+ "parent": self._instance.name,
+ "app_profile_id": self.app_profile_id,
+ "app_profile": self._to_pb(),
+ "ignore_warnings": ignore_warnings,
+ }
+ ),
+ self._instance,
+ )
+
+ def update(self, ignore_warnings=None):
+ """Update this app_profile.
+
+ .. note::
+
+ Update any or all of the following values:
+
+ * ``routing_policy_type``
+ * ``description``
+ * ``cluster_id``
+ * ``multi_cluster_ids``
+ * ``allow_transactional_writes``
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_update_app_profile]
+ :end-before: [END bigtable_api_update_app_profile]
+ :dedent: 4
+ """
+ update_mask_pb = field_mask_pb2.FieldMask()
+
+ if self.description is not None:
+ update_mask_pb.paths.append("description")
+
+ if self.routing_policy_type == RoutingPolicyType.ANY:
+ update_mask_pb.paths.append("multi_cluster_routing_use_any")
+ else:
+ update_mask_pb.paths.append("single_cluster_routing")
+
+ return self.instance_admin_client.update_app_profile(
+ request={
+ "app_profile": self._to_pb(),
+ "update_mask": update_mask_pb,
+ "ignore_warnings": ignore_warnings,
+ }
+ )
+
+ def delete(self, ignore_warnings=None):
+ """Delete this AppProfile.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_delete_app_profile]
+ :end-before: [END bigtable_api_delete_app_profile]
+ :dedent: 4
+
+ :type ignore_warnings: bool
+ :param ignore_warnings: If true, ignore safety checks when deleting
+ the AppProfile.
+
+ :raises google.api_core.exceptions.GoogleAPICallError: If the request
+ failed for any reason.
+ :raises google.api_core.exceptions.RetryError: If the request failed
+ due to a retryable error and retry attempts failed.
+ :raises ValueError: If the parameters are invalid.
+ """
+ self.instance_admin_client.delete_app_profile(
+ request={"name": self.name, "ignore_warnings": ignore_warnings}
+ )
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py
new file mode 100644
index 000000000000..f6fa24421f02
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py
@@ -0,0 +1,489 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A user-friendly wrapper for a Google Cloud Bigtable Backup."""
+
+import re
+
+from google.cloud._helpers import _datetime_to_pb_timestamp # type: ignore
+from google.cloud.bigtable_admin_v2 import BaseBigtableTableAdminClient
+from google.cloud.bigtable_admin_v2.types import table
+from google.cloud.bigtable.encryption_info import EncryptionInfo
+from google.cloud.bigtable.policy import Policy
+from google.cloud.exceptions import NotFound # type: ignore
+from google.protobuf import field_mask_pb2
+
+_BACKUP_NAME_RE = re.compile(
+ r"^projects/(?P[^/]+)/"
+ r"instances/(?P[a-z][-a-z0-9]*)/"
+ r"clusters/(?P[a-z][-a-z0-9]*)/"
+ r"backups/(?P[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$"
+)
+
+_TABLE_NAME_RE = re.compile(
+ r"^projects/(?P[^/]+)/"
+ r"instances/(?P[a-z][-a-z0-9]*)/"
+ r"tables/(?P[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$"
+)
+
+
+class Backup(object):
+ """Representation of a Google Cloud Bigtable Backup.
+
+ A :class:`Backup` can be used to:
+
+ * :meth:`create` the backup
+ * :meth:`update` the backup
+ * :meth:`delete` the backup
+
+ :type backup_id: str
+ :param backup_id: The ID of the backup.
+
+ :type instance: :class:`~google.cloud.bigtable.instance.Instance`
+ :param instance: The Instance that owns this Backup.
+
+ :type cluster_id: str
+ :param cluster_id: (Optional) The ID of the Cluster that contains this Backup.
+ Required for calling 'delete', 'exists' etc. methods.
+
+ :type table_id: str
+ :param table_id: (Optional) The ID of the Table that the Backup is for.
+ Required if the 'create' method will be called.
+
+ :type expire_time: :class:`datetime.datetime`
+ :param expire_time: (Optional) The expiration time after which the Backup
+ will be automatically deleted. Required if the `create`
+ method will be called.
+ """
+
+ def __init__(
+ self,
+ backup_id,
+ instance,
+ cluster_id=None,
+ table_id=None,
+ expire_time=None,
+ encryption_info=None,
+ ):
+ self.backup_id = backup_id
+ self._instance = instance
+ self._cluster = cluster_id
+ self.table_id = table_id
+ self._expire_time = expire_time
+ self._encryption_info = encryption_info
+
+ self._parent = None
+ self._source_table = None
+ self._start_time = None
+ self._end_time = None
+ self._size_bytes = None
+ self._state = None
+
+ @property
+ def name(self):
+ """Backup name used in requests.
+
+ The Backup name is of the form
+
+ ``"projects/../instances/../clusters/../backups/{backup_id}"``
+
+ :rtype: str
+ :returns: The Backup name.
+
+ :raises ValueError: If the 'cluster' has not been set.
+ """
+ if not self._cluster:
+ raise ValueError('"cluster" parameter must be set')
+
+ return BaseBigtableTableAdminClient.backup_path(
+ project=self._instance._client.project,
+ instance=self._instance.instance_id,
+ cluster=self._cluster,
+ backup=self.backup_id,
+ )
+
+ @property
+ def cluster(self):
+ """The ID of the [parent] cluster used in requests.
+
+ :rtype: str
+ :returns: The ID of the cluster containing the Backup.
+ """
+ return self._cluster
+
+ @cluster.setter
+ def cluster(self, cluster_id):
+ self._cluster = cluster_id
+
+ @property
+ def parent(self):
+ """Name of the parent cluster used in requests.
+
+ .. note::
+ This property will return None if ``cluster`` is not set.
+
+ The parent name is of the form
+
+ ``"projects/{project}/instances/{instance_id}/clusters/{cluster}"``
+
+ :rtype: str
+ :returns: A full path to the parent cluster.
+ """
+ if not self._parent and self._cluster:
+ self._parent = BaseBigtableTableAdminClient.cluster_path(
+ project=self._instance._client.project,
+ instance=self._instance.instance_id,
+ cluster=self._cluster,
+ )
+ return self._parent
+
+ @property
+ def source_table(self):
+ """The full name of the Table from which this Backup is created.
+
+ .. note::
+ This property will return None if ``table_id`` is not set.
+
+ The table name is of the form
+
+ ``"projects/../instances/../tables/{source_table}"``
+
+ :rtype: str
+ :returns: The Table name.
+ """
+ if not self._source_table and self.table_id:
+ self._source_table = BaseBigtableTableAdminClient.table_path(
+ project=self._instance._client.project,
+ instance=self._instance.instance_id,
+ table=self.table_id,
+ )
+ return self._source_table
+
+ @property
+ def expire_time(self):
+ """Expiration time used in the creation requests.
+
+ :rtype: :class:`datetime.datetime`
+ :returns: A 'datetime' object representing the expiration time of
+ this Backup.
+ """
+ return self._expire_time
+
+ @expire_time.setter
+ def expire_time(self, new_expire_time):
+ self._expire_time = new_expire_time
+
+ @property
+ def encryption_info(self):
+ """Encryption info for this Backup.
+
+ :rtype: :class:`google.cloud.bigtable.encryption_info.EncryptionInfo`
+ :returns: The encryption information for this backup.
+ """
+ return self._encryption_info
+
+ @property
+ def start_time(self):
+ """The time this Backup was started.
+
+ :rtype: :class:`datetime.datetime`
+ :returns: A 'datetime' object representing the time when the creation
+ of this Backup had started.
+ """
+ return self._start_time
+
+ @property
+ def end_time(self):
+ """The time this Backup was finished.
+
+ :rtype: :class:`datetime.datetime`
+ :returns: A 'datetime' object representing the time when the creation
+ of this Backup was finished.
+ """
+ return self._end_time
+
+ @property
+ def size_bytes(self):
+ """The size of this Backup, in bytes.
+
+ :rtype: int
+ :returns: The size of this Backup, in bytes.
+ """
+ return self._size_bytes
+
+ @property
+ def state(self):
+ """The current state of this Backup.
+
+ :rtype: :class:`~google.cloud.bigtable_admin_v2.types.table.Backup.State`
+ :returns: The current state of this Backup.
+ """
+ return self._state
+
+ @classmethod
+ def from_pb(cls, backup_pb, instance):
+ """Creates a Backup instance from a protobuf message.
+
+ :type backup_pb: :class:`table.Backup`
+ :param backup_pb: A Backup protobuf object.
+
+ :type instance: :class:`Instance <google.cloud.bigtable.instance.Instance>`
+ :param instance: The Instance that owns the Backup.
+
+ :rtype: :class:`~google.cloud.bigtable.backup.Backup`
+ :returns: The backup parsed from the protobuf response.
+ :raises: ValueError: If the backup name does not match the expected
+ format or the parsed project ID does not match the
+ project ID on the Instance's client, or if the
+ parsed instance ID does not match the Instance ID.
+ """
+ match = _BACKUP_NAME_RE.match(backup_pb.name)
+ if match is None:
+ raise ValueError(
+ "Backup protobuf name was not in the expected format.", backup_pb.name
+ )
+ if match.group("project") != instance._client.project:
+ raise ValueError(
+ "Project ID of the Backup does not match the Project ID "
+ "of the instance's client"
+ )
+
+ instance_id = match.group("instance_id")
+ if instance_id != instance.instance_id:
+ raise ValueError(
+ "Instance ID of the Backup does not match the Instance ID "
+ "of the instance"
+ )
+ backup_id = match.group("backup_id")
+ cluster_id = match.group("cluster_id")
+
+ match = _TABLE_NAME_RE.match(backup_pb.source_table)
+ table_id = match.group("table_id") if match else None
+
+ expire_time = backup_pb._pb.expire_time
+ encryption_info = EncryptionInfo._from_pb(backup_pb.encryption_info)
+
+ backup = cls(
+ backup_id,
+ instance,
+ cluster_id=cluster_id,
+ table_id=table_id,
+ expire_time=expire_time,
+ encryption_info=encryption_info,
+ )
+ backup._start_time = backup_pb._pb.start_time
+ backup._end_time = backup_pb._pb.end_time
+ backup._size_bytes = backup_pb._pb.size_bytes
+ backup._state = backup_pb._pb.state
+
+ return backup
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.backup_id == self.backup_id and other._instance == self._instance
+
+ def __ne__(self, other):
+ return not self == other
+
+ def create(self, cluster_id=None):
+ """Creates this backup within its instance.
+
+ :type cluster_id: str
+ :param cluster_id: (Optional) The ID of the Cluster for the newly
+ created Backup.
+
+ :rtype: :class:`~google.api_core.operation.Operation`
+ :returns: A future to be used to poll the status of the 'create' request
+ :raises Conflict: if the Backup already exists
+ :raises NotFound: if the Instance owning the Backup does not exist
+ :raises BadRequest: if the `table` or `expire_time` values are invalid,
+ or `expire_time` is not set
+ """
+ if not self._expire_time:
+ raise ValueError('"expire_time" parameter must be set')
+ # TODO: Consider implementing a method that sets a default value of
+ # `expire_time`, e.g. 1 week from the creation of the Backup.
+ if not self.table_id:
+ raise ValueError('"table" parameter must be set')
+
+ if cluster_id:
+ self._cluster = cluster_id
+
+ if not self._cluster:
+ raise ValueError('"cluster" parameter must be set')
+
+ backup = table.Backup(
+ source_table=self.source_table,
+ expire_time=_datetime_to_pb_timestamp(self.expire_time),
+ )
+
+ api = self._instance._client.table_admin_client
+ return api.create_backup(
+ request={
+ "parent": self.parent,
+ "backup_id": self.backup_id,
+ "backup": backup,
+ }
+ )
+
+ def get(self):
+ """Retrieves metadata of a pending or completed Backup.
+
+ :returns: An instance of
+ :class:`~google.cloud.bigtable_admin_v2.types.Backup`
+
+ :raises google.api_core.exceptions.GoogleAPICallError: If the request
+ failed for any reason.
+ :raises google.api_core.exceptions.RetryError: If the request failed
+ due to a retryable error and retry attempts failed.
+ :raises ValueError: If the parameters are invalid.
+ """
+ api = self._instance._client.table_admin_client
+ try:
+ return api.get_backup(request={"name": self.name})
+ except NotFound:
+ return None
+
+ def reload(self):
+ """Refreshes the stored backup properties."""
+ backup = self.get()
+ self._source_table = backup.source_table
+ self._expire_time = backup._pb.expire_time
+ self._start_time = backup._pb.start_time
+ self._end_time = backup._pb.end_time
+ self._size_bytes = backup._pb.size_bytes
+ self._state = backup._pb.state
+
+ def exists(self):
+ """Tests whether this Backup exists.
+
+ :rtype: bool
+ :returns: True if the Backup exists, else False.
+ """
+ return self.get() is not None
+
+ def update_expire_time(self, new_expire_time):
+ """Update the expire time of this Backup.
+
+ :type new_expire_time: :class:`datetime.datetime`
+ :param new_expire_time: the new expiration time timestamp
+ """
+ backup_update = table.Backup(
+ name=self.name,
+ expire_time=_datetime_to_pb_timestamp(new_expire_time),
+ )
+ update_mask = field_mask_pb2.FieldMask(paths=["expire_time"])
+ api = self._instance._client.table_admin_client
+ api.update_backup(request={"backup": backup_update, "update_mask": update_mask})
+ self._expire_time = new_expire_time
+
+ def delete(self):
+ """Delete this Backup."""
+ self._instance._client.table_admin_client.delete_backup(
+ request={"name": self.name}
+ )
+
+ def restore(self, table_id, instance_id=None):
+ """Creates a new Table by restoring from this Backup. The new Table
+ can be created in the same Instance as the Instance containing the
+ Backup, or another Instance whose ID can be specified in the arguments.
+ The returned Table ``long-running operation`` can be used to track the
+ progress of the operation and to cancel it. The ``response`` type is
+ ``Table``, if successful.
+
+ :type table_id: str
+ :param table_id: The ID of the Table to create and restore to.
+ This Table must not already exist.
+
+ :type instance_id: str
+ :param instance_id: (Optional) The ID of the Instance to restore the
+ backup into, if different from the current one.
+
+ :rtype: :class:`~google.api_core.operation.Operation`
+ :returns: A future to be used to poll the status of the 'restore'
+ request.
+
+ :raises: google.api_core.exceptions.AlreadyExists: If the table
+ already exists.
+ :raises: google.api_core.exceptions.GoogleAPICallError: If the request
+ failed for any reason.
+ :raises: google.api_core.exceptions.RetryError: If the request failed
+ due to a retryable error and retry attempts failed.
+ :raises: ValueError: If the parameters are invalid.
+ """
+ api = self._instance._client.table_admin_client
+ if instance_id:
+ parent = BaseBigtableTableAdminClient.instance_path(
+ project=self._instance._client.project,
+ instance=instance_id,
+ )
+ else:
+ parent = self._instance.name
+
+ return api._restore_table(
+ request={"parent": parent, "table_id": table_id, "backup": self.name}
+ )
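+
+ # A minimal usage sketch (editor's illustration), assuming ``backup`` is an
+ # existing, completed Backup:
+ #
+ #   operation = backup.restore("my-restored-table")
+ #   operation.result(timeout=600)  # the operation's response is a Table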
+
+ def get_iam_policy(self):
+ """Gets the IAM access control policy for this backup.
+
+ :rtype: :class:`google.cloud.bigtable.policy.Policy`
+ :returns: The current IAM policy of this backup.
+ """
+ table_api = self._instance._client.table_admin_client
+ response = table_api.get_iam_policy(request={"resource": self.name})
+ return Policy.from_pb(response)
+
+ def set_iam_policy(self, policy):
+ """Sets the IAM access control policy for this backup. Replaces any
+ existing policy.
+
+ For more information about policy, please see documentation of
+ class `google.cloud.bigtable.policy.Policy`
+
+ :type policy: :class:`google.cloud.bigtable.policy.Policy`
+ :param policy: A new IAM policy to replace the current IAM policy
+ of this backup.
+
+ :rtype: :class:`google.cloud.bigtable.policy.Policy`
+ :returns: The current IAM policy of this backup.
+ """
+ table_api = self._instance._client.table_admin_client
+ response = table_api.set_iam_policy(
+ request={"resource": self.name, "policy": policy.to_pb()}
+ )
+ return Policy.from_pb(response)
+
+ def test_iam_permissions(self, permissions):
+ """Tests whether the caller has the given permissions for this backup.
+ Returns the permissions that the caller has.
+
+ :type permissions: list
+ :param permissions: The set of permissions to check for
+ the ``resource``. Permissions with wildcards (such as '*'
+ or 'storage.*') are not allowed. For more information see
+ `IAM Overview
+ `_.
+ `Bigtable Permissions
+ `_.
+
+ :rtype: list
+ :returns: A List(string) of permissions allowed on the backup.
+ """
+ table_api = self._instance._client.table_admin_client
+ response = table_api.test_iam_permissions(
+ request={"resource": self.name, "permissions": permissions}
+ )
+ return list(response.permissions)
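+
+
+# A minimal usage sketch (editor's illustration) for the IAM helpers above,
+# assuming ``backup`` is an existing Backup; the permission string is only an
+# example and should be replaced with the permissions relevant to the caller:
+#
+#   policy = backup.get_iam_policy()
+#   # ... adjust the bindings on ``policy`` as needed ...
+#   backup.set_iam_policy(policy)
+#   allowed = backup.test_iam_permissions(["bigtable.backups.get"])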
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py
new file mode 100644
index 000000000000..f9b85386d827
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py
@@ -0,0 +1,414 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""User friendly container for Google Cloud Bigtable MutationBatcher."""
+import threading
+import queue
+import concurrent.futures
+import atexit
+
+
+from google.api_core.exceptions import from_grpc_status
+from dataclasses import dataclass
+
+
+FLUSH_COUNT = 100 # after this many elements, send out the batch
+
+MAX_MUTATION_SIZE = 20 * 1024 * 1024 # 20MB # after this many bytes, send out the batch
+
+MAX_OUTSTANDING_BYTES = 100 * 1024 * 1024 # 100MB # max inflight byte size.
+
+MAX_OUTSTANDING_ELEMENTS = 100000 # max inflight mutations.
+
+
+class MutationsBatchError(Exception):
+ """Error in the batch request"""
+
+ def __init__(self, message, exc):
+ self.exc = exc
+ self.message = message
+ super().__init__(self.message)
+
+
+class _MutationsBatchQueue(object):
+ """Private thread-safe queue to hold rows for batching."""
+
+ def __init__(self, max_mutation_bytes=MAX_MUTATION_SIZE, flush_count=FLUSH_COUNT):
+ """Specify the queue constraints"""
+ self._queue = queue.Queue()
+ self.total_mutation_count = 0
+ self.total_size = 0
+ self.max_mutation_bytes = max_mutation_bytes
+ self.flush_count = flush_count
+
+ def get(self):
+ """
+ Retrieve an item from the queue. Recalculate queue size.
+
+ If the queue is empty, return None.
+ """
+ try:
+ row = self._queue.get_nowait()
+ mutation_size = row.get_mutations_size()
+ self.total_mutation_count -= len(row._get_mutations())
+ self.total_size -= mutation_size
+ return row
+ except queue.Empty:
+ return None
+
+ def put(self, item):
+ """Insert an item into the queue. Recalculate queue size."""
+
+ mutation_count = len(item._get_mutations())
+
+ self._queue.put(item)
+
+ self.total_size += item.get_mutations_size()
+ self.total_mutation_count += mutation_count
+
+ def full(self):
+ """Check if the queue is full."""
+ if (
+ self.total_mutation_count >= self.flush_count
+ or self.total_size >= self.max_mutation_bytes
+ ):
+ return True
+ return False
+
+
+@dataclass
+class _BatchInfo:
+ """Keeping track of size of a batch"""
+
+ mutations_count: int = 0
+ rows_count: int = 0
+ mutations_size: int = 0
+
+
+class _FlowControl(object):
+ def __init__(
+ self,
+ max_mutations=MAX_OUTSTANDING_ELEMENTS,
+ max_mutation_bytes=MAX_OUTSTANDING_BYTES,
+ ):
+ """Control the inflight requests. Keep track of the mutations, row bytes and row counts.
+ As requests to the backend are made, adjust the number of mutations being processed.
+
+ If threshold is reached, block the flow.
+ Reopen the flow as requests are finished.
+ """
+ self.max_mutations = max_mutations
+ self.max_mutation_bytes = max_mutation_bytes
+ self.inflight_mutations = 0
+ self.inflight_size = 0
+ self.event = threading.Event()
+ self.event.set()
+ self._lock = threading.Lock()
+
+ def is_blocked(self):
+ """Returns True if:
+
+ - inflight mutations >= max_mutations, or
+ - inflight bytes size >= max_mutation_bytes
+ """
+
+ return (
+ self.inflight_mutations >= self.max_mutations
+ or self.inflight_size >= self.max_mutation_bytes
+ )
+
+ def control_flow(self, batch_info):
+ """
+ Reserve the resources used by this batch and update the flow control status.
+ """
+
+ with self._lock:
+ self.inflight_mutations += batch_info.mutations_count
+ self.inflight_size += batch_info.mutations_size
+ self.set_flow_control_status()
+
+ def wait(self):
+ """
+ Wait until flow control pushback has been released.
+ It awakens as soon as `event` is set.
+ """
+ self.event.wait()
+
+ def set_flow_control_status(self):
+ """Check the inflight mutations and size.
+
+ If values exceed the allowed threshold, block the event.
+ """
+ if self.is_blocked():
+ self.event.clear() # sleep
+ else:
+ self.event.set() # awaken the threads
+
+ def release(self, batch_info):
+ """
+ Release the resources.
+ Decrement the row size to allow enqueued mutations to be run.
+ """
+ with self._lock:
+ self.inflight_mutations -= batch_info.mutations_count
+ self.inflight_size -= batch_info.mutations_size
+ self.set_flow_control_status()
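+
+ # How the pieces above interact (editor's sketch, illustrative only):
+ #
+ #   flow = _FlowControl(max_mutations=10, max_mutation_bytes=1024)
+ #   info = _BatchInfo(mutations_count=4, rows_count=2, mutations_size=256)
+ #   flow.wait()              # returns immediately while under the limits
+ #   flow.control_flow(info)  # reserve the batch's mutations and bytes
+ #   ...                      # submit the batch to a worker thread
+ #   flow.release(info)       # free the reservation; wakes blocked callers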
+
+
+class MutationsBatcher(object):
+ """A MutationsBatcher is used in batch cases where the number of mutations
+ is large or unknown. It will store :class:`DirectRow` in memory until one of the
+ size limits is reached, or an explicit call to :func:`flush()` is performed. When
+ a flush event occurs, the :class:`DirectRow` in memory will be sent to Cloud
+ Bigtable. Batching mutations is more efficient than sending individual
+ requests.
+
+ This class is not suited for usage in systems where each mutation
+ must be guaranteed to be sent, since calling mutate may only result in an
+ in-memory change. In the case of a system crash, any :class:`DirectRow` remaining in
+ memory will not necessarily be sent to the service, even after the
+ completion of the :func:`mutate()` method.
+
+ Note on thread safety: The same :class:`MutationsBatcher` cannot be shared by multiple end-user threads.
+
+ :type table: class
+ :param table: :class:`~google.cloud.bigtable.table.Table`.
+
+ :type flush_count: int
+ :param flush_count: (Optional) Maximum number of rows to buffer before
+ flushing. When the buffered row count reaches this value, the current
+ batch is sent. Default is FLUSH_COUNT (100 rows).
+
+ :type max_row_bytes: int
+ :param max_row_bytes: (Optional) Maximum cumulative size, in bytes, of the
+ buffered row mutations before flushing. When the buffered size reaches
+ this value, the current batch is sent. Default is MAX_MUTATION_SIZE
+ (20 MB).
+
+ :type flush_interval: float
+ :param flush_interval: (Optional) The interval (in seconds) between asynchronous flush.
+ Default is 1 second.
+
+ :type batch_completed_callback: Callable[[list[`~google.rpc.status_pb2.Status`]], None]
+ :param batch_completed_callback: (Optional) A callable invoked with the
+ responses after the current batch is sent. The callable receives a list
+ of gRPC ``Status`` messages, one per row in the batch.
+ """
+
+ def __init__(
+ self,
+ table,
+ flush_count=FLUSH_COUNT,
+ max_row_bytes=MAX_MUTATION_SIZE,
+ flush_interval=1,
+ batch_completed_callback=None,
+ ):
+ self._rows = _MutationsBatchQueue(
+ max_mutation_bytes=max_row_bytes, flush_count=flush_count
+ )
+ self.table = table
+ self._executor = concurrent.futures.ThreadPoolExecutor()
+ atexit.register(self.close)
+ self._timer = threading.Timer(flush_interval, self.flush)
+ self._timer.start()
+ self.flow_control = _FlowControl(
+ max_mutations=MAX_OUTSTANDING_ELEMENTS,
+ max_mutation_bytes=MAX_OUTSTANDING_BYTES,
+ )
+ self.futures_mapping = {}
+ self.exceptions = queue.Queue()
+ self._user_batch_completed_callback = batch_completed_callback
+
+ @property
+ def flush_count(self):
+ return self._rows.flush_count
+
+ @property
+ def max_row_bytes(self):
+ return self._rows.max_mutation_bytes
+
+ def __enter__(self):
+ """Starting the MutationsBatcher as a context manager"""
+ return self
+
+ def mutate(self, row):
+ """Add a row to the batch. If the current batch meets one of the size
+ limits, the batch is sent asynchronously.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_batcher_mutate]
+ :end-before: [END bigtable_api_batcher_mutate]
+ :dedent: 4
+
+ :type row: class
+ :param row: :class:`~google.cloud.bigtable.row.DirectRow`.
+
+ :raises: One of the following:
+ * :exc:`~.table._BigtableRetryableError` if any row returned a transient error.
+ * :exc:`RuntimeError` if the number of responses doesn't match the number of rows that were retried
+ """
+ self._rows.put(row)
+
+ if self._rows.full():
+ self._flush_async()
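+
+ # A minimal usage sketch (editor's illustration), assuming an existing
+ # ``table`` with a ``direct_row`` factory and a column family "cf1":
+ #
+ #   batcher = MutationsBatcher(table, flush_count=1000)
+ #   row = table.direct_row(b"row-key-1")
+ #   row.set_cell("cf1", b"field", b"value")
+ #   batcher.mutate(row)  # may trigger an asynchronous flush
+ #   batcher.flush()      # send whatever is still buffered, synchronously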
+
+ def mutate_rows(self, rows):
+ """Add multiple rows to the batch. If the current batch meets one of the size
+ limits, the batch is sent asynchronously.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_batcher_mutate_rows]
+ :end-before: [END bigtable_api_batcher_mutate_rows]
+ :dedent: 4
+
+ :type rows: list:[`~google.cloud.bigtable.row.DirectRow`]
+ :param rows: list:[`~google.cloud.bigtable.row.DirectRow`].
+
+ :raises: One of the following:
+ * :exc:`~.table._BigtableRetryableError` if any row returned a transient error.
+ * :exc:`RuntimeError` if the number of responses doesn't match the number of rows that were retried
+ """
+ for row in rows:
+ self.mutate(row)
+
+ def flush(self):
+ """Sends the current batch to Cloud Bigtable synchronously.
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_batcher_flush]
+ :end-before: [END bigtable_api_batcher_flush]
+ :dedent: 4
+
+ :raises:
+ * :exc:`.batcher.MutationsBatchError` if there's any error in the mutations.
+ """
+ rows_to_flush = []
+ row = self._rows.get()
+ while row is not None:
+ rows_to_flush.append(row)
+ row = self._rows.get()
+ response = self._flush_rows(rows_to_flush)
+ return response
+
+ def _flush_async(self):
+ """Sends the current batch to Cloud Bigtable asynchronously.
+
+ :raises:
+ * :exc:`.batcher.MutationsBatchError` if there's any error in the mutations.
+ """
+ next_row = self._rows.get()
+ while next_row is not None:
+ # start a new batch
+ rows_to_flush = [next_row]
+ batch_info = _BatchInfo(
+ mutations_count=len(next_row._get_mutations()),
+ rows_count=1,
+ mutations_size=next_row.get_mutations_size(),
+ )
+ # fill up batch with rows
+ next_row = self._rows.get()
+ while next_row is not None and self._row_fits_in_batch(
+ next_row, batch_info
+ ):
+ rows_to_flush.append(next_row)
+ batch_info.mutations_count += len(next_row._get_mutations())
+ batch_info.rows_count += 1
+ batch_info.mutations_size += next_row.get_mutations_size()
+ next_row = self._rows.get()
+ # send batch over network
+ # wait for resources to become available
+ self.flow_control.wait()
+ # once unblocked, submit the batch
+ # control_flow may clear the event flag to block subsequent batches, but this one has already been admitted
+ self.flow_control.control_flow(batch_info)
+ future = self._executor.submit(self._flush_rows, rows_to_flush)
+ # schedule release of resources from flow control
+ self.futures_mapping[future] = batch_info
+ future.add_done_callback(self._batch_completed_callback)
+
+ def _batch_completed_callback(self, future):
+ """Callback invoked when a batch future completes.
+
+ Cleans up the finished batch and releases the resources it reserved from
+ the flow controller so that enqueued batches can proceed.
+ """
+ processed_rows = self.futures_mapping[future]
+ self.flow_control.release(processed_rows)
+ del self.futures_mapping[future]
+
+ def _row_fits_in_batch(self, row, batch_info):
+ """Checks if a row can fit in the current batch.
+
+ :type row: class
+ :param row: :class:`~google.cloud.bigtable.row.DirectRow`.
+
+ :type batch_info: :class:`_BatchInfo`
+ :param batch_info: Information about the current batch.
+
+ :rtype: bool
+ :returns: True if the row can fit in the current batch.
+ """
+ new_rows_count = batch_info.rows_count + 1
+ new_mutations_count = batch_info.mutations_count + len(row._get_mutations())
+ new_mutations_size = batch_info.mutations_size + row.get_mutations_size()
+ return (
+ new_rows_count <= self.flush_count
+ and new_mutations_size <= self.max_row_bytes
+ and new_mutations_count <= self.flow_control.max_mutations
+ and new_mutations_size <= self.flow_control.max_mutation_bytes
+ )
+
+ def _flush_rows(self, rows_to_flush):
+ """Mutate the specified rows.
+
+ :raises:
+ * :exc:`.batcher.MutationsBatchError` if there's any error in the mutations.
+ """
+ responses = []
+ if len(rows_to_flush) > 0:
+ response = self.table.mutate_rows(rows_to_flush)
+
+ if self._user_batch_completed_callback:
+ self._user_batch_completed_callback(response)
+
+ for result in response:
+ if result.code != 0:
+ exc = from_grpc_status(result.code, result.message)
+ self.exceptions.put(exc)
+ responses.append(result)
+
+ return responses
+
+ def __exit__(self, exc_type, exc_value, exc_traceback):
+ """Clean up resources. Flush and shutdown the ThreadPoolExecutor."""
+ self.close()
+
+ def close(self):
+ """Clean up resources. Flush and shutdown the ThreadPoolExecutor.
+ Any errors will be raised.
+
+ :raises:
+ * :exc:`.batcher.MutationsBatchError` if there's any error in the mutations.
+ """
+ self.flush()
+ self._executor.shutdown(wait=True)
+ atexit.unregister(self.close)
+ if self.exceptions.qsize() > 0:
+ exc = list(self.exceptions.queue)
+ raise MutationsBatchError("Errors in batch mutations.", exc=exc)
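+
+
+# Context-manager usage sketch (editor's illustration), assuming an existing
+# ``table`` and a hypothetical ``data`` mapping of row keys to values;
+# ``close()`` runs on exit, flushing pending rows and re-raising accumulated
+# mutation errors as ``MutationsBatchError``:
+#
+#   with MutationsBatcher(table) as batcher:
+#       for key, value in data.items():
+#           row = table.direct_row(key)
+#           row.set_cell("cf1", b"field", value)
+#           batcher.mutate(row)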
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py
new file mode 100644
index 000000000000..37de10b6e772
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py
@@ -0,0 +1,475 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Parent client for calling the Google Cloud Bigtable API.
+
+This is the base from which all interactions with the API occur.
+
+In the hierarchy of API concepts
+
+* a :class:`~google.cloud.bigtable.client.Client` owns an
+ :class:`~google.cloud.bigtable.instance.Instance`
+* an :class:`~google.cloud.bigtable.instance.Instance` owns a
+ :class:`~google.cloud.bigtable.table.Table`
+* a :class:`~google.cloud.bigtable.table.Table` owns a
+ :class:`~.column_family.ColumnFamily`
+* a :class:`~google.cloud.bigtable.table.Table` owns a
+ :class:`~google.cloud.bigtable.row.Row` (and all the cells in the row)
+"""
+import os
+import warnings
+import grpc # type: ignore
+
+from google.api_core.gapic_v1 import client_info as client_info_lib
+from google.auth.credentials import AnonymousCredentials # type: ignore
+
+from google.cloud import bigtable_v2
+from google.cloud import bigtable_admin_v2
+from google.cloud.bigtable_v2.services.bigtable.transports import BigtableGrpcTransport
+from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports import (
+ BigtableInstanceAdminGrpcTransport,
+)
+from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports import (
+ BigtableTableAdminGrpcTransport,
+)
+
+from google.cloud import bigtable
+from google.cloud.bigtable.instance import Instance
+from google.cloud.bigtable.cluster import Cluster
+
+from google.cloud.client import ClientWithProject # type: ignore
+
+from google.cloud.bigtable_admin_v2.types import instance
+from google.cloud.bigtable.cluster import _CLUSTER_NAME_RE
+from google.cloud.environment_vars import BIGTABLE_EMULATOR # type: ignore
+
+
+INSTANCE_TYPE_PRODUCTION = instance.Instance.Type.PRODUCTION
+INSTANCE_TYPE_DEVELOPMENT = instance.Instance.Type.DEVELOPMENT
+INSTANCE_TYPE_UNSPECIFIED = instance.Instance.Type.TYPE_UNSPECIFIED
+SPANNER_ADMIN_SCOPE = "https://www.googleapis.com/auth/spanner.admin"
+ADMIN_SCOPE = "https://www.googleapis.com/auth/bigtable.admin"
+"""Scope for interacting with the Cluster Admin and Table Admin APIs."""
+DATA_SCOPE = "https://www.googleapis.com/auth/bigtable.data"
+"""Scope for reading and writing table data."""
+READ_ONLY_SCOPE = "https://www.googleapis.com/auth/bigtable.data.readonly"
+"""Scope for reading table data."""
+
+_DEFAULT_BIGTABLE_EMULATOR_CLIENT = "google-cloud-bigtable-emulator"
+_GRPC_CHANNEL_OPTIONS = (
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ("grpc.keepalive_time_ms", 30000),
+ ("grpc.keepalive_timeout_ms", 10000),
+)
+
+
+def _create_gapic_client(client_class, client_options=None, transport=None):
+ def inner(self):
+ return client_class(
+ credentials=None,
+ client_info=self._client_info,
+ client_options=client_options,
+ transport=transport,
+ )
+
+ return inner
+
+
+class Client(ClientWithProject):
+ """Client for interacting with Google Cloud Bigtable API.
+
+ .. note::
+
+ Since the Cloud Bigtable API requires the gRPC transport, no
+ ``_http`` argument is accepted by this class.
+
+ :type project: :class:`str` or :func:`unicode `
+ :param project: (Optional) The ID of the project which owns the
+ instances, tables and data. If not provided, will
+ attempt to determine from the environment.
+
+ :type credentials: :class:`~google.auth.credentials.Credentials`
+ :param credentials: (Optional) The OAuth2 Credentials to use for this
+ client. If not passed, falls back to the default
+ inferred from the environment.
+
+ :type read_only: bool
+ :param read_only: (Optional) Boolean indicating if the data scope should be
+ for reading only (or for writing as well). Defaults to
+ :data:`False`.
+
+ :type admin: bool
+ :param admin: (Optional) Boolean indicating if the client will be used to
+ interact with the Instance Admin or Table Admin APIs. This
+ requires the :const:`ADMIN_SCOPE`. Defaults to :data:`False`.
+
+ :type client_info: :class:`google.api_core.gapic_v1.client_info.ClientInfo`
+ :param client_info:
+ The client info used to send a user-agent string along with API
+ requests. If ``None``, then default info will be used. Generally,
+ you only need to set this if you're developing your own library
+ or partner tool.
+
+ :type client_options: :class:`~google.api_core.client_options.ClientOptions`
+ or :class:`dict`
+ :param client_options: (Optional) Client options used to set user options
+ on the client. API Endpoint should be set through client_options.
+
+ :type admin_client_options:
+ :class:`~google.api_core.client_options.ClientOptions` or :class:`dict`
+ :param admin_client_options: (Optional) Client options used to set user
+ options on the client. API Endpoint for admin operations should be set
+ through admin_client_options.
+
+ :type channel: grpc.Channel
+ :param channel: (Optional) DEPRECATED:
+ A ``Channel`` instance through which to make calls.
+ This argument is mutually exclusive with ``credentials``;
+ providing both will raise an exception. No longer used.
+
+ :raises: :class:`ValueError ` if both ``read_only``
+ and ``admin`` are :data:`True`
+ """
+
+ _table_data_client = None
+ _table_admin_client = None
+ _instance_admin_client = None
+
+ def __init__(
+ self,
+ project=None,
+ credentials=None,
+ read_only=False,
+ admin=False,
+ client_info=None,
+ client_options=None,
+ admin_client_options=None,
+ channel=None,
+ ):
+ if client_info is None:
+ client_info = client_info_lib.ClientInfo(
+ client_library_version=bigtable.__version__,
+ )
+ if read_only and admin:
+ raise ValueError(
+ "A read-only client cannot also perform " "administrative actions."
+ )
+
+ # NOTE: We set the scopes **before** calling the parent constructor.
+ # It **may** use those scopes in ``with_scopes_if_required``.
+ self._read_only = bool(read_only)
+ self._admin = bool(admin)
+ self._client_info = client_info
+ self._emulator_host = os.getenv(BIGTABLE_EMULATOR)
+
+ if self._emulator_host is not None:
+ if credentials is None:
+ credentials = AnonymousCredentials()
+ if project is None:
+ project = _DEFAULT_BIGTABLE_EMULATOR_CLIENT
+
+ if channel is not None:
+ warnings.warn(
+ "'channel' is deprecated and no longer used.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ self._client_options = client_options
+ self._admin_client_options = admin_client_options
+ self._channel = channel
+ self.SCOPE = self._get_scopes()
+ super(Client, self).__init__(
+ project=project,
+ credentials=credentials,
+ client_options=client_options,
+ )
+
+ def _get_scopes(self):
+ """Get the scopes corresponding to admin / read-only state.
+
+ Returns:
+ Tuple[str, ...]: The tuple of scopes.
+ """
+ if self._read_only:
+ scopes = (READ_ONLY_SCOPE,)
+ else:
+ scopes = (DATA_SCOPE,)
+
+ if self._admin:
+ scopes += (ADMIN_SCOPE,)
+
+ return scopes
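+
+ # Illustrative outcomes (editor's note), derived from the logic above:
+ #
+ #   Client()                -> (DATA_SCOPE,)
+ #   Client(read_only=True)  -> (READ_ONLY_SCOPE,)
+ #   Client(admin=True)      -> (DATA_SCOPE, ADMIN_SCOPE)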
+
+ def _emulator_channel(self, transport, options):
+ """Create a channel for use with the Bigtable emulator.
+
+ Insecure channels are used for the emulator as secure channels
+ cannot be used to communicate on some environments.
+ https://github.com/googleapis/python-firestore/issues/359
+
+ Returns:
+ grpc.Channel or grpc.aio.Channel
+ """
+ # Note: this code also exists in the firestore client.
+ if "GrpcAsyncIOTransport" in str(transport.__name__):
+ channel_fn = grpc.aio.insecure_channel
+ else:
+ channel_fn = grpc.insecure_channel
+ return channel_fn(self._emulator_host, options=options)
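+
+ # Emulator sketch (editor's illustration): when the environment variable
+ # named by ``BIGTABLE_EMULATOR`` (conventionally ``BIGTABLE_EMULATOR_HOST``)
+ # is set before construction, the client uses an insecure channel and, if
+ # none were given, anonymous credentials and a default project, e.g.
+ #
+ #   os.environ["BIGTABLE_EMULATOR_HOST"] = "localhost:8086"
+ #   client = Client(admin=True)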
+
+ def _create_gapic_client_channel(self, client_class, grpc_transport):
+ if self._emulator_host is not None:
+ api_endpoint = self._emulator_host
+ elif self._client_options and self._client_options.api_endpoint:
+ api_endpoint = self._client_options.api_endpoint
+ else:
+ api_endpoint = client_class.DEFAULT_ENDPOINT
+
+ if self._emulator_host is not None:
+ channel = self._emulator_channel(
+ transport=grpc_transport,
+ options=_GRPC_CHANNEL_OPTIONS,
+ )
+ else:
+ channel = grpc_transport.create_channel(
+ host=api_endpoint,
+ credentials=self._credentials,
+ options=_GRPC_CHANNEL_OPTIONS,
+ )
+ return grpc_transport(channel=channel, host=api_endpoint)
+
+ @property
+ def project_path(self):
+ """Project name to be used with Instance Admin API.
+
+ .. note::
+
+ This property will not change if ``project`` does not, but the
+ return value is not cached.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_project_path]
+ :end-before: [END bigtable_api_project_path]
+ :dedent: 4
+
+ The project name is of the form
+
+ ``"projects/{project}"``
+
+ :rtype: str
+ :returns: Return a fully-qualified project string.
+ """
+ return self.instance_admin_client.common_project_path(self.project)
+
+ @property
+ def table_data_client(self):
+ """Getter for the gRPC stub used for the Bigtable data API.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_table_data_client]
+ :end-before: [END bigtable_api_table_data_client]
+ :dedent: 4
+
+ :rtype: :class:`.bigtable_v2.BigtableClient`
+ :returns: A BigtableClient object.
+ """
+ if self._table_data_client is None:
+ transport = self._create_gapic_client_channel(
+ bigtable_v2.BigtableClient,
+ BigtableGrpcTransport,
+ )
+ klass = _create_gapic_client(
+ bigtable_v2.BigtableClient,
+ client_options=self._client_options,
+ transport=transport,
+ )
+ self._table_data_client = klass(self)
+ return self._table_data_client
+
+ @property
+ def table_admin_client(self):
+ """Getter for the gRPC stub used for the Table Admin API.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_table_admin_client]
+ :end-before: [END bigtable_api_table_admin_client]
+ :dedent: 4
+
+ :rtype: :class:`.bigtable_admin_pb2.BigtableTableAdmin`
+ :returns: A BigtableTableAdmin instance.
+ :raises: :class:`ValueError ` if the current
+ client is not an admin client.
+ """
+ if self._table_admin_client is None:
+ if not self._admin:
+ raise ValueError("Client is not an admin client.")
+
+ transport = self._create_gapic_client_channel(
+ bigtable_admin_v2.BaseBigtableTableAdminClient,
+ BigtableTableAdminGrpcTransport,
+ )
+ klass = _create_gapic_client(
+ bigtable_admin_v2.BaseBigtableTableAdminClient,
+ client_options=self._admin_client_options,
+ transport=transport,
+ )
+ self._table_admin_client = klass(self)
+ return self._table_admin_client
+
+ @property
+ def instance_admin_client(self):
+ """Getter for the gRPC stub used for the Instance Admin API.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_instance_admin_client]
+ :end-before: [END bigtable_api_instance_admin_client]
+ :dedent: 4
+
+ :rtype: :class:`.bigtable_admin_pb2.BigtableInstanceAdmin`
+ :returns: A BigtableInstanceAdmin instance.
+ :raises: :class:`ValueError ` if the current
+ client is not an admin client.
+ """
+ if self._instance_admin_client is None:
+ if not self._admin:
+ raise ValueError("Client is not an admin client.")
+
+ transport = self._create_gapic_client_channel(
+ bigtable_admin_v2.BigtableInstanceAdminClient,
+ BigtableInstanceAdminGrpcTransport,
+ )
+ klass = _create_gapic_client(
+ bigtable_admin_v2.BigtableInstanceAdminClient,
+ client_options=self._admin_client_options,
+ transport=transport,
+ )
+ self._instance_admin_client = klass(self)
+ return self._instance_admin_client
+
+ def instance(self, instance_id, display_name=None, instance_type=None, labels=None):
+ """Factory to create an instance associated with this client.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_create_prod_instance]
+ :end-before: [END bigtable_api_create_prod_instance]
+ :dedent: 4
+
+ :type instance_id: str
+ :param instance_id: The ID of the instance.
+
+ :type display_name: str
+ :param display_name: (Optional) The display name for the instance in
+ the Cloud Console UI. (Must be between 4 and 30
+ characters.) If this value is not set in the
+ constructor, will fall back to the instance ID.
+
+ :type instance_type: int
+ :param instance_type: (Optional) The type of the instance.
+ Possible values are represented
+ by the following constants:
+ :data:`google.cloud.bigtable.instance.InstanceType.PRODUCTION`,
+ :data:`google.cloud.bigtable.instance.InstanceType.DEVELOPMENT`.
+ Defaults to
+ :data:`google.cloud.bigtable.instance.InstanceType.UNSPECIFIED`.
+
+ :type labels: dict
+ :param labels: (Optional) Labels are a flexible and lightweight
+ mechanism for organizing cloud resources into groups
+ that reflect a customer's organizational needs and
+ deployment strategies. They can be used to filter
+ resources and aggregate metrics. Label keys must be
+ between 1 and 63 characters long. Maximum 64 labels can
+ be associated with a given resource. Label values must
+ be between 0 and 63 characters long. Keys and values
+ must both be under 128 bytes.
+
+ :rtype: :class:`~google.cloud.bigtable.instance.Instance`
+ :returns: an instance owned by this client.
+ """
+ return Instance(
+ instance_id,
+ self,
+ display_name=display_name,
+ instance_type=instance_type,
+ labels=labels,
+ )
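+
+ # A minimal usage sketch (editor's illustration): the factory only builds an
+ # in-memory ``Instance``; creating the resource is a separate call on the
+ # returned object.
+ #
+ #   client = Client(project="my-project", admin=True)
+ #   instance = client.instance(
+ #       "my-instance",
+ #       display_name="My Instance",
+ #       instance_type=INSTANCE_TYPE_PRODUCTION,
+ #       labels={"env": "prod"},
+ #   )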
+
+ def list_instances(self):
+ """List instances owned by the project.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_list_instances]
+ :end-before: [END bigtable_api_list_instances]
+ :dedent: 4
+
+ :rtype: tuple
+ :returns:
+ (instances, failed_locations), where 'instances' is list of
+ :class:`google.cloud.bigtable.instance.Instance`, and
+ 'failed_locations' is a list of locations which could not
+ be resolved.
+ """
+ resp = self.instance_admin_client.list_instances(
+ request={"parent": self.project_path}
+ )
+ instances = [Instance.from_pb(instance, self) for instance in resp.instances]
+ return instances, resp.failed_locations
+
+ def list_clusters(self):
+ """List the clusters in the project.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_list_clusters_in_project]
+ :end-before: [END bigtable_api_list_clusters_in_project]
+ :dedent: 4
+
+ :rtype: tuple
+ :returns:
+ (clusters, failed_locations), where 'clusters' is list of
+ :class:`google.cloud.bigtable.instance.Cluster`, and
+ 'failed_locations' is a list of strings representing
+ locations which could not be resolved.
+ """
+ resp = self.instance_admin_client.list_clusters(
+ request={
+ "parent": self.instance_admin_client.instance_path(self.project, "-")
+ }
+ )
+ clusters = []
+ instances = {}
+ for cluster in resp.clusters:
+ match_cluster_name = _CLUSTER_NAME_RE.match(cluster.name)
+ instance_id = match_cluster_name.group("instance")
+ if instance_id not in instances:
+ instances[instance_id] = self.instance(instance_id)
+ clusters.append(Cluster.from_pb(cluster, instances[instance_id]))
+ return clusters, resp.failed_locations
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py
new file mode 100644
index 000000000000..11fb5492dad4
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py
@@ -0,0 +1,541 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""User friendly container for Google Cloud Bigtable Cluster."""
+
+
+import re
+from google.cloud.bigtable_admin_v2.types import instance
+from google.api_core.exceptions import NotFound
+from google.protobuf import field_mask_pb2
+
+
+_CLUSTER_NAME_RE = re.compile(
+ r"^projects/(?P[^/]+)/"
+ r"instances/(?P[^/]+)/clusters/"
+ r"(?P[a-z][-a-z0-9]*)$"
+)
+
+
+class Cluster(object):
+ """Representation of a Google Cloud Bigtable Cluster.
+
+ We can use a :class:`Cluster` to:
+
+ * :meth:`reload` itself
+ * :meth:`create` itself
+ * :meth:`update` itself
+ * :meth:`delete` itself
+ * :meth:`disable_autoscaling` itself
+
+ :type cluster_id: str
+ :param cluster_id: The ID of the cluster.
+
+ :type instance: :class:`~google.cloud.bigtable.instance.Instance`
+ :param instance: The instance where the cluster resides.
+
+ :type location_id: str
+ :param location_id: (Creation Only) The location where this cluster's
+ nodes and storage reside. For best performance,
+ clients should be located as close as possible to
+ this cluster.
+ For a list of supported locations, refer to
+ https://cloud.google.com/bigtable/docs/locations
+
+ :type serve_nodes: int
+ :param serve_nodes: (Optional) The number of nodes in the cluster for manual scaling. If any of the
+ autoscaling configurations are specified, the autoscaling
+ configuration takes precedence.
+
+ :type default_storage_type: int
+ :param default_storage_type: (Optional) The type of storage
+ Possible values are represented by the
+ following constants:
+ :data:`google.cloud.bigtable.enums.StorageType.SSD`,
+ :data:`google.cloud.bigtable.enums.StorageType.HDD`.
+ Defaults to
+ :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`.
+
+ :type kms_key_name: str
+ :param kms_key_name: (Optional, Creation Only) The name of the KMS customer managed
+ encryption key (CMEK) to use for at-rest encryption of data in
+ this cluster. If omitted, Google's default encryption will be
+ used. If specified, the requirements for this key are:
+
+ 1) The Cloud Bigtable service account associated with the
+ project that contains the cluster must be granted the
+ ``cloudkms.cryptoKeyEncrypterDecrypter`` role on the CMEK.
+ 2) Only regional keys can be used and the region of the CMEK
+ key must match the region of the cluster.
+ 3) All clusters within an instance must use the same CMEK key.
+
+ :type _state: int
+ :param _state: (`OutputOnly`)
+ The current state of the cluster.
+ Possible values are represented by the following constants:
+ :data:`google.cloud.bigtable.enums.Cluster.State.NOT_KNOWN`.
+ :data:`google.cloud.bigtable.enums.Cluster.State.READY`.
+ :data:`google.cloud.bigtable.enums.Cluster.State.CREATING`.
+ :data:`google.cloud.bigtable.enums.Cluster.State.RESIZING`.
+ :data:`google.cloud.bigtable.enums.Cluster.State.DISABLED`.
+
+ :type min_serve_nodes: int
+ :param min_serve_nodes: (Optional) The minimum number of nodes to be set in the cluster for autoscaling.
+ Must be 1 or greater.
+ If specified, this configuration takes precedence over
+ ``serve_nodes``.
+ If specified, then
+ ``max_serve_nodes`` and ``cpu_utilization_percent`` must be
+ specified too.
+
+ :type max_serve_nodes: int
+ :param max_serve_nodes: (Optional) The maximum number of nodes to be set in the cluster for autoscaling.
+ If specified, this configuration
+ takes precedence over ``serve_nodes``. If specified, then
+ ``min_serve_nodes`` and ``cpu_utilization_percent`` must be
+ specified too.
+
+ :param cpu_utilization_percent: (Optional) The CPU utilization target for the cluster's workload for autoscaling.
+ If specified, this configuration takes precedence over ``serve_nodes``. If specified, then
+ ``min_serve_nodes`` and ``max_serve_nodes`` must be
+ specified too.
+ """
+
+ def __init__(
+ self,
+ cluster_id,
+ instance,
+ location_id=None,
+ serve_nodes=None,
+ default_storage_type=None,
+ kms_key_name=None,
+ _state=None,
+ min_serve_nodes=None,
+ max_serve_nodes=None,
+ cpu_utilization_percent=None,
+ ):
+ self.cluster_id = cluster_id
+ self._instance = instance
+ self.location_id = location_id
+ self.serve_nodes = serve_nodes
+ self.default_storage_type = default_storage_type
+ self._kms_key_name = kms_key_name
+ self._state = _state
+ self.min_serve_nodes = min_serve_nodes
+ self.max_serve_nodes = max_serve_nodes
+ self.cpu_utilization_percent = cpu_utilization_percent
+
+ @classmethod
+ def from_pb(cls, cluster_pb, instance):
+ """Creates a cluster instance from a protobuf.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_cluster_from_pb]
+ :end-before: [END bigtable_api_cluster_from_pb]
+ :dedent: 4
+
+ :type cluster_pb: :class:`instance.Cluster`
+ :param cluster_pb: An instance protobuf object.
+
+ :type instance: :class:`google.cloud.bigtable.instance.Instance`
+ :param instance: The instance that owns the cluster.
+
+ :rtype: :class:`Cluster`
+ :returns: The Cluster parsed from the protobuf response.
+ :raises: :class:`ValueError ` if the cluster
+ name does not match
+ ``projects/{project}/instances/{instance_id}/clusters/{cluster_id}``
+ or if the parsed instance ID does not match the instance ID
+ on the client,
+ or if the parsed project ID does not match the project ID
+ on the client.
+ """
+ match_cluster_name = _CLUSTER_NAME_RE.match(cluster_pb.name)
+ if match_cluster_name is None:
+ raise ValueError(
+ "Cluster protobuf name was not in the " "expected format.",
+ cluster_pb.name,
+ )
+ if match_cluster_name.group("instance") != instance.instance_id:
+ raise ValueError(
+ "Instance ID on cluster does not match the " "instance ID on the client"
+ )
+ if match_cluster_name.group("project") != instance._client.project:
+ raise ValueError(
+ "Project ID on cluster does not match the " "project ID on the client"
+ )
+ cluster_id = match_cluster_name.group("cluster_id")
+
+ result = cls(cluster_id, instance)
+ result._update_from_pb(cluster_pb)
+ return result
+
+ def _update_from_pb(self, cluster_pb):
+ """Refresh self from the server-provided protobuf.
+ Helper for :meth:`from_pb` and :meth:`reload`.
+ """
+
+ self.location_id = cluster_pb.location.split("/")[-1]
+ self.serve_nodes = cluster_pb.serve_nodes
+
+ self.min_serve_nodes = (
+ cluster_pb.cluster_config.cluster_autoscaling_config.autoscaling_limits.min_serve_nodes
+ )
+ self.max_serve_nodes = (
+ cluster_pb.cluster_config.cluster_autoscaling_config.autoscaling_limits.max_serve_nodes
+ )
+ self.cpu_utilization_percent = (
+ cluster_pb.cluster_config.cluster_autoscaling_config.autoscaling_targets.cpu_utilization_percent
+ )
+
+ self.default_storage_type = cluster_pb.default_storage_type
+ if cluster_pb.encryption_config:
+ self._kms_key_name = cluster_pb.encryption_config.kms_key_name
+ else:
+ self._kms_key_name = None
+ self._state = cluster_pb.state
+
+ @property
+ def name(self):
+ """Cluster name used in requests.
+
+ .. note::
+ This property will not change if ``_instance`` and ``cluster_id``
+ do not, but the return value is not cached.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_cluster_name]
+ :end-before: [END bigtable_api_cluster_name]
+ :dedent: 4
+
+ The cluster name is of the form
+
+ ``"projects/{project}/instances/{instance}/clusters/{cluster_id}"``
+
+ :rtype: str
+ :returns: The cluster name.
+ """
+ return self._instance._client.instance_admin_client.cluster_path(
+ self._instance._client.project, self._instance.instance_id, self.cluster_id
+ )
+
+ @property
+ def state(self):
+ """google.cloud.bigtable.enums.Cluster.State: state of cluster.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_cluster_state]
+ :end-before: [END bigtable_api_cluster_state]
+ :dedent: 4
+
+ """
+ return self._state
+
+ @property
+ def kms_key_name(self):
+ """str: Customer managed encryption key for the cluster."""
+ return self._kms_key_name
+
+ def _validate_scaling_config(self):
+ """Validate auto/manual scaling configuration before creating or updating."""
+
+ if (
+ not self.serve_nodes
+ and not self.min_serve_nodes
+ and not self.max_serve_nodes
+ and not self.cpu_utilization_percent
+ ):
+ raise ValueError(
+ "Must specify either serve_nodes or all of the autoscaling configurations (min_serve_nodes, max_serve_nodes, and cpu_utilization_percent)."
+ )
+ if self.serve_nodes and (
+ self.max_serve_nodes or self.min_serve_nodes or self.cpu_utilization_percent
+ ):
+ raise ValueError(
+ "Cannot specify both serve_nodes and autoscaling configurations (min_serve_nodes, max_serve_nodes, and cpu_utilization_percent)."
+ )
+ if (
+ (
+ self.min_serve_nodes
+ and (not self.max_serve_nodes or not self.cpu_utilization_percent)
+ )
+ or (
+ self.max_serve_nodes
+ and (not self.min_serve_nodes or not self.cpu_utilization_percent)
+ )
+ or (
+ self.cpu_utilization_percent
+ and (not self.min_serve_nodes or not self.max_serve_nodes)
+ )
+ ):
+ raise ValueError(
+ "All of autoscaling configurations must be specified at the same time (min_serve_nodes, max_serve_nodes, and cpu_utilization_percent)."
+ )
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ # NOTE: This does not compare the configuration values, such as
+ # the serve_nodes. Instead, it only compares
+ # identifying values instance, cluster ID and client. This is
+ # intentional, since the same cluster can be in different states
+ # if not synchronized. Clusters with similar instance/cluster
+ # settings but different clients can't be used in the same way.
+ return other.cluster_id == self.cluster_id and other._instance == self._instance
+
+ def __ne__(self, other):
+ return not self == other
+
+ def reload(self):
+ """Reload the metadata for this cluster.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_reload_cluster]
+ :end-before: [END bigtable_api_reload_cluster]
+ :dedent: 4
+ """
+ cluster_pb = self._instance._client.instance_admin_client.get_cluster(
+ request={"name": self.name}
+ )
+
+ # NOTE: _update_from_pb does not check that the project and
+ # cluster ID on the response match the request.
+ self._update_from_pb(cluster_pb)
+
+ def exists(self):
+ """Check whether the cluster already exists.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_check_cluster_exists]
+ :end-before: [END bigtable_api_check_cluster_exists]
+ :dedent: 4
+
+ :rtype: bool
+ :returns: True if the cluster exists, else False.
+ """
+ client = self._instance._client
+ try:
+ client.instance_admin_client.get_cluster(request={"name": self.name})
+ return True
+ # NOTE: There could be other exceptions that are returned to the user.
+ except NotFound:
+ return False
+
+ def create(self):
+ """Create this cluster.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_create_cluster]
+ :end-before: [END bigtable_api_create_cluster]
+ :dedent: 4
+
+ .. note::
+
+ Uses the ``project``, ``instance`` and ``cluster_id`` on the
+ current :class:`Cluster` in addition to the ``serve_nodes``.
+ To change them before creating, reset the values via
+
+ .. code:: python
+
+ cluster.serve_nodes = 8
+ cluster.cluster_id = 'i-changed-my-mind'
+
+ before calling :meth:`create`.
+
+ :rtype: :class:`~google.api_core.operation.Operation`
+ :returns: The long-running operation corresponding to the
+ create operation.
+
+ :raises: :class:`ValueError ` if both ``serve_nodes`` and autoscaling configurations
+ are set at the same time, if neither ``serve_nodes`` nor the autoscaling configurations are set,
+ or if the autoscaling configurations are only partially set.
+
+ """
+
+ self._validate_scaling_config()
+
+ client = self._instance._client
+ cluster_pb = self._to_pb()
+
+ return client.instance_admin_client.create_cluster(
+ request={
+ "parent": self._instance.name,
+ "cluster_id": self.cluster_id,
+ "cluster": cluster_pb,
+ }
+ )
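+
+ # A minimal usage sketch (editor's illustration), assuming an existing admin
+ # ``instance``: supply either ``serve_nodes`` for manual scaling or the full
+ # autoscaling triple, never both.
+ #
+ #   cluster = Cluster(
+ #       "my-cluster",
+ #       instance,
+ #       location_id="us-central1-b",
+ #       min_serve_nodes=1,
+ #       max_serve_nodes=5,
+ #       cpu_utilization_percent=60,
+ #   )
+ #   operation = cluster.create()
+ #   operation.result(timeout=120)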
+
+ def update(self):
+ """Update this cluster.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_update_cluster]
+ :end-before: [END bigtable_api_update_cluster]
+ :dedent: 4
+
+ .. note::
+
+ Updates the ``serve_nodes``. If you'd like to
+ change them before updating, reset the values via
+
+ .. code:: python
+
+ cluster.serve_nodes = 8
+
+ before calling :meth:`update`.
+
+ If autoscaling is already enabled, manual scaling will be silently ignored.
+ To disable autoscaling and enable manual scaling, use :meth:`disable_autoscaling` instead.
+
+ :rtype: :class:`Operation`
+ :returns: The long-running operation corresponding to the
+ update operation.
+
+ """
+
+ client = self._instance._client
+
+ update_mask_pb = field_mask_pb2.FieldMask()
+
+ if self.serve_nodes:
+ update_mask_pb.paths.append("serve_nodes")
+
+ if self.min_serve_nodes:
+ update_mask_pb.paths.append(
+ "cluster_config.cluster_autoscaling_config.autoscaling_limits.min_serve_nodes"
+ )
+ if self.max_serve_nodes:
+ update_mask_pb.paths.append(
+ "cluster_config.cluster_autoscaling_config.autoscaling_limits.max_serve_nodes"
+ )
+ if self.cpu_utilization_percent:
+ update_mask_pb.paths.append(
+ "cluster_config.cluster_autoscaling_config.autoscaling_targets.cpu_utilization_percent"
+ )
+
+ cluster_pb = self._to_pb()
+ cluster_pb.name = self.name
+
+ return client.instance_admin_client.partial_update_cluster(
+ request={"cluster": cluster_pb, "update_mask": update_mask_pb}
+ )
+
+ def disable_autoscaling(self, serve_nodes):
+ """
+ Disable autoscaling by specifying the number of nodes.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_cluster_disable_autoscaling]
+ :end-before: [END bigtable_api_cluster_disable_autoscaling]
+ :dedent: 4
+
+ :type serve_nodes: int
+ :param serve_nodes: The number of nodes in the cluster.
+ """
+
+ client = self._instance._client
+
+ update_mask_pb = field_mask_pb2.FieldMask()
+
+ self.serve_nodes = serve_nodes
+ self.min_serve_nodes = 0
+ self.max_serve_nodes = 0
+ self.cpu_utilization_percent = 0
+
+ update_mask_pb.paths.append("serve_nodes")
+ update_mask_pb.paths.append("cluster_config.cluster_autoscaling_config")
+ cluster_pb = self._to_pb()
+ cluster_pb.name = self.name
+
+ return client.instance_admin_client.partial_update_cluster(
+ request={"cluster": cluster_pb, "update_mask": update_mask_pb}
+ )
+
+ def delete(self):
+ """Delete this cluster.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_delete_cluster]
+ :end-before: [END bigtable_api_delete_cluster]
+ :dedent: 4
+
+ Marks a cluster and all of its tables for permanent deletion in 7 days.
+
+ Immediately upon completion of the request:
+
+ * Billing will cease for all of the cluster's reserved resources.
+ * The cluster's ``delete_time`` field will be set 7 days in the future.
+
+ Soon afterward:
+
+ * All tables within the cluster will become unavailable.
+
+ At the cluster's ``delete_time``:
+
+ * The cluster and **all of its tables** will immediately and
+ irrevocably disappear from the API, and their data will be
+ permanently deleted.
+ """
+ client = self._instance._client
+ client.instance_admin_client.delete_cluster(request={"name": self.name})
+
+ def _to_pb(self):
+ """Create a cluster protobuf message for API calls."""
+ client = self._instance._client
+ location = client.instance_admin_client.common_location_path(
+ client.project, self.location_id
+ )
+
+ cluster_pb = instance.Cluster(
+ location=location,
+ serve_nodes=self.serve_nodes,
+ default_storage_type=self.default_storage_type,
+ )
+ if self._kms_key_name:
+ cluster_pb.encryption_config = instance.Cluster.EncryptionConfig(
+ kms_key_name=self._kms_key_name,
+ )
+
+ if self.min_serve_nodes:
+ cluster_pb.cluster_config.cluster_autoscaling_config.autoscaling_limits.min_serve_nodes = (
+ self.min_serve_nodes
+ )
+ if self.max_serve_nodes:
+ cluster_pb.cluster_config.cluster_autoscaling_config.autoscaling_limits.max_serve_nodes = (
+ self.max_serve_nodes
+ )
+ if self.cpu_utilization_percent:
+ cluster_pb.cluster_config.cluster_autoscaling_config.autoscaling_targets.cpu_utilization_percent = (
+ self.cpu_utilization_percent
+ )
+
+ return cluster_pb
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py
new file mode 100644
index 000000000000..80232958d492
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py
@@ -0,0 +1,362 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""User friendly container for Google Cloud Bigtable Column Family."""
+
+
+from google.cloud import _helpers
+from google.cloud.bigtable_admin_v2.types import table as table_v2_pb2
+from google.cloud.bigtable_admin_v2.types import (
+ bigtable_table_admin as table_admin_v2_pb2,
+)
+from google.api_core.gapic_v1.method import DEFAULT
+
+
+class GarbageCollectionRule(object):
+ """Garbage collection rule for column families within a table.
+
+ Cells in the column family (within a table) fitting the rule will be
+ deleted during garbage collection.
+
+ .. note::
+
+ This class is a do-nothing base class for all GC rules.
+
+ .. note::
+
+ A string ``gc_expression`` can also be used with API requests, but
+ that value would be superseded by a ``gc_rule``. As a result, we
+ don't support that feature and instead support GC rules via native classes.
+ """
+
+
+class MaxVersionsGCRule(GarbageCollectionRule):
+ """Garbage collection limiting the number of versions of a cell.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_create_family_gc_max_versions]
+ :end-before: [END bigtable_api_create_family_gc_max_versions]
+ :dedent: 4
+
+ :type max_num_versions: int
+ :param max_num_versions: The maximum number of versions
+ """
+
+ def __init__(self, max_num_versions):
+ self.max_num_versions = max_num_versions
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.max_num_versions == self.max_num_versions
+
+ def __ne__(self, other):
+ return not self == other
+
+ def to_pb(self):
+ """Converts the garbage collection rule to a protobuf.
+
+ :rtype: :class:`.table_v2_pb2.GcRule`
+ :returns: The converted current object.
+ """
+ return table_v2_pb2.GcRule(max_num_versions=self.max_num_versions)
+
+
+class MaxAgeGCRule(GarbageCollectionRule):
+ """Garbage collection limiting the age of a cell.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_create_family_gc_max_age]
+ :end-before: [END bigtable_api_create_family_gc_max_age]
+ :dedent: 4
+
+ :type max_age: :class:`datetime.timedelta`
+ :param max_age: The maximum age allowed for a cell in the table.
+ """
+
+ def __init__(self, max_age):
+ self.max_age = max_age
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.max_age == self.max_age
+
+ def __ne__(self, other):
+ return not self == other
+
+ def to_pb(self):
+ """Converts the garbage collection rule to a protobuf.
+
+ :rtype: :class:`.table_v2_pb2.GcRule`
+ :returns: The converted current object.
+ """
+ max_age = _helpers._timedelta_to_duration_pb(self.max_age)
+ return table_v2_pb2.GcRule(max_age=max_age)
+
+
+class GCRuleUnion(GarbageCollectionRule):
+ """Union of garbage collection rules.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_create_family_gc_union]
+ :end-before: [END bigtable_api_create_family_gc_union]
+ :dedent: 4
+
+ :type rules: list
+ :param rules: List of :class:`GarbageCollectionRule`.
+ """
+
+ def __init__(self, rules):
+ self.rules = rules
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.rules == self.rules
+
+ def __ne__(self, other):
+ return not self == other
+
+ def to_pb(self):
+ """Converts the union into a single GC rule as a protobuf.
+
+ :rtype: :class:`.table_v2_pb2.GcRule`
+ :returns: The converted current object.
+ """
+ union = table_v2_pb2.GcRule.Union(rules=[rule.to_pb() for rule in self.rules])
+ return table_v2_pb2.GcRule(union=union)
+
+
+class GCRuleIntersection(GarbageCollectionRule):
+ """Intersection of garbage collection rules.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_create_family_gc_intersection]
+ :end-before: [END bigtable_api_create_family_gc_intersection]
+ :dedent: 4
+
+ :type rules: list
+ :param rules: List of :class:`GarbageCollectionRule`.
+ """
+
+ def __init__(self, rules):
+ self.rules = rules
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.rules == self.rules
+
+ def __ne__(self, other):
+ return not self == other
+
+ def to_pb(self):
+ """Converts the intersection into a single GC rule as a protobuf.
+
+ :rtype: :class:`.table_v2_pb2.GcRule`
+ :returns: The converted current object.
+ """
+ intersection = table_v2_pb2.GcRule.Intersection(
+ rules=[rule.to_pb() for rule in self.rules]
+ )
+ return table_v2_pb2.GcRule(intersection=intersection)
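+
+
+# A minimal usage sketch (editor's illustration) combining the rules above,
+# assuming an existing ``table`` object exposing a ``column_family`` factory:
+#
+#   import datetime
+#
+#   keep_recent = MaxAgeGCRule(datetime.timedelta(days=30))
+#   keep_few = MaxVersionsGCRule(3)
+#   gc_rule = GCRuleUnion(rules=[keep_recent, keep_few])
+#   column_family = table.column_family("cf1", gc_rule=gc_rule)
+#   column_family.create()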
+
+
+class ColumnFamily(object):
+ """Representation of a Google Cloud Bigtable Column Family.
+
+ We can use a :class:`ColumnFamily` to:
+
+ * :meth:`create` itself
+ * :meth:`update` itself
+ * :meth:`delete` itself
+
+ :type column_family_id: str
+ :param column_family_id: The ID of the column family. Must be of the
+ form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+ :type table: :class:`Table `
+ :param table: The table that owns the column family.
+
+ :type gc_rule: :class:`GarbageCollectionRule`
+ :param gc_rule: (Optional) The garbage collection settings for this
+ column family.
+ """
+
+ def __init__(self, column_family_id, table, gc_rule=None):
+ self.column_family_id = column_family_id
+ self._table = table
+ self.gc_rule = gc_rule
+
+ @property
+ def name(self):
+ """Column family name used in requests.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_column_family_name]
+ :end-before: [END bigtable_api_column_family_name]
+ :dedent: 4
+
+ .. note::
+
+ This property will not change if ``column_family_id`` does not, but
+ the return value is not cached.
+
+ The Column family name is of the form
+
+ ``"projects/{project}/instances/{instance}/tables/{table}/columnFamilies/{column_family_id}"``
+
+ :rtype: str
+ :returns: The column family name.
+ """
+ return self._table.name + "/columnFamilies/" + self.column_family_id
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return (
+ other.column_family_id == self.column_family_id
+ and other._table == self._table
+ and other.gc_rule == self.gc_rule
+ )
+
+ def __ne__(self, other):
+ return not self == other
+
+ def to_pb(self):
+ """Converts the column family to a protobuf.
+
+ :rtype: :class:`.table_v2_pb2.ColumnFamily`
+ :returns: The converted current object.
+ """
+ if self.gc_rule is None:
+ return table_v2_pb2.ColumnFamily()
+ else:
+ return table_v2_pb2.ColumnFamily(gc_rule=self.gc_rule.to_pb())
+
+ def create(self):
+ """Create this column family.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_create_column_family]
+ :end-before: [END bigtable_api_create_column_family]
+ :dedent: 4
+
+ """
+ column_family = self.to_pb()
+ modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification(
+ id=self.column_family_id, create=column_family
+ )
+
+ client = self._table._instance._client
+ # The modification already carries the GC rule and the column family ID
+ # stored on this instance, so no other data is needed for this request.
+ client.table_admin_client.modify_column_families(
+ request={"name": self._table.name, "modifications": [modification]},
+ timeout=DEFAULT,
+ )
+
+ def update(self):
+ """Update this column family.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_update_column_family]
+ :end-before: [END bigtable_api_update_column_family]
+ :dedent: 4
+
+ .. note::
+
+ Only the GC rule can be updated. By changing the column family ID,
+ you will simply be referring to a different column family.
+ """
+ column_family = self.to_pb()
+ modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification(
+ id=self.column_family_id, update=column_family
+ )
+
+ client = self._table._instance._client
+ # The modification already carries the GC rule and the column family ID
+ # stored on this instance, so no other data is needed for this request.
+ client.table_admin_client.modify_column_families(
+ request={"name": self._table.name, "modifications": [modification]},
+ timeout=DEFAULT,
+ )
+
+ def delete(self):
+ """Delete this column family.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_delete_column_family]
+ :end-before: [END bigtable_api_delete_column_family]
+ :dedent: 4
+
+ """
+ modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification(
+ id=self.column_family_id, drop=True
+ )
+
+ client = self._table._instance._client
+ # The modification only needs the column family ID, which is already
+ # stored on this instance.
+ client.table_admin_client.modify_column_families(
+ request={"name": self._table.name, "modifications": [modification]},
+ timeout=DEFAULT,
+ )
+
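+# A minimal usage sketch (illustrative only; the ``table`` object and IDs are
+# hypothetical) for the ColumnFamily helper defined above:
+#
+#   cf = ColumnFamily("cf1", table, gc_rule=MaxVersionsGCRule(2))
+#   cf.create()  # issues a ModifyColumnFamilies request via the admin client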
+
+def _gc_rule_from_pb(gc_rule_pb):
+ """Convert a protobuf GC rule to a native object.
+
+ :type gc_rule_pb: :class:`.table_v2_pb2.GcRule`
+ :param gc_rule_pb: The GC rule to convert.
+
+ :rtype: :class:`GarbageCollectionRule` or :data:`NoneType`
+ :returns: An instance of one of the native rules defined
+ in :mod:`column_family` or :data:`None` if no values were
+ set on the protobuf passed in.
+ :raises: :class:`ValueError` if the rule name
+ is unexpected.
+ """
+ rule_name = gc_rule_pb._pb.WhichOneof("rule")
+ if rule_name is None:
+ return None
+
+ if rule_name == "max_num_versions":
+ return MaxVersionsGCRule(gc_rule_pb.max_num_versions)
+ elif rule_name == "max_age":
+ return MaxAgeGCRule(gc_rule_pb.max_age)
+ elif rule_name == "union":
+ return GCRuleUnion([_gc_rule_from_pb(rule) for rule in gc_rule_pb.union.rules])
+ elif rule_name == "intersection":
+ rules = [_gc_rule_from_pb(rule) for rule in gc_rule_pb.intersection.rules]
+ return GCRuleIntersection(rules)
+ else:
+ raise ValueError("Unexpected rule name", rule_name)
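+
+
+# A minimal round-trip sketch (illustrative only) for the conversion above:
+#
+#   pb = MaxVersionsGCRule(3).to_pb()
+#   _gc_rule_from_pb(pb)  # -> MaxVersionsGCRule(3)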
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/README.rst b/packages/google-cloud-bigtable/google/cloud/bigtable/data/README.rst
new file mode 100644
index 000000000000..8142cc34d9c8
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/README.rst
@@ -0,0 +1,9 @@
+Async Data Client
+=================
+
+Synchronous API surface and usage examples coming soon
+
+Feedback and bug reports are welcome at cbt-python-client-v3-feedback@google.com,
+or through the Github `issue tracker`_.
+
+.. _issue tracker: https://github.com/googleapis/python-bigtable/issues
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py
new file mode 100644
index 000000000000..9439f0f8d19b
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py
@@ -0,0 +1,107 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from google.cloud.bigtable import gapic_version as package_version
+
+from google.cloud.bigtable.data._async.client import BigtableDataClientAsync
+from google.cloud.bigtable.data._async.client import TableAsync
+from google.cloud.bigtable.data._async.client import AuthorizedViewAsync
+from google.cloud.bigtable.data._async.mutations_batcher import MutationsBatcherAsync
+from google.cloud.bigtable.data._sync_autogen.client import BigtableDataClient
+from google.cloud.bigtable.data._sync_autogen.client import Table
+from google.cloud.bigtable.data._sync_autogen.client import AuthorizedView
+from google.cloud.bigtable.data._sync_autogen.mutations_batcher import MutationsBatcher
+
+from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery
+from google.cloud.bigtable.data.read_rows_query import RowRange
+from google.cloud.bigtable.data.row import Row
+from google.cloud.bigtable.data.row import Cell
+
+from google.cloud.bigtable.data.mutations import Mutation
+from google.cloud.bigtable.data.mutations import RowMutationEntry
+from google.cloud.bigtable.data.mutations import SetCell
+from google.cloud.bigtable.data.mutations import DeleteRangeFromColumn
+from google.cloud.bigtable.data.mutations import DeleteAllFromFamily
+from google.cloud.bigtable.data.mutations import DeleteAllFromRow
+
+from google.cloud.bigtable.data.exceptions import InvalidChunk
+from google.cloud.bigtable.data.exceptions import FailedMutationEntryError
+from google.cloud.bigtable.data.exceptions import FailedQueryShardError
+
+from google.cloud.bigtable.data.exceptions import RetryExceptionGroup
+from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup
+from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup
+from google.cloud.bigtable.data.exceptions import ParameterTypeInferenceFailed
+
+from google.cloud.bigtable.data._helpers import TABLE_DEFAULT
+from google.cloud.bigtable.data._helpers import RowKeySamples
+from google.cloud.bigtable.data._helpers import ShardedQuery
+
+# setup custom CrossSync mappings for library
+from google.cloud.bigtable_v2.services.bigtable.async_client import (
+ BigtableAsyncClient,
+)
+from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync
+from google.cloud.bigtable.data._async._mutate_rows import _MutateRowsOperationAsync
+
+from google.cloud.bigtable_v2.services.bigtable.client import (
+ BigtableClient,
+)
+from google.cloud.bigtable.data._sync_autogen._read_rows import _ReadRowsOperation
+from google.cloud.bigtable.data._sync_autogen._mutate_rows import _MutateRowsOperation
+
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+CrossSync.add_mapping("GapicClient", BigtableAsyncClient)
+CrossSync._Sync_Impl.add_mapping("GapicClient", BigtableClient)
+CrossSync.add_mapping("_ReadRowsOperation", _ReadRowsOperationAsync)
+CrossSync._Sync_Impl.add_mapping("_ReadRowsOperation", _ReadRowsOperation)
+CrossSync.add_mapping("_MutateRowsOperation", _MutateRowsOperationAsync)
+CrossSync._Sync_Impl.add_mapping("_MutateRowsOperation", _MutateRowsOperation)
+CrossSync.add_mapping("MutationsBatcher", MutationsBatcherAsync)
+CrossSync._Sync_Impl.add_mapping("MutationsBatcher", MutationsBatcher)
+
+__version__: str = package_version.__version__
+
+__all__ = (
+ "BigtableDataClientAsync",
+ "TableAsync",
+ "AuthorizedViewAsync",
+ "MutationsBatcherAsync",
+ "BigtableDataClient",
+ "Table",
+ "AuthorizedView",
+ "MutationsBatcher",
+ "RowKeySamples",
+ "ReadRowsQuery",
+ "RowRange",
+ "Mutation",
+ "RowMutationEntry",
+ "SetCell",
+ "DeleteRangeFromColumn",
+ "DeleteAllFromFamily",
+ "DeleteAllFromRow",
+ "Row",
+ "Cell",
+ "InvalidChunk",
+ "FailedMutationEntryError",
+ "FailedQueryShardError",
+ "RetryExceptionGroup",
+ "MutationsExceptionGroup",
+ "ShardedReadRowsExceptionGroup",
+ "ParameterTypeInferenceFailed",
+ "ShardedQuery",
+ "TABLE_DEFAULT",
+)
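+
+# A minimal usage sketch of the async surface exported above (illustrative
+# only; the project, instance, and table IDs are hypothetical, and the calls
+# must run inside a coroutine):
+#
+#   client = BigtableDataClientAsync(project="my-project")
+#   table = client.get_table("my-instance", "my-table")
+#   row = await table.read_row(b"row-key")
+#   await client.close()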
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/__init__.py
new file mode 100644
index 000000000000..e13c9acb7c0e
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/__init__.py
@@ -0,0 +1,25 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from google.cloud.bigtable.data._async.client import BigtableDataClientAsync
+from google.cloud.bigtable.data._async.client import TableAsync
+
+from google.cloud.bigtable.data._async.mutations_batcher import MutationsBatcherAsync
+
+
+__all__ = [
+ "BigtableDataClientAsync",
+ "TableAsync",
+ "MutationsBatcherAsync",
+]
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py
new file mode 100644
index 000000000000..8e6833bcafee
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py
@@ -0,0 +1,229 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+from typing import Sequence, TYPE_CHECKING
+
+from google.api_core import exceptions as core_exceptions
+from google.api_core import retry as retries
+import google.cloud.bigtable_v2.types.bigtable as types_pb
+import google.cloud.bigtable.data.exceptions as bt_exceptions
+from google.cloud.bigtable.data._helpers import _attempt_timeout_generator
+from google.cloud.bigtable.data._helpers import _retry_exception_factory
+
+# mutate_rows requests are limited to this number of mutations
+from google.cloud.bigtable.data.mutations import _MUTATE_ROWS_REQUEST_MUTATION_LIMIT
+from google.cloud.bigtable.data.mutations import _EntryWithProto
+
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+if TYPE_CHECKING:
+ from google.cloud.bigtable.data.mutations import RowMutationEntry
+
+ if CrossSync.is_async:
+ from google.cloud.bigtable_v2.services.bigtable.async_client import (
+ BigtableAsyncClient as GapicClientType,
+ )
+ from google.cloud.bigtable.data._async.client import ( # type: ignore
+ _DataApiTargetAsync as TargetType,
+ )
+ else:
+ from google.cloud.bigtable_v2.services.bigtable.client import ( # type: ignore
+ BigtableClient as GapicClientType,
+ )
+ from google.cloud.bigtable.data._sync_autogen.client import ( # type: ignore
+ _DataApiTarget as TargetType,
+ )
+
+__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen._mutate_rows"
+
+
+@CrossSync.convert_class("_MutateRowsOperation")
+class _MutateRowsOperationAsync:
+ """
+ MutateRowsOperation manages the logic of sending a set of row mutations,
+ and retrying on failed entries. It manages this using the _run_attempt
+ function, which attempts to mutate all outstanding entries, and raises
+ _MutateRowsIncomplete if any retryable errors are encountered.
+
+ Errors are exposed as a MutationsExceptionGroup, which contains a list of
+ exceptions organized by the related failed mutation entries.
+
+ Args:
+ gapic_client: the client to use for the mutate_rows call
+ target: the table or view associated with the request
+ mutation_entries: a list of RowMutationEntry objects to send to the server
+ operation_timeout: the timeout to use for the entire operation, in seconds.
+ attempt_timeout: the timeout to use for each mutate_rows attempt, in seconds.
+ If not specified, the request will run until operation_timeout is reached.
+ """
+
+ @CrossSync.convert
+ def __init__(
+ self,
+ gapic_client: GapicClientType,
+ target: TargetType,
+ mutation_entries: list["RowMutationEntry"],
+ operation_timeout: float,
+ attempt_timeout: float | None,
+ retryable_exceptions: Sequence[type[Exception]] = (),
+ ):
+ # check that mutations are within limits
+ total_mutations = sum(len(entry.mutations) for entry in mutation_entries)
+ if total_mutations > _MUTATE_ROWS_REQUEST_MUTATION_LIMIT:
+ raise ValueError(
+ "mutate_rows requests can contain at most "
+ f"{_MUTATE_ROWS_REQUEST_MUTATION_LIMIT} mutations across "
+ f"all entries. Found {total_mutations}."
+ )
+ self._target = target
+ self._gapic_fn = gapic_client.mutate_rows
+ # create predicate for determining which errors are retryable
+ self.is_retryable = retries.if_exception_type(
+ # RPC level errors
+ *retryable_exceptions,
+ # Entry level errors
+ bt_exceptions._MutateRowsIncomplete,
+ )
+ sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60)
+ self._operation = lambda: CrossSync.retry_target(
+ self._run_attempt,
+ self.is_retryable,
+ sleep_generator,
+ operation_timeout,
+ exception_factory=_retry_exception_factory,
+ )
+ # initialize state
+ self.timeout_generator = _attempt_timeout_generator(
+ attempt_timeout, operation_timeout
+ )
+ self.mutations = [_EntryWithProto(m, m._to_pb()) for m in mutation_entries]
+ self.remaining_indices = list(range(len(self.mutations)))
+ self.errors: dict[int, list[Exception]] = {}
+
+ @CrossSync.convert
+ async def start(self):
+ """
+ Start the operation, and run until completion
+
+ Raises:
+ MutationsExceptionGroup: if any mutations failed
+ """
+ try:
+ # trigger mutate_rows
+ await self._operation()
+ except Exception as exc:
+ # exceptions raised by the retry wrapper are added to the error lists of all unfinalized mutations
+ incomplete_indices = self.remaining_indices.copy()
+ for idx in incomplete_indices:
+ self._handle_entry_error(idx, exc)
+ finally:
+ # raise exception detailing incomplete mutations
+ all_errors: list[Exception] = []
+ for idx, exc_list in self.errors.items():
+ if len(exc_list) == 0:
+ raise core_exceptions.ClientError(
+ f"Mutation {idx} failed with no associated errors"
+ )
+ elif len(exc_list) == 1:
+ cause_exc = exc_list[0]
+ else:
+ cause_exc = bt_exceptions.RetryExceptionGroup(exc_list)
+ entry = self.mutations[idx].entry
+ all_errors.append(
+ bt_exceptions.FailedMutationEntryError(idx, entry, cause_exc)
+ )
+ if all_errors:
+ raise bt_exceptions.MutationsExceptionGroup(
+ all_errors, len(self.mutations)
+ )
+
+ @CrossSync.convert
+ async def _run_attempt(self):
+ """
+ Run a single attempt of the mutate_rows rpc.
+
+ Raises:
+ _MutateRowsIncomplete: if there are failed mutations eligible for
+ retry after the attempt is complete
+ GoogleAPICallError: if the gapic rpc fails
+ """
+ request_entries = [self.mutations[idx].proto for idx in self.remaining_indices]
+ # track mutations in this request that have not been finalized yet
+ active_request_indices = {
+ req_idx: orig_idx for req_idx, orig_idx in enumerate(self.remaining_indices)
+ }
+ self.remaining_indices = []
+ if not request_entries:
+ # no more mutations. return early
+ return
+ # make gapic request
+ try:
+ result_generator = await self._gapic_fn(
+ request=types_pb.MutateRowsRequest(
+ entries=request_entries,
+ app_profile_id=self._target.app_profile_id,
+ **self._target._request_path,
+ ),
+ timeout=next(self.timeout_generator),
+ retry=None,
+ )
+ async for result_list in result_generator:
+ for result in result_list.entries:
+ # convert sub-request index to global index
+ orig_idx = active_request_indices[result.index]
+ entry_error = core_exceptions.from_grpc_status(
+ result.status.code,
+ result.status.message,
+ details=result.status.details,
+ )
+ if result.status.code != 0:
+ # mutation failed; update error list (and remaining_indices if retryable)
+ self._handle_entry_error(orig_idx, entry_error)
+ elif orig_idx in self.errors:
+ # mutation succeeded; remove from error list
+ del self.errors[orig_idx]
+ # remove processed entry from active list
+ del active_request_indices[result.index]
+ except Exception as exc:
+ # add this exception to list for each mutation that wasn't
+ # already handled, and update remaining_indices if mutation is retryable
+ for idx in active_request_indices.values():
+ self._handle_entry_error(idx, exc)
+ # bubble up exception to be handled by retry wrapper
+ raise
+ # check if attempt succeeded, or needs to be retried
+ if self.remaining_indices:
+ # unfinished work; raise exception to trigger retry
+ raise bt_exceptions._MutateRowsIncomplete
+
+ def _handle_entry_error(self, idx: int, exc: Exception):
+ """
+ Add an exception to the list of exceptions for a given mutation index,
+ and add the index to the list of remaining indices if the exception is
+ retryable.
+
+ Args:
+ idx: the index of the mutation that failed
+ exc: the exception to add to the list
+ """
+ entry = self.mutations[idx].entry
+ self.errors.setdefault(idx, []).append(exc)
+ if (
+ entry.is_idempotent()
+ and self.is_retryable(exc)
+ and idx not in self.remaining_indices
+ ):
+ self.remaining_indices.append(idx)
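+
+
+# A minimal sketch (illustrative only; the client, target, and entries are
+# hypothetical) of how this operation is typically driven:
+#
+#   from google.api_core.exceptions import DeadlineExceeded, ServiceUnavailable
+#
+#   operation = _MutateRowsOperationAsync(
+#       gapic_client,
+#       target,
+#       entries,
+#       operation_timeout=60,
+#       attempt_timeout=20,
+#       retryable_exceptions=(DeadlineExceeded, ServiceUnavailable),
+#   )
+#   await operation.start()  # raises MutationsExceptionGroup if any entry failed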
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py
new file mode 100644
index 000000000000..8787bfa71411
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py
@@ -0,0 +1,365 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from __future__ import annotations
+
+from typing import Sequence, TYPE_CHECKING
+
+from google.cloud.bigtable_v2.types import ReadRowsRequest as ReadRowsRequestPB
+from google.cloud.bigtable_v2.types import ReadRowsResponse as ReadRowsResponsePB
+from google.cloud.bigtable_v2.types import RowSet as RowSetPB
+from google.cloud.bigtable_v2.types import RowRange as RowRangePB
+
+from google.cloud.bigtable.data.row import Row, Cell
+from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery
+from google.cloud.bigtable.data.exceptions import InvalidChunk
+from google.cloud.bigtable.data.exceptions import _RowSetComplete
+from google.cloud.bigtable.data.exceptions import _ResetRow
+from google.cloud.bigtable.data._helpers import _attempt_timeout_generator
+from google.cloud.bigtable.data._helpers import _retry_exception_factory
+
+from google.api_core import retry as retries
+from google.api_core.retry import exponential_sleep_generator
+
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+if TYPE_CHECKING:
+ if CrossSync.is_async:
+ from google.cloud.bigtable.data._async.client import (
+ _DataApiTargetAsync as TargetType,
+ )
+ else:
+ from google.cloud.bigtable.data._sync_autogen.client import _DataApiTarget as TargetType # type: ignore
+
+__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen._read_rows"
+
+
+@CrossSync.convert_class("_ReadRowsOperation")
+class _ReadRowsOperationAsync:
+ """
+ ReadRowsOperation handles the logic of merging chunks from a ReadRowsResponse stream
+ into a stream of Row objects.
+
+ chunk_stream extracts the individual CellChunks from the raw ReadRowsResponse
+ stream, and merge_rows assembles those chunks into a stream of Row objects.
+
+ start_operation drives the row merging logic end-to-end, including
+ performing retries on stream errors.
+
+ Args:
+ query: The query to execute
+ target: The table or view to send the request to
+ operation_timeout: The total time to allow for the operation, in seconds
+ attempt_timeout: The time to allow for each individual attempt, in seconds
+ retryable_exceptions: A list of exceptions that should trigger a retry
+ """
+
+ __slots__ = (
+ "attempt_timeout_gen",
+ "operation_timeout",
+ "request",
+ "target",
+ "_predicate",
+ "_last_yielded_row_key",
+ "_remaining_count",
+ )
+
+ def __init__(
+ self,
+ query: ReadRowsQuery,
+ target: TargetType,
+ operation_timeout: float,
+ attempt_timeout: float,
+ retryable_exceptions: Sequence[type[Exception]] = (),
+ ):
+ self.attempt_timeout_gen = _attempt_timeout_generator(
+ attempt_timeout, operation_timeout
+ )
+ self.operation_timeout = operation_timeout
+ if isinstance(query, dict):
+ self.request = ReadRowsRequestPB(
+ **query,
+ **target._request_path,
+ app_profile_id=target.app_profile_id,
+ )
+ else:
+ self.request = query._to_pb(target)
+ self.target = target
+ self._predicate = retries.if_exception_type(*retryable_exceptions)
+ self._last_yielded_row_key: bytes | None = None
+ self._remaining_count: int | None = self.request.rows_limit or None
+
+ def start_operation(self) -> CrossSync.Iterable[Row]:
+ """
+ Start the read_rows operation, retrying on retryable errors.
+
+ Yields:
+ Row: The next row in the stream
+ """
+ return CrossSync.retry_target_stream(
+ self._read_rows_attempt,
+ self._predicate,
+ exponential_sleep_generator(0.01, 60, multiplier=2),
+ self.operation_timeout,
+ exception_factory=_retry_exception_factory,
+ )
+
+ def _read_rows_attempt(self) -> CrossSync.Iterable[Row]:
+ """
+ Attempt a single read_rows rpc call.
+ This function is intended to be wrapped by retry logic,
+ which will call this function until it succeeds or
+ a non-retryable error is raised.
+
+ Yields:
+ Row: The next row in the stream
+ """
+ # revise request keys and ranges between attempts
+ if self._last_yielded_row_key is not None:
+ # if this is a retry, try to trim down the request to avoid ones we've already processed
+ try:
+ self.request.rows = self._revise_request_rowset(
+ row_set=self.request.rows,
+ last_seen_row_key=self._last_yielded_row_key,
+ )
+ except _RowSetComplete:
+ # if we've already seen all the rows, we're done
+ return self.merge_rows(None)
+ # revise the limit based on number of rows already yielded
+ if self._remaining_count is not None:
+ self.request.rows_limit = self._remaining_count
+ if self._remaining_count == 0:
+ return self.merge_rows(None)
+ # create and return a new row merger
+ gapic_stream = self.target.client._gapic_client.read_rows(
+ self.request,
+ timeout=next(self.attempt_timeout_gen),
+ retry=None,
+ )
+ chunked_stream = self.chunk_stream(gapic_stream)
+ return self.merge_rows(chunked_stream)
+
+ @CrossSync.convert()
+ async def chunk_stream(
+ self, stream: CrossSync.Awaitable[CrossSync.Iterable[ReadRowsResponsePB]]
+ ) -> CrossSync.Iterable[ReadRowsResponsePB.CellChunk]:
+ """
+ process chunks out of raw read_rows stream
+
+ Args:
+ stream: the raw read_rows stream from the gapic client
+ Yields:
+ ReadRowsResponsePB.CellChunk: the next chunk in the stream
+ """
+ async for resp in await stream:
+ # extract proto from proto-plus wrapper
+ resp = resp._pb
+
+ # handle last_scanned_row_key packets, sent when server
+ # has scanned past the end of the row range
+ if resp.last_scanned_row_key:
+ if (
+ self._last_yielded_row_key is not None
+ and resp.last_scanned_row_key <= self._last_yielded_row_key
+ ):
+ raise InvalidChunk("last scanned out of order")
+ self._last_yielded_row_key = resp.last_scanned_row_key
+
+ current_key = None
+ # process each chunk in the response
+ for c in resp.chunks:
+ if current_key is None:
+ current_key = c.row_key
+ if current_key is None:
+ raise InvalidChunk("first chunk is missing a row key")
+ elif (
+ self._last_yielded_row_key
+ and current_key <= self._last_yielded_row_key
+ ):
+ raise InvalidChunk("row keys should be strictly increasing")
+
+ yield c
+
+ if c.reset_row:
+ current_key = None
+ elif c.commit_row:
+ # update row state after each commit
+ self._last_yielded_row_key = current_key
+ if self._remaining_count is not None:
+ self._remaining_count -= 1
+ if self._remaining_count < 0:
+ raise InvalidChunk("emit count exceeds row limit")
+ current_key = None
+
+ @staticmethod
+ @CrossSync.convert(
+ replace_symbols={"__aiter__": "__iter__", "__anext__": "__next__"},
+ )
+ async def merge_rows(
+ chunks: CrossSync.Iterable[ReadRowsResponsePB.CellChunk] | None,
+ ) -> CrossSync.Iterable[Row]:
+ """
+ Merge chunks into rows
+
+ Args:
+ chunks: the chunk stream to merge
+ Yields:
+ Row: the next row in the stream
+ """
+ if chunks is None:
+ return
+ it = chunks.__aiter__()
+ # For each row
+ while True:
+ try:
+ c = await it.__anext__()
+ except CrossSync.StopIteration:
+ # stream complete
+ return
+ row_key = c.row_key
+
+ if not row_key:
+ raise InvalidChunk("first row chunk is missing key")
+
+ cells = []
+
+ # shared per cell storage
+ family: str | None = None
+ qualifier: bytes | None = None
+
+ try:
+ # for each cell
+ while True:
+ if c.reset_row:
+ raise _ResetRow(c)
+ k = c.row_key
+ f = c.family_name.value
+ q = c.qualifier.value if c.HasField("qualifier") else None
+ if k and k != row_key:
+ raise InvalidChunk("unexpected new row key")
+ if f:
+ family = f
+ if q is not None:
+ qualifier = q
+ else:
+ raise InvalidChunk("new family without qualifier")
+ elif family is None:
+ raise InvalidChunk("missing family")
+ elif q is not None:
+ if family is None:
+ raise InvalidChunk("new qualifier without family")
+ qualifier = q
+ elif qualifier is None:
+ raise InvalidChunk("missing qualifier")
+
+ ts = c.timestamp_micros
+ labels = c.labels if c.labels else []
+ value = c.value
+
+ # merge split cells
+ if c.value_size > 0:
+ buffer = [value]
+ while c.value_size > 0:
+ # throws when premature end
+ c = await it.__anext__()
+
+ t = c.timestamp_micros
+ cl = c.labels
+ k = c.row_key
+ if (
+ c.HasField("family_name")
+ and c.family_name.value != family
+ ):
+ raise InvalidChunk("family changed mid cell")
+ if (
+ c.HasField("qualifier")
+ and c.qualifier.value != qualifier
+ ):
+ raise InvalidChunk("qualifier changed mid cell")
+ if t and t != ts:
+ raise InvalidChunk("timestamp changed mid cell")
+ if cl and cl != labels:
+ raise InvalidChunk("labels changed mid cell")
+ if k and k != row_key:
+ raise InvalidChunk("row key changed mid cell")
+
+ if c.reset_row:
+ raise _ResetRow(c)
+ buffer.append(c.value)
+ value = b"".join(buffer)
+ cells.append(
+ Cell(value, row_key, family, qualifier, ts, list(labels))
+ )
+ if c.commit_row:
+ yield Row(row_key, cells)
+ break
+ c = await it.__anext__()
+ except _ResetRow as e:
+ c = e.chunk
+ if (
+ c.row_key
+ or c.HasField("family_name")
+ or c.HasField("qualifier")
+ or c.timestamp_micros
+ or c.labels
+ or c.value
+ ):
+ raise InvalidChunk("reset row with data")
+ continue
+ except CrossSync.StopIteration:
+ raise InvalidChunk("premature end of stream")
+
+ @staticmethod
+ def _revise_request_rowset(
+ row_set: RowSetPB,
+ last_seen_row_key: bytes,
+ ) -> RowSetPB:
+ """
+ Revise the rows in the request to avoid ones we've already processed.
+
+ Args:
+ row_set: the row set from the request
+ last_seen_row_key: the last row key encountered
+ Returns:
+ RowSetPB: the new rowset after adjusting for the last seen key
+ Raises:
+ _RowSetComplete: if there are no rows left to process after the revision
+ """
+ # if user is doing a whole table scan, start a new one with the last seen key
+ if row_set is None or (not row_set.row_ranges and not row_set.row_keys):
+ last_seen = last_seen_row_key
+ return RowSetPB(row_ranges=[RowRangePB(start_key_open=last_seen)])
+ # remove already-seen keys from the user-specified key list
+ adjusted_keys: list[bytes] = [
+ k for k in row_set.row_keys if k > last_seen_row_key
+ ]
+ # adjust ranges to ignore keys before last seen
+ adjusted_ranges: list[RowRangePB] = []
+ for row_range in row_set.row_ranges:
+ end_key = row_range.end_key_closed or row_range.end_key_open or None
+ if end_key is None or end_key > last_seen_row_key:
+ # end range is after last seen key
+ new_range = RowRangePB(row_range)
+ start_key = row_range.start_key_closed or row_range.start_key_open
+ if start_key is None or start_key <= last_seen_row_key:
+ # replace start key with last seen
+ new_range.start_key_open = last_seen_row_key
+ adjusted_ranges.append(new_range)
+ if len(adjusted_keys) == 0 and len(adjusted_ranges) == 0:
+ # if the query is empty after revision, raise an exception
+ # this will avoid an unwanted full table scan
+ raise _RowSetComplete()
+ return RowSetPB(row_keys=adjusted_keys, row_ranges=adjusted_ranges)
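+
+
+# A small worked example (illustrative only, hypothetical keys) of the
+# revision above:
+#
+#   original = RowSetPB(row_keys=[b"a", b"m", b"z"])
+#   _ReadRowsOperationAsync._revise_request_rowset(original, b"m")
+#   # -> RowSetPB(row_keys=[b"z"]): keys at or before b"m" are dropped;
+#   #    _RowSetComplete is raised instead if nothing would remain.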
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_swappable_channel.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_swappable_channel.py
new file mode 100644
index 000000000000..bbc9a0d47ec1
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_swappable_channel.py
@@ -0,0 +1,139 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+from typing import Callable
+
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+from grpc import ChannelConnectivity
+
+if CrossSync.is_async:
+ from grpc.aio import Channel
+else:
+ from grpc import Channel
+
+__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen._swappable_channel"
+
+
+@CrossSync.convert_class(sync_name="_WrappedChannel", rm_aio=True)
+class _AsyncWrappedChannel(Channel):
+ """
+ A wrapper around a gRPC channel. All methods are passed
+ through to the underlying channel.
+ """
+
+ def __init__(self, channel: Channel):
+ self._channel = channel
+
+ def unary_unary(self, *args, **kwargs):
+ return self._channel.unary_unary(*args, **kwargs)
+
+ def unary_stream(self, *args, **kwargs):
+ return self._channel.unary_stream(*args, **kwargs)
+
+ def stream_unary(self, *args, **kwargs):
+ return self._channel.stream_unary(*args, **kwargs)
+
+ def stream_stream(self, *args, **kwargs):
+ return self._channel.stream_stream(*args, **kwargs)
+
+ async def channel_ready(self):
+ return await self._channel.channel_ready()
+
+ @CrossSync.convert(
+ sync_name="__enter__", replace_symbols={"__aenter__": "__enter__"}
+ )
+ async def __aenter__(self):
+ await self._channel.__aenter__()
+ return self
+
+ @CrossSync.convert(sync_name="__exit__", replace_symbols={"__aexit__": "__exit__"})
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+ return await self._channel.__aexit__(exc_type, exc_val, exc_tb)
+
+ def get_state(self, try_to_connect: bool = False) -> ChannelConnectivity:
+ return self._channel.get_state(try_to_connect=try_to_connect)
+
+ async def wait_for_state_change(self, last_observed_state):
+ return await self._channel.wait_for_state_change(last_observed_state)
+
+ def __getattr__(self, name):
+ return getattr(self._channel, name)
+
+ async def close(self, grace=None):
+ if CrossSync.is_async:
+ return await self._channel.close(grace=grace)
+ else:
+ # grace not supported by sync version
+ return self._channel.close()
+
+ if not CrossSync.is_async:
+ # add required sync methods
+
+ def subscribe(self, callback, try_to_connect=False):
+ return self._channel.subscribe(callback, try_to_connect)
+
+ def unsubscribe(self, callback):
+ return self._channel.unsubscribe(callback)
+
+
+@CrossSync.convert_class(
+ sync_name="SwappableChannel",
+ replace_symbols={"_AsyncWrappedChannel": "_WrappedChannel"},
+)
+class AsyncSwappableChannel(_AsyncWrappedChannel):
+ """
+ Provides a grpc channel wrapper, that allows the internal channel to be swapped out
+
+ Args:
+ - channel_fn: a nullary function that returns a new channel instance.
+ It should be a partial with all channel configuration arguments built-in
+ """
+
+ def __init__(self, channel_fn: Callable[[], Channel]):
+ self._channel_fn = channel_fn
+ self._channel = channel_fn()
+
+ def create_channel(self) -> Channel:
+ """
+ Create a fresh channel using the stored `channel_fn` partial
+ """
+ new_channel = self._channel_fn()
+ if CrossSync.is_async:
+ # copy over interceptors
+ # this is needed because of how gapic attaches the LoggingClientAIOInterceptor
+ # sync channels add interceptors by wrapping, so this step isn't needed
+ new_channel._unary_unary_interceptors = (
+ self._channel._unary_unary_interceptors
+ )
+ new_channel._unary_stream_interceptors = (
+ self._channel._unary_stream_interceptors
+ )
+ new_channel._stream_unary_interceptors = (
+ self._channel._stream_unary_interceptors
+ )
+ new_channel._stream_stream_interceptors = (
+ self._channel._stream_stream_interceptors
+ )
+ return new_channel
+
+ def swap_channel(self, new_channel: Channel) -> Channel:
+ """
+ Replace the wrapped channel with a new instance. Typically created using `create_channel`
+ """
+ old_channel = self._channel
+ self._channel = new_channel
+ return old_channel
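+
+
+# A minimal sketch (illustrative only; the emulator endpoint is hypothetical)
+# of the swap cycle used by the client's channel refresh logic:
+#
+#   from functools import partial
+#   import grpc.aio
+#
+#   swappable = AsyncSwappableChannel(partial(grpc.aio.insecure_channel, "localhost:8086"))
+#   fresh = swappable.create_channel()
+#   old = swappable.swap_channel(fresh)
+#   await old.close()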
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py
new file mode 100644
index 000000000000..1c98f56abc9e
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py
@@ -0,0 +1,1885 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from __future__ import annotations
+
+from typing import (
+ cast,
+ Any,
+ AsyncIterable,
+ Callable,
+ Optional,
+ Set,
+ Sequence,
+ TYPE_CHECKING,
+)
+
+import abc
+import time
+import warnings
+import random
+import os
+import concurrent.futures
+
+from functools import partial
+from grpc import Channel
+
+from google.cloud.bigtable.data.execute_query.values import ExecuteQueryValueType
+from google.cloud.bigtable.data.execute_query.metadata import (
+ SqlType,
+ _pb_metadata_to_metadata_types,
+)
+from google.cloud.bigtable.data.execute_query._parameters_formatting import (
+ _format_execute_query_params,
+ _to_param_types,
+)
+from google.cloud.bigtable_v2.services.bigtable.transports.base import (
+ DEFAULT_CLIENT_INFO,
+)
+from google.cloud.bigtable_v2.types.bigtable import PingAndWarmRequest
+from google.cloud.bigtable_v2.types.bigtable import SampleRowKeysRequest
+from google.cloud.bigtable_v2.types.bigtable import MutateRowRequest
+from google.cloud.bigtable_v2.types.bigtable import CheckAndMutateRowRequest
+from google.cloud.bigtable_v2.types.bigtable import ReadModifyWriteRowRequest
+from google.cloud.client import ClientWithProject
+from google.cloud.environment_vars import BIGTABLE_EMULATOR # type: ignore
+from google.api_core import retry as retries
+from google.api_core.exceptions import DeadlineExceeded
+from google.api_core.exceptions import ServiceUnavailable
+from google.api_core.exceptions import Aborted
+from google.protobuf.message import Message
+from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper
+
+import google.auth.credentials
+import google.auth._default
+from google.api_core import client_options as client_options_lib
+from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT
+from google.cloud.bigtable.data.row import Row
+from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery
+from google.cloud.bigtable.data.exceptions import FailedQueryShardError
+from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup
+
+from google.cloud.bigtable.data._helpers import TABLE_DEFAULT, _align_timeouts
+from google.cloud.bigtable.data._helpers import _WarmedInstanceKey
+from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT
+from google.cloud.bigtable.data._helpers import _retry_exception_factory
+from google.cloud.bigtable.data._helpers import _validate_timeouts
+from google.cloud.bigtable.data._helpers import _get_error_type
+from google.cloud.bigtable.data._helpers import _get_retryable_errors
+from google.cloud.bigtable.data._helpers import _get_timeouts
+from google.cloud.bigtable.data._helpers import _attempt_timeout_generator
+from google.cloud.bigtable.data.mutations import Mutation, RowMutationEntry
+
+from google.cloud.bigtable.data.read_modify_write_rules import ReadModifyWriteRule
+from google.cloud.bigtable.data.row_filters import RowFilter
+from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter
+from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter
+from google.cloud.bigtable.data.row_filters import RowFilterChain
+
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+if CrossSync.is_async:
+ from grpc.aio import insecure_channel
+ from google.cloud.bigtable_v2.services.bigtable.transports import (
+ BigtableGrpcAsyncIOTransport as TransportType,
+ )
+ from google.cloud.bigtable_v2.services.bigtable import (
+ BigtableAsyncClient as GapicClient,
+ )
+ from google.cloud.bigtable.data._async.mutations_batcher import _MB_SIZE
+ from google.cloud.bigtable.data._async._swappable_channel import (
+ AsyncSwappableChannel as SwappableChannelType,
+ )
+ from google.cloud.bigtable.data._async.metrics_interceptor import (
+ AsyncBigtableMetricsInterceptor as MetricsInterceptorType,
+ )
+else:
+ from typing import Iterable # noqa: F401
+ from grpc import insecure_channel
+ from grpc import intercept_channel
+ from google.cloud.bigtable_v2.services.bigtable.transports import BigtableGrpcTransport as TransportType # type: ignore
+ from google.cloud.bigtable_v2.services.bigtable import BigtableClient as GapicClient # type: ignore
+ from google.cloud.bigtable.data._sync_autogen.mutations_batcher import _MB_SIZE
+ from google.cloud.bigtable.data._sync_autogen._swappable_channel import ( # noqa: F401
+ SwappableChannel as SwappableChannelType,
+ )
+ from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import ( # noqa: F401
+ BigtableMetricsInterceptor as MetricsInterceptorType,
+ )
+
+if TYPE_CHECKING:
+ from google.cloud.bigtable.data._helpers import RowKeySamples
+ from google.cloud.bigtable.data._helpers import ShardedQuery
+
+ if CrossSync.is_async:
+ from google.cloud.bigtable.data._async.mutations_batcher import (
+ MutationsBatcherAsync,
+ )
+ from google.cloud.bigtable.data.execute_query._async.execute_query_iterator import (
+ ExecuteQueryIteratorAsync,
+ )
+ else:
+ from google.cloud.bigtable.data._sync_autogen.mutations_batcher import ( # noqa: F401
+ MutationsBatcher,
+ )
+ from google.cloud.bigtable.data.execute_query._sync_autogen.execute_query_iterator import ( # noqa: F401
+ ExecuteQueryIterator,
+ )
+
+
+__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen.client"
+
+
+@CrossSync.convert_class(
+ sync_name="BigtableDataClient",
+ add_mapping_for_name="DataClient",
+)
+class BigtableDataClientAsync(ClientWithProject):
+ @CrossSync.convert(
+ docstring_format_vars={
+ "LOOP_MESSAGE": (
+ "Client should be created within an async context (running event loop)",
+ None,
+ ),
+ "RAISE_NO_LOOP": (
+ "RuntimeError: if called outside of an async context (no running event loop)",
+ None,
+ ),
+ }
+ )
+ def __init__(
+ self,
+ *,
+ project: str | None = None,
+ credentials: google.auth.credentials.Credentials | None = None,
+ client_options: dict[str, Any]
+ | "google.api_core.client_options.ClientOptions"
+ | None = None,
+ **kwargs,
+ ):
+ """
+ Create a client instance for the Bigtable Data API
+
+ {LOOP_MESSAGE}
+
+ Args:
+ project: the project which the client acts on behalf of.
+ If not passed, falls back to the default inferred
+ from the environment.
+ credentials:
+ The OAuth2 Credentials to use for this
+ client. If not passed (and if no ``_http`` object is
+ passed), falls back to the default inferred from the
+ environment.
+ client_options:
+ Client options used to set user options
+ on the client. API Endpoint should be set through client_options.
+ Raises:
+ {RAISE_NO_LOOP}
+ """
+ if "pool_size" in kwargs:
+ warnings.warn("pool_size no longer supported")
+ # set up client info headers for veneer library
+ self.client_info = DEFAULT_CLIENT_INFO
+ self.client_info.client_library_version = self._client_version()
+ # parse client options
+ if type(client_options) is dict:
+ client_options = client_options_lib.from_dict(client_options)
+ client_options = cast(
+ Optional[client_options_lib.ClientOptions], client_options
+ )
+ self._emulator_host = os.getenv(BIGTABLE_EMULATOR)
+ if self._emulator_host is not None:
+ warnings.warn(
+ "Connecting to Bigtable emulator at {}".format(self._emulator_host),
+ RuntimeWarning,
+ stacklevel=2,
+ )
+ # use insecure channel if emulator is set
+ if credentials is None:
+ credentials = google.auth.credentials.AnonymousCredentials()
+ if project is None:
+ project = _DEFAULT_BIGTABLE_EMULATOR_CLIENT
+ self._metrics_interceptor = MetricsInterceptorType()
+ # initialize client
+ ClientWithProject.__init__(
+ self,
+ credentials=credentials,
+ project=project,
+ client_options=client_options,
+ )
+ self._gapic_client = GapicClient(
+ credentials=credentials,
+ client_options=client_options,
+ client_info=self.client_info,
+ transport=lambda *args, **kwargs: TransportType(
+ *args, **kwargs, channel=self._build_grpc_channel
+ ),
+ )
+ if (
+ credentials
+ and credentials.universe_domain != self.universe_domain
+ and self._emulator_host is None
+ ):
+ # validate that the universe domain of the credentials matches the
+ # universe domain configured in client_options
+ raise ValueError(
+ f"The configured universe domain ({self.universe_domain}) does "
+ "not match the universe domain found in the credentials "
+ f"({self._credentials.universe_domain}). If you haven't "
+ "configured the universe domain explicitly, `googleapis.com` "
+ "is the default."
+ )
+ self._is_closed = CrossSync.Event()
+ self.transport = cast(TransportType, self._gapic_client.transport)
+ # keep track of active instances for warming on channel refresh
+ self._active_instances: Set[_WarmedInstanceKey] = set()
+ # keep track of _DataApiTarget objects associated with each instance
+ # only remove instance from _active_instances when all associated targets are closed
+ self._instance_owners: dict[_WarmedInstanceKey, Set[int]] = {}
+ self._channel_init_time = time.monotonic()
+ self._channel_refresh_task: CrossSync.Task[None] | None = None
+ self._executor: concurrent.futures.ThreadPoolExecutor | None = (
+ concurrent.futures.ThreadPoolExecutor() if not CrossSync.is_async else None
+ )
+ if self._emulator_host is None:
+ # attempt to start background channel refresh tasks
+ try:
+ self._start_background_channel_refresh()
+ except RuntimeError:
+ warnings.warn(
+ f"{self.__class__.__name__} should be started in an "
+ "asyncio event loop. Channel refresh will not be started",
+ RuntimeWarning,
+ stacklevel=2,
+ )
+
+ def _build_grpc_channel(self, *args, **kwargs) -> SwappableChannelType:
+ """
+ This method is called by the gapic transport to create a grpc channel.
+
+ The init arguments passed down are captured in a partial used by SwappableChannel
+ to create new channel instances in the future, as part of the channel refresh logic.
+
+ Emulators always use an insecure channel.
+
+ Args:
+ - *args: positional arguments passed by the gapic layer to create a new channel with
+ - **kwargs: keyword arguments passed by the gapic layer to create a new channel with
+ Returns:
+ a custom wrapped swappable channel
+ """
+ create_channel_fn: Callable[[], Channel]
+ if self._emulator_host is not None:
+ # Emulators use insecure channels
+ create_channel_fn = partial(insecure_channel, self._emulator_host)
+ elif CrossSync.is_async:
+ # For async client, use the default create_channel.
+ create_channel_fn = partial(TransportType.create_channel, *args, **kwargs)
+ else:
+ # For sync client, wrap create_channel with interceptors.
+ def sync_create_channel_fn():
+ return intercept_channel(
+ TransportType.create_channel(*args, **kwargs),
+ self._metrics_interceptor,
+ )
+
+ create_channel_fn = sync_create_channel_fn
+
+ # Instantiate SwappableChannelType with the determined creation function.
+ new_channel = SwappableChannelType(create_channel_fn)
+ if CrossSync.is_async:
+ # Attach async interceptors to the channel instance itself.
+ new_channel._unary_unary_interceptors.append(self._metrics_interceptor)
+ new_channel._unary_stream_interceptors.append(self._metrics_interceptor)
+ return new_channel
+
+ @property
+ def universe_domain(self) -> str:
+ """Return the universe domain used by the client instance.
+
+ Returns:
+ str: The universe domain used by the client instance.
+ """
+ return self._gapic_client.universe_domain
+
+ @property
+ def api_endpoint(self) -> str:
+ """Return the API endpoint used by the client instance.
+
+ Returns:
+ str: The API endpoint used by the client instance.
+ """
+ return self._gapic_client.api_endpoint
+
+ @staticmethod
+ def _client_version() -> str:
+ """
+ Helper function to return the client version string for this client
+ """
+ version_str = f"{google.cloud.bigtable.__version__}-data"
+ if CrossSync.is_async:
+ version_str += "-async"
+ return version_str
+
+ @CrossSync.convert(
+ docstring_format_vars={
+ "RAISE_NO_LOOP": (
+ "RuntimeError: if not called in an asyncio event loop",
+ "None",
+ )
+ }
+ )
+ def _start_background_channel_refresh(self) -> None:
+ """
+ Starts a background task to ping and warm grpc channel
+
+ Raises:
+ {RAISE_NO_LOOP}
+ """
+ if (
+ not self._channel_refresh_task
+ and not self._emulator_host
+ and not self._is_closed.is_set()
+ ):
+ # raise error if not in an event loop in async client
+ CrossSync.verify_async_event_loop()
+ self._channel_refresh_task = CrossSync.create_task(
+ self._manage_channel,
+ sync_executor=self._executor,
+ task_name=f"{self.__class__.__name__} channel refresh",
+ )
+
+ @CrossSync.convert
+ async def close(self, timeout: float | None = 2.0):
+ """
+ Cancel all background tasks
+ """
+ self._is_closed.set()
+ if self._channel_refresh_task is not None:
+ self._channel_refresh_task.cancel()
+ await CrossSync.wait([self._channel_refresh_task], timeout=timeout)
+ await self.transport.close()
+ if self._executor:
+ self._executor.shutdown(wait=False)
+ self._channel_refresh_task = None
+
+ @CrossSync.convert
+ async def _ping_and_warm_instances(
+ self,
+ instance_key: _WarmedInstanceKey | None = None,
+ channel: Channel | None = None,
+ ) -> list[BaseException | None]:
+ """
+ Prepares the backend for requests on a channel
+
+ Pings each Bigtable instance registered in `_active_instances` on the client
+
+ Args:
+ instance_key: if provided, only warm the instance associated with the key
+ channel: grpc channel to warm. If none, warms `self.transport.grpc_channel`
+ Returns:
+ list[BaseException | None]: sequence of results or exceptions from the ping requests
+ """
+ channel = channel or self.transport.grpc_channel
+ instance_list = (
+ [instance_key] if instance_key is not None else self._active_instances
+ )
+ ping_rpc = channel.unary_unary(
+ "/google.bigtable.v2.Bigtable/PingAndWarm",
+ request_serializer=PingAndWarmRequest.serialize,
+ )
+ # prepare list of coroutines to run
+ partial_list = [
+ partial(
+ ping_rpc,
+ request={"name": instance_name, "app_profile_id": app_profile_id},
+ metadata=[
+ (
+ "x-goog-request-params",
+ f"name={instance_name}&app_profile_id={app_profile_id}",
+ )
+ ],
+ wait_for_ready=True,
+ )
+ for (instance_name, app_profile_id) in instance_list
+ ]
+ result_list = await CrossSync.gather_partials(
+ partial_list, return_exceptions=True, sync_executor=self._executor
+ )
+ return [r or None for r in result_list]
+
+ def _invalidate_channel_stubs(self):
+ """Helper to reset the cached stubs. Needed when changing out the grpc channel"""
+ self.transport._stubs = {}
+ self.transport._prep_wrapped_messages(self.client_info)
+
+ @CrossSync.convert
+ async def _manage_channel(
+ self,
+ refresh_interval_min: float = 60 * 35,
+ refresh_interval_max: float = 60 * 45,
+ grace_period: float = 60 * 10,
+ ) -> None:
+ """
+ Background task that periodically refreshes and warms a grpc channel
+
+ The backend will automatically close channels after 60 minutes, so
+ `refresh_interval` + `grace_period` should be < 60 minutes
+
+ Runs continuously until the client is closed
+
+ Args:
+ refresh_interval_min: minimum interval before initiating refresh
+ process in seconds. Actual interval will be a random value
+ between `refresh_interval_min` and `refresh_interval_max`
+ refresh_interval_max: maximum interval before initiating refresh
+ process in seconds. Actual interval will be a random value
+ between `refresh_interval_min` and `refresh_interval_max`
+ grace_period: time to allow previous channel to serve existing
+ requests before closing, in seconds
+ """
+ if not isinstance(self.transport.grpc_channel, SwappableChannelType):
+ warnings.warn("Channel does not support auto-refresh.")
+ return
+ super_channel: SwappableChannelType = self.transport.grpc_channel
+ first_refresh = self._channel_init_time + random.uniform(
+ refresh_interval_min, refresh_interval_max
+ )
+ next_sleep = max(first_refresh - time.monotonic(), 0)
+ if next_sleep > 0:
+ # warm the current channel immediately
+ await self._ping_and_warm_instances(channel=super_channel)
+ # continuously refresh the channel every `refresh_interval` seconds
+ while not self._is_closed.is_set():
+ await CrossSync.event_wait(
+ self._is_closed,
+ next_sleep,
+ async_break_early=False, # no need to interrupt sleep. Task will be cancelled on close
+ )
+ if self._is_closed.is_set():
+ # don't refresh if client is closed
+ break
+ start_timestamp = time.monotonic()
+ # prepare new channel for use
+ new_channel = super_channel.create_channel()
+ await self._ping_and_warm_instances(channel=new_channel)
+ # cycle channel out of use, with long grace window before closure
+ old_channel = super_channel.swap_channel(new_channel)
+ self._invalidate_channel_stubs()
+ # give old_channel a chance to complete existing rpcs
+ if CrossSync.is_async:
+ await old_channel.close(grace_period)
+ else:
+ if grace_period:
+ self._is_closed.wait(grace_period) # type: ignore
+ old_channel.close() # type: ignore
+ # subtract the time spent waiting for the channel to be replaced
+ next_refresh = random.uniform(refresh_interval_min, refresh_interval_max)
+ next_sleep = max(next_refresh - (time.monotonic() - start_timestamp), 0)
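+
+ # Example timing with the defaults above: each refresh is scheduled at a
+ # random point 35-45 minutes after the previous one, and the old channel
+ # gets up to a 10 minute grace period, keeping the total under the
+ # backend's 60 minute channel lifetime.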
+
+ @CrossSync.convert(
+ replace_symbols={
+ "TableAsync": "Table",
+ "ExecuteQueryIteratorAsync": "ExecuteQueryIterator",
+ "_DataApiTargetAsync": "_DataApiTarget",
+ }
+ )
+ async def _register_instance(
+ self,
+ instance_id: str,
+ app_profile_id: Optional[str],
+ owner_id: int,
+ ) -> None:
+ """
+ Registers an instance with the client, and warms the channel for the instance
+ The client will periodically refresh grpc channel used to make
+ requests, and new channels will be warmed for each registered instance
+ Channels will not be refreshed unless at least one instance is registered
+
+ Args:
+ instance_id: id of the instance to register.
+ app_profile_id: id of the app profile calling the instance.
+ owner_id: integer id of the object owning the instance. Owners will be tracked in
+ _instance_owners, and instances will only be unregistered when all
+ owners call _remove_instance_registration. Can be obtained by calling
+ the `id` identity function, using `id(owner)`
+ """
+ instance_name = self._gapic_client.instance_path(self.project, instance_id)
+ instance_key = _WarmedInstanceKey(instance_name, app_profile_id)
+ self._instance_owners.setdefault(instance_key, set()).add(owner_id)
+ if instance_key not in self._active_instances:
+ self._active_instances.add(instance_key)
+ if self._channel_refresh_task:
+ # refresh tasks already running
+ # call ping and warm on all existing channels
+ await self._ping_and_warm_instances(instance_key)
+ else:
+ # refresh tasks aren't active. start them as background tasks
+ self._start_background_channel_refresh()
+
+ @CrossSync.convert(
+ replace_symbols={
+ "TableAsync": "Table",
+ "ExecuteQueryIteratorAsync": "ExecuteQueryIterator",
+ "_DataApiTargetAsync": "_DataApiTarget",
+ }
+ )
+ def _remove_instance_registration(
+ self,
+ instance_id: str,
+ app_profile_id: Optional[str],
+ owner_id: int,
+ ) -> bool:
+ """
+ Removes an instance from the client's registered instances, to prevent
+ warming new channels for the instance
+
+ If instance_id is not registered, or is still in use by other tables, returns False
+
+ Args:
+ instance_id: id of the instance to remove
+ app_profile_id: id of the app profile calling the instance.
+ owner_id: integer id of the object owning the instance. Can be
+ obtained by calling the `id` identity function, using `id(owner)`.
+ Returns:
+ bool: True if instance was removed, else False
+ """
+ instance_name = self._gapic_client.instance_path(self.project, instance_id)
+ instance_key = _WarmedInstanceKey(instance_name, app_profile_id)
+ owner_list = self._instance_owners.get(instance_key, set())
+ try:
+ owner_list.remove(owner_id)
+ if len(owner_list) == 0:
+ self._active_instances.remove(instance_key)
+ return True
+ except KeyError:
+ return False
+
+ @CrossSync.convert(
+ replace_symbols={"TableAsync": "Table"},
+ docstring_format_vars={
+ "LOOP_MESSAGE": (
+ "Must be created within an async context (running event loop)",
+ "",
+ ),
+ "RAISE_NO_LOOP": (
+ "RuntimeError: if called outside of an async context (no running event loop)",
+ "None",
+ ),
+ },
+ )
+ def get_table(self, instance_id: str, table_id: str, *args, **kwargs) -> TableAsync:
+ """
+ Returns a table instance for making data API requests. All arguments are passed
+ directly to the TableAsync constructor.
+
+ {LOOP_MESSAGE}
+
+ Args:
+ instance_id: The Bigtable instance ID to associate with this client.
+ instance_id is combined with the client's project to fully
+ specify the instance
+ table_id: The ID of the table. table_id is combined with the
+ instance_id and the client's project to fully specify the table
+ app_profile_id: The app profile to associate with requests.
+ https://cloud.google.com/bigtable/docs/app-profiles
+ default_read_rows_operation_timeout: The default timeout for read rows
+ operations, in seconds. If not set, defaults to 600 seconds (10 minutes)
+ default_read_rows_attempt_timeout: The default timeout for individual
+ read rows rpc requests, in seconds. If not set, defaults to 20 seconds
+ default_mutate_rows_operation_timeout: The default timeout for mutate rows
+ operations, in seconds. If not set, defaults to 600 seconds (10 minutes)
+ default_mutate_rows_attempt_timeout: The default timeout for individual
+ mutate rows rpc requests, in seconds. If not set, defaults to 60 seconds
+ default_operation_timeout: The default timeout for all other operations, in
+ seconds. If not set, defaults to 60 seconds
+ default_attempt_timeout: The default timeout for all other individual rpc
+ requests, in seconds. If not set, defaults to 20 seconds
+ default_read_rows_retryable_errors: a list of errors that will be retried
+ if encountered during read_rows and related operations.
+ Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted)
+ default_mutate_rows_retryable_errors: a list of errors that will be retried
+ if encountered during mutate_rows and related operations.
+ Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable)
+ default_retryable_errors: a list of errors that will be retried if
+ encountered during all other operations.
+ Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable)
+ Returns:
+ TableAsync: a table instance for making data API requests
+ Raises:
+ {RAISE_NO_LOOP}
+ """
+ return TableAsync(self, instance_id, table_id, *args, **kwargs)
+
+ @CrossSync.convert(
+ replace_symbols={"AuthorizedViewAsync": "AuthorizedView"},
+ docstring_format_vars={
+ "LOOP_MESSAGE": (
+ "Must be created within an async context (running event loop)",
+ "",
+ ),
+ "RAISE_NO_LOOP": (
+ "RuntimeError: if called outside of an async context (no running event loop)",
+ "None",
+ ),
+ },
+ )
+ def get_authorized_view(
+ self, instance_id: str, table_id: str, authorized_view_id: str, *args, **kwargs
+ ) -> AuthorizedViewAsync:
+ """
+ Returns an authorized view instance for making data API requests. All arguments are passed
+ directly to the AuthorizedViewAsync constructor.
+
+ {LOOP_MESSAGE}
+
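+        Example (illustrative sketch; the ids shown are placeholders)::
+
+            async with BigtableDataClientAsync(project="my-project") as client:
+                view = client.get_authorized_view("my-instance", "my-table", "my-view")
+                row = await view.read_row(b"row-key")
+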
+ Args:
+ instance_id: The Bigtable instance ID to associate with this client.
+ instance_id is combined with the client's project to fully
+ specify the instance
+ table_id: The ID of the table. table_id is combined with the
+ instance_id and the client's project to fully specify the table
+ authorized_view_id: The id for the authorized view to use for requests
+ app_profile_id: The app profile to associate with requests.
+ https://cloud.google.com/bigtable/docs/app-profiles
+ default_read_rows_operation_timeout: The default timeout for read rows
+ operations, in seconds. If not set, defaults to Table's value
+ default_read_rows_attempt_timeout: The default timeout for individual
+                read rows rpc requests, in seconds. If not set, defaults to Table's value
+ default_mutate_rows_operation_timeout: The default timeout for mutate rows
+ operations, in seconds. If not set, defaults to Table's value
+ default_mutate_rows_attempt_timeout: The default timeout for individual
+                mutate rows rpc requests, in seconds. If not set, defaults to Table's value
+ default_operation_timeout: The default timeout for all other operations, in
+ seconds. If not set, defaults to Table's value
+ default_attempt_timeout: The default timeout for all other individual rpc
+ requests, in seconds. If not set, defaults to Table's value
+ default_read_rows_retryable_errors: a list of errors that will be retried
+ if encountered during read_rows and related operations. If not set,
+ defaults to Table's value
+ default_mutate_rows_retryable_errors: a list of errors that will be retried
+ if encountered during mutate_rows and related operations. If not set,
+ defaults to Table's value
+ default_retryable_errors: a list of errors that will be retried if
+ encountered during all other operations. If not set, defaults to
+ Table's value
+ Returns:
+            AuthorizedViewAsync: an authorized view instance for making data API requests
+ Raises:
+ {RAISE_NO_LOOP}
+ """
+ return CrossSync.AuthorizedView(
+ self,
+ instance_id,
+ table_id,
+ authorized_view_id,
+ *args,
+ **kwargs,
+ )
+
+ @CrossSync.convert(
+ replace_symbols={"ExecuteQueryIteratorAsync": "ExecuteQueryIterator"}
+ )
+ async def execute_query(
+ self,
+ query: str,
+ instance_id: str,
+ *,
+ parameters: dict[str, ExecuteQueryValueType] | None = None,
+ parameter_types: dict[str, SqlType.Type] | None = None,
+ app_profile_id: str | None = None,
+ operation_timeout: float = 600,
+ attempt_timeout: float | None = 20,
+ retryable_errors: Sequence[type[Exception]] = (
+ DeadlineExceeded,
+ ServiceUnavailable,
+ Aborted,
+ ),
+ prepare_operation_timeout: float = 60,
+ prepare_attempt_timeout: float | None = 20,
+ prepare_retryable_errors: Sequence[type[Exception]] = (
+ DeadlineExceeded,
+ ServiceUnavailable,
+ ),
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+ ) -> "ExecuteQueryIteratorAsync":
+ """
+ Executes an SQL query on an instance.
+ Returns an iterator to asynchronously stream back columns from selected rows.
+
+ Failed requests within operation_timeout will be retried based on the
+ retryable_errors list until operation_timeout is reached.
+
+ Note that this makes two requests, one to ``PrepareQuery`` and one to ``ExecuteQuery``.
+ These have separate retry configurations. ``ExecuteQuery`` is where the bulk of the
+ work happens.
+
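+        Example (illustrative sketch; ``my-instance`` and ``my_table`` are placeholders)::
+
+            result = await client.execute_query(
+                "SELECT _key FROM my_table WHERE _key = @row_key",
+                "my-instance",
+                parameters={"row_key": b"some-row"},
+            )
+            async for row in result:
+                print(row["_key"])
+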
+ Args:
+ query: Query to be run on Bigtable instance. The query can use ``@param``
+ placeholders to use parameter interpolation on the server. Values for all
+ parameters should be provided in ``parameters``. Types of parameters are
+ inferred but should be provided in ``parameter_types`` if the inference is
+                not possible (i.e. when the value can be None, an empty list, or an empty dict).
+ instance_id: The Bigtable instance ID to perform the query on.
+ instance_id is combined with the client's project to fully
+ specify the instance.
+ parameters: Dictionary with values for all parameters used in the ``query``.
+ parameter_types: Dictionary with types of parameters used in the ``query``.
+ Required to contain entries only for parameters whose type cannot be
+ detected automatically (i.e. the value can be None, an empty list or
+ an empty dict).
+ app_profile_id: The app profile to associate with requests.
+ https://cloud.google.com/bigtable/docs/app-profiles
+ operation_timeout: the time budget for the entire executeQuery operation, in seconds.
+ Failed requests will be retried within the budget.
+ Defaults to 600 seconds.
+ attempt_timeout: the time budget for an individual executeQuery network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to 20 seconds.
+ If None, defaults to operation_timeout.
+ retryable_errors: a list of errors that will be retried if encountered during executeQuery.
+ Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted)
+ prepare_operation_timeout: the time budget for the entire prepareQuery operation, in seconds.
+ Failed requests will be retried within the budget.
+ Defaults to 60 seconds.
+ prepare_attempt_timeout: the time budget for an individual prepareQuery network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to 20 seconds.
+ If None, defaults to prepare_operation_timeout.
+ prepare_retryable_errors: a list of errors that will be retried if encountered during prepareQuery.
+ Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable)
+ column_info: (Optional) A dictionary mapping column names to Protobuf message classes or EnumTypeWrapper objects.
+ This dictionary provides the necessary type information for deserializing PROTO and
+ ENUM column values from the query results. When an entry is provided
+ for a PROTO or ENUM column, the client library will attempt to deserialize the raw data.
+
+ - For PROTO columns: The value in the dictionary should be the
+ Protobuf Message class (e.g., ``my_pb2.MyMessage``).
+ - For ENUM columns: The value should be the Protobuf EnumTypeWrapper
+ object (e.g., ``my_pb2.MyEnum``).
+
+ Example::
+
+ import my_pb2
+
+ column_info = {
+ "my_proto_column": my_pb2.MyMessage,
+ "my_enum_column": my_pb2.MyEnum
+ }
+
+ If ``column_info`` is not provided, or if a specific column name is not found
+ in the dictionary:
+
+ - PROTO columns will be returned as raw bytes.
+ - ENUM columns will be returned as integers.
+
+ Note for Nested PROTO or ENUM Fields:
+
+ To specify types for PROTO or ENUM fields within STRUCTs or MAPs, use a dot-separated
+ path from the top-level column name.
+
+ - For STRUCTs: ``struct_column_name.field_name``
+ - For MAPs: ``map_column_name.key`` or ``map_column_name.value`` to specify types
+ for the map keys or values, respectively.
+
+ Example::
+
+ import my_pb2
+
+ column_info = {
+ # Top-level column
+ "my_proto_column": my_pb2.MyMessage,
+ "my_enum_column": my_pb2.MyEnum,
+
+ # Nested field in a STRUCT column named 'my_struct'
+ "my_struct.nested_proto_field": my_pb2.OtherMessage,
+ "my_struct.nested_enum_field": my_pb2.AnotherEnum,
+
+ # Nested field in a MAP column named 'my_map'
+ "my_map.key": my_pb2.MapKeyEnum, # If map keys were enums
+ "my_map.value": my_pb2.MapValueMessage,
+
+ # PROTO field inside a STRUCT, where the STRUCT is the value in a MAP column
+ "struct_map.value.nested_proto_field": my_pb2.DeeplyNestedProto,
+ "struct_map.value.nested_enum_field": my_pb2.DeeplyNestedEnum
+ }
+
+ Returns:
+ ExecuteQueryIteratorAsync: an asynchronous iterator that yields rows returned by the query
+ Raises:
+ google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+ will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+ from any retries that failed
+ google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+ google.cloud.bigtable.data.exceptions.ParameterTypeInferenceFailed: Raised if
+                a parameter is passed without an explicit type, and the type cannot be inferred
+ google.protobuf.message.DecodeError: raised if the deserialization of a PROTO/ENUM value fails.
+ """
+ instance_name = self._gapic_client.instance_path(self.project, instance_id)
+ converted_param_types = _to_param_types(parameters, parameter_types)
+ prepare_request = {
+ "instance_name": instance_name,
+ "query": query,
+ "app_profile_id": app_profile_id,
+ "param_types": converted_param_types,
+ "proto_format": {},
+ }
+ prepare_predicate = retries.if_exception_type(
+ *[_get_error_type(e) for e in prepare_retryable_errors]
+ )
+ prepare_operation_timeout, prepare_attempt_timeout = _align_timeouts(
+ prepare_operation_timeout, prepare_attempt_timeout
+ )
+ prepare_sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60)
+
+ target = partial(
+ self._gapic_client.prepare_query,
+ request=prepare_request,
+ timeout=prepare_attempt_timeout,
+ retry=None,
+ )
+ prepare_result = await CrossSync.retry_target(
+ target,
+ prepare_predicate,
+ prepare_sleep_generator,
+ prepare_operation_timeout,
+ exception_factory=_retry_exception_factory,
+ )
+
+ prepare_metadata = _pb_metadata_to_metadata_types(prepare_result.metadata)
+
+ retryable_excs = [_get_error_type(e) for e in retryable_errors]
+
+ pb_params = _format_execute_query_params(parameters, parameter_types)
+
+ request_body = {
+ "instance_name": instance_name,
+ "app_profile_id": app_profile_id,
+ "prepared_query": prepare_result.prepared_query,
+ "params": pb_params,
+ }
+ operation_timeout, attempt_timeout = _align_timeouts(
+ operation_timeout, attempt_timeout
+ )
+
+ return CrossSync.ExecuteQueryIterator(
+ self,
+ instance_id,
+ app_profile_id,
+ request_body,
+ prepare_metadata,
+ attempt_timeout,
+ operation_timeout,
+ retryable_excs=retryable_excs,
+ column_info=column_info,
+ )
+
+ @CrossSync.convert(sync_name="__enter__")
+ async def __aenter__(self):
+ self._start_background_channel_refresh()
+ return self
+
+ @CrossSync.convert(sync_name="__exit__", replace_symbols={"__aexit__": "__exit__"})
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+ await self.close()
+ await self._gapic_client.__aexit__(exc_type, exc_val, exc_tb)
+
+
+@CrossSync.convert_class(sync_name="_DataApiTarget")
+class _DataApiTargetAsync(abc.ABC):
+ """
+ Abstract class containing API surface for BigtableDataClient. Should not be created directly
+
+    Use one of its concrete subclasses instead: Table or AuthorizedView
+ """
+
+ @CrossSync.convert(
+ replace_symbols={"BigtableDataClientAsync": "BigtableDataClient"},
+ docstring_format_vars={
+ "LOOP_MESSAGE": (
+ "Must be created within an async context (running event loop)",
+ "",
+ ),
+ "RAISE_NO_LOOP": (
+ "RuntimeError: if called outside of an async context (no running event loop)",
+ "None",
+ ),
+ },
+ )
+ def __init__(
+ self,
+ client: BigtableDataClientAsync,
+ instance_id: str,
+ table_id: str,
+ app_profile_id: str | None = None,
+ *,
+ default_read_rows_operation_timeout: float = 600,
+ default_read_rows_attempt_timeout: float | None = 20,
+ default_mutate_rows_operation_timeout: float = 600,
+ default_mutate_rows_attempt_timeout: float | None = 60,
+ default_operation_timeout: float = 60,
+ default_attempt_timeout: float | None = 20,
+ default_read_rows_retryable_errors: Sequence[type[Exception]] = (
+ DeadlineExceeded,
+ ServiceUnavailable,
+ Aborted,
+ ),
+ default_mutate_rows_retryable_errors: Sequence[type[Exception]] = (
+ DeadlineExceeded,
+ ServiceUnavailable,
+ ),
+ default_retryable_errors: Sequence[type[Exception]] = (
+ DeadlineExceeded,
+ ServiceUnavailable,
+ ),
+ ):
+ """
+ Initialize a Table instance
+
+ {LOOP_MESSAGE}
+
+ Args:
+ instance_id: The Bigtable instance ID to associate with this client.
+ instance_id is combined with the client's project to fully
+ specify the instance
+ table_id: The ID of the table. table_id is combined with the
+ instance_id and the client's project to fully specify the table
+ app_profile_id: The app profile to associate with requests.
+ https://cloud.google.com/bigtable/docs/app-profiles
+ default_read_rows_operation_timeout: The default timeout for read rows
+ operations, in seconds. If not set, defaults to 600 seconds (10 minutes)
+ default_read_rows_attempt_timeout: The default timeout for individual
+ read rows rpc requests, in seconds. If not set, defaults to 20 seconds
+ default_mutate_rows_operation_timeout: The default timeout for mutate rows
+ operations, in seconds. If not set, defaults to 600 seconds (10 minutes)
+ default_mutate_rows_attempt_timeout: The default timeout for individual
+ mutate rows rpc requests, in seconds. If not set, defaults to 60 seconds
+ default_operation_timeout: The default timeout for all other operations, in
+ seconds. If not set, defaults to 60 seconds
+ default_attempt_timeout: The default timeout for all other individual rpc
+ requests, in seconds. If not set, defaults to 20 seconds
+ default_read_rows_retryable_errors: a list of errors that will be retried
+ if encountered during read_rows and related operations.
+ Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted)
+ default_mutate_rows_retryable_errors: a list of errors that will be retried
+ if encountered during mutate_rows and related operations.
+ Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable)
+ default_retryable_errors: a list of errors that will be retried if
+ encountered during all other operations.
+ Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable)
+ Raises:
+ {RAISE_NO_LOOP}
+ """
+ # NOTE: any changes to the signature of this method should also be reflected
+ # in client.get_table()
+ # validate timeouts
+ _validate_timeouts(
+ default_operation_timeout, default_attempt_timeout, allow_none=True
+ )
+ _validate_timeouts(
+ default_read_rows_operation_timeout,
+ default_read_rows_attempt_timeout,
+ allow_none=True,
+ )
+ _validate_timeouts(
+ default_mutate_rows_operation_timeout,
+ default_mutate_rows_attempt_timeout,
+ allow_none=True,
+ )
+
+ self.client = client
+ self.instance_id = instance_id
+ self.instance_name = self.client._gapic_client.instance_path(
+ self.client.project, instance_id
+ )
+ self.table_id = table_id
+ self.table_name = self.client._gapic_client.table_path(
+ self.client.project, instance_id, table_id
+ )
+ self.app_profile_id: str | None = app_profile_id
+
+ self.default_operation_timeout: float = default_operation_timeout
+ self.default_attempt_timeout: float | None = default_attempt_timeout
+ self.default_read_rows_operation_timeout: float = (
+ default_read_rows_operation_timeout
+ )
+ self.default_read_rows_attempt_timeout: float | None = (
+ default_read_rows_attempt_timeout
+ )
+ self.default_mutate_rows_operation_timeout: float = (
+ default_mutate_rows_operation_timeout
+ )
+ self.default_mutate_rows_attempt_timeout: float | None = (
+ default_mutate_rows_attempt_timeout
+ )
+
+ self.default_read_rows_retryable_errors: Sequence[type[Exception]] = (
+ default_read_rows_retryable_errors or ()
+ )
+ self.default_mutate_rows_retryable_errors: Sequence[type[Exception]] = (
+ default_mutate_rows_retryable_errors or ()
+ )
+ self.default_retryable_errors: Sequence[type[Exception]] = (
+ default_retryable_errors or ()
+ )
+
+ try:
+ self._register_instance_future = CrossSync.create_task(
+ self.client._register_instance,
+ self.instance_id,
+ self.app_profile_id,
+ id(self),
+ sync_executor=self.client._executor,
+ )
+ except RuntimeError as e:
+ raise RuntimeError(
+ f"{self.__class__.__name__} must be created within an async event loop context."
+ ) from e
+
+ @property
+ @abc.abstractmethod
+ def _request_path(self) -> dict[str, str]:
+ """
+ Used to populate table_name or authorized_view_name for rpc requests, depending on the subclass
+
+ Unimplemented in base class
+ """
+ raise NotImplementedError
+
+ def __str__(self):
+ path_str = list(self._request_path.values())[0] if self._request_path else ""
+ return f"{self.__class__.__name__}<{path_str!r}>"
+
+ @CrossSync.convert(replace_symbols={"AsyncIterable": "Iterable"})
+ async def read_rows_stream(
+ self,
+ query: ReadRowsQuery,
+ *,
+ operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ retryable_errors: Sequence[type[Exception]]
+ | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ ) -> AsyncIterable[Row]:
+ """
+ Read a set of rows from the table, based on the specified query.
+ Returns an iterator to asynchronously stream back row data.
+
+ Failed requests within operation_timeout will be retried based on the
+ retryable_errors list until operation_timeout is reached.
+
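+        Example (sketch; assumes ``table`` is an open TableAsync and ``ReadRowsQuery`` is imported from google.cloud.bigtable.data)::
+
+            query = ReadRowsQuery(limit=10)
+            async for row in await table.read_rows_stream(query):
+                print(row.row_key)
+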
+ Args:
+ query: contains details about which rows to return
+ operation_timeout: the time budget for the entire operation, in seconds.
+ Failed requests will be retried within the budget.
+ Defaults to the Table's default_read_rows_operation_timeout
+ attempt_timeout: the time budget for an individual network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+ Defaults to the Table's default_read_rows_attempt_timeout.
+ If None, defaults to operation_timeout.
+ retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_read_rows_retryable_errors
+ Returns:
+ AsyncIterable[Row]: an asynchronous iterator that yields rows returned by the query
+ Raises:
+ google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+ will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+ from any retries that failed
+ google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+ """
+ operation_timeout, attempt_timeout = _get_timeouts(
+ operation_timeout, attempt_timeout, self
+ )
+ retryable_excs = _get_retryable_errors(retryable_errors, self)
+
+ row_merger = CrossSync._ReadRowsOperation(
+ query,
+ self,
+ operation_timeout=operation_timeout,
+ attempt_timeout=attempt_timeout,
+ retryable_exceptions=retryable_excs,
+ )
+ return row_merger.start_operation()
+
+ @CrossSync.convert
+ async def read_rows(
+ self,
+ query: ReadRowsQuery,
+ *,
+ operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ retryable_errors: Sequence[type[Exception]]
+ | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ ) -> list[Row]:
+ """
+ Read a set of rows from the table, based on the specified query.
+        Returns results as a list of Row objects when the request is complete.
+ For streamed results, use read_rows_stream.
+
+ Failed requests within operation_timeout will be retried based on the
+ retryable_errors list until operation_timeout is reached.
+
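+        Example (sketch; assumes ``table`` is an open TableAsync and ``ReadRowsQuery`` is imported from google.cloud.bigtable.data)::
+
+            query = ReadRowsQuery(row_keys=[b"key-1", b"key-2"])
+            rows = await table.read_rows(query)
+            for row in rows:
+                print(row.row_key)
+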
+ Args:
+ query: contains details about which rows to return
+ operation_timeout: the time budget for the entire operation, in seconds.
+ Failed requests will be retried within the budget.
+ Defaults to the Table's default_read_rows_operation_timeout
+ attempt_timeout: the time budget for an individual network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to the Table's default_read_rows_attempt_timeout.
+                If None, defaults to operation_timeout.
+ retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_read_rows_retryable_errors.
+ Returns:
+ list[Row]: a list of Rows returned by the query
+ Raises:
+ google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+ will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+ from any retries that failed
+ google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+ """
+ row_generator = await self.read_rows_stream(
+ query,
+ operation_timeout=operation_timeout,
+ attempt_timeout=attempt_timeout,
+ retryable_errors=retryable_errors,
+ )
+ return [row async for row in row_generator]
+
+ @CrossSync.convert
+ async def read_row(
+ self,
+ row_key: str | bytes,
+ *,
+ row_filter: RowFilter | None = None,
+ operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ retryable_errors: Sequence[type[Exception]]
+ | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ ) -> Row | None:
+ """
+ Read a single row from the table, based on the specified key.
+
+ Failed requests within operation_timeout will be retried based on the
+ retryable_errors list until operation_timeout is reached.
+
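+        Example (sketch; assumes ``table`` is an open TableAsync instance)::
+
+            row = await table.read_row(b"row-key")
+            if row is not None:
+                for cell in row:
+                    print(cell.value)
+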
+ Args:
+            row_key: the key of the row to read
+            row_filter: an optional filter to apply to the contents of the row
+ operation_timeout: the time budget for the entire operation, in seconds.
+ Failed requests will be retried within the budget.
+ Defaults to the Table's default_read_rows_operation_timeout
+ attempt_timeout: the time budget for an individual network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+ Defaults to the Table's default_read_rows_attempt_timeout.
+ If None, defaults to operation_timeout.
+ retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_read_rows_retryable_errors.
+ Returns:
+ Row | None: a Row object if the row exists, otherwise None
+ Raises:
+ google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+ will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+ from any retries that failed
+ google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+ """
+ if row_key is None:
+ raise ValueError("row_key must be string or bytes")
+ query = ReadRowsQuery(row_keys=row_key, row_filter=row_filter, limit=1)
+ results = await self.read_rows(
+ query,
+ operation_timeout=operation_timeout,
+ attempt_timeout=attempt_timeout,
+ retryable_errors=retryable_errors,
+ )
+ if len(results) == 0:
+ return None
+ return results[0]
+
+ @CrossSync.convert
+ async def read_rows_sharded(
+ self,
+ sharded_query: ShardedQuery,
+ *,
+ operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ retryable_errors: Sequence[type[Exception]]
+ | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ ) -> list[Row]:
+ """
+        Runs a sharded query in parallel, then returns the results in a single list.
+ Results will be returned in the order of the input queries.
+
+        This function is intended to be run on the results of a query.shard() call.
+ For example::
+
+ table_shard_keys = await table.sample_row_keys()
+ query = ReadRowsQuery(...)
+ shard_queries = query.shard(table_shard_keys)
+ results = await table.read_rows_sharded(shard_queries)
+
+ Args:
+ sharded_query: a sharded query to execute
+ operation_timeout: the time budget for the entire operation, in seconds.
+ Failed requests will be retried within the budget.
+ Defaults to the Table's default_read_rows_operation_timeout
+ attempt_timeout: the time budget for an individual network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+ Defaults to the Table's default_read_rows_attempt_timeout.
+ If None, defaults to operation_timeout.
+ retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_read_rows_retryable_errors.
+ Returns:
+ list[Row]: a list of Rows returned by the query
+ Raises:
+ ShardedReadRowsExceptionGroup: if any of the queries failed
+ ValueError: if the query_list is empty
+ """
+ if not sharded_query:
+ raise ValueError("empty sharded_query")
+ operation_timeout, attempt_timeout = _get_timeouts(
+ operation_timeout, attempt_timeout, self
+ )
+ # make sure each rpc stays within overall operation timeout
+ rpc_timeout_generator = _attempt_timeout_generator(
+ operation_timeout, operation_timeout
+ )
+
+ # limit the number of concurrent requests using a semaphore
+ concurrency_sem = CrossSync.Semaphore(_CONCURRENCY_LIMIT)
+
+ @CrossSync.convert
+ async def read_rows_with_semaphore(query):
+ async with concurrency_sem:
+ # calculate new timeout based on time left in overall operation
+ shard_timeout = next(rpc_timeout_generator)
+ if shard_timeout <= 0:
+ raise DeadlineExceeded(
+ "Operation timeout exceeded before starting query"
+ )
+ return await self.read_rows(
+ query,
+ operation_timeout=shard_timeout,
+ attempt_timeout=min(attempt_timeout, shard_timeout),
+ retryable_errors=retryable_errors,
+ )
+
+ routine_list = [
+ partial(read_rows_with_semaphore, query) for query in sharded_query
+ ]
+ batch_result = await CrossSync.gather_partials(
+ routine_list,
+ return_exceptions=True,
+ sync_executor=self.client._executor,
+ )
+
+ # collect results and errors
+ error_dict = {}
+ shard_idx = 0
+ results_list = []
+ for result in batch_result:
+ if isinstance(result, Exception):
+ error_dict[shard_idx] = result
+ elif isinstance(result, BaseException):
+ # BaseException not expected; raise immediately
+ raise result
+ else:
+ results_list.extend(result)
+ shard_idx += 1
+ if error_dict:
+ # if any sub-request failed, raise an exception instead of returning results
+ raise ShardedReadRowsExceptionGroup(
+ [
+ FailedQueryShardError(idx, sharded_query[idx], e)
+ for idx, e in error_dict.items()
+ ],
+ results_list,
+ len(sharded_query),
+ )
+ return results_list
+
+ @CrossSync.convert
+ async def row_exists(
+ self,
+ row_key: str | bytes,
+ *,
+ operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ retryable_errors: Sequence[type[Exception]]
+ | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ ) -> bool:
+ """
+ Return a boolean indicating whether the specified row exists in the table.
+        Uses the filters: chain(limit cells per row = 1, strip value).
+
+ Args:
+ row_key: the key of the row to check
+ operation_timeout: the time budget for the entire operation, in seconds.
+ Failed requests will be retried within the budget.
+ Defaults to the Table's default_read_rows_operation_timeout
+ attempt_timeout: the time budget for an individual network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+ Defaults to the Table's default_read_rows_attempt_timeout.
+ If None, defaults to operation_timeout.
+ retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_read_rows_retryable_errors.
+ Returns:
+ bool: a bool indicating whether the row exists
+ Raises:
+ google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+ will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+ from any retries that failed
+ google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+ """
+ if row_key is None:
+ raise ValueError("row_key must be string or bytes")
+
+ strip_filter = StripValueTransformerFilter(flag=True)
+ limit_filter = CellsRowLimitFilter(1)
+ chain_filter = RowFilterChain(filters=[limit_filter, strip_filter])
+ query = ReadRowsQuery(row_keys=row_key, limit=1, row_filter=chain_filter)
+ results = await self.read_rows(
+ query,
+ operation_timeout=operation_timeout,
+ attempt_timeout=attempt_timeout,
+ retryable_errors=retryable_errors,
+ )
+ return len(results) > 0
+
+ @CrossSync.convert
+ async def sample_row_keys(
+ self,
+ *,
+ operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+ attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+ retryable_errors: Sequence[type[Exception]]
+ | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+ ) -> RowKeySamples:
+ """
+ Return a set of RowKeySamples that delimit contiguous sections of the table of
+ approximately equal size
+
+        RowKeySamples output can be used with ReadRowsQuery.shard() to create a sharded query
+        that can be parallelized across multiple backend nodes. read_rows and read_rows_stream
+        requests will call sample_row_keys internally for this purpose when sharding is enabled.
+
+ RowKeySamples is simply a type alias for list[tuple[bytes, int]]; a list of
+ row_keys, along with offset positions in the table
+
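+        Example (sketch; assumes ``table`` is an open TableAsync instance)::
+
+            samples = await table.sample_row_keys()
+            for row_key, offset_bytes in samples:
+                print(row_key, offset_bytes)
+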
+ Args:
+ operation_timeout: the time budget for the entire operation, in seconds.
+                Failed requests will be retried within the budget.
+ Defaults to the Table's default_operation_timeout
+ attempt_timeout: the time budget for an individual network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+ Defaults to the Table's default_attempt_timeout.
+ If None, defaults to operation_timeout.
+ retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_retryable_errors.
+ Returns:
+            RowKeySamples: a set of RowKeySamples that delimit contiguous sections of the table
+ Raises:
+ google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+ will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+ from any retries that failed
+ google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+ """
+ # prepare timeouts
+ operation_timeout, attempt_timeout = _get_timeouts(
+ operation_timeout, attempt_timeout, self
+ )
+ attempt_timeout_gen = _attempt_timeout_generator(
+ attempt_timeout, operation_timeout
+ )
+ # prepare retryable
+ retryable_excs = _get_retryable_errors(retryable_errors, self)
+ predicate = retries.if_exception_type(*retryable_excs)
+
+ sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60)
+
+ @CrossSync.convert
+ async def execute_rpc():
+ results = await self.client._gapic_client.sample_row_keys(
+ request=SampleRowKeysRequest(
+ app_profile_id=self.app_profile_id, **self._request_path
+ ),
+ timeout=next(attempt_timeout_gen),
+ retry=None,
+ )
+ return [(s.row_key, s.offset_bytes) async for s in results]
+
+ return await CrossSync.retry_target(
+ execute_rpc,
+ predicate,
+ sleep_generator,
+ operation_timeout,
+ exception_factory=_retry_exception_factory,
+ )
+
+ @CrossSync.convert(replace_symbols={"MutationsBatcherAsync": "MutationsBatcher"})
+ def mutations_batcher(
+ self,
+ *,
+ flush_interval: float | None = 5,
+ flush_limit_mutation_count: int | None = 1000,
+ flush_limit_bytes: int = 20 * _MB_SIZE,
+ flow_control_max_mutation_count: int = 100_000,
+ flow_control_max_bytes: int = 100 * _MB_SIZE,
+ batch_operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+ batch_attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+ batch_retryable_errors: Sequence[type[Exception]]
+ | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+ ) -> "MutationsBatcherAsync":
+ """
+ Returns a new mutations batcher instance.
+
+ Can be used to iteratively add mutations that are flushed as a group,
+ to avoid excess network calls
+
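+        Example (sketch; assumes ``RowMutationEntry`` and ``SetCell`` are imported from google.cloud.bigtable.data.mutations)::
+
+            async with table.mutations_batcher() as batcher:
+                await batcher.append(
+                    RowMutationEntry(b"row-key", [SetCell("family", b"qualifier", b"value")])
+                )
+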
+ Args:
+ flush_interval: Automatically flush every flush_interval seconds. If None,
+ a table default will be used
+ flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count
+ mutations are added across all entries. If None, this limit is ignored.
+ flush_limit_bytes: Flush immediately after flush_limit_bytes bytes are added.
+ flow_control_max_mutation_count: Maximum number of inflight mutations.
+ flow_control_max_bytes: Maximum number of inflight bytes.
+ batch_operation_timeout: timeout for each mutate_rows operation, in seconds.
+ Defaults to the Table's default_mutate_rows_operation_timeout
+ batch_attempt_timeout: timeout for each individual request, in seconds.
+ Defaults to the Table's default_mutate_rows_attempt_timeout.
+ If None, defaults to batch_operation_timeout.
+ batch_retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_mutate_rows_retryable_errors.
+ Returns:
+ MutationsBatcherAsync: a MutationsBatcherAsync context manager that can batch requests
+ """
+ return CrossSync.MutationsBatcher(
+ self,
+ flush_interval=flush_interval,
+ flush_limit_mutation_count=flush_limit_mutation_count,
+ flush_limit_bytes=flush_limit_bytes,
+ flow_control_max_mutation_count=flow_control_max_mutation_count,
+ flow_control_max_bytes=flow_control_max_bytes,
+ batch_operation_timeout=batch_operation_timeout,
+ batch_attempt_timeout=batch_attempt_timeout,
+ batch_retryable_errors=batch_retryable_errors,
+ )
+
+ @CrossSync.convert
+ async def mutate_row(
+ self,
+ row_key: str | bytes,
+ mutations: list[Mutation] | Mutation,
+ *,
+ operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+ attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+ retryable_errors: Sequence[type[Exception]]
+ | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+ ):
+ """
+ Mutates a row atomically.
+
+ Cells already present in the row are left unchanged unless explicitly changed
+ by ``mutation``.
+
+        Idempotent operations (i.e., all mutations have an explicit timestamp) will be
+ retried on server failure. Non-idempotent operations will not.
+
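+        Example (sketch; assumes ``SetCell`` is imported from google.cloud.bigtable.data.mutations)::
+
+            await table.mutate_row(
+                b"row-key",
+                SetCell("family", b"qualifier", b"new-value"),
+            )
+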
+ Args:
+ row_key: the row to apply mutations to
+ mutations: the set of mutations to apply to the row
+ operation_timeout: the time budget for the entire operation, in seconds.
+ Failed requests will be retried within the budget.
+ Defaults to the Table's default_operation_timeout
+ attempt_timeout: the time budget for an individual network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+ Defaults to the Table's default_attempt_timeout.
+ If None, defaults to operation_timeout.
+ retryable_errors: a list of errors that will be retried if encountered.
+ Only idempotent mutations will be retried. Defaults to the Table's
+ default_retryable_errors.
+ Raises:
+ google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+ will be chained with a RetryExceptionGroup containing all
+ GoogleAPIError exceptions from any retries that failed
+ google.api_core.exceptions.GoogleAPIError: raised on non-idempotent operations that cannot be
+ safely retried.
+ ValueError: if invalid arguments are provided
+ """
+ operation_timeout, attempt_timeout = _get_timeouts(
+ operation_timeout, attempt_timeout, self
+ )
+
+ if not mutations:
+ raise ValueError("No mutations provided")
+ mutations_list = mutations if isinstance(mutations, list) else [mutations]
+
+ if all(mutation.is_idempotent() for mutation in mutations_list):
+ # mutations are all idempotent and safe to retry
+ predicate = retries.if_exception_type(
+ *_get_retryable_errors(retryable_errors, self)
+ )
+ else:
+ # mutations should not be retried
+ predicate = retries.if_exception_type()
+
+ sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60)
+
+ target = partial(
+ self.client._gapic_client.mutate_row,
+ request=MutateRowRequest(
+ row_key=row_key.encode("utf-8")
+ if isinstance(row_key, str)
+ else row_key,
+ mutations=[mutation._to_pb() for mutation in mutations_list],
+ app_profile_id=self.app_profile_id,
+ **self._request_path,
+ ),
+ timeout=attempt_timeout,
+ retry=None,
+ )
+ return await CrossSync.retry_target(
+ target,
+ predicate,
+ sleep_generator,
+ operation_timeout,
+ exception_factory=_retry_exception_factory,
+ )
+
+ @CrossSync.convert
+ async def bulk_mutate_rows(
+ self,
+ mutation_entries: list[RowMutationEntry],
+ *,
+ operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+ attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+ retryable_errors: Sequence[type[Exception]]
+ | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+ ):
+ """
+ Applies mutations for multiple rows in a single batched request.
+
+        Each individual RowMutationEntry is applied atomically, but separate entries
+        may be applied in arbitrary order (even for entries targeting the same row).
+        In total, the mutation entries can contain at most 100000 individual mutations
+        across all entries.
+
+        Idempotent entries (i.e., entries with mutations with explicit timestamps)
+        will be retried on failure. Non-idempotent entries will not, and will be reported
+        in a raised exception group.
+
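+        Example (sketch; assumes ``RowMutationEntry`` and ``SetCell`` are imported from google.cloud.bigtable.data.mutations)::
+
+            entries = [
+                RowMutationEntry(b"row-1", [SetCell("family", b"q", b"value-1")]),
+                RowMutationEntry(b"row-2", [SetCell("family", b"q", b"value-2")]),
+            ]
+            await table.bulk_mutate_rows(entries)
+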
+ Args:
+ mutation_entries: the batches of mutations to apply
+ Each entry will be applied atomically, but entries will be applied
+ in arbitrary order
+ operation_timeout: the time budget for the entire operation, in seconds.
+ Failed requests will be retried within the budget.
+ Defaults to the Table's default_mutate_rows_operation_timeout
+ attempt_timeout: the time budget for an individual network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+ Defaults to the Table's default_mutate_rows_attempt_timeout.
+ If None, defaults to operation_timeout.
+ retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_mutate_rows_retryable_errors
+ Raises:
+ MutationsExceptionGroup: if one or more mutations fails
+ Contains details about any failed entries in .exceptions
+ ValueError: if invalid arguments are provided
+ """
+ operation_timeout, attempt_timeout = _get_timeouts(
+ operation_timeout, attempt_timeout, self
+ )
+ retryable_excs = _get_retryable_errors(retryable_errors, self)
+
+ operation = CrossSync._MutateRowsOperation(
+ self.client._gapic_client,
+ self,
+ mutation_entries,
+ operation_timeout,
+ attempt_timeout,
+ retryable_exceptions=retryable_excs,
+ )
+ await operation.start()
+
+ @CrossSync.convert
+ async def check_and_mutate_row(
+ self,
+ row_key: str | bytes,
+ predicate: RowFilter | None,
+ *,
+ true_case_mutations: Mutation | list[Mutation] | None = None,
+ false_case_mutations: Mutation | list[Mutation] | None = None,
+ operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+ ) -> bool:
+ """
+ Mutates a row atomically based on the output of a predicate filter
+
+ Non-idempotent operation: will not be retried
+
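+        Example (sketch; assumes ``SetCell`` from google.cloud.bigtable.data.mutations; passing ``predicate=None`` checks whether the row contains any cells)::
+
+            row_has_data = await table.check_and_mutate_row(
+                b"row-key",
+                predicate=None,
+                true_case_mutations=SetCell("family", b"q", b"row already has data"),
+                false_case_mutations=SetCell("family", b"q", b"row was empty"),
+            )
+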
+ Args:
+ row_key: the key of the row to mutate
+ predicate: the filter to be applied to the contents of the specified row.
+ Depending on whether or not any results are yielded,
+ either true_case_mutations or false_case_mutations will be executed.
+ If None, checks that the row contains any values at all.
+ true_case_mutations:
+ Changes to be atomically applied to the specified row if
+ predicate yields at least one cell when
+ applied to row_key. Entries are applied in order,
+ meaning that earlier mutations can be masked by later
+ ones. Must contain at least one entry if
+ false_case_mutations is empty, and at most 100000.
+ false_case_mutations:
+ Changes to be atomically applied to the specified row if
+ predicate_filter does not yield any cells when
+ applied to row_key. Entries are applied in order,
+ meaning that earlier mutations can be masked by later
+ ones. Must contain at least one entry if
+ `true_case_mutations` is empty, and at most 100000.
+ operation_timeout: the time budget for the entire operation, in seconds.
+ Failed requests will not be retried. Defaults to the Table's default_operation_timeout
+ Returns:
+ bool indicating whether the predicate was true or false
+ Raises:
+ google.api_core.exceptions.GoogleAPIError: exceptions from grpc call
+ """
+ operation_timeout, _ = _get_timeouts(operation_timeout, None, self)
+ if true_case_mutations is not None and not isinstance(
+ true_case_mutations, list
+ ):
+ true_case_mutations = [true_case_mutations]
+ true_case_list = [m._to_pb() for m in true_case_mutations or []]
+ if false_case_mutations is not None and not isinstance(
+ false_case_mutations, list
+ ):
+ false_case_mutations = [false_case_mutations]
+ false_case_list = [m._to_pb() for m in false_case_mutations or []]
+ result = await self.client._gapic_client.check_and_mutate_row(
+ request=CheckAndMutateRowRequest(
+ true_mutations=true_case_list,
+ false_mutations=false_case_list,
+ predicate_filter=predicate._to_pb() if predicate is not None else None,
+ row_key=row_key.encode("utf-8")
+ if isinstance(row_key, str)
+ else row_key,
+ app_profile_id=self.app_profile_id,
+ **self._request_path,
+ ),
+ timeout=operation_timeout,
+ retry=None,
+ )
+ return result.predicate_matched
+
+ @CrossSync.convert
+ async def read_modify_write_row(
+ self,
+ row_key: str | bytes,
+ rules: ReadModifyWriteRule | list[ReadModifyWriteRule],
+ *,
+ operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+ ) -> Row:
+ """
+ Reads and modifies a row atomically according to input ReadModifyWriteRules,
+ and returns the contents of all modified cells
+
+ The new value for the timestamp is the greater of the existing timestamp or
+ the current server time.
+
+ Non-idempotent operation: will not be retried
+
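+        Example (sketch; assumes ``IncrementRule`` is imported from google.cloud.bigtable.data.read_modify_write_rules)::
+
+            updated_row = await table.read_modify_write_row(
+                b"row-key",
+                IncrementRule("family", b"counter", 1),
+            )
+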
+ Args:
+ row_key: the key of the row to apply read/modify/write rules to
+ rules: A rule or set of rules to apply to the row.
+ Rules are applied in order, meaning that earlier rules will affect the
+ results of later ones.
+ operation_timeout: the time budget for the entire operation, in seconds.
+ Failed requests will not be retried.
+ Defaults to the Table's default_operation_timeout.
+ Returns:
+ Row: a Row containing cell data that was modified as part of the operation
+ Raises:
+ google.api_core.exceptions.GoogleAPIError: exceptions from grpc call
+ ValueError: if invalid arguments are provided
+ """
+ operation_timeout, _ = _get_timeouts(operation_timeout, None, self)
+ if operation_timeout <= 0:
+ raise ValueError("operation_timeout must be greater than 0")
+ if rules is not None and not isinstance(rules, list):
+ rules = [rules]
+ if not rules:
+ raise ValueError("rules must contain at least one item")
+ result = await self.client._gapic_client.read_modify_write_row(
+ request=ReadModifyWriteRowRequest(
+ rules=[rule._to_pb() for rule in rules],
+ row_key=row_key.encode("utf-8")
+ if isinstance(row_key, str)
+ else row_key,
+ app_profile_id=self.app_profile_id,
+ **self._request_path,
+ ),
+ timeout=operation_timeout,
+ retry=None,
+ )
+ # construct Row from result
+ return Row._from_pb(result.row)
+
+ @CrossSync.convert
+ async def close(self):
+ """
+ Called to close the Table instance and release any resources held by it.
+ """
+ if self._register_instance_future:
+ self._register_instance_future.cancel()
+ self.client._remove_instance_registration(
+ self.instance_id, self.app_profile_id, id(self)
+ )
+
+ @CrossSync.convert(sync_name="__enter__")
+ async def __aenter__(self):
+ """
+ Implement async context manager protocol
+
+ Ensure registration task has time to run, so that
+ grpc channels will be warmed for the specified instance
+ """
+ if self._register_instance_future:
+ await self._register_instance_future
+ return self
+
+ @CrossSync.convert(sync_name="__exit__")
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+ """
+ Implement async context manager protocol
+
+ Unregister this instance with the client, so that
+ grpc channels will no longer be warmed
+ """
+ await self.close()
+
+
+@CrossSync.convert_class(
+ sync_name="Table",
+ add_mapping_for_name="Table",
+ replace_symbols={"_DataApiTargetAsync": "_DataApiTarget"},
+)
+class TableAsync(_DataApiTargetAsync):
+ """
+ Main Data API surface for interacting with a Bigtable table.
+
+    Table object maintains table_id and app_profile_id context, and passes them with
+ each call
+ """
+
+ @property
+ def _request_path(self) -> dict[str, str]:
+ return {"table_name": self.table_name}
+
+
+@CrossSync.convert_class(
+ sync_name="AuthorizedView",
+ add_mapping_for_name="AuthorizedView",
+ replace_symbols={"_DataApiTargetAsync": "_DataApiTarget"},
+)
+class AuthorizedViewAsync(_DataApiTargetAsync):
+ """
+ Provides access to an authorized view of a table.
+
+ An authorized view is a subset of a table that you configure to include specific table data.
+ Then you grant access to the authorized view separately from access to the table.
+
+ AuthorizedView object maintains table_id, app_profile_id, and authorized_view_id context,
+    and passes them with each call
+ """
+
+ @CrossSync.convert(
+ docstring_format_vars={
+ "LOOP_MESSAGE": (
+ "Must be created within an async context (running event loop)",
+ "",
+ ),
+ "RAISE_NO_LOOP": (
+ "RuntimeError: if called outside of an async context (no running event loop)",
+ "None",
+ ),
+ }
+ )
+ def __init__(
+ self,
+ client,
+ instance_id,
+ table_id,
+ authorized_view_id,
+ app_profile_id: str | None = None,
+ **kwargs,
+ ):
+ """
+ Initialize an AuthorizedView instance
+
+ {LOOP_MESSAGE}
+
+ Args:
+ instance_id: The Bigtable instance ID to associate with this client.
+ instance_id is combined with the client's project to fully
+ specify the instance
+ table_id: The ID of the table. table_id is combined with the
+ instance_id and the client's project to fully specify the table
+ authorized_view_id: The id for the authorized view to use for requests
+ app_profile_id: The app profile to associate with requests.
+ https://cloud.google.com/bigtable/docs/app-profiles
+ default_read_rows_operation_timeout: The default timeout for read rows
+ operations, in seconds. If not set, defaults to 600 seconds (10 minutes)
+ default_read_rows_attempt_timeout: The default timeout for individual
+ read rows rpc requests, in seconds. If not set, defaults to 20 seconds
+ default_mutate_rows_operation_timeout: The default timeout for mutate rows
+ operations, in seconds. If not set, defaults to 600 seconds (10 minutes)
+ default_mutate_rows_attempt_timeout: The default timeout for individual
+ mutate rows rpc requests, in seconds. If not set, defaults to 60 seconds
+ default_operation_timeout: The default timeout for all other operations, in
+ seconds. If not set, defaults to 60 seconds
+ default_attempt_timeout: The default timeout for all other individual rpc
+ requests, in seconds. If not set, defaults to 20 seconds
+ default_read_rows_retryable_errors: a list of errors that will be retried
+ if encountered during read_rows and related operations.
+ Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted)
+ default_mutate_rows_retryable_errors: a list of errors that will be retried
+ if encountered during mutate_rows and related operations.
+ Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable)
+ default_retryable_errors: a list of errors that will be retried if
+ encountered during all other operations.
+ Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable)
+ Raises:
+ {RAISE_NO_LOOP}
+ """
+ super().__init__(client, instance_id, table_id, app_profile_id, **kwargs)
+ self.authorized_view_id = authorized_view_id
+ self.authorized_view_name: str = self.client._gapic_client.authorized_view_path(
+ self.client.project, instance_id, table_id, authorized_view_id
+ )
+
+ @property
+ def _request_path(self) -> dict[str, str]:
+ return {"authorized_view_name": self.authorized_view_name}
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/metrics_interceptor.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/metrics_interceptor.py
new file mode 100644
index 000000000000..a154c0083dd6
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/metrics_interceptor.py
@@ -0,0 +1,78 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+if CrossSync.is_async:
+ from grpc.aio import UnaryUnaryClientInterceptor
+ from grpc.aio import UnaryStreamClientInterceptor
+else:
+ from grpc import UnaryUnaryClientInterceptor
+ from grpc import UnaryStreamClientInterceptor
+
+
+__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen.metrics_interceptor"
+
+
+@CrossSync.convert_class(sync_name="BigtableMetricsInterceptor")
+class AsyncBigtableMetricsInterceptor(
+ UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor
+):
+ """
+ An async gRPC interceptor to add client metadata and print server metadata.
+ """
+
+ @CrossSync.convert
+ async def intercept_unary_unary(self, continuation, client_call_details, request):
+ """
+ Interceptor for unary rpcs:
+ - MutateRow
+ - CheckAndMutateRow
+ - ReadModifyWriteRow
+ """
+ try:
+ call = await continuation(client_call_details, request)
+ return call
+ except Exception as rpc_error:
+ raise rpc_error
+
+ @CrossSync.convert
+ async def intercept_unary_stream(self, continuation, client_call_details, request):
+ """
+ Interceptor for streaming rpcs:
+ - ReadRows
+ - MutateRows
+ - SampleRowKeys
+ """
+ try:
+ return self._streaming_generator_wrapper(
+ await continuation(client_call_details, request)
+ )
+ except Exception as rpc_error:
+            # handle errors while initializing the stream
+ raise rpc_error
+
+ @staticmethod
+ @CrossSync.convert
+ async def _streaming_generator_wrapper(call):
+ """
+ Wrapped generator to be returned by intercept_unary_stream.
+ """
+ try:
+ async for response in call:
+ yield response
+ except Exception as e:
+ # handle errors while processing stream
+ raise e
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py
new file mode 100644
index 000000000000..a8e99ea9e91b
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py
@@ -0,0 +1,536 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+from typing import Sequence, TYPE_CHECKING, cast
+import atexit
+import warnings
+from collections import deque
+import concurrent.futures
+
+from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup
+from google.cloud.bigtable.data.exceptions import FailedMutationEntryError
+from google.cloud.bigtable.data._helpers import _get_retryable_errors
+from google.cloud.bigtable.data._helpers import _get_timeouts
+from google.cloud.bigtable.data._helpers import TABLE_DEFAULT
+
+from google.cloud.bigtable.data.mutations import (
+ _MUTATE_ROWS_REQUEST_MUTATION_LIMIT,
+)
+from google.cloud.bigtable.data.mutations import Mutation
+
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+if TYPE_CHECKING:
+ from google.cloud.bigtable.data.mutations import RowMutationEntry
+
+ if CrossSync.is_async:
+ from google.cloud.bigtable.data._async.client import (
+ _DataApiTargetAsync as TargetType,
+ )
+ else:
+ from google.cloud.bigtable.data._sync_autogen.client import _DataApiTarget as TargetType # type: ignore
+
+__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen.mutations_batcher"
+
+# used to make more readable default values
+_MB_SIZE = 1024 * 1024
+
+
+@CrossSync.convert_class(sync_name="_FlowControl", add_mapping_for_name="_FlowControl")
+class _FlowControlAsync:
+ """
+ Manages flow control for batched mutations. Mutations are registered against
+ the FlowControl object before being sent, which will block if size or count
+    limits have reached capacity. As mutations complete, they are removed from
+ the FlowControl object, which will notify any blocked requests that there
+ is additional capacity.
+
+ Flow limits are not hard limits. If a single mutation exceeds the configured
+ limits, it will be allowed as a single batch when the capacity is available.
+
+ Args:
+ max_mutation_count: maximum number of mutations to send in a single rpc.
+ This corresponds to individual mutations in a single RowMutationEntry.
+ max_mutation_bytes: maximum number of bytes to send in a single rpc.
+ Raises:
+ ValueError: if max_mutation_count or max_mutation_bytes is less than 0
+ """
+
+ def __init__(
+ self,
+ max_mutation_count: int,
+ max_mutation_bytes: int,
+ ):
+ self._max_mutation_count = max_mutation_count
+ self._max_mutation_bytes = max_mutation_bytes
+ if self._max_mutation_count < 1:
+ raise ValueError("max_mutation_count must be greater than 0")
+ if self._max_mutation_bytes < 1:
+ raise ValueError("max_mutation_bytes must be greater than 0")
+ self._capacity_condition = CrossSync.Condition()
+ self._in_flight_mutation_count = 0
+ self._in_flight_mutation_bytes = 0
+
+ def _has_capacity(self, additional_count: int, additional_size: int) -> bool:
+ """
+ Checks if there is capacity to send a new entry with the given size and count
+
+ FlowControl limits are not hard limits. If a single mutation exceeds
+ the configured flow limits, it will be sent in a single batch when
+ previous batches have completed.
+
+ Args:
+ additional_count: number of mutations in the pending entry
+ additional_size: size of the pending entry
+ Returns:
+ bool: True if there is capacity to send the pending entry, False otherwise
+ """
+ # adjust limits to allow overly large mutations
+ acceptable_size = max(self._max_mutation_bytes, additional_size)
+ acceptable_count = max(self._max_mutation_count, additional_count)
+ # check if we have capacity for new mutation
+ new_size = self._in_flight_mutation_bytes + additional_size
+ new_count = self._in_flight_mutation_count + additional_count
+ return new_size <= acceptable_size and new_count <= acceptable_count
+
+ @CrossSync.convert
+ async def remove_from_flow(
+ self, mutations: RowMutationEntry | list[RowMutationEntry]
+ ) -> None:
+ """
+ Removes mutations from flow control. This method should be called once
+ for each mutation that was sent to add_to_flow, after the corresponding
+ operation is complete.
+
+ Args:
+ mutations: mutation or list of mutations to remove from flow control
+ """
+ if not isinstance(mutations, list):
+ mutations = [mutations]
+ total_count = sum(len(entry.mutations) for entry in mutations)
+ total_size = sum(entry.size() for entry in mutations)
+ self._in_flight_mutation_count -= total_count
+ self._in_flight_mutation_bytes -= total_size
+ # notify any blocked requests that there is additional capacity
+ async with self._capacity_condition:
+ self._capacity_condition.notify_all()
+
+ @CrossSync.convert
+ async def add_to_flow(self, mutations: RowMutationEntry | list[RowMutationEntry]):
+ """
+ Generator function that registers mutations with flow control. As mutations
+ are accepted into the flow control, they are yielded back to the caller,
+ to be sent in a batch. If the flow control is at capacity, the generator
+ will block until there is capacity available.
+
+ Args:
+ mutations: list of mutations to break up into batches
+ Yields:
+ list[RowMutationEntry]:
+ list of mutations that have reserved space in the flow control.
+ Each batch contains at least one mutation.
+ """
+ if not isinstance(mutations, list):
+ mutations = [mutations]
+ start_idx = 0
+ end_idx = 0
+ while end_idx < len(mutations):
+ start_idx = end_idx
+ batch_mutation_count = 0
+ # fill up batch until we hit capacity
+ async with self._capacity_condition:
+ while end_idx < len(mutations):
+ next_entry = mutations[end_idx]
+ next_size = next_entry.size()
+ next_count = len(next_entry.mutations)
+ if (
+ self._has_capacity(next_count, next_size)
+ # make sure not to exceed per-request mutation count limits
+ and (batch_mutation_count + next_count)
+ <= _MUTATE_ROWS_REQUEST_MUTATION_LIMIT
+ ):
+ # room for new mutation; add to batch
+ end_idx += 1
+ batch_mutation_count += next_count
+ self._in_flight_mutation_bytes += next_size
+ self._in_flight_mutation_count += next_count
+ elif start_idx != end_idx:
+ # we have at least one mutation in the batch, so send it
+ break
+ else:
+ # batch is empty. Block until we have capacity
+ await self._capacity_condition.wait_for(
+ lambda: self._has_capacity(next_count, next_size)
+ )
+ yield mutations[start_idx:end_idx]
+
+
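The generator contract above can be exercised directly. Below is a minimal sketch, assuming the async class is importable from `google.cloud.bigtable.data._async.mutations_batcher` (the source module for the generated sync file) and using a stand-in entry type, since flow control only reads `entry.mutations` and `entry.size()`:

```python
import asyncio
# import path assumed for illustration
from google.cloud.bigtable.data._async.mutations_batcher import _FlowControlAsync


class FakeEntry:
    """Stand-in exposing the two attributes flow control reads."""

    def __init__(self, n_mutations, n_bytes):
        self.mutations = [object()] * n_mutations
        self._bytes = n_bytes

    def size(self):
        return self._bytes


async def main():
    flow = _FlowControlAsync(max_mutation_count=10, max_mutation_bytes=1000)
    entries = [FakeEntry(4, 300) for _ in range(5)]
    async for batch in flow.add_to_flow(entries):
        # each batch fits under the count/byte limits (here: two entries at a time)
        print(f"sending {len(batch)} entries")
        # releasing capacity lets the generator yield the next batch
        await flow.remove_from_flow(batch)


asyncio.run(main())
```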
+@CrossSync.convert_class(sync_name="MutationsBatcher")
+class MutationsBatcherAsync:
+ """
+ Allows users to send batches using context manager API.
+
+ Runs mutate_row, mutate_rows, and check_and_mutate_row internally, combining
+ entries to use as few network requests as required
+
+ Will automatically flush the batcher:
+ - every flush_interval seconds
+ - after queue size reaches flush_limit_mutation_count
+ - after queue reaches flush_limit_bytes
+ - when batcher is closed or destroyed
+
+ Args:
+ table: table or authorized_view used to perform rpc calls
+ flush_interval: Automatically flush every flush_interval seconds.
+ If None, no time-based flushing is performed.
+ flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count
+ mutations are added across all entries. If None, this limit is ignored.
+ flush_limit_bytes: Flush immediately after flush_limit_bytes bytes are added.
+ flow_control_max_mutation_count: Maximum number of inflight mutations.
+ flow_control_max_bytes: Maximum number of inflight bytes.
+ batch_operation_timeout: timeout for each mutate_rows operation, in seconds.
+ If TABLE_DEFAULT, defaults to the Table's default_mutate_rows_operation_timeout.
+ batch_attempt_timeout: timeout for each individual request, in seconds.
+ If TABLE_DEFAULT, defaults to the Table's default_mutate_rows_attempt_timeout.
+ If None, defaults to batch_operation_timeout.
+ batch_retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_mutate_rows_retryable_errors.
+ """
+
+ def __init__(
+ self,
+ table: TargetType,
+ *,
+ flush_interval: float | None = 5,
+ flush_limit_mutation_count: int | None = 1000,
+ flush_limit_bytes: int = 20 * _MB_SIZE,
+ flow_control_max_mutation_count: int = 100_000,
+ flow_control_max_bytes: int = 100 * _MB_SIZE,
+ batch_operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+ batch_attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+ batch_retryable_errors: Sequence[type[Exception]]
+ | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+ ):
+ self._operation_timeout, self._attempt_timeout = _get_timeouts(
+ batch_operation_timeout, batch_attempt_timeout, table
+ )
+ self._retryable_errors: list[type[Exception]] = _get_retryable_errors(
+ batch_retryable_errors, table
+ )
+
+ self._closed = CrossSync.Event()
+ self._target = table
+ self._staged_entries: list[RowMutationEntry] = []
+ self._staged_count, self._staged_bytes = 0, 0
+ self._flow_control = CrossSync._FlowControl(
+ flow_control_max_mutation_count, flow_control_max_bytes
+ )
+ self._flush_limit_bytes = flush_limit_bytes
+ self._flush_limit_count = (
+ flush_limit_mutation_count
+ if flush_limit_mutation_count is not None
+ else float("inf")
+ )
+ # used by sync class to run mutate_rows operations
+ self._sync_rpc_executor = (
+ concurrent.futures.ThreadPoolExecutor(max_workers=8)
+ if not CrossSync.is_async
+ else None
+ )
+ # used by sync class to manage flush_internal tasks
+ self._sync_flush_executor = (
+ concurrent.futures.ThreadPoolExecutor(max_workers=4)
+ if not CrossSync.is_async
+ else None
+ )
+ self._flush_timer = CrossSync.create_task(
+ self._timer_routine, flush_interval, sync_executor=self._sync_flush_executor
+ )
+ self._flush_jobs: set[CrossSync.Future[None]] = set()
+ # MutationsExceptionGroup reports number of successful entries along with failures
+ self._entries_processed_since_last_raise: int = 0
+ self._exceptions_since_last_raise: int = 0
+ # keep track of the first and last _exception_list_limit exceptions
+ self._exception_list_limit: int = 10
+ self._oldest_exceptions: list[Exception] = []
+ self._newest_exceptions: deque[Exception] = deque(
+ maxlen=self._exception_list_limit
+ )
+ # clean up on program exit
+ atexit.register(self._on_exit)
+
+ @CrossSync.convert
+ async def _timer_routine(self, interval: float | None) -> None:
+ """
+ Set up a background task to flush the batcher every interval seconds
+
+ If interval is None, an empty future is returned
+
+ Args:
+ flush_interval: Automatically flush every flush_interval seconds.
+ If None, no time-based flushing is performed.
+ """
+ if not interval or interval <= 0:
+ return None
+ while not self._closed.is_set():
+ # wait until interval has passed, or until closed
+ await CrossSync.event_wait(
+ self._closed, timeout=interval, async_break_early=False
+ )
+ if not self._closed.is_set() and self._staged_entries:
+ self._schedule_flush()
+
+ @CrossSync.convert
+ async def append(self, mutation_entry: RowMutationEntry):
+ """
+ Add a new set of mutations to the internal queue
+
+ Args:
+ mutation_entry: new entry to add to flush queue
+ Raises:
+ RuntimeError: if batcher is closed
+ ValueError: if an invalid mutation type is added
+ """
+ # TODO: return a future to track completion of this entry
+ if self._closed.is_set():
+ raise RuntimeError("Cannot append to closed MutationsBatcher")
+ if isinstance(cast(Mutation, mutation_entry), Mutation):
+ raise ValueError(
+ f"invalid mutation type: {type(mutation_entry).__name__}. Only RowMutationEntry objects are supported by batcher"
+ )
+ self._staged_entries.append(mutation_entry)
+ # start a new flush task if limits exceeded
+ self._staged_count += len(mutation_entry.mutations)
+ self._staged_bytes += mutation_entry.size()
+ if (
+ self._staged_count >= self._flush_limit_count
+ or self._staged_bytes >= self._flush_limit_bytes
+ ):
+ self._schedule_flush()
+ # yield to the event loop to allow flush to run
+ await CrossSync.yield_to_event_loop()
+
+ def _schedule_flush(self) -> CrossSync.Future[None] | None:
+ """
+ Update the flush task to include the latest staged entries
+
+ Returns:
+ Future[None] | None:
+ future representing the background task, if started
+ """
+ if self._staged_entries:
+ entries, self._staged_entries = self._staged_entries, []
+ self._staged_count, self._staged_bytes = 0, 0
+ new_task = CrossSync.create_task(
+ self._flush_internal, entries, sync_executor=self._sync_flush_executor
+ )
+ if not new_task.done():
+ self._flush_jobs.add(new_task)
+ new_task.add_done_callback(self._flush_jobs.remove)
+ return new_task
+ return None
+
+ @CrossSync.convert
+ async def _flush_internal(self, new_entries: list[RowMutationEntry]):
+ """
+ Flushes a set of mutations to the server, and updates internal state
+
+ Args:
+ new_entries: list of RowMutationEntry objects to flush
+ """
+ # flush new entries
+ in_process_requests: list[CrossSync.Future[list[FailedMutationEntryError]]] = []
+ async for batch in self._flow_control.add_to_flow(new_entries):
+ batch_task = CrossSync.create_task(
+ self._execute_mutate_rows, batch, sync_executor=self._sync_rpc_executor
+ )
+ in_process_requests.append(batch_task)
+ # wait for all inflight requests to complete
+ found_exceptions = await self._wait_for_batch_results(*in_process_requests)
+ # update exception data to reflect any new errors
+ self._entries_processed_since_last_raise += len(new_entries)
+ self._add_exceptions(found_exceptions)
+
+ @CrossSync.convert
+ async def _execute_mutate_rows(
+ self, batch: list[RowMutationEntry]
+ ) -> list[FailedMutationEntryError]:
+ """
+ Helper to execute mutation operation on a batch
+
+ Args:
+ batch: list of RowMutationEntry objects to send to server
+ Returns:
+ list[FailedMutationEntryError]:
+ list of FailedMutationEntryError objects for mutations that failed.
+ FailedMutationEntryError objects will not contain index information
+ """
+ try:
+ operation = CrossSync._MutateRowsOperation(
+ self._target.client._gapic_client,
+ self._target,
+ batch,
+ operation_timeout=self._operation_timeout,
+ attempt_timeout=self._attempt_timeout,
+ retryable_exceptions=self._retryable_errors,
+ )
+ await operation.start()
+ except MutationsExceptionGroup as e:
+ # strip index information from exceptions, since it is not useful in a batch context
+ for subexc in e.exceptions:
+ subexc.index = None
+ return list(e.exceptions)
+ finally:
+ # mark batch as complete in flow control
+ await self._flow_control.remove_from_flow(batch)
+ return []
+
+ def _add_exceptions(self, excs: list[Exception]):
+ """
+ Add new list of exceptions to internal store. To avoid unbounded memory,
+ the batcher will store the first and last _exception_list_limit exceptions,
+ and discard any in between.
+
+ Args:
+ excs: list of exceptions to add to the internal store
+ """
+ self._exceptions_since_last_raise += len(excs)
+ if excs and len(self._oldest_exceptions) < self._exception_list_limit:
+ # populate oldest_exceptions with found_exceptions
+ addition_count = self._exception_list_limit - len(self._oldest_exceptions)
+ self._oldest_exceptions.extend(excs[:addition_count])
+ excs = excs[addition_count:]
+ if excs:
+ # populate newest_exceptions with remaining found_exceptions
+ self._newest_exceptions.extend(excs[-self._exception_list_limit :])
+
+ def _raise_exceptions(self):
+ """
+ Raise any unreported exceptions from background flush operations
+
+ Raises:
+ MutationsExceptionGroup: exception group with all unreported exceptions
+ """
+ if self._oldest_exceptions or self._newest_exceptions:
+ oldest, self._oldest_exceptions = self._oldest_exceptions, []
+ newest = list(self._newest_exceptions)
+ self._newest_exceptions.clear()
+ entry_count, self._entries_processed_since_last_raise = (
+ self._entries_processed_since_last_raise,
+ 0,
+ )
+ exc_count, self._exceptions_since_last_raise = (
+ self._exceptions_since_last_raise,
+ 0,
+ )
+ raise MutationsExceptionGroup.from_truncated_lists(
+ first_list=oldest,
+ last_list=newest,
+ total_excs=exc_count,
+ entry_count=entry_count,
+ )
+
+ @CrossSync.convert(sync_name="__enter__")
+ async def __aenter__(self):
+ """Allow use of context manager API"""
+ return self
+
+ @CrossSync.convert(sync_name="__exit__")
+ async def __aexit__(self, exc_type, exc, tb):
+ """
+ Allow use of context manager API.
+
+ Flushes the batcher and cleans up resources.
+ """
+ await self.close()
+
+ @property
+ def closed(self) -> bool:
+ """
+ Returns:
+ - True if the batcher is closed, False otherwise
+ """
+ return self._closed.is_set()
+
+ @CrossSync.convert
+ async def close(self):
+ """
+ Flush queue and clean up resources
+ """
+ self._closed.set()
+ self._flush_timer.cancel()
+ self._schedule_flush()
+ # shut down executors
+ if self._sync_flush_executor:
+ with self._sync_flush_executor:
+ self._sync_flush_executor.shutdown(wait=True)
+ if self._sync_rpc_executor:
+ with self._sync_rpc_executor:
+ self._sync_rpc_executor.shutdown(wait=True)
+ await CrossSync.wait([*self._flush_jobs, self._flush_timer])
+ atexit.unregister(self._on_exit)
+ # raise unreported exceptions
+ self._raise_exceptions()
+
+ def _on_exit(self):
+ """
+ Called when program is exited. Raises warning if unflushed mutations remain
+ """
+ if not self._closed.is_set() and self._staged_entries:
+ warnings.warn(
+ f"MutationsBatcher for target {self._target!r} was not closed. "
+ f"{len(self._staged_entries)} Unflushed mutations will not be sent to the server."
+ )
+
+ @staticmethod
+ @CrossSync.convert
+ async def _wait_for_batch_results(
+ *tasks: CrossSync.Future[list[FailedMutationEntryError]]
+ | CrossSync.Future[None],
+ ) -> list[Exception]:
+ """
+ Takes in a list of futures representing _execute_mutate_rows tasks,
+ waits for them to complete, and returns a list of errors encountered.
+
+ Args:
+ *tasks: futures representing _execute_mutate_rows or _flush_internal tasks
+ Returns:
+ list[Exception]:
+ list of Exceptions encountered by any of the tasks. Errors are expected
+ to be FailedMutationEntryError, representing a failed mutation operation.
+ If a task fails with a different exception, it will be included in the
+ output list. Successful tasks will not be represented in the output list.
+ """
+ if not tasks:
+ return []
+ exceptions: list[Exception] = []
+ for task in tasks:
+ if CrossSync.is_async:
+ # futures don't need to be awaited in sync mode
+ await task
+ try:
+ exc_list = task.result()
+ if exc_list:
+ # expect a list of FailedMutationEntryError objects
+ for exc in exc_list:
+ # strip index information
+ exc.index = None
+ exceptions.extend(exc_list)
+ except Exception as e:
+ exceptions.append(e)
+ return exceptions
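For reference, a minimal sketch of the context-manager workflow described in the batcher docstring above. The client and table construction (`BigtableDataClientAsync`, `get_table`, `mutations_batcher`) and the project/instance/table ids are assumptions for illustration and are not defined in this diff:

```python
import asyncio

from google.cloud.bigtable.data import BigtableDataClientAsync
from google.cloud.bigtable.data.mutations import RowMutationEntry, SetCell


async def main():
    # client/table names are placeholders
    async with BigtableDataClientAsync(project="my-project") as client:
        table = client.get_table("my-instance", "my-table")
        async with table.mutations_batcher(flush_limit_mutation_count=500) as batcher:
            for i in range(1_000):
                entry = RowMutationEntry(
                    f"row-{i}".encode(),
                    [SetCell("family", "qualifier", b"value")],
                )
                await batcher.append(entry)
        # leaving the context manager flushes remaining entries and raises a
        # MutationsExceptionGroup if any mutations ultimately failed


asyncio.run(main())
```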
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/__init__.py
new file mode 100644
index 000000000000..77a9ddae9d38
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .cross_sync import CrossSync
+
+
+__all__ = [
+ "CrossSync",
+]
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_decorators.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_decorators.py
new file mode 100644
index 000000000000..a0dd140dd01d
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_decorators.py
@@ -0,0 +1,448 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Contains a set of AstDecorator classes, which define the behavior of CrossSync decorators.
+Each AstDecorator class is exposed as a decorator through the CrossSync namespace (e.g. @CrossSync.convert).
+"""
+from __future__ import annotations
+from typing import TYPE_CHECKING, Iterable
+
+if TYPE_CHECKING:
+ import ast
+ from typing import Callable, Any
+
+
+class AstDecorator:
+ """
+ Helper class for CrossSync decorators used for guiding ast transformations.
+
+ AstDecorators are accessed in two ways:
+ 1. The decorators are applied directly to classes and methods in the async client,
+ wrapping the existing code
+ 2. The decorators are read back from the AST when generating the sync code,
+ guiding the transformation
+
+ This class allows the same decorator to be used in both contexts.
+
+ Typically, AstDecorators act as a no-op in async code, and the arguments simply
+ provide configuration guidance for the sync code generation.
+ """
+
+ @classmethod
+ def decorator(cls, *args, **kwargs) -> Callable[..., Any]:
+ """
+ Provides a callable that can be used as a decorator function in async code
+
+ AstDecorator.decorator is called by CrossSync when attaching decorators to
+ the CrossSync class.
+
+ This method creates a new instance of the class, using the arguments provided
+ to the decorator, and defers to the async_decorator method of the instance
+ to build the wrapper function.
+
+ Arguments:
+ *args: arguments to the decorator
+ **kwargs: keyword arguments to the decorator
+ """
+ # decorators with no arguments will provide the function to be wrapped
+ # as the first argument. Pull it out if it exists
+ func = None
+ if len(args) == 1 and callable(args[0]):
+ func = args[0]
+ args = args[1:]
+ # create new AstDecorator instance from given decorator arguments
+ new_instance = cls(*args, **kwargs)
+ # build wrapper
+ wrapper = new_instance.async_decorator()
+ if wrapper is None:
+ # if no wrapper, return no-op decorator
+ return func or (lambda f: f)
+ elif func:
+ # if we can, return single wrapped function
+ return wrapper(func)
+ else:
+ # otherwise, return decorator function
+ return wrapper
+
+ def async_decorator(self) -> Callable[..., Any] | None:
+ """
+ Decorator to apply the async_impl decorator to the wrapped function
+
+ Default implementation is a no-op
+ """
+ return None
+
+ def sync_ast_transform(
+ self, wrapped_node: ast.AST, transformers_globals: dict[str, Any]
+ ) -> ast.AST | None:
+ """
+ When this decorator is encountered in the ast during sync generation, this method is called
+ to transform the wrapped node.
+
+ If None is returned, the node will be dropped from the output file.
+
+ Args:
+ wrapped_node: ast node representing the wrapped function or class that is being wrapped
+ transformers_globals: the set of globals() from the transformers module. This is used to access
+ ast transformer classes that live outside the main codebase
+ Returns:
+ transformed ast node, or None if the node should be dropped
+ """
+ return wrapped_node
+
+ @classmethod
+ def get_for_node(cls, node: ast.Call | ast.Attribute | ast.Name) -> "AstDecorator":
+ """
+ Build an AstDecorator instance from an ast decorator node
+
+ The right subclass is found by comparing the string representation of the
+ decorator name to the class name. (Both names are converted to lowercase and
+ underscores are removed for comparison). If a matching subclass is found,
+ a new instance is created with the provided arguments.
+
+ Args:
+ node: ast.Call node representing the decorator
+ Returns:
+ AstDecorator instance corresponding to the decorator
+ Raises:
+ ValueError: if the decorator cannot be parsed
+ """
+ import ast
+
+ # expect decorators in the format @CrossSync.<decorator_name> or @CrossSync.<decorator_name>(...)
+ # (i.e. should be an ast.Call or an ast.Attribute)
+ root_attr = node.func if isinstance(node, ast.Call) else node
+ if not isinstance(root_attr, ast.Attribute):
+ raise ValueError("Unexpected decorator format")
+ # extract the module and decorator names
+ if "CrossSync" in ast.dump(root_attr):
+ decorator_name = root_attr.attr
+ got_kwargs: dict[str, Any] = (
+ {str(kw.arg): cls._convert_ast_to_py(kw.value) for kw in node.keywords}
+ if hasattr(node, "keywords")
+ else {}
+ )
+ got_args = (
+ [cls._convert_ast_to_py(arg) for arg in node.args]
+ if hasattr(node, "args")
+ else []
+ )
+ # convert to standardized representation
+ formatted_name = decorator_name.replace("_", "").lower()
+ for subclass in cls.get_subclasses():
+ if subclass.__name__.lower() == formatted_name:
+ return subclass(*got_args, **got_kwargs)
+ raise ValueError(f"Unknown decorator encountered: {decorator_name}")
+ else:
+ raise ValueError("Not a CrossSync decorator")
+
+ @classmethod
+ def get_subclasses(cls) -> Iterable[type["AstDecorator"]]:
+ """
+ Get all subclasses of AstDecorator
+
+ Returns:
+ list of all subclasses of AstDecorator
+ """
+ for subclass in cls.__subclasses__():
+ yield from subclass.get_subclasses()
+ yield subclass
+
+ @classmethod
+ def _convert_ast_to_py(cls, ast_node: ast.expr | None) -> Any:
+ """
+ Helper to convert ast primitives to python primitives. Used when unwrapping arguments
+ """
+ import ast
+
+ if ast_node is None:
+ return None
+ if isinstance(ast_node, ast.Constant):
+ return ast_node.value
+ if isinstance(ast_node, ast.List):
+ return [cls._convert_ast_to_py(node) for node in ast_node.elts]
+ if isinstance(ast_node, ast.Tuple):
+ return tuple(cls._convert_ast_to_py(node) for node in ast_node.elts)
+ if isinstance(ast_node, ast.Dict):
+ return {
+ cls._convert_ast_to_py(k): cls._convert_ast_to_py(v)
+ for k, v in zip(ast_node.keys, ast_node.values)
+ }
+ # unsupported node type
+ return ast_node
+
+
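To illustrate the two hooks described above, here is a hypothetical AstDecorator subclass (not part of the library): `async_decorator` supplies the runtime wrapper used in async code, while `sync_ast_transform` decides what appears in the generated sync file.

```python
import functools

from google.cloud.bigtable.data._cross_sync._decorators import AstDecorator


class LogCalls(AstDecorator):
    """Hypothetical decorator; names are illustrative only."""

    def __init__(self, message: str = "called"):
        self.message = message

    def async_decorator(self):
        # wrapper applied at runtime when the decorator is used in async code
        def wrapper(fn):
            @functools.wraps(fn)
            async def inner(*args, **kwargs):
                print(self.message)
                return await fn(*args, **kwargs)
            return inner
        return wrapper

    def sync_ast_transform(self, wrapped_node, transformers_globals):
        # returning the node unchanged keeps it in the generated sync output;
        # returning None would drop it, like the Drop decorator below
        return wrapped_node
```

Exposing it as `@CrossSync.log_calls` would only require an alias such as `log_calls = LogCalls.decorator` on the CrossSync class, mirroring how `convert = Convert.decorator` is declared in cross_sync.py.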
+class ConvertClass(AstDecorator):
+ """
+ Class decorator for guiding generation of sync classes
+
+ Args:
+ sync_name: use a new name for the sync class
+ replace_symbols: a dict of symbols and replacements to use when generating sync class
+ docstring_format_vars: a dict of variables to replace in the docstring
+ rm_aio: if True, automatically strip all asyncio keywords from the wrapped code.
+ If False, only keywords wrapped in CrossSync.rm_aio() calls are removed.
+ add_mapping_for_name: when given, will add a new attribute to CrossSync,
+ so the original class and its sync version can be accessed from CrossSync.
+ """
+
+ def __init__(
+ self,
+ sync_name: str | None = None,
+ *,
+ replace_symbols: dict[str, str] | None = None,
+ docstring_format_vars: dict[str, tuple[str | None, str | None]] | None = None,
+ rm_aio: bool = False,
+ add_mapping_for_name: str | None = None,
+ ):
+ self.sync_name = sync_name
+ self.replace_symbols = replace_symbols
+ docstring_format_vars = docstring_format_vars or {}
+ self.async_docstring_format_vars = {
+ k: v[0] or "" for k, v in docstring_format_vars.items()
+ }
+ self.sync_docstring_format_vars = {
+ k: v[1] or "" for k, v in docstring_format_vars.items()
+ }
+ self.rm_aio = rm_aio
+ self.add_mapping_for_name = add_mapping_for_name
+
+ def async_decorator(self):
+ """
+ Use async decorator as a hook to update CrossSync mappings
+ """
+ from .cross_sync import CrossSync
+
+ if not self.add_mapping_for_name and not self.async_docstring_format_vars:
+ # return None if no changes needed
+ return None
+
+ new_mapping = self.add_mapping_for_name
+
+ def decorator(cls):
+ if new_mapping:
+ CrossSync.add_mapping(new_mapping, cls)
+ if self.async_docstring_format_vars:
+ cls.__doc__ = cls.__doc__.format(**self.async_docstring_format_vars)
+ return cls
+
+ return decorator
+
+ def sync_ast_transform(self, wrapped_node, transformers_globals):
+ """
+ Transform async class into sync copy
+ """
+ import ast
+ import copy
+
+ # copy wrapped node
+ wrapped_node = copy.deepcopy(wrapped_node)
+ # update name
+ if self.sync_name:
+ wrapped_node.name = self.sync_name
+ # strip CrossSync decorators
+ if hasattr(wrapped_node, "decorator_list"):
+ wrapped_node.decorator_list = [
+ d for d in wrapped_node.decorator_list if "CrossSync" not in ast.dump(d)
+ ]
+ else:
+ wrapped_node.decorator_list = []
+ # strip async keywords if specified
+ if self.rm_aio:
+ wrapped_node = transformers_globals["AsyncToSync"]().visit(wrapped_node)
+ # add mapping decorator if needed
+ if self.add_mapping_for_name:
+ wrapped_node.decorator_list.append(
+ ast.Call(
+ func=ast.Attribute(
+ value=ast.Name(id="CrossSync", ctx=ast.Load()),
+ attr="add_mapping_decorator",
+ ctx=ast.Load(),
+ ),
+ args=[
+ ast.Constant(value=self.add_mapping_for_name),
+ ],
+ keywords=[],
+ )
+ )
+ # replace symbols if specified
+ if self.replace_symbols:
+ wrapped_node = transformers_globals["SymbolReplacer"](
+ self.replace_symbols
+ ).visit(wrapped_node)
+ # update docstring if specified
+ if self.sync_docstring_format_vars:
+ docstring = ast.get_docstring(wrapped_node)
+ if docstring:
+ wrapped_node.body[0].value = ast.Constant(
+ value=docstring.format(**self.sync_docstring_format_vars)
+ )
+ return wrapped_node
+
+
+class Convert(ConvertClass):
+ """
+ Method decorator to mark async methods to be converted to sync methods
+
+ Args:
+ sync_name: use a new name for the sync method
+ replace_symbols: a dict of symbols and replacements to use when generating sync method
+ docstring_format_vars: a dict of variables to replace in the docstring
+ rm_aio: if True, automatically strip all asyncio keywords from the method. If False,
+ only the signature `async def` is stripped. Other keywords must be wrapped in
+ CrossSync.rm_aio() calls to be removed.
+ """
+
+ def __init__(
+ self,
+ sync_name: str | None = None,
+ *,
+ replace_symbols: dict[str, str] | None = None,
+ docstring_format_vars: dict[str, tuple[str | None, str | None]] | None = None,
+ rm_aio: bool = True,
+ ):
+ super().__init__(
+ sync_name=sync_name,
+ replace_symbols=replace_symbols,
+ docstring_format_vars=docstring_format_vars,
+ rm_aio=rm_aio,
+ add_mapping_for_name=None,
+ )
+
+ def sync_ast_transform(self, wrapped_node, transformers_globals):
+ """
+ Transform async method into sync
+ """
+ import ast
+
+ # replace async function with sync function
+ converted = ast.copy_location(
+ ast.FunctionDef(
+ wrapped_node.name,
+ wrapped_node.args,
+ wrapped_node.body,
+ wrapped_node.decorator_list
+ if hasattr(wrapped_node, "decorator_list")
+ else [],
+ wrapped_node.returns if hasattr(wrapped_node, "returns") else None,
+ ),
+ wrapped_node,
+ )
+ # transform based on arguments
+ return super().sync_ast_transform(converted, transformers_globals)
+
+
+class Drop(AstDecorator):
+ """
+ Method decorator to drop methods or classes from the sync output
+ """
+
+ def sync_ast_transform(self, wrapped_node, transformers_globals):
+ """
+ Drop from sync output
+ """
+ return None
+
+
+class Pytest(AstDecorator):
+ """
+ Used in place of pytest.mark.asyncio to mark tests
+
+ When generating sync version, also runs rm_aio to remove async keywords from
+ entire test function
+
+ Args:
+ rm_aio: if True, automatically strip all asyncio keywords from test code.
+ Defaults to True, to simplify test code generation.
+ """
+
+ def __init__(self, rm_aio=True):
+ self.rm_aio = rm_aio
+
+ def async_decorator(self):
+ import pytest
+
+ return pytest.mark.asyncio
+
+ def sync_ast_transform(self, wrapped_node, transformers_globals):
+ """
+ convert async to sync
+ """
+ import ast
+
+ # always convert method to sync
+ converted = ast.copy_location(
+ ast.FunctionDef(
+ wrapped_node.name,
+ wrapped_node.args,
+ wrapped_node.body,
+ wrapped_node.decorator_list
+ if hasattr(wrapped_node, "decorator_list")
+ else [],
+ wrapped_node.returns if hasattr(wrapped_node, "returns") else None,
+ ),
+ wrapped_node,
+ )
+ # convert entire body to sync if rm_aio is set
+ if self.rm_aio:
+ converted = transformers_globals["AsyncToSync"]().visit(converted)
+ return converted
+
+
+class PytestFixture(AstDecorator):
+ """
+ Used in place of pytest.fixture or pytest_asyncio.fixture to mark fixtures
+
+ Args:
+ *args: all arguments to pass to pytest.fixture
+ **kwargs: all keyword arguments to pass to pytest.fixture
+ """
+
+ def __init__(self, *args, **kwargs):
+ self._args = args
+ self._kwargs = kwargs
+
+ def async_decorator(self):
+ import pytest_asyncio # type: ignore
+
+ return lambda f: pytest_asyncio.fixture(*self._args, **self._kwargs)(f)
+
+ def sync_ast_transform(self, wrapped_node, transformers_globals):
+ import ast
+ import copy
+
+ arg_nodes = [
+ a if isinstance(a, ast.expr) else ast.Constant(value=a) for a in self._args
+ ]
+ kwarg_nodes = []
+ for k, v in self._kwargs.items():
+ if not isinstance(v, ast.expr):
+ v = ast.Constant(value=v)
+ kwarg_nodes.append(ast.keyword(arg=k, value=v))
+
+ new_node = copy.deepcopy(wrapped_node)
+ if not hasattr(new_node, "decorator_list"):
+ new_node.decorator_list = []
+ new_node.decorator_list.append(
+ ast.Call(
+ func=ast.Attribute(
+ value=ast.Name(id="pytest", ctx=ast.Load()),
+ attr="fixture",
+ ctx=ast.Load(),
+ ),
+ args=arg_nodes,
+ keywords=kwarg_nodes,
+ )
+ )
+ return new_node
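A short sketch of how these decorators are recovered during sync generation: parse a decorated async function and pass the decorator node to `AstDecorator.get_for_node`, which matches it to a subclass by its lowercased name. The sample source string is illustrative only:

```python
import ast

from google.cloud.bigtable.data._cross_sync._decorators import AstDecorator, Convert

source = '''
@CrossSync.convert(sync_name="do_work")
async def do_work_async(self):
    ...
'''

func_node = ast.parse(source).body[0]
decorator_node = func_node.decorator_list[0]        # ast.Call on CrossSync.convert
parsed = AstDecorator.get_for_node(decorator_node)  # matched by lowercased class name
assert isinstance(parsed, Convert)
assert parsed.sync_name == "do_work"
```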
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_mapping_meta.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_mapping_meta.py
new file mode 100644
index 000000000000..5312708ccc46
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_mapping_meta.py
@@ -0,0 +1,64 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+from typing import Any
+
+
+class MappingMeta(type):
+ """
+ Metaclass to provide add_mapping functionality, allowing users to add
+ custom attributes to derived classes at runtime.
+
+ Using a metaclass allows us to share functionality between CrossSync
+ and CrossSync._Sync_Impl, and it works better with mypy checks than
+ monkeypatching
+ """
+
+ # list of attributes that can be added to the derived class at runtime
+ _runtime_replacements: dict[tuple[MappingMeta, str], Any] = {}
+
+ def add_mapping(cls: MappingMeta, name: str, value: Any):
+ """
+ Add a new attribute to the class, for replacing library-level symbols
+
+ Raises:
+ - AttributeError if the attribute already exists with a different value
+ """
+ key = (cls, name)
+ old_value = cls._runtime_replacements.get(key)
+ if old_value is None:
+ cls._runtime_replacements[key] = value
+ elif old_value != value:
+ raise AttributeError(f"Conflicting assignments for CrossSync.{name}")
+
+ def add_mapping_decorator(cls: MappingMeta, name: str):
+ """
+ Exposes add_mapping as a class decorator
+ """
+
+ def decorator(wrapped_cls):
+ cls.add_mapping(name, wrapped_cls)
+ return wrapped_cls
+
+ return decorator
+
+ def __getattr__(cls: MappingMeta, name: str):
+ """
+ Retrieve custom attributes
+ """
+ key = (cls, name)
+ found = cls._runtime_replacements.get(key)
+ if found is not None:
+ return found
+ raise AttributeError(f"CrossSync has no attribute {name}")
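A toy sketch of the metaclass behavior above; `Registry`, `Widget`, and `Gadget` are illustrative names rather than library symbols:

```python
from google.cloud.bigtable.data._cross_sync._mapping_meta import MappingMeta


class Registry(metaclass=MappingMeta):
    pass


class Widget:
    pass


# register at runtime, either directly or via the class decorator
Registry.add_mapping("Widget", Widget)


@Registry.add_mapping_decorator("Gadget")
class Gadget:
    pass


assert Registry.Widget is Widget   # resolved through MappingMeta.__getattr__
assert Registry.Gadget is Gadget
# re-registering the same name with a different value raises AttributeError
```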
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/cross_sync.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/cross_sync.py
new file mode 100644
index 000000000000..1f1ee111aee9
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/cross_sync.py
@@ -0,0 +1,334 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+CrossSync provides a toolset for sharing logic between async and sync codebases, including:
+- A set of decorators for annotating async classes and functions
+ (@CrossSync.convert_class, @CrossSync.convert, @CrossSync.drop, ...)
+- A set of wrappers to wrap common objects and types that have corresponding async and sync implementations
+ (CrossSync.Queue, CrossSync.Condition, CrossSync.Future, ...)
+- A set of function implementations for common async operations that can be used in both async and sync codebases
+ (CrossSync.gather_partials, CrossSync.wait, CrossSync.condition_wait, ...)
+- CrossSync.rm_aio(), which is used to annotate regions of the code containing async keywords to strip
+
+A separate module will use CrossSync annotations to generate a corresponding sync
+class based on a decorated async class.
+
+Usage Example:
+```python
+__CROSS_SYNC_OUTPUT__ = "path.to.sync_module"
+
+@CrossSync.convert_class(sync_name="MyClass")
+class MyAsyncClass:
+ @CrossSync.convert
+ async def async_func(self, arg: int) -> int:
+ await CrossSync.sleep(1)
+ return arg
+```
+"""
+
+from __future__ import annotations
+
+from typing import (
+ TypeVar,
+ Any,
+ Callable,
+ Coroutine,
+ Sequence,
+ Union,
+ AsyncIterable,
+ AsyncIterator,
+ AsyncGenerator,
+ TYPE_CHECKING,
+)
+import typing
+
+import asyncio
+import sys
+import concurrent.futures
+import google.api_core.retry as retries
+import queue
+import threading
+import time
+from ._decorators import (
+ ConvertClass,
+ Convert,
+ Drop,
+ Pytest,
+ PytestFixture,
+)
+from ._mapping_meta import MappingMeta
+
+if TYPE_CHECKING:
+ from typing_extensions import TypeAlias
+
+T = TypeVar("T")
+
+
+class CrossSync(metaclass=MappingMeta):
+ # support CrossSync.is_async to check if the current environment is async
+ is_async = True
+
+ # provide aliases for common async functions and types
+ sleep = asyncio.sleep
+ retry_target = retries.retry_target_async
+ retry_target_stream = retries.retry_target_stream_async
+ Retry = retries.AsyncRetry
+ Queue: TypeAlias = asyncio.Queue
+ Condition: TypeAlias = asyncio.Condition
+ Future: TypeAlias = asyncio.Future
+ Task: TypeAlias = asyncio.Task
+ Event: TypeAlias = asyncio.Event
+ Semaphore: TypeAlias = asyncio.Semaphore
+ StopIteration: TypeAlias = StopAsyncIteration
+ # provide aliases for common async type annotations
+ Awaitable: TypeAlias = typing.Awaitable
+ Iterable: TypeAlias = AsyncIterable
+ Iterator: TypeAlias = AsyncIterator
+ Generator: TypeAlias = AsyncGenerator
+
+ # decorators
+ convert_class = ConvertClass.decorator # decorate classes to convert
+ convert = Convert.decorator # decorate methods to convert from async to sync
+ drop = Drop.decorator # decorate methods to remove from sync version
+ pytest = Pytest.decorator # decorate test methods to run with pytest-asyncio
+ pytest_fixture = (
+ PytestFixture.decorator
+ ) # decorate test methods to run with pytest fixture
+
+ @classmethod
+ def next(cls, iterable):
+ return iterable.__anext__()
+
+ @classmethod
+ def Mock(cls, *args, **kwargs):
+ """
+ Alias for AsyncMock, importing at runtime to avoid hard dependency on mock
+ """
+ try:
+ from unittest.mock import AsyncMock # type: ignore
+ except ImportError: # pragma: NO COVER
+ from mock import AsyncMock # type: ignore
+ return AsyncMock(*args, **kwargs)
+
+ @staticmethod
+ async def gather_partials(
+ partial_list: Sequence[Callable[[], Awaitable[T]]],
+ return_exceptions: bool = False,
+ sync_executor: concurrent.futures.ThreadPoolExecutor | None = None,
+ ) -> list[T | BaseException]:
+ """
+ abstraction over asyncio.gather, but with a set of partial functions instead
+ of coroutines, to work with sync functions.
+ To use gather with a set of futures instead of partials, use CrossSync.wait
+
+ In the async version, the partials are expected to return an awaitable object. Partials
+ are unpacked and awaited in the gather call.
+
+ Sync version implemented with threadpool executor
+
+ Returns:
+ - a list of results (or exceptions, if return_exceptions=True) in the same order as partial_list
+ """
+ if not partial_list:
+ return []
+ awaitable_list = [partial() for partial in partial_list]
+ return await asyncio.gather(
+ *awaitable_list, return_exceptions=return_exceptions
+ )
+
+ @staticmethod
+ async def wait(
+ futures: Sequence[CrossSync.Future[T]], timeout: float | None = None
+ ) -> tuple[set[CrossSync.Future[T]], set[CrossSync.Future[T]]]:
+ """
+ abstraction over asyncio.wait
+
+ Return:
+ - a tuple of (done, pending) sets of futures
+ """
+ if not futures:
+ return set(), set()
+ return await asyncio.wait(futures, timeout=timeout)
+
+ @staticmethod
+ async def event_wait(
+ event: CrossSync.Event,
+ timeout: float | None = None,
+ async_break_early: bool = True,
+ ) -> None:
+ """
+ abstraction over asyncio.Event.wait
+
+ Args:
+ - event: event to wait for
+ - timeout: if set, will break out early after `timeout` seconds
+ - async_break_early: if False, the async version will wait for
+ the full timeout even if the event is set before the timeout.
+ This avoids creating a new background task
+ """
+ if timeout is None:
+ await event.wait()
+ elif not async_break_early:
+ if not event.is_set():
+ await asyncio.sleep(timeout)
+ else:
+ try:
+ await asyncio.wait_for(event.wait(), timeout=timeout)
+ except asyncio.TimeoutError:
+ pass
+
+ @staticmethod
+ def create_task(
+ fn: Callable[..., Coroutine[Any, Any, T]],
+ *fn_args,
+ sync_executor: concurrent.futures.ThreadPoolExecutor | None = None,
+ task_name: str | None = None,
+ **fn_kwargs,
+ ) -> CrossSync.Task[T]:
+ """
+ abstraction over asyncio.create_task. Sync version implemented with threadpool executor
+
+ sync_executor: ThreadPoolExecutor to use for sync operations. Ignored in async version
+ """
+ task: CrossSync.Task[T] = asyncio.create_task(fn(*fn_args, **fn_kwargs))
+ if task_name and sys.version_info >= (3, 8):
+ task.set_name(task_name)
+ return task
+
+ @staticmethod
+ async def yield_to_event_loop() -> None:
+ """
+ Call asyncio.sleep(0) to yield to allow other tasks to run
+ """
+ await asyncio.sleep(0)
+
+ @staticmethod
+ def verify_async_event_loop() -> None:
+ """
+ Raises RuntimeError if the event loop is not running
+ """
+ asyncio.get_running_loop()
+
+ @staticmethod
+ def rm_aio(statement: T) -> T:
+ """
+ Used to annotate regions of the code containing async keywords to strip
+
+ All async keywords inside an rm_aio call are removed, along with
+ `async with` and `async for` statements containing CrossSync.rm_aio() in the body
+ """
+ return statement
+
+ class _Sync_Impl(metaclass=MappingMeta):
+ """
+ Provide sync versions of the async functions and types in CrossSync
+ """
+
+ is_async = False
+
+ sleep = time.sleep
+ next = next
+ retry_target = retries.retry_target
+ retry_target_stream = retries.retry_target_stream
+ Retry = retries.Retry
+ Queue: TypeAlias = queue.Queue
+ Condition: TypeAlias = threading.Condition
+ Future: TypeAlias = concurrent.futures.Future
+ Task: TypeAlias = concurrent.futures.Future
+ Event: TypeAlias = threading.Event
+ Semaphore: TypeAlias = threading.Semaphore
+ StopIteration: TypeAlias = StopIteration
+ # type annotations
+ Awaitable: TypeAlias = Union[T]
+ Iterable: TypeAlias = typing.Iterable
+ Iterator: TypeAlias = typing.Iterator
+ Generator: TypeAlias = typing.Generator
+
+ @classmethod
+ def Mock(cls, *args, **kwargs):
+ from unittest.mock import Mock
+
+ return Mock(*args, **kwargs)
+
+ @staticmethod
+ def event_wait(
+ event: CrossSync._Sync_Impl.Event,
+ timeout: float | None = None,
+ async_break_early: bool = True,
+ ) -> None:
+ event.wait(timeout=timeout)
+
+ @staticmethod
+ def gather_partials(
+ partial_list: Sequence[Callable[[], T]],
+ return_exceptions: bool = False,
+ sync_executor: concurrent.futures.ThreadPoolExecutor | None = None,
+ ) -> list[T | BaseException]:
+ if not partial_list:
+ return []
+ if not sync_executor:
+ raise ValueError("sync_executor is required for sync version")
+ futures_list = [sync_executor.submit(partial) for partial in partial_list]
+ results_list: list[T | BaseException] = []
+ for future in futures_list:
+ found_exc = future.exception()
+ if found_exc is not None:
+ if return_exceptions:
+ results_list.append(found_exc)
+ else:
+ raise found_exc
+ else:
+ results_list.append(future.result())
+ return results_list
+
+ @staticmethod
+ def wait(
+ futures: Sequence[CrossSync._Sync_Impl.Future[T]],
+ timeout: float | None = None,
+ ) -> tuple[
+ set[CrossSync._Sync_Impl.Future[T]], set[CrossSync._Sync_Impl.Future[T]]
+ ]:
+ if not futures:
+ return set(), set()
+ return concurrent.futures.wait(futures, timeout=timeout)
+
+ @staticmethod
+ def create_task(
+ fn: Callable[..., T],
+ *fn_args,
+ sync_executor: concurrent.futures.ThreadPoolExecutor | None = None,
+ task_name: str | None = None,
+ **fn_kwargs,
+ ) -> CrossSync._Sync_Impl.Task[T]:
+ """
+ abstraction over asyncio.create_task. Sync version implemented with threadpool executor
+
+ sync_executor: ThreadPoolExecutor to use for sync operations. Ignored in async version
+ """
+ if not sync_executor:
+ raise ValueError("sync_executor is required for sync version")
+ return sync_executor.submit(fn, *fn_args, **fn_kwargs)
+
+ @staticmethod
+ def yield_to_event_loop() -> None:
+ """
+ No-op for sync version
+ """
+ pass
+
+ @staticmethod
+ def verify_async_event_loop() -> None:
+ """
+ No-op for sync version
+ """
+ pass
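A small sketch contrasting the async and sync `gather_partials` implementations defined in this file; the worker functions are illustrative only:

```python
import asyncio
import concurrent.futures
from functools import partial

from google.cloud.bigtable.data._cross_sync import CrossSync


async def async_demo():
    async def work(x):
        await CrossSync.sleep(0)
        return x * 2
    # partials are called and their awaitables gathered inside gather_partials
    return await CrossSync.gather_partials([partial(work, i) for i in range(3)])


def sync_demo():
    def work(x):
        return x * 2
    with concurrent.futures.ThreadPoolExecutor() as executor:
        # the sync implementation requires an explicit executor
        return CrossSync._Sync_Impl.gather_partials(
            [partial(work, i) for i in range(3)], sync_executor=executor
        )


print(asyncio.run(async_demo()))  # [0, 2, 4]
print(sync_demo())                # [0, 2, 4]
```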
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py
new file mode 100644
index 000000000000..424a344860e3
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py
@@ -0,0 +1,250 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Helper functions used in various places in the library.
+"""
+from __future__ import annotations
+
+from typing import Sequence, List, Tuple, TYPE_CHECKING, Union
+import time
+import enum
+from collections import namedtuple
+from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery
+
+from google.api_core import exceptions as core_exceptions
+from google.api_core.retry import RetryFailureReason
+from google.cloud.bigtable.data.exceptions import RetryExceptionGroup
+
+if TYPE_CHECKING:
+ import grpc
+ from google.cloud.bigtable.data._async.client import _DataApiTargetAsync
+ from google.cloud.bigtable.data._sync_autogen.client import _DataApiTarget
+
+"""
+Helper functions used in various places in the library.
+"""
+
+# Type alias for the output of sample_keys
+RowKeySamples = List[Tuple[bytes, int]]
+
+# type alias for the output of query.shard()
+ShardedQuery = List[ReadRowsQuery]
+
+# used by read_rows_sharded to limit how many requests are attempted in parallel
+_CONCURRENCY_LIMIT = 10
+
+# used to identify an active bigtable resource that needs to be warmed through PingAndWarm
+# each instance/app_profile_id pair needs to be individually tracked
+_WarmedInstanceKey = namedtuple(
+ "_WarmedInstanceKey", ["instance_name", "app_profile_id"]
+)
+
+
+# enum used on method calls when table defaults should be used
+class TABLE_DEFAULT(enum.Enum):
+ # default for mutate_row, sample_row_keys, check_and_mutate_row, and read_modify_write_row
+ DEFAULT = "DEFAULT"
+ # default for read_rows, read_rows_stream, read_rows_sharded, row_exists, and read_row
+ READ_ROWS = "READ_ROWS_DEFAULT"
+ # default for bulk_mutate_rows and mutations_batcher
+ MUTATE_ROWS = "MUTATE_ROWS_DEFAULT"
+
+
+def _attempt_timeout_generator(
+ per_request_timeout: float | None, operation_timeout: float
+):
+ """
+ Generator that yields the timeout value for each attempt of a retry loop.
+
+ Will return per_request_timeout until the operation_timeout is approached,
+ at which point it will return the remaining time in the operation_timeout.
+
+ Args:
+ per_request_timeout: The timeout value to use for each request, in seconds.
+ If None, the operation_timeout will be used for each request.
+ operation_timeout: The timeout value to use for the entire operation, in seconds.
+ Yields:
+ float: The timeout value to use for the next request, in seconds
+ """
+ per_request_timeout = (
+ per_request_timeout if per_request_timeout is not None else operation_timeout
+ )
+ deadline = operation_timeout + time.monotonic()
+ while True:
+ yield max(0, min(per_request_timeout, deadline - time.monotonic()))
+
+
+def _retry_exception_factory(
+ exc_list: list[Exception],
+ reason: RetryFailureReason,
+ timeout_val: float | None,
+) -> tuple[Exception, Exception | None]:
+ """
+ Build retry error based on exceptions encountered during operation
+
+ Args:
+ exc_list: list of exceptions encountered during operation
+ reason: the reason the operation failed (e.g. RetryFailureReason.TIMEOUT)
+ timeout_val: the operation timeout value in seconds, for constructing
+ the error message
+ Returns:
+ tuple[Exception, Exception|None]:
+ tuple of the exception to raise, and a cause exception if applicable
+ """
+ if reason == RetryFailureReason.TIMEOUT:
+ timeout_val_str = f"of {timeout_val:0.1f}s " if timeout_val is not None else ""
+ # if failed due to timeout, raise deadline exceeded as primary exception
+ source_exc: Exception = core_exceptions.DeadlineExceeded(
+ f"operation_timeout{timeout_val_str} exceeded"
+ )
+ elif exc_list:
+ # otherwise, raise non-retryable error as primary exception
+ source_exc = exc_list.pop()
+ else:
+ source_exc = RuntimeError("failed with unspecified exception")
+ # use the retry exception group as the cause of the exception
+ cause_exc: Exception | None = RetryExceptionGroup(exc_list) if exc_list else None
+ source_exc.__cause__ = cause_exc
+ return source_exc, cause_exc
+
+
+def _get_timeouts(
+ operation: float | TABLE_DEFAULT,
+ attempt: float | None | TABLE_DEFAULT,
+ table: "_DataApiTargetAsync" | "_DataApiTarget",
+) -> tuple[float, float]:
+ """
+ Convert passed in timeout values to floats, using table defaults if necessary.
+
+ attempt will use operation value if None, or if larger than operation.
+
+ Will call _validate_timeouts on the outputs, and raise ValueError if the
+ resulting timeouts are invalid.
+
+ Args:
+ operation: The timeout value to use for the entire operation, in seconds.
+ attempt: The timeout value to use for each attempt, in seconds.
+ table: The table to use for default values.
+ Returns:
+ tuple[float, float]: A tuple of (operation_timeout, attempt_timeout)
+ """
+ # load table defaults if necessary
+ if operation == TABLE_DEFAULT.DEFAULT:
+ final_operation = table.default_operation_timeout
+ elif operation == TABLE_DEFAULT.READ_ROWS:
+ final_operation = table.default_read_rows_operation_timeout
+ elif operation == TABLE_DEFAULT.MUTATE_ROWS:
+ final_operation = table.default_mutate_rows_operation_timeout
+ else:
+ final_operation = operation
+ if attempt == TABLE_DEFAULT.DEFAULT:
+ attempt = table.default_attempt_timeout
+ elif attempt == TABLE_DEFAULT.READ_ROWS:
+ attempt = table.default_read_rows_attempt_timeout
+ elif attempt == TABLE_DEFAULT.MUTATE_ROWS:
+ attempt = table.default_mutate_rows_attempt_timeout
+
+ return _align_timeouts(final_operation, attempt)
+
+
+def _align_timeouts(operation: float, attempt: float | None) -> tuple[float, float]:
+ """
+ Convert passed in timeout values to floats.
+
+ attempt will use operation value if None, or if larger than operation.
+
+ Will call _validate_timeouts on the outputs, and raise ValueError if the
+ resulting timeouts are invalid.
+
+ Args:
+ operation: The timeout value to use for the entire operation, in seconds.
+ attempt: The timeout value to use for each attempt, in seconds.
+ Returns:
+ tuple[float, float]: A tuple of (operation_timeout, attempt_timeout)
+ """
+ if attempt is None:
+ # no timeout specified, use operation timeout for both
+ final_attempt = operation
+ else:
+ # cap attempt timeout at operation timeout
+ final_attempt = min(attempt, operation) if operation else attempt
+
+ _validate_timeouts(operation, final_attempt, allow_none=False)
+ return operation, final_attempt
+
+
+def _validate_timeouts(
+ operation_timeout: float, attempt_timeout: float | None, allow_none: bool = False
+):
+ """
+ Helper function that will verify that timeout values are valid, and raise
+ an exception if they are not.
+
+ Args:
+ operation_timeout: The timeout value to use for the entire operation, in seconds.
+ attempt_timeout: The timeout value to use for each attempt, in seconds.
+ allow_none: If True, attempt_timeout can be None. If False, None values will raise an exception.
+ Raises:
+ ValueError: if operation_timeout or attempt_timeout are invalid.
+ """
+ if operation_timeout is None:
+ raise ValueError("operation_timeout cannot be None")
+ if operation_timeout <= 0:
+ raise ValueError("operation_timeout must be greater than 0")
+ if not allow_none and attempt_timeout is None:
+ raise ValueError("attempt_timeout must not be None")
+ elif attempt_timeout is not None:
+ if attempt_timeout <= 0:
+ raise ValueError("attempt_timeout must be greater than 0")
+
+
+def _get_error_type(
+ call_code: Union["grpc.StatusCode", int, type[Exception]]
+) -> type[Exception]:
+ """Helper function for ensuring the object is an exception type.
+ If it is not, the proper GoogleAPICallError type is inferred from the status
+ code.
+
+ Args:
+ - call_code: Exception type or gRPC status code.
+ """
+ if isinstance(call_code, type):
+ return call_code
+ else:
+ return type(core_exceptions.from_grpc_status(call_code, ""))
+
+
+def _get_retryable_errors(
+ call_codes: Sequence["grpc.StatusCode" | int | type[Exception]] | TABLE_DEFAULT,
+ table: "_DataApiTargetAsync" | "_DataApiTarget",
+) -> list[type[Exception]]:
+ """
+ Convert passed in retryable error codes to a list of exception types.
+
+ Args:
+ call_codes: The error codes to convert. Can be a list of grpc.StatusCode values,
+ int values, or Exception types, or a TABLE_DEFAULT value.
+ table: The table to use for default values.
+ Returns:
+ list[type[Exception]]: A list of exception types to retry on.
+ """
+ # load table defaults if necessary
+ if call_codes == TABLE_DEFAULT.DEFAULT:
+ call_codes = table.default_retryable_errors
+ elif call_codes == TABLE_DEFAULT.READ_ROWS:
+ call_codes = table.default_read_rows_retryable_errors
+ elif call_codes == TABLE_DEFAULT.MUTATE_ROWS:
+ call_codes = table.default_mutate_rows_retryable_errors
+
+ return [_get_error_type(e) for e in call_codes]
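A brief sketch of the timeout helpers above: `_align_timeouts` caps the attempt timeout at the operation timeout, and `_attempt_timeout_generator` hands out the per-request budget until the overall deadline approaches. The numbers are illustrative:

```python
import time

from google.cloud.bigtable.data._helpers import (
    _align_timeouts,
    _attempt_timeout_generator,
)

# attempt timeout defaults to the operation timeout, and is capped by it
assert _align_timeouts(60, None) == (60, 60)
assert _align_timeouts(60, 90) == (60, 60)

gen = _attempt_timeout_generator(per_request_timeout=1.0, operation_timeout=3.0)
print(next(gen))  # ~1.0: the full per-request budget is available
time.sleep(2.5)
print(next(gen))  # ~0.5: capped by what remains of the 3s operation deadline
time.sleep(1.0)
print(next(gen))  # 0: the operation deadline has passed
```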
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_mutate_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_mutate_rows.py
new file mode 100644
index 000000000000..3bf7b562f1db
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_mutate_rows.py
@@ -0,0 +1,184 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This file is automatically generated by CrossSync. Do not edit manually.
+
+from __future__ import annotations
+from typing import Sequence, TYPE_CHECKING
+from google.api_core import exceptions as core_exceptions
+from google.api_core import retry as retries
+import google.cloud.bigtable_v2.types.bigtable as types_pb
+import google.cloud.bigtable.data.exceptions as bt_exceptions
+from google.cloud.bigtable.data._helpers import _attempt_timeout_generator
+from google.cloud.bigtable.data._helpers import _retry_exception_factory
+from google.cloud.bigtable.data.mutations import _MUTATE_ROWS_REQUEST_MUTATION_LIMIT
+from google.cloud.bigtable.data.mutations import _EntryWithProto
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+if TYPE_CHECKING:
+ from google.cloud.bigtable.data.mutations import RowMutationEntry
+ from google.cloud.bigtable_v2.services.bigtable.client import (
+ BigtableClient as GapicClientType,
+ )
+ from google.cloud.bigtable.data._sync_autogen.client import (
+ _DataApiTarget as TargetType,
+ )
+
+
+class _MutateRowsOperation:
+ """
+ MutateRowsOperation manages the logic of sending a set of row mutations,
+ and retrying on failed entries. It manages this using the _run_attempt
+ function, which attempts to mutate all outstanding entries, and raises
+ _MutateRowsIncomplete if any retryable errors are encountered.
+
+ Errors are exposed as a MutationsExceptionGroup, which contains a list of
+ exceptions organized by the related failed mutation entries.
+
+ Args:
+ gapic_client: the client to use for the mutate_rows call
+ target: the table or view associated with the request
+ mutation_entries: a list of RowMutationEntry objects to send to the server
+ operation_timeout: the timeout to use for the entire operation, in seconds.
+ attempt_timeout: the timeout to use for each mutate_rows attempt, in seconds.
+ If not specified, the request will run until operation_timeout is reached.
+ """
+
+ def __init__(
+ self,
+ gapic_client: GapicClientType,
+ target: TargetType,
+ mutation_entries: list["RowMutationEntry"],
+ operation_timeout: float,
+ attempt_timeout: float | None,
+ retryable_exceptions: Sequence[type[Exception]] = (),
+ ):
+ total_mutations = sum((len(entry.mutations) for entry in mutation_entries))
+ if total_mutations > _MUTATE_ROWS_REQUEST_MUTATION_LIMIT:
+ raise ValueError(
+ f"mutate_rows requests can contain at most {_MUTATE_ROWS_REQUEST_MUTATION_LIMIT} mutations across all entries. Found {total_mutations}."
+ )
+ self._target = target
+ self._gapic_fn = gapic_client.mutate_rows
+ self.is_retryable = retries.if_exception_type(
+ *retryable_exceptions, bt_exceptions._MutateRowsIncomplete
+ )
+ sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60)
+ self._operation = lambda: CrossSync._Sync_Impl.retry_target(
+ self._run_attempt,
+ self.is_retryable,
+ sleep_generator,
+ operation_timeout,
+ exception_factory=_retry_exception_factory,
+ )
+ self.timeout_generator = _attempt_timeout_generator(
+ attempt_timeout, operation_timeout
+ )
+ self.mutations = [_EntryWithProto(m, m._to_pb()) for m in mutation_entries]
+ self.remaining_indices = list(range(len(self.mutations)))
+ self.errors: dict[int, list[Exception]] = {}
+
+ def start(self):
+ """Start the operation, and run until completion
+
+ Raises:
+ MutationsExceptionGroup: if any mutations failed"""
+ try:
+ self._operation()
+ except Exception as exc:
+ incomplete_indices = self.remaining_indices.copy()
+ for idx in incomplete_indices:
+ self._handle_entry_error(idx, exc)
+ finally:
+ all_errors: list[Exception] = []
+ for idx, exc_list in self.errors.items():
+ if len(exc_list) == 0:
+ raise core_exceptions.ClientError(
+ f"Mutation {idx} failed with no associated errors"
+ )
+ elif len(exc_list) == 1:
+ cause_exc = exc_list[0]
+ else:
+ cause_exc = bt_exceptions.RetryExceptionGroup(exc_list)
+ entry = self.mutations[idx].entry
+ all_errors.append(
+ bt_exceptions.FailedMutationEntryError(idx, entry, cause_exc)
+ )
+ if all_errors:
+ raise bt_exceptions.MutationsExceptionGroup(
+ all_errors, len(self.mutations)
+ )
+
+ def _run_attempt(self):
+ """Run a single attempt of the mutate_rows rpc.
+
+ Raises:
+ _MutateRowsIncomplete: if there are failed mutations eligible for
+ retry after the attempt is complete
+ GoogleAPICallError: if the gapic rpc fails"""
+ request_entries = [self.mutations[idx].proto for idx in self.remaining_indices]
+ active_request_indices = {
+ req_idx: orig_idx
+ for (req_idx, orig_idx) in enumerate(self.remaining_indices)
+ }
+ self.remaining_indices = []
+ if not request_entries:
+ return
+ try:
+ result_generator = self._gapic_fn(
+ request=types_pb.MutateRowsRequest(
+ entries=request_entries,
+ app_profile_id=self._target.app_profile_id,
+ **self._target._request_path,
+ ),
+ timeout=next(self.timeout_generator),
+ retry=None,
+ )
+ for result_list in result_generator:
+ for result in result_list.entries:
+ orig_idx = active_request_indices[result.index]
+ entry_error = core_exceptions.from_grpc_status(
+ result.status.code,
+ result.status.message,
+ details=result.status.details,
+ )
+ if result.status.code != 0:
+ self._handle_entry_error(orig_idx, entry_error)
+ elif orig_idx in self.errors:
+ del self.errors[orig_idx]
+ del active_request_indices[result.index]
+ except Exception as exc:
+ for idx in active_request_indices.values():
+ self._handle_entry_error(idx, exc)
+ raise
+ if self.remaining_indices:
+ raise bt_exceptions._MutateRowsIncomplete
+
+ def _handle_entry_error(self, idx: int, exc: Exception):
+ """Add an exception to the list of exceptions for a given mutation index,
+ and add the index to the list of remaining indices if the exception is
+ retryable.
+
+ Args:
+ idx: the index of the mutation that failed
+ exc: the exception to add to the list"""
+ entry = self.mutations[idx].entry
+ self.errors.setdefault(idx, []).append(exc)
+ if (
+ entry.is_idempotent()
+ and self.is_retryable(exc)
+ and (idx not in self.remaining_indices)
+ ):
+ self.remaining_indices.append(idx)
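+
+# Illustrative sketch, not part of the generated module: roughly how the data
+# client drives this internal helper. The `client`, `table`, and `entries`
+# names below are assumptions for the example, not fixtures from this file.
+#
+#   from google.cloud.bigtable.data.mutations import RowMutationEntry, SetCell
+#
+#   entries = [
+#       RowMutationEntry(b"row-1", [SetCell("family", b"qualifier", b"value")]),
+#   ]
+#   operation = _MutateRowsOperation(
+#       gapic_client=client._gapic_client,
+#       target=table,
+#       mutation_entries=entries,
+#       operation_timeout=60,
+#       attempt_timeout=20,
+#   )
+#   operation.start()  # raises MutationsExceptionGroup if any entries fail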
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_read_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_read_rows.py
new file mode 100644
index 000000000000..3593475a98d2
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_read_rows.py
@@ -0,0 +1,304 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# This file is automatically generated by CrossSync. Do not edit manually.
+
+from __future__ import annotations
+from typing import Sequence, TYPE_CHECKING
+from google.cloud.bigtable_v2.types import ReadRowsRequest as ReadRowsRequestPB
+from google.cloud.bigtable_v2.types import ReadRowsResponse as ReadRowsResponsePB
+from google.cloud.bigtable_v2.types import RowSet as RowSetPB
+from google.cloud.bigtable_v2.types import RowRange as RowRangePB
+from google.cloud.bigtable.data.row import Row, Cell
+from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery
+from google.cloud.bigtable.data.exceptions import InvalidChunk
+from google.cloud.bigtable.data.exceptions import _RowSetComplete
+from google.cloud.bigtable.data.exceptions import _ResetRow
+from google.cloud.bigtable.data._helpers import _attempt_timeout_generator
+from google.cloud.bigtable.data._helpers import _retry_exception_factory
+from google.api_core import retry as retries
+from google.api_core.retry import exponential_sleep_generator
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+if TYPE_CHECKING:
+ from google.cloud.bigtable.data._sync_autogen.client import (
+ _DataApiTarget as TargetType,
+ )
+
+
+class _ReadRowsOperation:
+ """
+ ReadRowsOperation handles the logic of merging chunks from a ReadRowsResponse stream
+ into a stream of Row objects.
+
+ ReadRowsOperation.chunk_stream and ReadRowsOperation.merge_rows take in a stream
+ of ReadRowsResponse messages and turn them into a stream of Row objects.
+
+ ReadRowsOperation(query, target, ...) handles row merging logic end-to-end,
+ including performing retries on stream errors.
+
+ Args:
+ query: The query to execute
+ target: The table or view to send the request to
+ operation_timeout: The total time to allow for the operation, in seconds
+ attempt_timeout: The time to allow for each individual attempt, in seconds
+ retryable_exceptions: A list of exceptions that should trigger a retry
+ """
+
+ __slots__ = (
+ "attempt_timeout_gen",
+ "operation_timeout",
+ "request",
+ "target",
+ "_predicate",
+ "_last_yielded_row_key",
+ "_remaining_count",
+ )
+
+ def __init__(
+ self,
+ query: ReadRowsQuery,
+ target: TargetType,
+ operation_timeout: float,
+ attempt_timeout: float,
+ retryable_exceptions: Sequence[type[Exception]] = (),
+ ):
+ self.attempt_timeout_gen = _attempt_timeout_generator(
+ attempt_timeout, operation_timeout
+ )
+ self.operation_timeout = operation_timeout
+ if isinstance(query, dict):
+ self.request = ReadRowsRequestPB(
+ **query, **target._request_path, app_profile_id=target.app_profile_id
+ )
+ else:
+ self.request = query._to_pb(target)
+ self.target = target
+ self._predicate = retries.if_exception_type(*retryable_exceptions)
+ self._last_yielded_row_key: bytes | None = None
+ self._remaining_count: int | None = self.request.rows_limit or None
+
+ def start_operation(self) -> CrossSync._Sync_Impl.Iterable[Row]:
+ """Start the read_rows operation, retrying on retryable errors.
+
+ Yields:
+ Row: The next row in the stream"""
+ return CrossSync._Sync_Impl.retry_target_stream(
+ self._read_rows_attempt,
+ self._predicate,
+ exponential_sleep_generator(0.01, 60, multiplier=2),
+ self.operation_timeout,
+ exception_factory=_retry_exception_factory,
+ )
+
+ def _read_rows_attempt(self) -> CrossSync._Sync_Impl.Iterable[Row]:
+ """Attempt a single read_rows rpc call.
+ This function is intended to be wrapped by retry logic,
+ which will call this function until it succeeds or
+ a non-retryable error is raised.
+
+ Yields:
+ Row: The next row in the stream"""
+ if self._last_yielded_row_key is not None:
+ try:
+ self.request.rows = self._revise_request_rowset(
+ row_set=self.request.rows,
+ last_seen_row_key=self._last_yielded_row_key,
+ )
+ except _RowSetComplete:
+ return self.merge_rows(None)
+ if self._remaining_count is not None:
+ self.request.rows_limit = self._remaining_count
+ if self._remaining_count == 0:
+ return self.merge_rows(None)
+ gapic_stream = self.target.client._gapic_client.read_rows(
+ self.request, timeout=next(self.attempt_timeout_gen), retry=None
+ )
+ chunked_stream = self.chunk_stream(gapic_stream)
+ return self.merge_rows(chunked_stream)
+
+ def chunk_stream(
+ self,
+ stream: CrossSync._Sync_Impl.Awaitable[
+ CrossSync._Sync_Impl.Iterable[ReadRowsResponsePB]
+ ],
+ ) -> CrossSync._Sync_Impl.Iterable[ReadRowsResponsePB.CellChunk]:
+ """process chunks out of raw read_rows stream
+
+ Args:
+ stream: the raw read_rows stream from the gapic client
+ Yields:
+ ReadRowsResponsePB.CellChunk: the next chunk in the stream"""
+ for resp in stream:
+ resp = resp._pb
+ if resp.last_scanned_row_key:
+ if (
+ self._last_yielded_row_key is not None
+ and resp.last_scanned_row_key <= self._last_yielded_row_key
+ ):
+ raise InvalidChunk("last scanned out of order")
+ self._last_yielded_row_key = resp.last_scanned_row_key
+ current_key = None
+ for c in resp.chunks:
+ if current_key is None:
+ current_key = c.row_key
+ if current_key is None:
+ raise InvalidChunk("first chunk is missing a row key")
+ elif (
+ self._last_yielded_row_key
+ and current_key <= self._last_yielded_row_key
+ ):
+ raise InvalidChunk("row keys should be strictly increasing")
+ yield c
+ if c.reset_row:
+ current_key = None
+ elif c.commit_row:
+ self._last_yielded_row_key = current_key
+ if self._remaining_count is not None:
+ self._remaining_count -= 1
+ if self._remaining_count < 0:
+ raise InvalidChunk("emit count exceeds row limit")
+ current_key = None
+
+ @staticmethod
+ def merge_rows(
+ chunks: CrossSync._Sync_Impl.Iterable[ReadRowsResponsePB.CellChunk] | None,
+ ) -> CrossSync._Sync_Impl.Iterable[Row]:
+ """Merge chunks into rows
+
+ Args:
+ chunks: the chunk stream to merge
+ Yields:
+ Row: the next row in the stream"""
+ if chunks is None:
+ return
+ it = chunks.__iter__()
+ while True:
+ try:
+ c = it.__next__()
+ except CrossSync._Sync_Impl.StopIteration:
+ return
+ row_key = c.row_key
+ if not row_key:
+ raise InvalidChunk("first row chunk is missing key")
+ cells = []
+ family: str | None = None
+ qualifier: bytes | None = None
+ try:
+ while True:
+ if c.reset_row:
+ raise _ResetRow(c)
+ k = c.row_key
+ f = c.family_name.value
+ q = c.qualifier.value if c.HasField("qualifier") else None
+ if k and k != row_key:
+ raise InvalidChunk("unexpected new row key")
+ if f:
+ family = f
+ if q is not None:
+ qualifier = q
+ else:
+ raise InvalidChunk("new family without qualifier")
+ elif family is None:
+ raise InvalidChunk("missing family")
+ elif q is not None:
+ if family is None:
+ raise InvalidChunk("new qualifier without family")
+ qualifier = q
+ elif qualifier is None:
+ raise InvalidChunk("missing qualifier")
+ ts = c.timestamp_micros
+ labels = c.labels if c.labels else []
+ value = c.value
+ if c.value_size > 0:
+ buffer = [value]
+ while c.value_size > 0:
+ c = it.__next__()
+ t = c.timestamp_micros
+ cl = c.labels
+ k = c.row_key
+ if (
+ c.HasField("family_name")
+ and c.family_name.value != family
+ ):
+ raise InvalidChunk("family changed mid cell")
+ if (
+ c.HasField("qualifier")
+ and c.qualifier.value != qualifier
+ ):
+ raise InvalidChunk("qualifier changed mid cell")
+ if t and t != ts:
+ raise InvalidChunk("timestamp changed mid cell")
+ if cl and cl != labels:
+ raise InvalidChunk("labels changed mid cell")
+ if k and k != row_key:
+ raise InvalidChunk("row key changed mid cell")
+ if c.reset_row:
+ raise _ResetRow(c)
+ buffer.append(c.value)
+ value = b"".join(buffer)
+ cells.append(
+ Cell(value, row_key, family, qualifier, ts, list(labels))
+ )
+ if c.commit_row:
+ yield Row(row_key, cells)
+ break
+ c = it.__next__()
+ except _ResetRow as e:
+ c = e.chunk
+ if (
+ c.row_key
+ or c.HasField("family_name")
+ or c.HasField("qualifier")
+ or c.timestamp_micros
+ or c.labels
+ or c.value
+ ):
+ raise InvalidChunk("reset row with data")
+ continue
+ except CrossSync._Sync_Impl.StopIteration:
+ raise InvalidChunk("premature end of stream")
+
+ @staticmethod
+ def _revise_request_rowset(row_set: RowSetPB, last_seen_row_key: bytes) -> RowSetPB:
+ """Revise the rows in the request to avoid ones we've already processed.
+
+ Args:
+ row_set: the row set from the request
+ last_seen_row_key: the last row key encountered
+ Returns:
+ RowSetPB: the new rowset after adjusting for the last seen key
+ Raises:
+ _RowSetComplete: if there are no rows left to process after the revision"""
+ if row_set is None or (not row_set.row_ranges and (not row_set.row_keys)):
+ last_seen = last_seen_row_key
+ return RowSetPB(row_ranges=[RowRangePB(start_key_open=last_seen)])
+ adjusted_keys: list[bytes] = [
+ k for k in row_set.row_keys if k > last_seen_row_key
+ ]
+ adjusted_ranges: list[RowRangePB] = []
+ for row_range in row_set.row_ranges:
+ end_key = row_range.end_key_closed or row_range.end_key_open or None
+ if end_key is None or end_key > last_seen_row_key:
+ new_range = RowRangePB(row_range)
+ start_key = row_range.start_key_closed or row_range.start_key_open
+ if start_key is None or start_key <= last_seen_row_key:
+ new_range.start_key_open = last_seen_row_key
+ adjusted_ranges.append(new_range)
+ if len(adjusted_keys) == 0 and len(adjusted_ranges) == 0:
+ raise _RowSetComplete()
+ return RowSetPB(row_keys=adjusted_keys, row_ranges=adjusted_ranges)
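+
+# Illustrative sketch, not part of the generated module: how a row set is
+# revised before a retry. The keys and range bounds below are hypothetical.
+#
+#   original = RowSetPB(
+#       row_keys=[b"a", b"m", b"z"],
+#       row_ranges=[RowRangePB(start_key_closed=b"a", end_key_open=b"q")],
+#   )
+#   revised = _ReadRowsOperation._revise_request_rowset(original, b"m")
+#   # revised.row_keys == [b"z"], and the surviving range is reopened as
+#   # start_key_open=b"m", end_key_open=b"q", so rows up to and including
+#   # b"m" are not requested again.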
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_swappable_channel.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_swappable_channel.py
new file mode 100644
index 000000000000..78ba129d98c5
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_swappable_channel.py
@@ -0,0 +1,96 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This file is automatically generated by CrossSync. Do not edit manually.
+
+from __future__ import annotations
+from typing import Callable
+from grpc import ChannelConnectivity
+from grpc import Channel
+
+
+class _WrappedChannel(Channel):
+ """
+ A wrapper around a gRPC channel. All methods are passed
+ through to the underlying channel.
+ """
+
+ def __init__(self, channel: Channel):
+ self._channel = channel
+
+ def unary_unary(self, *args, **kwargs):
+ return self._channel.unary_unary(*args, **kwargs)
+
+ def unary_stream(self, *args, **kwargs):
+ return self._channel.unary_stream(*args, **kwargs)
+
+ def stream_unary(self, *args, **kwargs):
+ return self._channel.stream_unary(*args, **kwargs)
+
+ def stream_stream(self, *args, **kwargs):
+ return self._channel.stream_stream(*args, **kwargs)
+
+ def channel_ready(self):
+ return self._channel.channel_ready()
+
+ def __enter__(self):
+ self._channel.__enter__()
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ return self._channel.__exit__(exc_type, exc_val, exc_tb)
+
+ def get_state(self, try_to_connect: bool = False) -> ChannelConnectivity:
+ return self._channel.get_state(try_to_connect=try_to_connect)
+
+ def wait_for_state_change(self, last_observed_state):
+ return self._channel.wait_for_state_change(last_observed_state)
+
+ def __getattr__(self, name):
+ return getattr(self._channel, name)
+
+ def close(self, grace=None):
+ return self._channel.close()
+
+ def subscribe(self, callback, try_to_connect=False):
+ return self._channel.subscribe(callback, try_to_connect)
+
+ def unsubscribe(self, callback):
+ return self._channel.unsubscribe(callback)
+
+
+class SwappableChannel(_WrappedChannel):
+ """
+ Provides a grpc channel wrapper that allows the internal channel to be swapped out.
+
+ Args:
+ - channel_fn: a nullary function that returns a new channel instance.
+ It should be a partial with all channel configuration arguments built-in
+ """
+
+ def __init__(self, channel_fn: Callable[[], Channel]):
+ self._channel_fn = channel_fn
+ self._channel = channel_fn()
+
+ def create_channel(self) -> Channel:
+ """Create a fresh channel using the stored `channel_fn` partial"""
+ new_channel = self._channel_fn()
+ return new_channel
+
+ def swap_channel(self, new_channel: Channel) -> Channel:
+ """Replace the wrapped channel with a new instance. Typically created using `create_channel`"""
+ old_channel = self._channel
+ self._channel = new_channel
+ return old_channel
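+
+# Illustrative sketch, not part of the generated module: the swap flow the data
+# client's channel refresh relies on. `make_channel` is a hypothetical partial.
+#
+#   from functools import partial
+#   import grpc
+#
+#   make_channel = partial(grpc.insecure_channel, "localhost:8086")
+#   wrapper = SwappableChannel(make_channel)
+#   fresh = wrapper.create_channel()   # build a replacement from the stored partial
+#   old = wrapper.swap_channel(fresh)  # new RPCs now use the fresh channel
+#   old.close()                        # close the previous channel once drained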
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py
new file mode 100644
index 000000000000..a403643f5027
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py
@@ -0,0 +1,1582 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# This file is automatically generated by CrossSync. Do not edit manually.
+
+from __future__ import annotations
+from typing import cast, Any, Callable, Optional, Set, Sequence, TYPE_CHECKING
+import abc
+import time
+import warnings
+import random
+import os
+import concurrent.futures
+from functools import partial
+from grpc import Channel
+from google.cloud.bigtable.data.execute_query.values import ExecuteQueryValueType
+from google.cloud.bigtable.data.execute_query.metadata import (
+ SqlType,
+ _pb_metadata_to_metadata_types,
+)
+from google.cloud.bigtable.data.execute_query._parameters_formatting import (
+ _format_execute_query_params,
+ _to_param_types,
+)
+from google.cloud.bigtable_v2.services.bigtable.transports.base import (
+ DEFAULT_CLIENT_INFO,
+)
+from google.cloud.bigtable_v2.types.bigtable import PingAndWarmRequest
+from google.cloud.bigtable_v2.types.bigtable import SampleRowKeysRequest
+from google.cloud.bigtable_v2.types.bigtable import MutateRowRequest
+from google.cloud.bigtable_v2.types.bigtable import CheckAndMutateRowRequest
+from google.cloud.bigtable_v2.types.bigtable import ReadModifyWriteRowRequest
+from google.cloud.client import ClientWithProject
+from google.cloud.environment_vars import BIGTABLE_EMULATOR
+from google.api_core import retry as retries
+from google.api_core.exceptions import DeadlineExceeded
+from google.api_core.exceptions import ServiceUnavailable
+from google.api_core.exceptions import Aborted
+from google.protobuf.message import Message
+from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper
+import google.auth.credentials
+import google.auth._default
+from google.api_core import client_options as client_options_lib
+from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT
+from google.cloud.bigtable.data.row import Row
+from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery
+from google.cloud.bigtable.data.exceptions import FailedQueryShardError
+from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup
+from google.cloud.bigtable.data._helpers import TABLE_DEFAULT, _align_timeouts
+from google.cloud.bigtable.data._helpers import _WarmedInstanceKey
+from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT
+from google.cloud.bigtable.data._helpers import _retry_exception_factory
+from google.cloud.bigtable.data._helpers import _validate_timeouts
+from google.cloud.bigtable.data._helpers import _get_error_type
+from google.cloud.bigtable.data._helpers import _get_retryable_errors
+from google.cloud.bigtable.data._helpers import _get_timeouts
+from google.cloud.bigtable.data._helpers import _attempt_timeout_generator
+from google.cloud.bigtable.data.mutations import Mutation, RowMutationEntry
+from google.cloud.bigtable.data.read_modify_write_rules import ReadModifyWriteRule
+from google.cloud.bigtable.data.row_filters import RowFilter
+from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter
+from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter
+from google.cloud.bigtable.data.row_filters import RowFilterChain
+from google.cloud.bigtable.data._cross_sync import CrossSync
+from typing import Iterable
+from grpc import insecure_channel
+from grpc import intercept_channel
+from google.cloud.bigtable_v2.services.bigtable.transports import (
+ BigtableGrpcTransport as TransportType,
+)
+from google.cloud.bigtable_v2.services.bigtable import BigtableClient as GapicClient
+from google.cloud.bigtable.data._sync_autogen.mutations_batcher import _MB_SIZE
+from google.cloud.bigtable.data._sync_autogen._swappable_channel import (
+ SwappableChannel as SwappableChannelType,
+)
+from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import (
+ BigtableMetricsInterceptor as MetricsInterceptorType,
+)
+
+if TYPE_CHECKING:
+ from google.cloud.bigtable.data._helpers import RowKeySamples
+ from google.cloud.bigtable.data._helpers import ShardedQuery
+ from google.cloud.bigtable.data._sync_autogen.mutations_batcher import (
+ MutationsBatcher,
+ )
+ from google.cloud.bigtable.data.execute_query._sync_autogen.execute_query_iterator import (
+ ExecuteQueryIterator,
+ )
+
+
+@CrossSync._Sync_Impl.add_mapping_decorator("DataClient")
+class BigtableDataClient(ClientWithProject):
+ def __init__(
+ self,
+ *,
+ project: str | None = None,
+ credentials: google.auth.credentials.Credentials | None = None,
+ client_options: dict[str, Any]
+ | "google.api_core.client_options.ClientOptions"
+ | None = None,
+ **kwargs,
+ ):
+ """Create a client instance for the Bigtable Data API
+
+
+
+ Args:
+ project: the project which the client acts on behalf of.
+ If not passed, falls back to the default inferred
+ from the environment.
+ credentials:
+ The OAuth2 Credentials to use for this
+ client. If not passed (and if no ``_http`` object is
+ passed), falls back to the default inferred from the
+ environment.
+ client_options:
+ Client options used to set user options
+ on the client. API Endpoint should be set through client_options.
+ Raises:
+ ValueError: if the configured universe domain does not match the
+ universe domain found in the credentials
+ """
+ if "pool_size" in kwargs:
+ warnings.warn("pool_size no longer supported")
+ self.client_info = DEFAULT_CLIENT_INFO
+ self.client_info.client_library_version = self._client_version()
+ if type(client_options) is dict:
+ client_options = client_options_lib.from_dict(client_options)
+ client_options = cast(
+ Optional[client_options_lib.ClientOptions], client_options
+ )
+ self._emulator_host = os.getenv(BIGTABLE_EMULATOR)
+ if self._emulator_host is not None:
+ warnings.warn(
+ "Connecting to Bigtable emulator at {}".format(self._emulator_host),
+ RuntimeWarning,
+ stacklevel=2,
+ )
+ if credentials is None:
+ credentials = google.auth.credentials.AnonymousCredentials()
+ if project is None:
+ project = _DEFAULT_BIGTABLE_EMULATOR_CLIENT
+ self._metrics_interceptor = MetricsInterceptorType()
+ ClientWithProject.__init__(
+ self,
+ credentials=credentials,
+ project=project,
+ client_options=client_options,
+ )
+ self._gapic_client = GapicClient(
+ credentials=credentials,
+ client_options=client_options,
+ client_info=self.client_info,
+ transport=lambda *args, **kwargs: TransportType(
+ *args, **kwargs, channel=self._build_grpc_channel
+ ),
+ )
+ if (
+ credentials
+ and credentials.universe_domain != self.universe_domain
+ and (self._emulator_host is None)
+ ):
+ raise ValueError(
+ f"The configured universe domain ({self.universe_domain}) does not match the universe domain found in the credentials ({self._credentials.universe_domain}). If you haven't configured the universe domain explicitly, `googleapis.com` is the default."
+ )
+ self._is_closed = CrossSync._Sync_Impl.Event()
+ self.transport = cast(TransportType, self._gapic_client.transport)
+ self._active_instances: Set[_WarmedInstanceKey] = set()
+ self._instance_owners: dict[_WarmedInstanceKey, Set[int]] = {}
+ self._channel_init_time = time.monotonic()
+ self._channel_refresh_task: CrossSync._Sync_Impl.Task[None] | None = None
+ self._executor: concurrent.futures.ThreadPoolExecutor | None = (
+ concurrent.futures.ThreadPoolExecutor()
+ if not CrossSync._Sync_Impl.is_async
+ else None
+ )
+ if self._emulator_host is None:
+ try:
+ self._start_background_channel_refresh()
+ except RuntimeError:
+ warnings.warn(
+ f"{self.__class__.__name__} should be started in an asyncio event loop. Channel refresh will not be started",
+ RuntimeWarning,
+ stacklevel=2,
+ )
+
+ def _build_grpc_channel(self, *args, **kwargs) -> SwappableChannelType:
+ """This method is called by the gapic transport to create a grpc channel.
+
+ The init arguments passed down are captured in a partial used by SwappableChannel
+ to create new channel instances in the future, as part of the channel refresh logic.
+
+ Emulators always use an insecure channel
+
+ Args:
+ - *args: positional arguments passed by the gapic layer to create a new channel with
+ - **kwargs: keyword arguments passed by the gapic layer to create a new channel with
+ Returns:
+ a custom wrapped swappable channel"""
+ create_channel_fn: Callable[[], Channel]
+ if self._emulator_host is not None:
+ create_channel_fn = partial(insecure_channel, self._emulator_host)
+ else:
+
+ def sync_create_channel_fn():
+ return intercept_channel(
+ TransportType.create_channel(*args, **kwargs),
+ self._metrics_interceptor,
+ )
+
+ create_channel_fn = sync_create_channel_fn
+ new_channel = SwappableChannelType(create_channel_fn)
+ return new_channel
+
+ @property
+ def universe_domain(self) -> str:
+ """Return the universe domain used by the client instance.
+
+ Returns:
+ str: The universe domain used by the client instance."""
+ return self._gapic_client.universe_domain
+
+ @property
+ def api_endpoint(self) -> str:
+ """Return the API endpoint used by the client instance.
+
+ Returns:
+ str: The API endpoint used by the client instance."""
+ return self._gapic_client.api_endpoint
+
+ @staticmethod
+ def _client_version() -> str:
+ """Helper function to return the client version string for this client"""
+ version_str = f"{google.cloud.bigtable.__version__}-data"
+ return version_str
+
+ def _start_background_channel_refresh(self) -> None:
+ """Starts a background task to ping and warm grpc channel
+
+ Raises:
+ None"""
+ if (
+ not self._channel_refresh_task
+ and (not self._emulator_host)
+ and (not self._is_closed.is_set())
+ ):
+ CrossSync._Sync_Impl.verify_async_event_loop()
+ self._channel_refresh_task = CrossSync._Sync_Impl.create_task(
+ self._manage_channel,
+ sync_executor=self._executor,
+ task_name=f"{self.__class__.__name__} channel refresh",
+ )
+
+ def close(self, timeout: float | None = 2.0):
+ """Cancel all background tasks"""
+ self._is_closed.set()
+ if self._channel_refresh_task is not None:
+ self._channel_refresh_task.cancel()
+ CrossSync._Sync_Impl.wait([self._channel_refresh_task], timeout=timeout)
+ self.transport.close()
+ if self._executor:
+ self._executor.shutdown(wait=False)
+ self._channel_refresh_task = None
+
+ def _ping_and_warm_instances(
+ self,
+ instance_key: _WarmedInstanceKey | None = None,
+ channel: Channel | None = None,
+ ) -> list[BaseException | None]:
+ """Prepares the backend for requests on a channel
+
+ Pings each Bigtable instance registered in `_active_instances` on the client
+
+ Args:
+ instance_key: if provided, only warm the instance associated with the key
+ channel: grpc channel to warm. If none, warms `self.transport.grpc_channel`
+ Returns:
+ list[BaseException | None]: sequence of results or exceptions from the ping requests
+ """
+ channel = channel or self.transport.grpc_channel
+ instance_list = (
+ [instance_key] if instance_key is not None else self._active_instances
+ )
+ ping_rpc = channel.unary_unary(
+ "/google.bigtable.v2.Bigtable/PingAndWarm",
+ request_serializer=PingAndWarmRequest.serialize,
+ )
+ partial_list = [
+ partial(
+ ping_rpc,
+ request={"name": instance_name, "app_profile_id": app_profile_id},
+ metadata=[
+ (
+ "x-goog-request-params",
+ f"name={instance_name}&app_profile_id={app_profile_id}",
+ )
+ ],
+ wait_for_ready=True,
+ )
+ for (instance_name, app_profile_id) in instance_list
+ ]
+ result_list = CrossSync._Sync_Impl.gather_partials(
+ partial_list, return_exceptions=True, sync_executor=self._executor
+ )
+ return [r or None for r in result_list]
+
+ def _invalidate_channel_stubs(self):
+ """Helper to reset the cached stubs. Needed when changing out the grpc channel"""
+ self.transport._stubs = {}
+ self.transport._prep_wrapped_messages(self.client_info)
+
+ def _manage_channel(
+ self,
+ refresh_interval_min: float = 60 * 35,
+ refresh_interval_max: float = 60 * 45,
+ grace_period: float = 60 * 10,
+ ) -> None:
+ """Background task that periodically refreshes and warms a grpc channel
+
+ The backend will automatically close channels after 60 minutes, so
+ `refresh_interval` + `grace_period` should be < 60 minutes
+
+ Runs continuously until the client is closed
+
+ Args:
+ refresh_interval_min: minimum interval before initiating refresh
+ process in seconds. Actual interval will be a random value
+ between `refresh_interval_min` and `refresh_interval_max`
+ refresh_interval_max: maximum interval before initiating refresh
+ process in seconds. Actual interval will be a random value
+ between `refresh_interval_min` and `refresh_interval_max`
+ grace_period: time to allow previous channel to serve existing
+ requests before closing, in seconds"""
+ if not isinstance(self.transport.grpc_channel, SwappableChannelType):
+ warnings.warn("Channel does not support auto-refresh.")
+ return
+ super_channel: SwappableChannelType = self.transport.grpc_channel
+ first_refresh = self._channel_init_time + random.uniform(
+ refresh_interval_min, refresh_interval_max
+ )
+ next_sleep = max(first_refresh - time.monotonic(), 0)
+ if next_sleep > 0:
+ self._ping_and_warm_instances(channel=super_channel)
+ while not self._is_closed.is_set():
+ CrossSync._Sync_Impl.event_wait(
+ self._is_closed, next_sleep, async_break_early=False
+ )
+ if self._is_closed.is_set():
+ break
+ start_timestamp = time.monotonic()
+ new_channel = super_channel.create_channel()
+ self._ping_and_warm_instances(channel=new_channel)
+ old_channel = super_channel.swap_channel(new_channel)
+ self._invalidate_channel_stubs()
+ if grace_period:
+ self._is_closed.wait(grace_period)
+ old_channel.close()
+ next_refresh = random.uniform(refresh_interval_min, refresh_interval_max)
+ next_sleep = max(next_refresh - (time.monotonic() - start_timestamp), 0)
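+
+ # Worked check of the documented constraint, for illustration only: with the
+ # defaults above, refresh_interval_max + grace_period = 45 min + 10 min = 55
+ # minutes, which stays under the backend's 60 minute channel lifetime. A
+ # configuration such as refresh_interval_max=55 * 60 with grace_period=10 * 60
+ # would total 65 minutes and risk requests landing on a server-closed channel.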
+
+ def _register_instance(
+ self, instance_id: str, app_profile_id: Optional[str], owner_id: int
+ ) -> None:
+ """Registers an instance with the client, and warms the channel for the instance
+ The client will periodically refresh the grpc channel used to make
+ requests, and new channels will be warmed for each registered instance.
+ Channels will not be refreshed unless at least one instance is registered.
+
+ Args:
+ instance_id: id of the instance to register.
+ app_profile_id: id of the app profile calling the instance.
+ owner_id: integer id of the object owning the instance. Owners will be tracked in
+ _instance_owners, and instances will only be unregistered when all
+ owners call _remove_instance_registration. Can be obtained by calling
+ the `id` identity function, using `id(owner)`"""
+ instance_name = self._gapic_client.instance_path(self.project, instance_id)
+ instance_key = _WarmedInstanceKey(instance_name, app_profile_id)
+ self._instance_owners.setdefault(instance_key, set()).add(owner_id)
+ if instance_key not in self._active_instances:
+ self._active_instances.add(instance_key)
+ if self._channel_refresh_task:
+ self._ping_and_warm_instances(instance_key)
+ else:
+ self._start_background_channel_refresh()
+
+ def _remove_instance_registration(
+ self, instance_id: str, app_profile_id: Optional[str], owner_id: int
+ ) -> bool:
+ """Removes an instance from the client's registered instances, to prevent
+ warming new channels for the instance
+
+ If instance_id is not registered, or is still in use by other tables, returns False
+
+ Args:
+ instance_id: id of the instance to remove
+ app_profile_id: id of the app profile calling the instance.
+ owner_id: integer id of the object owning the instance. Can be
+ obtained by the `id` identity function, using `id(owner)`.
+ Returns:
+ bool: True if instance was removed, else False"""
+ instance_name = self._gapic_client.instance_path(self.project, instance_id)
+ instance_key = _WarmedInstanceKey(instance_name, app_profile_id)
+ owner_list = self._instance_owners.get(instance_key, set())
+ try:
+ owner_list.remove(owner_id)
+ if len(owner_list) == 0:
+ self._active_instances.remove(instance_key)
+ return True
+ except KeyError:
+ return False
+
+ def get_table(self, instance_id: str, table_id: str, *args, **kwargs) -> Table:
+ """Returns a table instance for making data API requests. All arguments are passed
+ directly to the Table constructor.
+
+
+
+ Args:
+ instance_id: The Bigtable instance ID to associate with this client.
+ instance_id is combined with the client's project to fully
+ specify the instance
+ table_id: The ID of the table. table_id is combined with the
+ instance_id and the client's project to fully specify the table
+ app_profile_id: The app profile to associate with requests.
+ https://cloud.google.com/bigtable/docs/app-profiles
+ default_read_rows_operation_timeout: The default timeout for read rows
+ operations, in seconds. If not set, defaults to 600 seconds (10 minutes)
+ default_read_rows_attempt_timeout: The default timeout for individual
+ read rows rpc requests, in seconds. If not set, defaults to 20 seconds
+ default_mutate_rows_operation_timeout: The default timeout for mutate rows
+ operations, in seconds. If not set, defaults to 600 seconds (10 minutes)
+ default_mutate_rows_attempt_timeout: The default timeout for individual
+ mutate rows rpc requests, in seconds. If not set, defaults to 60 seconds
+ default_operation_timeout: The default timeout for all other operations, in
+ seconds. If not set, defaults to 60 seconds
+ default_attempt_timeout: The default timeout for all other individual rpc
+ requests, in seconds. If not set, defaults to 20 seconds
+ default_read_rows_retryable_errors: a list of errors that will be retried
+ if encountered during read_rows and related operations.
+ Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted)
+ default_mutate_rows_retryable_errors: a list of errors that will be retried
+ if encountered during mutate_rows and related operations.
+ Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable)
+ default_retryable_errors: a list of errors that will be retried if
+ encountered during all other operations.
+ Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable)
+ Returns:
+ Table: a table instance for making data API requests
+ Raises:
+ None"""
+ return Table(self, instance_id, table_id, *args, **kwargs)
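+
+ # Illustrative sketch, for documentation only: typical use of the sync client
+ # surface. The project, instance, and table ids below are hypothetical.
+ #
+ #   with BigtableDataClient(project="my-project") as client:
+ #       table = client.get_table("my-instance", "my-table")
+ #       row = table.read_row(b"row-key")
+ #       if row is not None:
+ #           print(row.cells)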
+
+ def get_authorized_view(
+ self, instance_id: str, table_id: str, authorized_view_id: str, *args, **kwargs
+ ) -> AuthorizedView:
+ """Returns an authorized view instance for making data API requests. All arguments are passed
+ directly to the AuthorizedView constructor.
+
+
+
+ Args:
+ instance_id: The Bigtable instance ID to associate with this client.
+ instance_id is combined with the client's project to fully
+ specify the instance
+ table_id: The ID of the table. table_id is combined with the
+ instance_id and the client's project to fully specify the table
+ authorized_view_id: The id for the authorized view to use for requests
+ app_profile_id: The app profile to associate with requests.
+ https://cloud.google.com/bigtable/docs/app-profiles
+ default_read_rows_operation_timeout: The default timeout for read rows
+ operations, in seconds. If not set, defaults to Table's value
+ default_read_rows_attempt_timeout: The default timeout for individual
+ read rows rpc requests, in seconds. If not set, defaults to Table's value
+ default_mutate_rows_operation_timeout: The default timeout for mutate rows
+ operations, in seconds. If not set, defaults to Table's value
+ default_mutate_rows_attempt_timeout: The default timeout for individual
+ mutate rows rpc requests, in seconds. If not set, defaults to Table's value
+ default_operation_timeout: The default timeout for all other operations, in
+ seconds. If not set, defaults to Table's value
+ default_attempt_timeout: The default timeout for all other individual rpc
+ requests, in seconds. If not set, defaults to Table's value
+ default_read_rows_retryable_errors: a list of errors that will be retried
+ if encountered during read_rows and related operations. If not set,
+ defaults to Table's value
+ default_mutate_rows_retryable_errors: a list of errors that will be retried
+ if encountered during mutate_rows and related operations. If not set,
+ defaults to Table's value
+ default_retryable_errors: a list of errors that will be retried if
+ encountered during all other operations. If not set, defaults to
+ Table's value
+ Returns:
+ AuthorizedView: an authorized view instance for making data API requests
+ Raises:
+ None"""
+ return CrossSync._Sync_Impl.AuthorizedView(
+ self, instance_id, table_id, authorized_view_id, *args, **kwargs
+ )
+
+ def execute_query(
+ self,
+ query: str,
+ instance_id: str,
+ *,
+ parameters: dict[str, ExecuteQueryValueType] | None = None,
+ parameter_types: dict[str, SqlType.Type] | None = None,
+ app_profile_id: str | None = None,
+ operation_timeout: float = 600,
+ attempt_timeout: float | None = 20,
+ retryable_errors: Sequence[type[Exception]] = (
+ DeadlineExceeded,
+ ServiceUnavailable,
+ Aborted,
+ ),
+ prepare_operation_timeout: float = 60,
+ prepare_attempt_timeout: float | None = 20,
+ prepare_retryable_errors: Sequence[type[Exception]] = (
+ DeadlineExceeded,
+ ServiceUnavailable,
+ ),
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+ ) -> "ExecuteQueryIterator":
+ """Executes an SQL query on an instance.
+ Returns an iterator to asynchronously stream back columns from selected rows.
+
+ Failed requests within operation_timeout will be retried based on the
+ retryable_errors list until operation_timeout is reached.
+
+ Note that this makes two requests, one to ``PrepareQuery`` and one to ``ExecuteQuery``.
+ These have separate retry configurations. ``ExecuteQuery`` is where the bulk of the
+ work happens.
+
+ Args:
+ query: Query to be run on Bigtable instance. The query can use ``@param``
+ placeholders to use parameter interpolation on the server. Values for all
+ parameters should be provided in ``parameters``. Types of parameters are
+ inferred but should be provided in ``parameter_types`` if the inference is
+ not possible (i.e. when value can be None, an empty list or an empty dict).
+ instance_id: The Bigtable instance ID to perform the query on.
+ instance_id is combined with the client's project to fully
+ specify the instance.
+ parameters: Dictionary with values for all parameters used in the ``query``.
+ parameter_types: Dictionary with types of parameters used in the ``query``.
+ Required to contain entries only for parameters whose type cannot be
+ detected automatically (i.e. the value can be None, an empty list or
+ an empty dict).
+ app_profile_id: The app profile to associate with requests.
+ https://cloud.google.com/bigtable/docs/app-profiles
+ operation_timeout: the time budget for the entire executeQuery operation, in seconds.
+ Failed requests will be retried within the budget.
+ Defaults to 600 seconds.
+ attempt_timeout: the time budget for an individual executeQuery network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+ Defaults to 20 seconds.
+ If None, defaults to operation_timeout.
+ retryable_errors: a list of errors that will be retried if encountered during executeQuery.
+ Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted)
+ prepare_operation_timeout: the time budget for the entire prepareQuery operation, in seconds.
+ Failed requests will be retried within the budget.
+ Defaults to 60 seconds.
+ prepare_attempt_timeout: the time budget for an individual prepareQuery network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+ Defaults to 20 seconds.
+ If None, defaults to prepare_operation_timeout.
+ prepare_retryable_errors: a list of errors that will be retried if encountered during prepareQuery.
+ Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable)
+ column_info: (Optional) A dictionary mapping column names to Protobuf message classes or EnumTypeWrapper objects.
+ This dictionary provides the necessary type information for deserializing PROTO and
+ ENUM column values from the query results. When an entry is provided
+ for a PROTO or ENUM column, the client library will attempt to deserialize the raw data.
+
+ - For PROTO columns: The value in the dictionary should be the
+ Protobuf Message class (e.g., ``my_pb2.MyMessage``).
+ - For ENUM columns: The value should be the Protobuf EnumTypeWrapper
+ object (e.g., ``my_pb2.MyEnum``).
+
+ Example::
+
+ import my_pb2
+
+ column_info = {
+ "my_proto_column": my_pb2.MyMessage,
+ "my_enum_column": my_pb2.MyEnum
+ }
+
+ If ``column_info`` is not provided, or if a specific column name is not found
+ in the dictionary:
+
+ - PROTO columns will be returned as raw bytes.
+ - ENUM columns will be returned as integers.
+
+ Note for Nested PROTO or ENUM Fields:
+
+ To specify types for PROTO or ENUM fields within STRUCTs or MAPs, use a dot-separated
+ path from the top-level column name.
+
+ - For STRUCTs: ``struct_column_name.field_name``
+ - For MAPs: ``map_column_name.key`` or ``map_column_name.value`` to specify types
+ for the map keys or values, respectively.
+
+ Example::
+
+ import my_pb2
+
+ column_info = {
+ # Top-level column
+ "my_proto_column": my_pb2.MyMessage,
+ "my_enum_column": my_pb2.MyEnum,
+
+ # Nested field in a STRUCT column named 'my_struct'
+ "my_struct.nested_proto_field": my_pb2.OtherMessage,
+ "my_struct.nested_enum_field": my_pb2.AnotherEnum,
+
+ # Nested field in a MAP column named 'my_map'
+ "my_map.key": my_pb2.MapKeyEnum, # If map keys were enums
+ "my_map.value": my_pb2.MapValueMessage,
+
+ # PROTO field inside a STRUCT, where the STRUCT is the value in a MAP column
+ "struct_map.value.nested_proto_field": my_pb2.DeeplyNestedProto,
+ "struct_map.value.nested_enum_field": my_pb2.DeeplyNestedEnum
+ }
+
+ Returns:
+ ExecuteQueryIterator: an asynchronous iterator that yields rows returned by the query
+ Raises:
+ google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+ will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+ from any retries that failed
+ google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+ google.cloud.bigtable.data.exceptions.ParameterTypeInferenceFailed: Raised if
+ a parameter is passed without an explicit type, and the type cannot be inferred
+ google.protobuf.message.DecodeError: raised if the deserialization of a PROTO/ENUM value fails.
+ """
+ instance_name = self._gapic_client.instance_path(self.project, instance_id)
+ converted_param_types = _to_param_types(parameters, parameter_types)
+ prepare_request = {
+ "instance_name": instance_name,
+ "query": query,
+ "app_profile_id": app_profile_id,
+ "param_types": converted_param_types,
+ "proto_format": {},
+ }
+ prepare_predicate = retries.if_exception_type(
+ *[_get_error_type(e) for e in prepare_retryable_errors]
+ )
+ (prepare_operation_timeout, prepare_attempt_timeout) = _align_timeouts(
+ prepare_operation_timeout, prepare_attempt_timeout
+ )
+ prepare_sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60)
+ target = partial(
+ self._gapic_client.prepare_query,
+ request=prepare_request,
+ timeout=prepare_attempt_timeout,
+ retry=None,
+ )
+ prepare_result = CrossSync._Sync_Impl.retry_target(
+ target,
+ prepare_predicate,
+ prepare_sleep_generator,
+ prepare_operation_timeout,
+ exception_factory=_retry_exception_factory,
+ )
+ prepare_metadata = _pb_metadata_to_metadata_types(prepare_result.metadata)
+ retryable_excs = [_get_error_type(e) for e in retryable_errors]
+ pb_params = _format_execute_query_params(parameters, parameter_types)
+ request_body = {
+ "instance_name": instance_name,
+ "app_profile_id": app_profile_id,
+ "prepared_query": prepare_result.prepared_query,
+ "params": pb_params,
+ }
+ (operation_timeout, attempt_timeout) = _align_timeouts(
+ operation_timeout, attempt_timeout
+ )
+ return CrossSync._Sync_Impl.ExecuteQueryIterator(
+ self,
+ instance_id,
+ app_profile_id,
+ request_body,
+ prepare_metadata,
+ attempt_timeout,
+ operation_timeout,
+ retryable_excs=retryable_excs,
+ column_info=column_info,
+ )
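+
+ # Illustrative sketch, for documentation only: a parameterized query against a
+ # hypothetical instance; the table, column family, and column names are
+ # assumptions, not values from this module.
+ #
+ #   results = client.execute_query(
+ #       "SELECT _key, cf['greeting'] AS greeting FROM `my-table` WHERE _key = @key",
+ #       "my-instance",
+ #       parameters={"key": b"row-key"},
+ #   )
+ #   for result_row in results:
+ #       print(result_row["greeting"])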
+
+ def __enter__(self):
+ self._start_background_channel_refresh()
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
+ self._gapic_client.__exit__(exc_type, exc_val, exc_tb)
+
+
+class _DataApiTarget(abc.ABC):
+ """
+ Abstract class containing API surface for BigtableDataClient. Should not be created directly
+
+ Can be instantiated as a Table or an AuthorizedView
+ """
+
+ def __init__(
+ self,
+ client: BigtableDataClient,
+ instance_id: str,
+ table_id: str,
+ app_profile_id: str | None = None,
+ *,
+ default_read_rows_operation_timeout: float = 600,
+ default_read_rows_attempt_timeout: float | None = 20,
+ default_mutate_rows_operation_timeout: float = 600,
+ default_mutate_rows_attempt_timeout: float | None = 60,
+ default_operation_timeout: float = 60,
+ default_attempt_timeout: float | None = 20,
+ default_read_rows_retryable_errors: Sequence[type[Exception]] = (
+ DeadlineExceeded,
+ ServiceUnavailable,
+ Aborted,
+ ),
+ default_mutate_rows_retryable_errors: Sequence[type[Exception]] = (
+ DeadlineExceeded,
+ ServiceUnavailable,
+ ),
+ default_retryable_errors: Sequence[type[Exception]] = (
+ DeadlineExceeded,
+ ServiceUnavailable,
+ ),
+ ):
+ """Initialize a Table instance
+
+
+
+ Args:
+ instance_id: The Bigtable instance ID to associate with this client.
+ instance_id is combined with the client's project to fully
+ specify the instance
+ table_id: The ID of the table. table_id is combined with the
+ instance_id and the client's project to fully specify the table
+ app_profile_id: The app profile to associate with requests.
+ https://cloud.google.com/bigtable/docs/app-profiles
+ default_read_rows_operation_timeout: The default timeout for read rows
+ operations, in seconds. If not set, defaults to 600 seconds (10 minutes)
+ default_read_rows_attempt_timeout: The default timeout for individual
+ read rows rpc requests, in seconds. If not set, defaults to 20 seconds
+ default_mutate_rows_operation_timeout: The default timeout for mutate rows
+ operations, in seconds. If not set, defaults to 600 seconds (10 minutes)
+ default_mutate_rows_attempt_timeout: The default timeout for individual
+ mutate rows rpc requests, in seconds. If not set, defaults to 60 seconds
+ default_operation_timeout: The default timeout for all other operations, in
+ seconds. If not set, defaults to 60 seconds
+ default_attempt_timeout: The default timeout for all other individual rpc
+ requests, in seconds. If not set, defaults to 20 seconds
+ default_read_rows_retryable_errors: a list of errors that will be retried
+ if encountered during read_rows and related operations.
+ Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted)
+ default_mutate_rows_retryable_errors: a list of errors that will be retried
+ if encountered during mutate_rows and related operations.
+ Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable)
+ default_retryable_errors: a list of errors that will be retried if
+ encountered during all other operations.
+ Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable)
+ Raises:
+ None"""
+ _validate_timeouts(
+ default_operation_timeout, default_attempt_timeout, allow_none=True
+ )
+ _validate_timeouts(
+ default_read_rows_operation_timeout,
+ default_read_rows_attempt_timeout,
+ allow_none=True,
+ )
+ _validate_timeouts(
+ default_mutate_rows_operation_timeout,
+ default_mutate_rows_attempt_timeout,
+ allow_none=True,
+ )
+ self.client = client
+ self.instance_id = instance_id
+ self.instance_name = self.client._gapic_client.instance_path(
+ self.client.project, instance_id
+ )
+ self.table_id = table_id
+ self.table_name = self.client._gapic_client.table_path(
+ self.client.project, instance_id, table_id
+ )
+ self.app_profile_id: str | None = app_profile_id
+ self.default_operation_timeout: float = default_operation_timeout
+ self.default_attempt_timeout: float | None = default_attempt_timeout
+ self.default_read_rows_operation_timeout: float = (
+ default_read_rows_operation_timeout
+ )
+ self.default_read_rows_attempt_timeout: float | None = (
+ default_read_rows_attempt_timeout
+ )
+ self.default_mutate_rows_operation_timeout: float = (
+ default_mutate_rows_operation_timeout
+ )
+ self.default_mutate_rows_attempt_timeout: float | None = (
+ default_mutate_rows_attempt_timeout
+ )
+ self.default_read_rows_retryable_errors: Sequence[type[Exception]] = (
+ default_read_rows_retryable_errors or ()
+ )
+ self.default_mutate_rows_retryable_errors: Sequence[type[Exception]] = (
+ default_mutate_rows_retryable_errors or ()
+ )
+ self.default_retryable_errors: Sequence[type[Exception]] = (
+ default_retryable_errors or ()
+ )
+ try:
+ self._register_instance_future = CrossSync._Sync_Impl.create_task(
+ self.client._register_instance,
+ self.instance_id,
+ self.app_profile_id,
+ id(self),
+ sync_executor=self.client._executor,
+ )
+ except RuntimeError as e:
+ raise RuntimeError(
+ f"{self.__class__.__name__} must be created within an async event loop context."
+ ) from e
+
+ @property
+ @abc.abstractmethod
+ def _request_path(self) -> dict[str, str]:
+ """Used to populate table_name or authorized_view_name for rpc requests, depending on the subclass
+
+ Unimplemented in base class"""
+ raise NotImplementedError
+
+ def __str__(self):
+ path_str = list(self._request_path.values())[0] if self._request_path else ""
+ return f"{self.__class__.__name__}<{path_str!r}>"
+
+ def read_rows_stream(
+ self,
+ query: ReadRowsQuery,
+ *,
+ operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ retryable_errors: Sequence[type[Exception]]
+ | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ ) -> Iterable[Row]:
+ """Read a set of rows from the table, based on the specified query.
+ Returns an iterator to asynchronously stream back row data.
+
+ Failed requests within operation_timeout will be retried based on the
+ retryable_errors list until operation_timeout is reached.
+
+ Args:
+ query: contains details about which rows to return
+ operation_timeout: the time budget for the entire operation, in seconds.
+ Failed requests will be retried within the budget.
+ Defaults to the Table's default_read_rows_operation_timeout
+ attempt_timeout: the time budget for an individual network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+ Defaults to the Table's default_read_rows_attempt_timeout.
+ If None, defaults to operation_timeout.
+ retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_read_rows_retryable_errors
+ Returns:
+ Iterable[Row]: an asynchronous iterator that yields rows returned by the query
+ Raises:
+ google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+ will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+ from any retries that failed
+ google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+ """
+ (operation_timeout, attempt_timeout) = _get_timeouts(
+ operation_timeout, attempt_timeout, self
+ )
+ retryable_excs = _get_retryable_errors(retryable_errors, self)
+ row_merger = CrossSync._Sync_Impl._ReadRowsOperation(
+ query,
+ self,
+ operation_timeout=operation_timeout,
+ attempt_timeout=attempt_timeout,
+ retryable_exceptions=retryable_excs,
+ )
+ return row_merger.start_operation()
+
+ def read_rows(
+ self,
+ query: ReadRowsQuery,
+ *,
+ operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ retryable_errors: Sequence[type[Exception]]
+ | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ ) -> list[Row]:
+ """Read a set of rows from the table, based on the specified query.
+ Returns results as a list of Row objects when the request is complete.
+ For streamed results, use read_rows_stream.
+
+ Failed requests within operation_timeout will be retried based on the
+ retryable_errors list until operation_timeout is reached.
+
+ Args:
+ query: contains details about which rows to return
+ operation_timeout: the time budget for the entire operation, in seconds.
+ Failed requests will be retried within the budget.
+ Defaults to the Table's default_read_rows_operation_timeout
+ attempt_timeout: the time budget for an individual network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+ Defaults to the Table's default_read_rows_attempt_timeout.
+ If None, defaults to operation_timeout.
+ retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_read_rows_retryable_errors.
+ Returns:
+ list[Row]: a list of Rows returned by the query
+ Raises:
+ google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+ will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+ from any retries that failed
+ google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+ """
+ row_generator = self.read_rows_stream(
+ query,
+ operation_timeout=operation_timeout,
+ attempt_timeout=attempt_timeout,
+ retryable_errors=retryable_errors,
+ )
+ return [row for row in row_generator]
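+
+ # Illustrative sketch, for documentation only: reading a hypothetical key range
+ # with the sync surface.
+ #
+ #   from google.cloud.bigtable.data.read_rows_query import RowRange
+ #
+ #   query = ReadRowsQuery(row_ranges=RowRange(start_key=b"user#000", end_key=b"user#999"))
+ #   for row in table.read_rows(query):
+ #       for cell in row.cells:
+ #           print(cell.family, cell.qualifier, cell.value)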
+
+ def read_row(
+ self,
+ row_key: str | bytes,
+ *,
+ row_filter: RowFilter | None = None,
+ operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ retryable_errors: Sequence[type[Exception]]
+ | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ ) -> Row | None:
+ """Read a single row from the table, based on the specified key.
+
+ Failed requests within operation_timeout will be retried based on the
+ retryable_errors list until operation_timeout is reached.
+
+ Args:
+ row_key: the key of the row to read
+ row_filter: an optional filter to apply to the returned row
+ operation_timeout: the time budget for the entire operation, in seconds.
+ Failed requests will be retried within the budget.
+ Defaults to the Table's default_read_rows_operation_timeout
+ attempt_timeout: the time budget for an individual network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+ Defaults to the Table's default_read_rows_attempt_timeout.
+ If None, defaults to operation_timeout.
+ retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_read_rows_retryable_errors.
+ Returns:
+ Row | None: a Row object if the row exists, otherwise None
+ Raises:
+ google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+ will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+ from any retries that failed
+ google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+ """
+ if row_key is None:
+ raise ValueError("row_key must be string or bytes")
+ query = ReadRowsQuery(row_keys=row_key, row_filter=row_filter, limit=1)
+ results = self.read_rows(
+ query,
+ operation_timeout=operation_timeout,
+ attempt_timeout=attempt_timeout,
+ retryable_errors=retryable_errors,
+ )
+ if len(results) == 0:
+ return None
+ return results[0]
+
+ def read_rows_sharded(
+ self,
+ sharded_query: ShardedQuery,
+ *,
+ operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ retryable_errors: Sequence[type[Exception]]
+ | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ ) -> list[Row]:
+ """Runs a sharded query in parallel, then return the results in a single list.
+ Results will be returned in the order of the input queries.
+
+ This function is intended to be run on the results on a query.shard() call.
+ For example::
+
+ table_shard_keys = await table.sample_row_keys()
+ query = ReadRowsQuery(...)
+ shard_queries = query.shard(table_shard_keys)
+ results = await table.read_rows_sharded(shard_queries)
+
+ Args:
+ sharded_query: a sharded query to execute
+ operation_timeout: the time budget for the entire operation, in seconds.
+ Failed requests will be retried within the budget.
+ Defaults to the Table's default_read_rows_operation_timeout
+ attempt_timeout: the time budget for an individual network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+ Defaults to the Table's default_read_rows_attempt_timeout.
+ If None, defaults to operation_timeout.
+ retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_read_rows_retryable_errors.
+ Returns:
+ list[Row]: a list of Rows returned by the query
+ Raises:
+ ShardedReadRowsExceptionGroup: if any of the queries failed
+ ValueError: if the query_list is empty"""
+ if not sharded_query:
+ raise ValueError("empty sharded_query")
+ (operation_timeout, attempt_timeout) = _get_timeouts(
+ operation_timeout, attempt_timeout, self
+ )
+ rpc_timeout_generator = _attempt_timeout_generator(
+ operation_timeout, operation_timeout
+ )
+ concurrency_sem = CrossSync._Sync_Impl.Semaphore(_CONCURRENCY_LIMIT)
+
+ def read_rows_with_semaphore(query):
+ with concurrency_sem:
+ shard_timeout = next(rpc_timeout_generator)
+ if shard_timeout <= 0:
+ raise DeadlineExceeded(
+ "Operation timeout exceeded before starting query"
+ )
+ return self.read_rows(
+ query,
+ operation_timeout=shard_timeout,
+ attempt_timeout=min(attempt_timeout, shard_timeout),
+ retryable_errors=retryable_errors,
+ )
+
+ routine_list = [
+ partial(read_rows_with_semaphore, query) for query in sharded_query
+ ]
+ batch_result = CrossSync._Sync_Impl.gather_partials(
+ routine_list, return_exceptions=True, sync_executor=self.client._executor
+ )
+ error_dict = {}
+ shard_idx = 0
+ results_list = []
+ for result in batch_result:
+ if isinstance(result, Exception):
+ error_dict[shard_idx] = result
+ elif isinstance(result, BaseException):
+ raise result
+ else:
+ results_list.extend(result)
+ shard_idx += 1
+ if error_dict:
+ raise ShardedReadRowsExceptionGroup(
+ [
+ FailedQueryShardError(idx, sharded_query[idx], e)
+ for (idx, e) in error_dict.items()
+ ],
+ results_list,
+ len(sharded_query),
+ )
+ return results_list
+
+ def row_exists(
+ self,
+ row_key: str | bytes,
+ *,
+ operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ retryable_errors: Sequence[type[Exception]]
+ | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+ ) -> bool:
+ """Return a boolean indicating whether the specified row exists in the table.
+ Uses the filters: chain(limit cells per row = 1, strip value)
+
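+ For example, an illustrative sketch (assumes a ``table`` instance and a
+ placeholder row key)::
+
+ if table.row_exists(b"my-row-key"):
+ print("row is present")
+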
+ Args:
+ row_key: the key of the row to check
+ operation_timeout: the time budget for the entire operation, in seconds.
+ Failed requests will be retried within the budget.
+ Defaults to the Table's default_read_rows_operation_timeout
+ attempt_timeout: the time budget for an individual network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+ Defaults to the Table's default_read_rows_attempt_timeout.
+ If None, defaults to operation_timeout.
+ retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_read_rows_retryable_errors.
+ Returns:
+ bool: a bool indicating whether the row exists
+ Raises:
+ google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+ will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+ from any retries that failed
+ google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+ """
+ if row_key is None:
+ raise ValueError("row_key must be string or bytes")
+ strip_filter = StripValueTransformerFilter(flag=True)
+ limit_filter = CellsRowLimitFilter(1)
+ chain_filter = RowFilterChain(filters=[limit_filter, strip_filter])
+ query = ReadRowsQuery(row_keys=row_key, limit=1, row_filter=chain_filter)
+ results = self.read_rows(
+ query,
+ operation_timeout=operation_timeout,
+ attempt_timeout=attempt_timeout,
+ retryable_errors=retryable_errors,
+ )
+ return len(results) > 0
+
+ def sample_row_keys(
+ self,
+ *,
+ operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+ attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+ retryable_errors: Sequence[type[Exception]]
+ | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+ ) -> RowKeySamples:
+ """Return a set of RowKeySamples that delimit contiguous sections of the table of
+ approximately equal size
+
+ RowKeySamples output can be used with ReadRowsQuery.shard() to create a sharded query that
+ can be parallelized across multiple backend nodes read_rows and read_rows_stream
+ requests will call sample_row_keys internally for this purpose when sharding is enabled
+
+ RowKeySamples is simply a type alias for list[tuple[bytes, int]]; a list of
+ row_keys, along with offset positions in the table
+
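+ For example, a minimal sketch (assumes a ``table`` instance)::
+
+ samples = table.sample_row_keys()
+ for row_key, offset_bytes in samples:
+ print(row_key, offset_bytes)
+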
+ Args:
+ operation_timeout: the time budget for the entire operation, in seconds.
+ Failed requests will be retried within the budget.
+ Defaults to the Table's default_operation_timeout
+ attempt_timeout: the time budget for an individual network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+ Defaults to the Table's default_attempt_timeout.
+ If None, defaults to operation_timeout.
+ retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_retryable_errors.
+ Returns:
+ RowKeySamples: a set of RowKeySamples that delimit contiguous sections of the table
+ Raises:
+ google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+ will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+ from any retries that failed
+ google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+ """
+ (operation_timeout, attempt_timeout) = _get_timeouts(
+ operation_timeout, attempt_timeout, self
+ )
+ attempt_timeout_gen = _attempt_timeout_generator(
+ attempt_timeout, operation_timeout
+ )
+ retryable_excs = _get_retryable_errors(retryable_errors, self)
+ predicate = retries.if_exception_type(*retryable_excs)
+ sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60)
+
+ def execute_rpc():
+ results = self.client._gapic_client.sample_row_keys(
+ request=SampleRowKeysRequest(
+ app_profile_id=self.app_profile_id, **self._request_path
+ ),
+ timeout=next(attempt_timeout_gen),
+ retry=None,
+ )
+ return [(s.row_key, s.offset_bytes) for s in results]
+
+ return CrossSync._Sync_Impl.retry_target(
+ execute_rpc,
+ predicate,
+ sleep_generator,
+ operation_timeout,
+ exception_factory=_retry_exception_factory,
+ )
+
+ def mutations_batcher(
+ self,
+ *,
+ flush_interval: float | None = 5,
+ flush_limit_mutation_count: int | None = 1000,
+ flush_limit_bytes: int = 20 * _MB_SIZE,
+ flow_control_max_mutation_count: int = 100000,
+ flow_control_max_bytes: int = 100 * _MB_SIZE,
+ batch_operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+ batch_attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+ batch_retryable_errors: Sequence[type[Exception]]
+ | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+ ) -> "MutationsBatcher":
+ """Returns a new mutations batcher instance.
+
+ Can be used to iteratively add mutations that are flushed as a group,
+ to avoid excess network calls
+
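+ For example, a minimal sketch (assumes a ``table`` instance, an existing column
+ family ``"family"``, and ``RowMutationEntry``/``SetCell`` imported from
+ ``google.cloud.bigtable.data``)::
+
+ with table.mutations_batcher() as batcher:
+ batcher.append(
+ RowMutationEntry(b"my-row-key", SetCell("family", "qualifier", b"value"))
+ )
+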
+ Args:
+ flush_interval: Automatically flush every flush_interval seconds. If None,
+ a table default will be used
+ flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count
+ mutations are added across all entries. If None, this limit is ignored.
+ flush_limit_bytes: Flush immediately after flush_limit_bytes bytes are added.
+ flow_control_max_mutation_count: Maximum number of inflight mutations.
+ flow_control_max_bytes: Maximum number of inflight bytes.
+ batch_operation_timeout: timeout for each mutate_rows operation, in seconds.
+ Defaults to the Table's default_mutate_rows_operation_timeout
+ batch_attempt_timeout: timeout for each individual request, in seconds.
+ Defaults to the Table's default_mutate_rows_attempt_timeout.
+ If None, defaults to batch_operation_timeout.
+ batch_retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_mutate_rows_retryable_errors.
+ Returns:
+ MutationsBatcher: a MutationsBatcher context manager that can batch requests
+ """
+ return CrossSync._Sync_Impl.MutationsBatcher(
+ self,
+ flush_interval=flush_interval,
+ flush_limit_mutation_count=flush_limit_mutation_count,
+ flush_limit_bytes=flush_limit_bytes,
+ flow_control_max_mutation_count=flow_control_max_mutation_count,
+ flow_control_max_bytes=flow_control_max_bytes,
+ batch_operation_timeout=batch_operation_timeout,
+ batch_attempt_timeout=batch_attempt_timeout,
+ batch_retryable_errors=batch_retryable_errors,
+ )
+
+ def mutate_row(
+ self,
+ row_key: str | bytes,
+ mutations: list[Mutation] | Mutation,
+ *,
+ operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+ attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+ retryable_errors: Sequence[type[Exception]]
+ | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+ ):
+ """Mutates a row atomically.
+
+ Cells already present in the row are left unchanged unless explicitly changed
+ by ``mutation``.
+
+ Idempotent operations (i.e., all mutations have an explicit timestamp) will be
+ retried on server failure. Non-idempotent operations will not.
+
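+ For example, a minimal sketch (assumes a ``table`` instance, an existing column
+ family ``"family"``, and ``SetCell`` imported from ``google.cloud.bigtable.data``)::
+
+ table.mutate_row(b"my-row-key", SetCell("family", "qualifier", b"new-value"))
+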
+ Args:
+ row_key: the row to apply mutations to
+ mutations: the set of mutations to apply to the row
+ operation_timeout: the time budget for the entire operation, in seconds.
+ Failed requests will be retried within the budget.
+ Defaults to the Table's default_operation_timeout
+ attempt_timeout: the time budget for an individual network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+ Defaults to the Table's default_attempt_timeout.
+ If None, defaults to operation_timeout.
+ retryable_errors: a list of errors that will be retried if encountered.
+ Only idempotent mutations will be retried. Defaults to the Table's
+ default_retryable_errors.
+ Raises:
+ google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+ will be chained with a RetryExceptionGroup containing all
+ GoogleAPIError exceptions from any retries that failed
+ google.api_core.exceptions.GoogleAPIError: raised on non-idempotent operations that cannot be
+ safely retried.
+ ValueError: if invalid arguments are provided"""
+ (operation_timeout, attempt_timeout) = _get_timeouts(
+ operation_timeout, attempt_timeout, self
+ )
+ if not mutations:
+ raise ValueError("No mutations provided")
+ mutations_list = mutations if isinstance(mutations, list) else [mutations]
+ if all((mutation.is_idempotent() for mutation in mutations_list)):
+ predicate = retries.if_exception_type(
+ *_get_retryable_errors(retryable_errors, self)
+ )
+ else:
+ predicate = retries.if_exception_type()
+ sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60)
+ target = partial(
+ self.client._gapic_client.mutate_row,
+ request=MutateRowRequest(
+ row_key=row_key.encode("utf-8")
+ if isinstance(row_key, str)
+ else row_key,
+ mutations=[mutation._to_pb() for mutation in mutations_list],
+ app_profile_id=self.app_profile_id,
+ **self._request_path,
+ ),
+ timeout=attempt_timeout,
+ retry=None,
+ )
+ return CrossSync._Sync_Impl.retry_target(
+ target,
+ predicate,
+ sleep_generator,
+ operation_timeout,
+ exception_factory=_retry_exception_factory,
+ )
+
+ def bulk_mutate_rows(
+ self,
+ mutation_entries: list[RowMutationEntry],
+ *,
+ operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+ attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+ retryable_errors: Sequence[type[Exception]]
+ | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+ ):
+ """Applies mutations for multiple rows in a single batched request.
+
+ Each individual RowMutationEntry is applied atomically, but separate entries
+ may be applied in arbitrary order (even for entries targeting the same row).
+ In total, the mutation_entries can contain at most 100000 individual mutations
+ across all entries.
+
+ Idempotent entries (i.e., entries with mutations with explicit timestamps)
+ will be retried on failure. Non-idempotent entries will not, and will be
+ reported in a raised exception group.
+
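+ For example, a minimal sketch (assumes a ``table`` instance, an existing column
+ family ``"family"``, and ``RowMutationEntry``/``SetCell`` imported from
+ ``google.cloud.bigtable.data``)::
+
+ entries = [
+ RowMutationEntry(b"row-1", SetCell("family", "qualifier", b"a")),
+ RowMutationEntry(b"row-2", SetCell("family", "qualifier", b"b")),
+ ]
+ table.bulk_mutate_rows(entries)
+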
+ Args:
+ mutation_entries: the batches of mutations to apply
+ Each entry will be applied atomically, but entries will be applied
+ in arbitrary order
+ operation_timeout: the time budget for the entire operation, in seconds.
+ Failed requests will be retried within the budget.
+ Defaults to the Table's default_mutate_rows_operation_timeout
+ attempt_timeout: the time budget for an individual network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+ Defaults to the Table's default_mutate_rows_attempt_timeout.
+ If None, defaults to operation_timeout.
+ retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_mutate_rows_retryable_errors
+ Raises:
+ MutationsExceptionGroup: if one or more mutations fails
+ Contains details about any failed entries in .exceptions
+ ValueError: if invalid arguments are provided"""
+ (operation_timeout, attempt_timeout) = _get_timeouts(
+ operation_timeout, attempt_timeout, self
+ )
+ retryable_excs = _get_retryable_errors(retryable_errors, self)
+ operation = CrossSync._Sync_Impl._MutateRowsOperation(
+ self.client._gapic_client,
+ self,
+ mutation_entries,
+ operation_timeout,
+ attempt_timeout,
+ retryable_exceptions=retryable_excs,
+ )
+ operation.start()
+
+ def check_and_mutate_row(
+ self,
+ row_key: str | bytes,
+ predicate: RowFilter | None,
+ *,
+ true_case_mutations: Mutation | list[Mutation] | None = None,
+ false_case_mutations: Mutation | list[Mutation] | None = None,
+ operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+ ) -> bool:
+ """Mutates a row atomically based on the output of a predicate filter
+
+ Non-idempotent operation: will not be retried
+
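+ For example, a minimal sketch (assumes a ``table`` instance, an existing column
+ family ``"family"``, and ``SetCell`` imported from ``google.cloud.bigtable.data``;
+ a ``None`` predicate checks whether the row contains any cells)::
+
+ was_present = table.check_and_mutate_row(
+ b"my-row-key",
+ None,
+ true_case_mutations=SetCell("family", "q", b"seen-before"),
+ false_case_mutations=SetCell("family", "q", b"first-time"),
+ )
+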
+ Args:
+ row_key: the key of the row to mutate
+ predicate: the filter to be applied to the contents of the specified row.
+ Depending on whether or not any results are yielded,
+ either true_case_mutations or false_case_mutations will be executed.
+ If None, checks that the row contains any values at all.
+ true_case_mutations:
+ Changes to be atomically applied to the specified row if
+ predicate yields at least one cell when
+ applied to row_key. Entries are applied in order,
+ meaning that earlier mutations can be masked by later
+ ones. Must contain at least one entry if
+ false_case_mutations is empty, and at most 100000.
+ false_case_mutations:
+ Changes to be atomically applied to the specified row if
+ predicate_filter does not yield any cells when
+ applied to row_key. Entries are applied in order,
+ meaning that earlier mutations can be masked by later
+ ones. Must contain at least one entry if
+ `true_case_mutations` is empty, and at most 100000.
+ operation_timeout: the time budget for the entire operation, in seconds.
+ Failed requests will not be retried. Defaults to the Table's default_operation_timeout
+ Returns:
+ bool indicating whether the predicate was true or false
+ Raises:
+ google.api_core.exceptions.GoogleAPIError: exceptions from grpc call"""
+ (operation_timeout, _) = _get_timeouts(operation_timeout, None, self)
+ if true_case_mutations is not None and (
+ not isinstance(true_case_mutations, list)
+ ):
+ true_case_mutations = [true_case_mutations]
+ true_case_list = [m._to_pb() for m in true_case_mutations or []]
+ if false_case_mutations is not None and (
+ not isinstance(false_case_mutations, list)
+ ):
+ false_case_mutations = [false_case_mutations]
+ false_case_list = [m._to_pb() for m in false_case_mutations or []]
+ result = self.client._gapic_client.check_and_mutate_row(
+ request=CheckAndMutateRowRequest(
+ true_mutations=true_case_list,
+ false_mutations=false_case_list,
+ predicate_filter=predicate._to_pb() if predicate is not None else None,
+ row_key=row_key.encode("utf-8")
+ if isinstance(row_key, str)
+ else row_key,
+ app_profile_id=self.app_profile_id,
+ **self._request_path,
+ ),
+ timeout=operation_timeout,
+ retry=None,
+ )
+ return result.predicate_matched
+
+ def read_modify_write_row(
+ self,
+ row_key: str | bytes,
+ rules: ReadModifyWriteRule | list[ReadModifyWriteRule],
+ *,
+ operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+ ) -> Row:
+ """Reads and modifies a row atomically according to input ReadModifyWriteRules,
+ and returns the contents of all modified cells
+
+ The new value for the timestamp is the greater of the existing timestamp or
+ the current server time.
+
+ Non-idempotent operation: will not be retried
+
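+ For example, an increment sketch (assumes a ``table`` instance, an existing column
+ family ``"family"``, and ``IncrementRule`` imported from
+ ``google.cloud.bigtable.data.read_modify_write_rules``)::
+
+ row = table.read_modify_write_row(
+ b"my-row-key", IncrementRule("family", "counter", 1)
+ )
+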
+ Args:
+ row_key: the key of the row to apply read/modify/write rules to
+ rules: A rule or set of rules to apply to the row.
+ Rules are applied in order, meaning that earlier rules will affect the
+ results of later ones.
+ operation_timeout: the time budget for the entire operation, in seconds.
+ Failed requests will not be retried.
+ Defaults to the Table's default_operation_timeout.
+ Returns:
+ Row: a Row containing cell data that was modified as part of the operation
+ Raises:
+ google.api_core.exceptions.GoogleAPIError: exceptions from grpc call
+ ValueError: if invalid arguments are provided"""
+ (operation_timeout, _) = _get_timeouts(operation_timeout, None, self)
+ if operation_timeout <= 0:
+ raise ValueError("operation_timeout must be greater than 0")
+ if rules is not None and (not isinstance(rules, list)):
+ rules = [rules]
+ if not rules:
+ raise ValueError("rules must contain at least one item")
+ result = self.client._gapic_client.read_modify_write_row(
+ request=ReadModifyWriteRowRequest(
+ rules=[rule._to_pb() for rule in rules],
+ row_key=row_key.encode("utf-8")
+ if isinstance(row_key, str)
+ else row_key,
+ app_profile_id=self.app_profile_id,
+ **self._request_path,
+ ),
+ timeout=operation_timeout,
+ retry=None,
+ )
+ return Row._from_pb(result.row)
+
+ def close(self):
+ """Called to close the Table instance and release any resources held by it."""
+ if self._register_instance_future:
+ self._register_instance_future.cancel()
+ self.client._remove_instance_registration(
+ self.instance_id, self.app_profile_id, id(self)
+ )
+
+ def __enter__(self):
+ """Implement async context manager protocol
+
+ Ensure registration task has time to run, so that
+ grpc channels will be warmed for the specified instance"""
+ if self._register_instance_future:
+ self._register_instance_future.result()
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ """Implement async context manager protocol
+
+ Unregister this instance with the client, so that
+ grpc channels will no longer be warmed"""
+ self.close()
+
+
+@CrossSync._Sync_Impl.add_mapping_decorator("Table")
+class Table(_DataApiTarget):
+ """
+ Main Data API surface for interacting with a Bigtable table.
+
+ Table object maintains table_id and app_profile_id context, and passes them with
+ each call
+ """
+
+ @property
+ def _request_path(self) -> dict[str, str]:
+ return {"table_name": self.table_name}
+
+
+@CrossSync._Sync_Impl.add_mapping_decorator("AuthorizedView")
+class AuthorizedView(_DataApiTarget):
+ """
+ Provides access to an authorized view of a table.
+
+ An authorized view is a subset of a table that you configure to include specific table data.
+ Then you grant access to the authorized view separately from access to the table.
+
+ AuthorizedView object maintains table_id, app_profile_id, and authorized_view_id context,
+ and passes them with each call
+ """
+
+ def __init__(
+ self,
+ client,
+ instance_id,
+ table_id,
+ authorized_view_id,
+ app_profile_id: str | None = None,
+ **kwargs,
+ ):
+ """Initialize an AuthorizedView instance
+
+ Args:
+ instance_id: The Bigtable instance ID to associate with this client.
+ instance_id is combined with the client's project to fully
+ specify the instance
+ table_id: The ID of the table. table_id is combined with the
+ instance_id and the client's project to fully specify the table
+ authorized_view_id: The id for the authorized view to use for requests
+ app_profile_id: The app profile to associate with requests.
+ https://cloud.google.com/bigtable/docs/app-profiles
+ default_read_rows_operation_timeout: The default timeout for read rows
+ operations, in seconds. If not set, defaults to 600 seconds (10 minutes)
+ default_read_rows_attempt_timeout: The default timeout for individual
+ read rows rpc requests, in seconds. If not set, defaults to 20 seconds
+ default_mutate_rows_operation_timeout: The default timeout for mutate rows
+ operations, in seconds. If not set, defaults to 600 seconds (10 minutes)
+ default_mutate_rows_attempt_timeout: The default timeout for individual
+ mutate rows rpc requests, in seconds. If not set, defaults to 60 seconds
+ default_operation_timeout: The default timeout for all other operations, in
+ seconds. If not set, defaults to 60 seconds
+ default_attempt_timeout: The default timeout for all other individual rpc
+ requests, in seconds. If not set, defaults to 20 seconds
+ default_read_rows_retryable_errors: a list of errors that will be retried
+ if encountered during read_rows and related operations.
+ Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted)
+ default_mutate_rows_retryable_errors: a list of errors that will be retried
+ if encountered during mutate_rows and related operations.
+ Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable)
+ default_retryable_errors: a list of errors that will be retried if
+ encountered during all other operations.
+ Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable)
+ Raises:
+ None"""
+ super().__init__(client, instance_id, table_id, app_profile_id, **kwargs)
+ self.authorized_view_id = authorized_view_id
+ self.authorized_view_name: str = self.client._gapic_client.authorized_view_path(
+ self.client.project, instance_id, table_id, authorized_view_id
+ )
+
+ @property
+ def _request_path(self) -> dict[str, str]:
+ return {"authorized_view_name": self.authorized_view_name}
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/metrics_interceptor.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/metrics_interceptor.py
new file mode 100644
index 000000000000..9e47313b07b9
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/metrics_interceptor.py
@@ -0,0 +1,59 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is automatically generated by CrossSync. Do not edit manually.
+
+from __future__ import annotations
+from grpc import UnaryUnaryClientInterceptor
+from grpc import UnaryStreamClientInterceptor
+
+
+class BigtableMetricsInterceptor(
+ UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor
+):
+ """
+ A gRPC interceptor to add client metadata and print server metadata.
+ """
+
+ def intercept_unary_unary(self, continuation, client_call_details, request):
+ """Interceptor for unary rpcs:
+ - MutateRow
+ - CheckAndMutateRow
+ - ReadModifyWriteRow"""
+ try:
+ call = continuation(client_call_details, request)
+ return call
+ except Exception as rpc_error:
+ raise rpc_error
+
+ def intercept_unary_stream(self, continuation, client_call_details, request):
+ """Interceptor for streaming rpcs:
+ - ReadRows
+ - MutateRows
+ - SampleRowKeys"""
+ try:
+ return self._streaming_generator_wrapper(
+ continuation(client_call_details, request)
+ )
+ except Exception as rpc_error:
+ raise rpc_error
+
+ @staticmethod
+ def _streaming_generator_wrapper(call):
+ """Wrapped generator to be returned by intercept_unary_stream."""
+ try:
+ for response in call:
+ yield response
+ except Exception as e:
+ raise e
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/mutations_batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/mutations_batcher.py
new file mode 100644
index 000000000000..84f0ba8c0618
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/mutations_batcher.py
@@ -0,0 +1,451 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This file is automatically generated by CrossSync. Do not edit manually.
+
+from __future__ import annotations
+from typing import Sequence, TYPE_CHECKING, cast
+import atexit
+import warnings
+from collections import deque
+import concurrent.futures
+from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup
+from google.cloud.bigtable.data.exceptions import FailedMutationEntryError
+from google.cloud.bigtable.data._helpers import _get_retryable_errors
+from google.cloud.bigtable.data._helpers import _get_timeouts
+from google.cloud.bigtable.data._helpers import TABLE_DEFAULT
+from google.cloud.bigtable.data.mutations import _MUTATE_ROWS_REQUEST_MUTATION_LIMIT
+from google.cloud.bigtable.data.mutations import Mutation
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+if TYPE_CHECKING:
+ from google.cloud.bigtable.data.mutations import RowMutationEntry
+ from google.cloud.bigtable.data._sync_autogen.client import (
+ _DataApiTarget as TargetType,
+ )
+_MB_SIZE = 1024 * 1024
+
+
+@CrossSync._Sync_Impl.add_mapping_decorator("_FlowControl")
+class _FlowControl:
+ """
+ Manages flow control for batched mutations. Mutations are registered against
+ the FlowControl object before being sent, which will block if size or count
+ limits have reached capacity. As mutations complete, they are removed from
+ the FlowControl object, which will notify any blocked requests that there
+ is additional capacity.
+
+ Flow limits are not hard limits. If a single mutation exceeds the configured
+ limits, it will be allowed as a single batch when the capacity is available.
+
+ Args:
+ max_mutation_count: maximum number of mutations to send in a single rpc.
+ This corresponds to individual mutations in a single RowMutationEntry.
+ max_mutation_bytes: maximum number of bytes to send in a single rpc.
+ Raises:
+ ValueError: if max_mutation_count or max_mutation_bytes is less than 1
+ """
+
+ def __init__(self, max_mutation_count: int, max_mutation_bytes: int):
+ self._max_mutation_count = max_mutation_count
+ self._max_mutation_bytes = max_mutation_bytes
+ if self._max_mutation_count < 1:
+ raise ValueError("max_mutation_count must be greater than 0")
+ if self._max_mutation_bytes < 1:
+ raise ValueError("max_mutation_bytes must be greater than 0")
+ self._capacity_condition = CrossSync._Sync_Impl.Condition()
+ self._in_flight_mutation_count = 0
+ self._in_flight_mutation_bytes = 0
+
+ def _has_capacity(self, additional_count: int, additional_size: int) -> bool:
+ """Checks if there is capacity to send a new entry with the given size and count
+
+ FlowControl limits are not hard limits. If a single mutation exceeds
+ the configured flow limits, it will be sent in a single batch when
+ previous batches have completed.
+
+ Args:
+ additional_count: number of mutations in the pending entry
+ additional_size: size of the pending entry
+ Returns:
+ bool: True if there is capacity to send the pending entry, False otherwise
+ """
+ acceptable_size = max(self._max_mutation_bytes, additional_size)
+ acceptable_count = max(self._max_mutation_count, additional_count)
+ new_size = self._in_flight_mutation_bytes + additional_size
+ new_count = self._in_flight_mutation_count + additional_count
+ return new_size <= acceptable_size and new_count <= acceptable_count
+
+ def remove_from_flow(
+ self, mutations: RowMutationEntry | list[RowMutationEntry]
+ ) -> None:
+ """Removes mutations from flow control. This method should be called once
+ for each mutation that was sent to add_to_flow, after the corresponding
+ operation is complete.
+
+ Args:
+ mutations: mutation or list of mutations to remove from flow control"""
+ if not isinstance(mutations, list):
+ mutations = [mutations]
+ total_count = sum((len(entry.mutations) for entry in mutations))
+ total_size = sum((entry.size() for entry in mutations))
+ self._in_flight_mutation_count -= total_count
+ self._in_flight_mutation_bytes -= total_size
+ with self._capacity_condition:
+ self._capacity_condition.notify_all()
+
+ def add_to_flow(self, mutations: RowMutationEntry | list[RowMutationEntry]):
+ """Generator function that registers mutations with flow control. As mutations
+ are accepted into the flow control, they are yielded back to the caller,
+ to be sent in a batch. If the flow control is at capacity, the generator
+ will block until there is capacity available.
+
+ Args:
+ mutations: list mutations to break up into batches
+ Yields:
+ list[RowMutationEntry]:
+ list of mutations that have reserved space in the flow control.
+ Each batch contains at least one mutation."""
+ if not isinstance(mutations, list):
+ mutations = [mutations]
+ start_idx = 0
+ end_idx = 0
+ while end_idx < len(mutations):
+ start_idx = end_idx
+ batch_mutation_count = 0
+ with self._capacity_condition:
+ while end_idx < len(mutations):
+ next_entry = mutations[end_idx]
+ next_size = next_entry.size()
+ next_count = len(next_entry.mutations)
+ if (
+ self._has_capacity(next_count, next_size)
+ and batch_mutation_count + next_count
+ <= _MUTATE_ROWS_REQUEST_MUTATION_LIMIT
+ ):
+ end_idx += 1
+ batch_mutation_count += next_count
+ self._in_flight_mutation_bytes += next_size
+ self._in_flight_mutation_count += next_count
+ elif start_idx != end_idx:
+ break
+ else:
+ self._capacity_condition.wait_for(
+ lambda: self._has_capacity(next_count, next_size)
+ )
+ yield mutations[start_idx:end_idx]
+
+
+class MutationsBatcher:
+ """
+ Allows users to send batches using a context manager API.
+
+ Runs mutate_row, mutate_rows, and check_and_mutate_row internally, combining
+ to use as few network requests as required
+
+ Will automatically flush the batcher:
+ - every flush_interval seconds
+ - after queue size reaches flush_limit_mutation_count
+ - after queue reaches flush_limit_bytes
+ - when batcher is closed or destroyed
+
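+ For example, a typical usage sketch (assumes a ``table`` instance, an existing
+ column family ``"family"``, and ``RowMutationEntry``/``SetCell`` from
+ ``google.cloud.bigtable.data``; any entries still staged when the context exits
+ are flushed on close)::
+
+ with table.mutations_batcher(flush_interval=1) as batcher:
+ for key in (b"row-1", b"row-2"):
+ batcher.append(
+ RowMutationEntry(key, SetCell("family", "qualifier", b"value"))
+ )
+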
+ Args:
+ table: Table or AuthorizedView used to perform rpc calls
+ flush_interval: Automatically flush every flush_interval seconds.
+ If None, no time-based flushing is performed.
+ flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count
+ mutations are added across all entries. If None, this limit is ignored.
+ flush_limit_bytes: Flush immediately after flush_limit_bytes bytes are added.
+ flow_control_max_mutation_count: Maximum number of inflight mutations.
+ flow_control_max_bytes: Maximum number of inflight bytes.
+ batch_operation_timeout: timeout for each mutate_rows operation, in seconds.
+ If TABLE_DEFAULT, defaults to the Table's default_mutate_rows_operation_timeout.
+ batch_attempt_timeout: timeout for each individual request, in seconds.
+ If TABLE_DEFAULT, defaults to the Table's default_mutate_rows_attempt_timeout.
+ If None, defaults to batch_operation_timeout.
+ batch_retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_mutate_rows_retryable_errors.
+ """
+
+ def __init__(
+ self,
+ table: TargetType,
+ *,
+ flush_interval: float | None = 5,
+ flush_limit_mutation_count: int | None = 1000,
+ flush_limit_bytes: int = 20 * _MB_SIZE,
+ flow_control_max_mutation_count: int = 100000,
+ flow_control_max_bytes: int = 100 * _MB_SIZE,
+ batch_operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+ batch_attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+ batch_retryable_errors: Sequence[type[Exception]]
+ | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+ ):
+ (self._operation_timeout, self._attempt_timeout) = _get_timeouts(
+ batch_operation_timeout, batch_attempt_timeout, table
+ )
+ self._retryable_errors: list[type[Exception]] = _get_retryable_errors(
+ batch_retryable_errors, table
+ )
+ self._closed = CrossSync._Sync_Impl.Event()
+ self._target = table
+ self._staged_entries: list[RowMutationEntry] = []
+ (self._staged_count, self._staged_bytes) = (0, 0)
+ self._flow_control = CrossSync._Sync_Impl._FlowControl(
+ flow_control_max_mutation_count, flow_control_max_bytes
+ )
+ self._flush_limit_bytes = flush_limit_bytes
+ self._flush_limit_count = (
+ flush_limit_mutation_count
+ if flush_limit_mutation_count is not None
+ else float("inf")
+ )
+ self._sync_rpc_executor = (
+ concurrent.futures.ThreadPoolExecutor(max_workers=8)
+ if not CrossSync._Sync_Impl.is_async
+ else None
+ )
+ self._sync_flush_executor = (
+ concurrent.futures.ThreadPoolExecutor(max_workers=4)
+ if not CrossSync._Sync_Impl.is_async
+ else None
+ )
+ self._flush_timer = CrossSync._Sync_Impl.create_task(
+ self._timer_routine, flush_interval, sync_executor=self._sync_flush_executor
+ )
+ self._flush_jobs: set[CrossSync._Sync_Impl.Future[None]] = set()
+ self._entries_processed_since_last_raise: int = 0
+ self._exceptions_since_last_raise: int = 0
+ self._exception_list_limit: int = 10
+ self._oldest_exceptions: list[Exception] = []
+ self._newest_exceptions: deque[Exception] = deque(
+ maxlen=self._exception_list_limit
+ )
+ atexit.register(self._on_exit)
+
+ def _timer_routine(self, interval: float | None) -> None:
+ """Set up a background task to flush the batcher every interval seconds
+
+ If interval is None, no time-based flushing is scheduled and the routine returns immediately
+
+ Args:
+ interval: Automatically flush every interval seconds.
+ If None, no time-based flushing is performed."""
+ if not interval or interval <= 0:
+ return None
+ while not self._closed.is_set():
+ CrossSync._Sync_Impl.event_wait(
+ self._closed, timeout=interval, async_break_early=False
+ )
+ if not self._closed.is_set() and self._staged_entries:
+ self._schedule_flush()
+
+ def append(self, mutation_entry: RowMutationEntry):
+ """Add a new set of mutations to the internal queue
+
+ Args:
+ mutation_entry: new entry to add to flush queue
+ Raises:
+ RuntimeError: if batcher is closed
+ ValueError: if an invalid mutation type is added"""
+ if self._closed.is_set():
+ raise RuntimeError("Cannot append to closed MutationsBatcher")
+ if isinstance(cast(Mutation, mutation_entry), Mutation):
+ raise ValueError(
+ f"invalid mutation type: {type(mutation_entry).__name__}. Only RowMutationEntry objects are supported by batcher"
+ )
+ self._staged_entries.append(mutation_entry)
+ self._staged_count += len(mutation_entry.mutations)
+ self._staged_bytes += mutation_entry.size()
+ if (
+ self._staged_count >= self._flush_limit_count
+ or self._staged_bytes >= self._flush_limit_bytes
+ ):
+ self._schedule_flush()
+ CrossSync._Sync_Impl.yield_to_event_loop()
+
+ def _schedule_flush(self) -> CrossSync._Sync_Impl.Future[None] | None:
+ """Update the flush task to include the latest staged entries
+
+ Returns:
+ Future[None] | None:
+ future representing the background task, if started"""
+ if self._staged_entries:
+ (entries, self._staged_entries) = (self._staged_entries, [])
+ (self._staged_count, self._staged_bytes) = (0, 0)
+ new_task = CrossSync._Sync_Impl.create_task(
+ self._flush_internal, entries, sync_executor=self._sync_flush_executor
+ )
+ if not new_task.done():
+ self._flush_jobs.add(new_task)
+ new_task.add_done_callback(self._flush_jobs.remove)
+ return new_task
+ return None
+
+ def _flush_internal(self, new_entries: list[RowMutationEntry]):
+ """Flushes a set of mutations to the server, and updates internal state
+
+ Args:
+ new_entries: list of RowMutationEntry objects to flush"""
+ in_process_requests: list[
+ CrossSync._Sync_Impl.Future[list[FailedMutationEntryError]]
+ ] = []
+ for batch in self._flow_control.add_to_flow(new_entries):
+ batch_task = CrossSync._Sync_Impl.create_task(
+ self._execute_mutate_rows, batch, sync_executor=self._sync_rpc_executor
+ )
+ in_process_requests.append(batch_task)
+ found_exceptions = self._wait_for_batch_results(*in_process_requests)
+ self._entries_processed_since_last_raise += len(new_entries)
+ self._add_exceptions(found_exceptions)
+
+ def _execute_mutate_rows(
+ self, batch: list[RowMutationEntry]
+ ) -> list[FailedMutationEntryError]:
+ """Helper to execute mutation operation on a batch
+
+ Args:
+ batch: list of RowMutationEntry objects to send to server
+ Returns:
+ list[FailedMutationEntryError]:
+ list of FailedMutationEntryError objects for mutations that failed.
+ FailedMutationEntryError objects will not contain index information"""
+ try:
+ operation = CrossSync._Sync_Impl._MutateRowsOperation(
+ self._target.client._gapic_client,
+ self._target,
+ batch,
+ operation_timeout=self._operation_timeout,
+ attempt_timeout=self._attempt_timeout,
+ retryable_exceptions=self._retryable_errors,
+ )
+ operation.start()
+ except MutationsExceptionGroup as e:
+ for subexc in e.exceptions:
+ subexc.index = None
+ return list(e.exceptions)
+ finally:
+ self._flow_control.remove_from_flow(batch)
+ return []
+
+ def _add_exceptions(self, excs: list[Exception]):
+ """Add new list of exceptions to internal store. To avoid unbounded memory,
+ the batcher will store the first and last _exception_list_limit exceptions,
+ and discard any in between.
+
+ Args:
+ excs: list of exceptions to add to the internal store"""
+ self._exceptions_since_last_raise += len(excs)
+ if excs and len(self._oldest_exceptions) < self._exception_list_limit:
+ addition_count = self._exception_list_limit - len(self._oldest_exceptions)
+ self._oldest_exceptions.extend(excs[:addition_count])
+ excs = excs[addition_count:]
+ if excs:
+ self._newest_exceptions.extend(excs[-self._exception_list_limit :])
+
+ def _raise_exceptions(self):
+ """Raise any unreported exceptions from background flush operations
+
+ Raises:
+ MutationsExceptionGroup: exception group with all unreported exceptions"""
+ if self._oldest_exceptions or self._newest_exceptions:
+ (oldest, self._oldest_exceptions) = (self._oldest_exceptions, [])
+ newest = list(self._newest_exceptions)
+ self._newest_exceptions.clear()
+ (entry_count, self._entries_processed_since_last_raise) = (
+ self._entries_processed_since_last_raise,
+ 0,
+ )
+ (exc_count, self._exceptions_since_last_raise) = (
+ self._exceptions_since_last_raise,
+ 0,
+ )
+ raise MutationsExceptionGroup.from_truncated_lists(
+ first_list=oldest,
+ last_list=newest,
+ total_excs=exc_count,
+ entry_count=entry_count,
+ )
+
+ def __enter__(self):
+ """Allow use of context manager API"""
+ return self
+
+ def __exit__(self, exc_type, exc, tb):
+ """Allow use of context manager API.
+
+ Flushes the batcher and cleans up resources."""
+ self.close()
+
+ @property
+ def closed(self) -> bool:
+ """Returns:
+ - True if the batcher is closed, False otherwise"""
+ return self._closed.is_set()
+
+ def close(self):
+ """Flush queue and clean up resources"""
+ self._closed.set()
+ self._flush_timer.cancel()
+ self._schedule_flush()
+ if self._sync_flush_executor:
+ with self._sync_flush_executor:
+ self._sync_flush_executor.shutdown(wait=True)
+ if self._sync_rpc_executor:
+ with self._sync_rpc_executor:
+ self._sync_rpc_executor.shutdown(wait=True)
+ CrossSync._Sync_Impl.wait([*self._flush_jobs, self._flush_timer])
+ atexit.unregister(self._on_exit)
+ self._raise_exceptions()
+
+ def _on_exit(self):
+ """Called when program is exited. Raises warning if unflushed mutations remain"""
+ if not self._closed.is_set() and self._staged_entries:
+ warnings.warn(
+ f"MutationsBatcher for target {self._target!r} was not closed. {len(self._staged_entries)} Unflushed mutations will not be sent to the server."
+ )
+
+ @staticmethod
+ def _wait_for_batch_results(
+ *tasks: CrossSync._Sync_Impl.Future[list[FailedMutationEntryError]]
+ | CrossSync._Sync_Impl.Future[None],
+ ) -> list[Exception]:
+ """Takes in a list of futures representing _execute_mutate_rows tasks,
+ waits for them to complete, and returns a list of errors encountered.
+
+ Args:
+ *tasks: futures representing _execute_mutate_rows or _flush_internal tasks
+ Returns:
+ list[Exception]:
+ list of Exceptions encountered by any of the tasks. Errors are expected
+ to be FailedMutationEntryError, representing a failed mutation operation.
+ If a task fails with a different exception, it will be included in the
+ output list. Successful tasks will not be represented in the output list.
+ """
+ if not tasks:
+ return []
+ exceptions: list[Exception] = []
+ for task in tasks:
+ try:
+ exc_list = task.result()
+ if exc_list:
+ for exc in exc_list:
+ exc.index = None
+ exceptions.extend(exc_list)
+ except Exception as e:
+ exceptions.append(e)
+ return exceptions
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py
new file mode 100644
index 000000000000..b19e0e5ea126
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py
@@ -0,0 +1,343 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+import sys
+
+from typing import Any, TYPE_CHECKING
+
+from google.api_core import exceptions as core_exceptions
+from google.cloud.bigtable.data.row import Row
+
+is_311_plus = sys.version_info >= (3, 11)
+
+if TYPE_CHECKING:
+ from google.cloud.bigtable.data.mutations import RowMutationEntry
+ from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery
+
+
+class InvalidChunk(core_exceptions.GoogleAPICallError):
+ """Exception raised to invalid chunk data from back-end."""
+
+
+class _RowSetComplete(Exception):
+ """
+ Internal exception for _ReadRowsOperation
+ Raised in revise_request_rowset when there are no rows left to process when starting a retry attempt
+ """
+
+ pass
+
+
+class _ResetRow(Exception): # noqa: F811
+ """
+ Internal exception for _ReadRowsOperation
+
+ Denotes that the server sent a reset_row marker, telling the client to drop
+ all previous chunks for row_key and re-read from the beginning.
+
+ Args:
+ chunk: the reset_row chunk
+ """
+
+ def __init__(self, chunk):
+ self.chunk = chunk
+
+
+class _MutateRowsIncomplete(RuntimeError):
+ """
+ Exception raised when a mutate_rows call has unfinished work.
+ """
+
+ pass
+
+
+class _BigtableExceptionGroup(ExceptionGroup if is_311_plus else Exception): # type: ignore # noqa: F821
+ """
+ Represents one or more exceptions that occur during a bulk Bigtable operation
+
+ In Python 3.11+, this is an unmodified exception group. In < 3.11, it is a
+ custom exception with some exception group functionality backported, but it does
+ not implement the full API
+ """
+
+ def __init__(self, message, excs):
+ if is_311_plus:
+ super().__init__(message, excs)
+ else:
+ if len(excs) == 0:
+ raise ValueError("exceptions must be a non-empty sequence")
+ self.exceptions = tuple(excs)
+ # simulate an exception group in Python < 3.11 by adding exception info
+ # to the message
+ first_line = "--+---------------- 1 ----------------"
+ last_line = "+------------------------------------"
+ message_parts = [message + "\n" + first_line]
+ # print error info for each exception in the group
+ for idx, e in enumerate(excs[:15]):
+ # apply index header
+ if idx != 0:
+ message_parts.append(
+ f"+---------------- {str(idx + 1).rjust(2)} ----------------"
+ )
+ cause = e.__cause__
+ # if this exception had a cause, print the cause first
+ # used to display root causes of FailedMutationEntryError and FailedQueryShardError
+ # format matches the error output of Python 3.11+
+ if cause is not None:
+ message_parts.extend(
+ f"| {type(cause).__name__}: {cause}".splitlines()
+ )
+ message_parts.append("| ")
+ message_parts.append(
+ "| The above exception was the direct cause of the following exception:"
+ )
+ message_parts.append("| ")
+ # attach error message for this sub-exception
+ # if the subexception is also a _BigtableExceptionGroup,
+ # error messages will be nested
+ message_parts.extend(f"| {type(e).__name__}: {e}".splitlines())
+ # truncate the message if there are more than 15 exceptions
+ if len(excs) > 15:
+ message_parts.append("+---------------- ... ---------------")
+ message_parts.append(f"| and {len(excs) - 15} more")
+ if last_line not in message_parts[-1]:
+ # in the case of nested _BigtableExceptionGroups, the last line
+ # does not need to be added, since one was added by the final sub-exception
+ message_parts.append(last_line)
+ super().__init__("\n ".join(message_parts))
+
+ def __new__(cls, message, excs):
+ if is_311_plus:
+ return super().__new__(cls, message, excs)
+ else:
+ return super().__new__(cls)
+
+ def __str__(self):
+ if is_311_plus:
+ # don't return built-in sub-exception message
+ return self.args[0]
+ return super().__str__()
+
+ def __repr__(self):
+ """
+ repr representation should strip out sub-exception details
+ """
+ if is_311_plus:
+ return super().__repr__()
+ message = self.args[0].split("\n")[0]
+ return f"{self.__class__.__name__}({message!r}, {self.exceptions!r})"
+
+
+class MutationsExceptionGroup(_BigtableExceptionGroup):
+ """
+ Represents one or more exceptions that occur during a bulk mutation operation
+
+ Exceptions will typically be of type FailedMutationEntryError, but other exceptions may
+ be included if they are raised during the mutation operation
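+
+ For example, a handling sketch (assumes ``table`` and ``entries`` are defined as
+ for a bulk_mutate_rows call)::
+
+ try:
+ table.bulk_mutate_rows(entries)
+ except MutationsExceptionGroup as e:
+ for sub_exc in e.exceptions:
+ print(sub_exc)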
+ """
+
+ @staticmethod
+ def _format_message(
+ excs: list[Exception], total_entries: int, exc_count: int | None = None
+ ) -> str:
+ """
+ Format a message for the exception group
+
+ Args:
+ excs: the exceptions in the group
+ total_entries: the total number of entries attempted, successful or not
+ exc_count: the number of exceptions associated with the request
+ if None, this will be len(excs)
+ Returns:
+ str: the formatted message
+ """
+ exc_count = exc_count if exc_count is not None else len(excs)
+ entry_str = "entry" if exc_count == 1 else "entries"
+ return f"{exc_count} failed {entry_str} from {total_entries} attempted."
+
+ def __init__(
+ self, excs: list[Exception], total_entries: int, message: str | None = None
+ ):
+ """
+ Args:
+ excs: the exceptions in the group
+ total_entries: the total number of entries attempted, successful or not
+ message: the message for the exception group. If None, a default message
+ will be generated
+ """
+ message = (
+ message
+ if message is not None
+ else self._format_message(excs, total_entries)
+ )
+ super().__init__(message, excs)
+ self.total_entries_attempted = total_entries
+
+ def __new__(
+ cls, excs: list[Exception], total_entries: int, message: str | None = None
+ ):
+ """
+ Args:
+ excs: the exceptions in the group
+ total_entries: the total number of entries attempted, successful or not
+ message: the message for the exception group. If None, a default message
+ Returns:
+ MutationsExceptionGroup: the new instance
+ """
+ message = (
+ message if message is not None else cls._format_message(excs, total_entries)
+ )
+ instance = super().__new__(cls, message, excs)
+ instance.total_entries_attempted = total_entries
+ return instance
+
+ @classmethod
+ def from_truncated_lists(
+ cls,
+ first_list: list[Exception],
+ last_list: list[Exception],
+ total_excs: int,
+ entry_count: int,
+ ) -> MutationsExceptionGroup:
+ """
+ Create a MutationsExceptionGroup from two lists of exceptions, representing
+ a larger set that has been truncated. The MutationsExceptionGroup will
+ contain the union of the two lists as sub-exceptions, and the error message
+ will describe the number of exceptions that were truncated.
+
+ Args:
+ first_list: the set of oldest exceptions to add to the ExceptionGroup
+ last_list: the set of newest exceptions to add to the ExceptionGroup
+ total_excs: the total number of exceptions associated with the request
+ Should be len(first_list) + len(last_list) + number of dropped exceptions
+ in the middle
+ entry_count: the total number of entries attempted, successful or not
+ Returns:
+ MutationsExceptionGroup: the new instance
+ """
+ first_count, last_count = len(first_list), len(last_list)
+ if first_count + last_count >= total_excs:
+ # no exceptions were dropped
+ return cls(first_list + last_list, entry_count)
+ excs = first_list + last_list
+ truncation_count = total_excs - (first_count + last_count)
+ base_message = cls._format_message(excs, entry_count, total_excs)
+ first_message = f"first {first_count}" if first_count else ""
+ last_message = f"last {last_count}" if last_count else ""
+ conjunction = " and " if first_message and last_message else ""
+ message = f"{base_message} ({first_message}{conjunction}{last_message} attached as sub-exceptions; {truncation_count} truncated)"
+ return cls(excs, entry_count, message)
+
+
+class FailedMutationEntryError(Exception):
+ """
+ Represents a single failed RowMutationEntry in a bulk_mutate_rows request.
+ A collection of FailedMutationEntryErrors will be raised in a MutationsExceptionGroup
+ """
+
+ def __init__(
+ self,
+ failed_idx: int | None,
+ failed_mutation_entry: "RowMutationEntry",
+ cause: Exception,
+ ):
+ idempotent_msg = (
+ "idempotent" if failed_mutation_entry.is_idempotent() else "non-idempotent"
+ )
+ index_msg = f" at index {failed_idx}" if failed_idx is not None else ""
+ message = f"Failed {idempotent_msg} mutation entry{index_msg}"
+ super().__init__(message)
+ self.__cause__ = cause
+ self.index = failed_idx
+ self.entry = failed_mutation_entry
+
+
+class RetryExceptionGroup(_BigtableExceptionGroup):
+ """Represents one or more exceptions that occur during a retryable operation"""
+
+ @staticmethod
+ def _format_message(excs: list[Exception]):
+ if len(excs) == 0:
+ return "No exceptions"
+ plural = "s" if len(excs) > 1 else ""
+ return f"{len(excs)} failed attempt{plural}"
+
+ def __init__(self, excs: list[Exception]):
+ super().__init__(self._format_message(excs), excs)
+
+ def __new__(cls, excs: list[Exception]):
+ return super().__new__(cls, cls._format_message(excs), excs)
+
+
+class ShardedReadRowsExceptionGroup(_BigtableExceptionGroup):
+ """
+ Represents one or more exceptions that occur during a sharded read rows operation
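+
+ Partial results are available on the ``successful_rows`` attribute. For example,
+ a handling sketch (assumes ``table`` and ``shard_queries`` as in read_rows_sharded)::
+
+ try:
+ rows = table.read_rows_sharded(shard_queries)
+ except ShardedReadRowsExceptionGroup as e:
+ rows = e.successful_rows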
+ """
+
+ @staticmethod
+ def _format_message(excs: list[FailedQueryShardError], total_queries: int):
+ query_str = "query" if total_queries == 1 else "queries"
+ plural_str = "" if len(excs) == 1 else "s"
+ return f"{len(excs)} sub-exception{plural_str} (from {total_queries} {query_str} attempted)"
+
+ def __init__(
+ self,
+ excs: list[FailedQueryShardError],
+ succeeded: list[Row],
+ total_queries: int,
+ ):
+ super().__init__(self._format_message(excs, total_queries), excs)
+ self.successful_rows = succeeded
+
+ def __new__(
+ cls, excs: list[FailedQueryShardError], succeeded: list[Row], total_queries: int
+ ):
+ instance = super().__new__(cls, cls._format_message(excs, total_queries), excs)
+ instance.successful_rows = succeeded
+ return instance
+
+
+class FailedQueryShardError(Exception):
+ """
+ Represents an individual failed query in a sharded read rows operation
+ """
+
+ def __init__(
+ self,
+ failed_index: int,
+ failed_query: "ReadRowsQuery" | dict[str, Any],
+ cause: Exception,
+ ):
+ message = f"Failed query at index {failed_index}"
+ super().__init__(message)
+ self.__cause__ = cause
+ self.index = failed_index
+ self.query = failed_query
+
+
+class InvalidExecuteQueryResponse(core_exceptions.GoogleAPICallError):
+ """Exception raised to invalid query response data from back-end."""
+
+ # Set to internal. This is representative of an internal error.
+ code = 13
+
+
+class ParameterTypeInferenceFailed(ValueError):
+ """Exception raised when query parameter types were not provided and cannot be inferred."""
+
+
+class EarlyMetadataCallError(RuntimeError):
+ """Execption raised when metadata is request from an ExecuteQueryIterator before the first row has been read, or the query has completed"""
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/__init__.py
new file mode 100644
index 000000000000..029e79b9390a
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/__init__.py
@@ -0,0 +1,43 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from google.cloud.bigtable.data.execute_query._async.execute_query_iterator import (
+ ExecuteQueryIteratorAsync,
+)
+from google.cloud.bigtable.data.execute_query._sync_autogen.execute_query_iterator import (
+ ExecuteQueryIterator,
+)
+from google.cloud.bigtable.data.execute_query.metadata import (
+ Metadata,
+ SqlType,
+)
+from google.cloud.bigtable.data.execute_query.values import (
+ ExecuteQueryValueType,
+ QueryResultRow,
+ Struct,
+)
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+CrossSync.add_mapping("ExecuteQueryIterator", ExecuteQueryIteratorAsync)
+CrossSync._Sync_Impl.add_mapping("ExecuteQueryIterator", ExecuteQueryIterator)
+
+__all__ = [
+ "ExecuteQueryValueType",
+ "SqlType",
+ "QueryResultRow",
+ "Struct",
+ "Metadata",
+ "ExecuteQueryIteratorAsync",
+ "ExecuteQueryIterator",
+]
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/__init__.py
new file mode 100644
index 000000000000..6d5e14bcf4a0
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py
new file mode 100644
index 000000000000..2beda4cd65be
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py
@@ -0,0 +1,315 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+from typing import (
+ Any,
+ Dict,
+ Optional,
+ Sequence,
+ Tuple,
+ TYPE_CHECKING,
+)
+from google.api_core import retry as retries
+from google.protobuf.message import Message
+from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper
+
+from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor
+from google.cloud.bigtable.data._helpers import (
+ _attempt_timeout_generator,
+ _retry_exception_factory,
+)
+from google.cloud.bigtable.data.exceptions import (
+ EarlyMetadataCallError,
+ InvalidExecuteQueryResponse,
+)
+from google.cloud.bigtable.data.execute_query.values import QueryResultRow
+from google.cloud.bigtable.data.execute_query.metadata import Metadata
+from google.cloud.bigtable.data.execute_query._reader import (
+ _QueryResultRowReader,
+ _Reader,
+)
+from google.cloud.bigtable_v2.types.bigtable import (
+ ExecuteQueryRequest as ExecuteQueryRequestPB,
+ ExecuteQueryResponse,
+)
+
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+if TYPE_CHECKING:
+ if CrossSync.is_async:
+ from google.cloud.bigtable.data import BigtableDataClientAsync as DataClientType
+ else:
+ from google.cloud.bigtable.data import BigtableDataClient as DataClientType
+
+__CROSS_SYNC_OUTPUT__ = (
+ "google.cloud.bigtable.data.execute_query._sync_autogen.execute_query_iterator"
+)
+
+
+def _has_resume_token(response: ExecuteQueryResponse) -> bool:
+ response_pb = response._pb # proto-plus attribute retrieval is slow.
+ if response_pb.HasField("results"):
+ results = response_pb.results
+ return len(results.resume_token) > 0
+ return False
+
+
+@CrossSync.convert_class(sync_name="ExecuteQueryIterator")
+class ExecuteQueryIteratorAsync:
+ @CrossSync.convert(
+ docstring_format_vars={
+ "NO_LOOP": (
+ "RuntimeError: if the instance is not created within an async event loop context.",
+ "None",
+ ),
+ "TASK_OR_THREAD": ("asyncio Tasks", "threads"),
+ }
+ )
+ def __init__(
+ self,
+ client: DataClientType,
+ instance_id: str,
+ app_profile_id: Optional[str],
+ request_body: Dict[str, Any],
+ prepare_metadata: Metadata,
+ attempt_timeout: float | None,
+ operation_timeout: float,
+ req_metadata: Sequence[Tuple[str, str]] = (),
+ retryable_excs: Sequence[type[Exception]] = (),
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+ ) -> None:
+ """
+ Collects responses from ExecuteQuery requests and parses them into QueryResultRows.
+
+ **Please Note** this is not meant to be constructed directly by applications. It should always
+ be created via the client. The constructor is subject to change.
+
+ It is **not thread-safe**. It should not be used by multiple {TASK_OR_THREAD}.
+
+ Args:
+ client: bigtable client
+ instance_id: id of the instance on which the query is executed
+ request_body: dict representing the body of the ExecuteQueryRequest
+ attempt_timeout: the time budget for an individual network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+ operation_timeout: the time budget for the entire operation, in seconds.
+ Failed requests will be retried within the budget
+ req_metadata: metadata used while sending the gRPC request
+ retryable_excs: a list of errors that will be retried if encountered.
+ column_info: dict with mappings between column names and additional column information
+ for protobuf deserialization.
+ Raises:
+ {NO_LOOP}
+ :class:`ValueError` as a safeguard if data is processed in an unexpected state
+ """
+ self._table_name = None
+ self._app_profile_id = app_profile_id
+ self._client = client
+ self._instance_id = instance_id
+ self._prepare_metadata: Metadata = prepare_metadata
+ self._final_metadata: Metadata | None = None
+ self._byte_cursor = _ByteCursor()
+ self._reader: _Reader[QueryResultRow] = _QueryResultRowReader()
+ self.has_received_token = False
+ self._result_generator = self._next_impl()
+ self._register_instance_task = None
+ self._fully_consumed = False
+ self._is_closed = False
+ self._request_body = request_body
+ self._attempt_timeout_gen = _attempt_timeout_generator(
+ attempt_timeout, operation_timeout
+ )
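+ # Note: retry_target_stream re-invokes _make_request_with_resume_token on every
+ # attempt, so each retried RPC is sent with the latest resume_token held by the
+ # byte cursor.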
+ self._stream = CrossSync.retry_target_stream(
+ self._make_request_with_resume_token,
+ retries.if_exception_type(*retryable_excs),
+ retries.exponential_sleep_generator(0.01, 60, multiplier=2),
+ operation_timeout,
+ exception_factory=_retry_exception_factory,
+ )
+ self._req_metadata = req_metadata
+ self._column_info = column_info
+ try:
+ self._register_instance_task = CrossSync.create_task(
+ self._client._register_instance,
+ self._instance_id,
+ self.app_profile_id,
+ id(self),
+ sync_executor=self._client._executor,
+ )
+ except RuntimeError as e:
+ raise RuntimeError(
+ f"{self.__class__.__name__} must be created within an async event loop context."
+ ) from e
+
+ @property
+ def is_closed(self) -> bool:
+ """Returns True if the iterator is closed, False otherwise."""
+ return self._is_closed
+
+ @property
+ def app_profile_id(self) -> Optional[str]:
+ """Returns the app_profile_id of the iterator."""
+ return self._app_profile_id
+
+ @property
+ def table_name(self) -> Optional[str]:
+ """Returns the table_name of the iterator."""
+ return self._table_name
+
+ @CrossSync.convert
+ async def _make_request_with_resume_token(self):
+ """
+ Performs the RPC call using the correct resume token.
+ """
+ resume_token = self._byte_cursor.prepare_for_new_request()
+ request = ExecuteQueryRequestPB(
+ {
+ **self._request_body,
+ "resume_token": resume_token,
+ }
+ )
+ return await self._client._gapic_client.execute_query(
+ request,
+ timeout=next(self._attempt_timeout_gen),
+ metadata=self._req_metadata,
+ retry=None,
+ )
+
+ @CrossSync.convert
+ async def _next_impl(self) -> CrossSync.Iterator[QueryResultRow]:
+ """
+ Generator wrapping the response stream which parses the stream results
+ and returns full `QueryResultRow`s.
+ """
+ try:
+ async for response in self._stream:
+ try:
+ # we've received a resume token, so we can finalize the metadata
+ if self._final_metadata is None and _has_resume_token(response):
+ self._finalize_metadata()
+
+ batches_to_parse = self._byte_cursor.consume(response)
+ if not batches_to_parse:
+ continue
+ # metadata must be set at this point since there must be a resume_token
+ # for byte_cursor to yield data
+ if not self.metadata:
+ raise ValueError(
+ "Error parsing response before finalizing metadata"
+ )
+ results = self._reader.consume(
+ batches_to_parse, self.metadata, self._column_info
+ )
+ if results is None:
+ continue
+
+ except ValueError as e:
+ raise InvalidExecuteQueryResponse(
+ "Invalid ExecuteQuery response received"
+ ) from e
+
+ for result in results:
+ yield result
+ # the stream has finished; if no resume token was ever received, the stream
+ # completed with no responses, so the latest prepare_response was used
+ # successfully and we can finalize the metadata
+ if self._final_metadata is None:
+ self._finalize_metadata()
+ self._fully_consumed = True
+ finally:
+ self._close_internal()
+
+ @CrossSync.convert(sync_name="__next__", replace_symbols={"__anext__": "__next__"})
+ async def __anext__(self) -> QueryResultRow:
+ """
+ Yields QueryResultRows representing the results of the query.
+
+ :raises: :class:`ValueError` as a safeguard if data is processed in an unexpected state
+ """
+ if self._is_closed:
+ raise CrossSync.StopIteration
+ return await self._result_generator.__anext__()
+
+ @CrossSync.convert(sync_name="__iter__")
+ def __aiter__(self):
+ return self
+
+ @CrossSync.convert
+ def _finalize_metadata(self) -> None:
+ """
+ Sets _final_metadata to the metadata of the latest prepare_response.
+ The iterator should call this after either the first resume token is received or the
+ stream completes successfully with no responses.
+
+ This can't be set on init because the metadata will be able to change due to plan refresh.
+ Plan refresh isn't implemented yet, but we want functionality to stay the same when it is.
+
+ For example the following scenario for query "SELECT * FROM table":
+ - Make a request, table has one column family 'cf'
+ - Return an incomplete batch
+ - request fails with transient error
+ - Meanwhile the table has had a second column family added 'cf2'
+ - Retry the request, get an error indicating the `prepared_query` has expired
+ - Refresh the prepared_query and retry the request, the new prepared_query
+ contains both 'cf' & 'cf2'
+ - It sends a new incomplete batch and resets the old outdated batch
+ - It sends the next chunk with a checksum and resume_token, closing the batch.
+ In this case we need to use the updated schema from the refreshed prepare request.
+ """
+ self._final_metadata = self._prepare_metadata
+
+ @property
+ def metadata(self) -> Metadata:
+ """
+ Returns query metadata from the server.
+
+ Metadata will not be set until the first row has been yielded or a response with
+ no rows completes.
+
+ raises: :class:`EarlyMetadataCallError` when called before the first row has been returned
+ or the iterator has completed with no rows in the response.
+ """
+ if not self._final_metadata:
+ raise EarlyMetadataCallError()
+ return self._final_metadata
+
+ @CrossSync.convert
+ async def close(self) -> None:
+ """
+ Cancel all background tasks. Should be called after all rows have been processed.
+
+ Called automatically by the iterator.
+
+ :raises: :class:`ValueError` if called in an invalid state
+ """
+ # this doesn't need to be async anymore but we wrap the sync api to avoid a breaking
+ # change
+ self._close_internal()
+
+ def _close_internal(self) -> None:
+ if self._is_closed:
+ return
+ # Throw an error if the iterator has been successfully consumed but there is
+ # still buffered data
+ if self._fully_consumed and not self._byte_cursor.empty():
+ raise ValueError("Unexpected buffered data at end of executeQuery reqest")
+ self._is_closed = True
+ if self._register_instance_task is not None:
+ self._register_instance_task.cancel()
+ self._client._remove_instance_registration(
+ self._instance_id, self.app_profile_id, id(self)
+ )
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_byte_cursor.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_byte_cursor.py
new file mode 100644
index 000000000000..16eacbe9b81d
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_byte_cursor.py
@@ -0,0 +1,123 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import List, Optional
+
+from google.cloud.bigtable.data.execute_query._checksum import _CRC32C
+from google.cloud.bigtable_v2 import ExecuteQueryResponse
+
+
+class _ByteCursor:
+ """
+ Buffers bytes from `ExecuteQuery` responses until resume_token is received or end-of-stream
+ is reached. :class:`google.cloud.bigtable_v2.types.bigtable.ExecuteQueryResponse` obtained from
+ the server should be passed to the ``consume`` method and its non-None results should be passed
+ to appropriate :class:`google.cloud.bigtable.execute_query_reader._Reader` for parsing gathered
+ bytes.
+
+ This class consumes data obtained externally to be usable in both sync and async clients.
+
+ See :class:`google.cloud.bigtable.execute_query_reader._Reader` for more context.
+ """
+
+ def __init__(self):
+ self._batch_buffer = bytearray()
+ self._batches: List[bytes] = []
+ self._resume_token = None
+
+ def reset(self):
+ self._batch_buffer = bytearray()
+ self._batches = []
+
+ def prepare_for_new_request(self):
+ """
+ Prepares this ``_ByteCursor`` for retrying an ``ExecuteQuery`` request.
+
+ Clears internal buffers of this ``_ByteCursor`` and returns last received
+ ``resume_token`` to be used in retried request.
+
+ This is the only method that returns ``resume_token`` to the user.
+ Returning the token to the user is tightly coupled with clearing internal
+ buffers to prevent an accidental retry without clearing the state, which would
+ cause invalid results. The ``resume_token`` is not needed in other cases,
+ so there is no separate getter for it.
+
+ Returns:
+ bytes: Last received resume_token.
+ """
+ # The first response of any retried stream will always contain reset, so
+ # this isn't actually necessary, but we do it for safety
+ self.reset()
+ return self._resume_token
+
+ def empty(self) -> bool:
+ return not self._batch_buffer and not self._batches
+
+ def consume(self, response: ExecuteQueryResponse) -> Optional[List[bytes]]:
+ """
+ Reads results bytes from an ``ExecuteQuery`` response and adds them to a buffer.
+
+ If the response contains a ``resume_token``:
+ - the ``resume_token`` is saved in this ``_ByteCursor``, and
+ - internal buffers are flushed and returned to the caller.
+
+ ``resume_token`` is not available directly, but can be retrieved by calling
+ :meth:`._ByteCursor.prepare_for_new_request` when preparing to retry a request.
+
+ Args:
+ response (google.cloud.bigtable_v2.types.bigtable.ExecuteQueryResponse):
+ Response obtained from the stream.
+
+ Returns:
+ List[bytes] or None: list of byte batches if buffers were flushed, or None otherwise.
+ Each element in the list represents the bytes of a `ProtoRows` message.
+
+ Raises:
+ ValueError: If provided ``ExecuteQueryResponse`` is not valid
+ or contains bytes representing response of a different kind than previously
+ processed responses.
+ """
+ response_pb = response._pb # proto-plus attribute retrieval is slow.
+
+ if response_pb.HasField("results"):
+ results = response_pb.results
+ if results.reset:
+ self.reset()
+ if results.HasField("proto_rows_batch"):
+ self._batch_buffer.extend(results.proto_rows_batch.batch_data)
+ # Note that 0 is a valid checksum so we must check for field presence
+ if results.HasField("batch_checksum"):
+ expected_checksum = results.batch_checksum
+ checksum = _CRC32C.checksum(self._batch_buffer)
+ if expected_checksum != checksum:
+ raise ValueError(
+ f"Unexpected checksum mismatch. Expected: {expected_checksum}, got: {checksum}"
+ )
+ # We have a complete batch so we move it to batches and reset the
+ # batch_buffer
+ self._batches.append(memoryview(self._batch_buffer))
+ self._batch_buffer = bytearray()
+
+ if results.resume_token:
+ self._resume_token = results.resume_token
+
+ if self._batches:
+ if self._batch_buffer:
+ raise ValueError("Unexpected resume_token without checksum")
+ return_value = self._batches
+ self._batches = []
+ return return_value
+ else:
+ raise ValueError(f"Unexpected ExecuteQueryResponse: {response}")
+ return None
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_checksum.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_checksum.py
new file mode 100644
index 000000000000..b45a164d5835
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_checksum.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import warnings
+
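+# google_crc32c emits a RuntimeWarning at import time if it falls back to its
+# pure-python implementation; record any such warning here so _CRC32C can emit
+# a single deferred warning on the first checksum() call instead of at import.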
+with warnings.catch_warnings(record=True) as import_warning:
+ import google_crc32c # type: ignore
+
+
+class _CRC32C(object):
+ """
+ Wrapper around ``google_crc32c`` library
+ """
+
+ warn_emitted = False
+
+ @classmethod
+ def checksum(cls, val: bytearray) -> int:
+ """
+ Returns the crc32c checksum of the data.
+ """
+ if import_warning and not cls.warn_emitted:
+ cls.warn_emitted = True
+ warnings.warn(
+ "Using pure python implementation of `google-crc32` for ExecuteQuery response "
+ "validation. This is significantly slower than the c extension. If possible, "
+ "run in an environment that supports the c extension.",
+ RuntimeWarning,
+ )
+ memory_view = memoryview(val)
+ return google_crc32c.value(bytes(memory_view))
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_parameters_formatting.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_parameters_formatting.py
new file mode 100644
index 000000000000..ed7e946e8455
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_parameters_formatting.py
@@ -0,0 +1,155 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+from typing import Any, Dict, Optional
+
+from google.api_core.datetime_helpers import DatetimeWithNanoseconds
+
+from google.cloud.bigtable.data.exceptions import ParameterTypeInferenceFailed
+from google.cloud.bigtable.data.execute_query.metadata import SqlType
+from google.cloud.bigtable.data.execute_query.values import ExecuteQueryValueType
+from google.cloud.bigtable_v2.types.data import Value
+
+
+def _format_execute_query_params(
+ params: Optional[Dict[str, ExecuteQueryValueType]],
+ parameter_types: Optional[Dict[str, SqlType.Type]],
+) -> Dict[str, Value]:
+ """
+ Takes a dictionary of param_name -> param_value and optionally parameter types.
+ If the parameter types are not provided, this function tries to infer them.
+
+ Args:
+ params (Optional[Dict[str, ExecuteQueryValueType]]): mapping from parameter names
+ like they appear in query (without @ at the beginning) to their values.
+ Only values of type ExecuteQueryValueType are permitted.
+ parameter_types (Optional[Dict[str, SqlType.Type]]): mapping of parameter names
+ to their types.
+
+ Raises:
+ ValueError: raised when parameter types cannot be inferred and were not
+ provided explicitly.
+
+ Returns:
+ dictionary parsable to a protobuf representing parameters as defined
+ in ExecuteQueryRequest.params
+ """
+ if not params:
+ return {}
+ parameter_types = parameter_types or {}
+
+ result_values = {}
+ for key, value in params.items():
+ user_provided_type = parameter_types.get(key)
+ try:
+ if user_provided_type:
+ if not isinstance(user_provided_type, SqlType.Type):
+ raise ValueError(
+ f"Parameter type for {key} should be provided as an instance of SqlType.Type subclass."
+ )
+ param_type = user_provided_type
+ else:
+ param_type = _detect_type(value)
+
+ value_pb_dict = _convert_value_to_pb_value_dict(value, param_type)
+ except ValueError as err:
+ raise ValueError(f"Error when parsing parameter {key}") from err
+ result_values[key] = value_pb_dict
+
+ return result_values
+
+
+def _to_param_types(
+ params: Optional[Dict[str, ExecuteQueryValueType]],
+ param_types: Optional[Dict[str, SqlType.Type]],
+) -> Dict[str, Dict[str, Any]]:
+ """
+ Takes the params and user-supplied types and creates a param_types dict for the PrepareQuery API.
+
+ Args:
+ params: Dict of param name to param value
+ param_types: Dict of param name to param type for params with types that cannot be inferred
+
+ Returns:
+ Dict containing the param name and type for each parameter
+ """
+ if params is None:
+ return {}
+ formatted_types = {}
+ for param_key, param_value in params.items():
+ if param_types and param_key in param_types:
+ formatted_types[param_key] = param_types[param_key]._to_type_pb_dict()
+ else:
+ formatted_types[param_key] = _detect_type(param_value)._to_type_pb_dict()
+ return formatted_types
+
+
+def _convert_value_to_pb_value_dict(
+ value: ExecuteQueryValueType, param_type: SqlType.Type
+) -> Any:
+ """
+ Takes a value and converts it to a dictionary parsable to a protobuf.
+
+ Args:
+ value (ExecuteQueryValueType): value
+ param_type (SqlType.Type): object describing which ExecuteQuery type the value represents.
+
+ Returns:
+ dictionary parsable to a protobuf.
+ """
+ # type field will be set only in top-level Value.
+ value_dict = param_type._to_value_pb_dict(value)
+ value_dict["type_"] = param_type._to_type_pb_dict()
+ return value_dict
+
+
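+# Order matters for subclass relationships: bool is checked before int and
+# DatetimeWithNanoseconds before datetime.datetime, so isinstance() matches the
+# most specific type first.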
+_TYPES_TO_TYPE_DICTS = [
+ (bytes, SqlType.Bytes()),
+ (str, SqlType.String()),
+ (bool, SqlType.Bool()),
+ (int, SqlType.Int64()),
+ (DatetimeWithNanoseconds, SqlType.Timestamp()),
+ (datetime.datetime, SqlType.Timestamp()),
+ (datetime.date, SqlType.Date()),
+]
+
+
+def _detect_type(value: ExecuteQueryValueType) -> SqlType.Type:
+ """
+ Infers the ExecuteQuery type based on the value.
+ Raises ParameterTypeInferenceFailed if the type is ambiguous or cannot be inferred.
+ """
+ if value is None:
+ raise ParameterTypeInferenceFailed(
+ "Cannot infer type of None, please provide the type manually."
+ )
+
+ if isinstance(value, list):
+ raise ParameterTypeInferenceFailed(
+ "Cannot infer type of ARRAY parameters, please provide the type manually."
+ )
+
+ if isinstance(value, float):
+ raise ParameterTypeInferenceFailed(
+ "Cannot infer type of float, must specify either FLOAT32 or FLOAT64 type manually."
+ )
+
+ for field_type, type_dict in _TYPES_TO_TYPE_DICTS:
+ if isinstance(value, field_type):
+ return type_dict
+
+ raise ParameterTypeInferenceFailed(
+ f"Cannot infer type of {type(value).__name__}, please provide the type manually."
+ )
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py
new file mode 100644
index 000000000000..a43539e55de0
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py
@@ -0,0 +1,265 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+
+from typing import Any, Callable, Dict, Type, Optional, Union
+
+from google.protobuf.message import Message
+from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper
+from google.cloud.bigtable.data.execute_query.values import Struct
+from google.cloud.bigtable.data.execute_query.metadata import SqlType
+from google.cloud.bigtable_v2 import Value as PBValue
+from google.api_core.datetime_helpers import DatetimeWithNanoseconds
+
+_REQUIRED_PROTO_FIELDS = {
+ SqlType.Bytes: "bytes_value",
+ SqlType.String: "string_value",
+ SqlType.Int64: "int_value",
+ SqlType.Float32: "float_value",
+ SqlType.Float64: "float_value",
+ SqlType.Bool: "bool_value",
+ SqlType.Timestamp: "timestamp_value",
+ SqlType.Date: "date_value",
+ SqlType.Struct: "array_value",
+ SqlType.Array: "array_value",
+ SqlType.Map: "array_value",
+ SqlType.Proto: "bytes_value",
+ SqlType.Enum: "int_value",
+}
+
+
+def _parse_array_type(
+ value: PBValue,
+ metadata_type: SqlType.Array,
+ column_name: str | None,
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+) -> list[Any]:
+ """
+ Used for parsing an array represented as a protobuf into a Python list.
+ """
+ return list(
+ map(
+ lambda val: _parse_pb_value_to_python_value(
+ val, metadata_type.element_type, column_name, column_info
+ ),
+ value.array_value.values,
+ )
+ )
+
+
+def _parse_map_type(
+ value: PBValue,
+ metadata_type: SqlType.Map,
+ column_name: str | None,
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+) -> dict[Any, Any]:
+ """
+ Used for parsing a map represented as a protobuf into a Python dict.
+
+ Values of type `Map` are stored in a `Value.array_value` where each entry
+ is another `Value.array_value` with two elements (the key and the value,
+ in that order).
+ Normally encoded Map values won't have repeated keys, however, the client
+ must handle the case in which they do. If the same key appears
+ multiple times, the _last_ value takes precedence.
+ """
+
+ try:
+ return dict(
+ map(
+ lambda map_entry: (
+ _parse_pb_value_to_python_value(
+ map_entry.array_value.values[0],
+ metadata_type.key_type,
+ f"{column_name}.key" if column_name is not None else None,
+ column_info,
+ ),
+ _parse_pb_value_to_python_value(
+ map_entry.array_value.values[1],
+ metadata_type.value_type,
+ f"{column_name}.value" if column_name is not None else None,
+ column_info,
+ ),
+ ),
+ value.array_value.values,
+ )
+ )
+ except IndexError:
+ raise ValueError("Invalid map entry - less or more than two values.")
+
+
+def _parse_struct_type(
+ value: PBValue,
+ metadata_type: SqlType.Struct,
+ column_name: str | None,
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+) -> Struct:
+ """
+ Used for parsing a struct represented as a protobuf into a
+ google.cloud.bigtable.data.execute_query.Struct
+ """
+ if len(value.array_value.values) != len(metadata_type.fields):
+ raise ValueError("Mismatched lengths of values and types.")
+
+ struct = Struct()
+ for value, field in zip(value.array_value.values, metadata_type.fields):
+ field_name, field_type = field
+ nested_column_name: str | None
+ if column_name and field_name:
+ # qualify the column name for nested lookups
+ nested_column_name = f"{column_name}.{field_name}"
+ else:
+ nested_column_name = None
+ struct.add_field(
+ field_name,
+ _parse_pb_value_to_python_value(
+ value, field_type, nested_column_name, column_info
+ ),
+ )
+
+ return struct
+
+
+def _parse_timestamp_type(
+ value: PBValue,
+ metadata_type: SqlType.Timestamp,
+ column_name: str | None,
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+) -> DatetimeWithNanoseconds:
+ """
+ Used for parsing a timestamp represented as a protobuf into a DatetimeWithNanoseconds.
+ """
+ return DatetimeWithNanoseconds.from_timestamp_pb(value.timestamp_value)
+
+
+def _parse_proto_type(
+ value: PBValue,
+ metadata_type: SqlType.Proto,
+ column_name: str | None,
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+) -> Message | bytes:
+ """
+ Parses a serialized protobuf message into a Message object using type information
+ provided in column_info.
+
+ Args:
+ value: The value to parse, expected to have a bytes_value attribute.
+ metadata_type: The expected SQL type (Proto).
+ column_name: The name of the column.
+ column_info: (Optional) A dictionary mapping column names to their
+ corresponding Protobuf Message classes. This information is used
+ to deserialize the raw bytes.
+
+ Returns:
+ A deserialized Protobuf Message object if parsing is successful.
+ If the required type information is not found in column_info, the function
+ returns the original serialized data as bytes (value.bytes_value).
+ This fallback ensures that the raw data is still accessible.
+
+ Raises:
+ google.protobuf.message.DecodeError: If `value.bytes_value` cannot be
+ parsed as the Message type specified in `column_info`.
+ """
+ if (
+ column_name is not None
+ and column_info is not None
+ and column_info.get(column_name) is not None
+ ):
+ default_proto_message = column_info.get(column_name)
+ if isinstance(default_proto_message, Message):
+ proto_message = type(default_proto_message)()
+ proto_message.ParseFromString(value.bytes_value)
+ return proto_message
+ return value.bytes_value
+
+
+def _parse_enum_type(
+ value: PBValue,
+ metadata_type: SqlType.Enum,
+ column_name: str | None,
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+) -> int | str:
+ """
+ Parses an integer value into a Protobuf enum name string using type information
+ provided in column_info.
+
+ Args:
+ value: The value to parse, expected to have an int_value attribute.
+ metadata_type: The expected SQL type (Enum).
+ column_name: The name of the column.
+ column_info: (Optional) A dictionary mapping column names to their
+ corresponding Protobuf EnumTypeWrapper objects. This information
+ is used to convert the integer to an enum name.
+
+ Returns:
+ A string representing the name of the enum value if conversion is successful.
+ If conversion fails for any reason, such as the required EnumTypeWrapper
+ not being found in column_info, or if an error occurs during the name lookup
+ (e.g., the integer is not a valid enum value), the function returns the
+ original integer value (value.int_value). This fallback ensures the
+ raw integer representation is still accessible.
+ """
+ if (
+ column_name is not None
+ and column_info is not None
+ and column_info.get(column_name) is not None
+ ):
+ proto_enum = column_info.get(column_name)
+ if isinstance(proto_enum, EnumTypeWrapper):
+ return proto_enum.Name(value.int_value)
+ return value.int_value
+
+
+ParserCallable = Callable[
+ [PBValue, Any, Optional[str], Optional[Dict[str, Union[Message, EnumTypeWrapper]]]],
+ Any,
+]
+
+_TYPE_PARSERS: Dict[Type[SqlType.Type], ParserCallable] = {
+ SqlType.Timestamp: _parse_timestamp_type,
+ SqlType.Struct: _parse_struct_type,
+ SqlType.Array: _parse_array_type,
+ SqlType.Map: _parse_map_type,
+ SqlType.Proto: _parse_proto_type,
+ SqlType.Enum: _parse_enum_type,
+}
+
+
+def _parse_pb_value_to_python_value(
+ value: PBValue,
+ metadata_type: SqlType.Type,
+ column_name: str | None,
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+) -> Any:
+ """
+ Used for converting a value represented as a protobuf into a Python object.
+ """
+ value_kind = value.WhichOneof("kind")
+ if not value_kind:
+ return None
+
+ kind = type(metadata_type)
+ if not value.HasField(_REQUIRED_PROTO_FIELDS[kind]):
+ raise ValueError(
+ f"{_REQUIRED_PROTO_FIELDS[kind]} field for {kind.__name__} type not found in a Value."
+ )
+
+ if kind in _TYPE_PARSERS:
+ parser = _TYPE_PARSERS[kind]
+ return parser(value, metadata_type, column_name, column_info)
+ elif kind in _REQUIRED_PROTO_FIELDS:
+ field_name = _REQUIRED_PROTO_FIELDS[kind]
+ return getattr(value, field_name)
+ else:
+ raise ValueError(f"Unknown kind {kind}")
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_reader.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_reader.py
new file mode 100644
index 000000000000..467c2030fe67
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_reader.py
@@ -0,0 +1,142 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+
+from typing import (
+ List,
+ TypeVar,
+ Generic,
+ Iterable,
+ Optional,
+ Sequence,
+)
+from abc import ABC, abstractmethod
+from google.protobuf.message import Message
+from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper
+
+from google.cloud.bigtable_v2 import ProtoRows, Value as PBValue
+
+from google.cloud.bigtable.data.execute_query._query_result_parsing_utils import (
+ _parse_pb_value_to_python_value,
+)
+
+from google.cloud.bigtable.helpers import batched
+
+from google.cloud.bigtable.data.execute_query.values import QueryResultRow
+from google.cloud.bigtable.data.execute_query.metadata import Metadata
+
+
+T = TypeVar("T")
+
+
+class _Reader(ABC, Generic[T]):
+ """
+ An interface for classes that consume and parse bytes returned by ``_ByteCursor``.
+ Parsed bytes should be gathered into bundles (rows or columns) of expected size
+ and converted to an appropriate type ``T`` that will be returned as a semantically
+ meaningful result to the library user by
+ :meth:`google.cloud.bigtable.instance.Instance.execute_query` or
+ :meth:`google.cloud.bigtable.data._async.client.BigtableDataClientAsync.execute_query`
+ methods.
+
+ This class consumes data obtained externally to be usable in both sync and async clients.
+
+ See :class:`google.cloud.bigtable.byte_cursor._ByteCursor` for more context.
+ """
+
+ @abstractmethod
+ def consume(
+ self,
+ batches_to_consume: List[bytes],
+ metadata: Metadata,
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+ ) -> Optional[Iterable[T]]:
+ """This method receives a list of batches of bytes to be parsed as ProtoRows messages.
+ It then uses the metadata to group the values in the parsed messages into rows. Returns
+ None if batches_to_consume is empty.
+ Args:
+ batches_to_consume (List[bytes]): list of parsable byte batches received from
+ :meth:`google.cloud.bigtable.byte_cursor._ByteCursor.consume`
+ method.
+ metadata: metadata used to transform values to rows
+ column_info: (Optional) dict with mappings between column names and additional column information
+ for protobuf deserialization.
+
+ Returns:
+ Iterable[T] or None: Iterable if gathered values can form one or more instances of T,
+ or None if there is not enough data to construct at least one instance of T with
+ appropriate number of entries.
+ """
+ raise NotImplementedError
+
+
+class _QueryResultRowReader(_Reader[QueryResultRow]):
+ """
+ A :class:`._Reader` consuming bytes representing
+ :class:`google.cloud.bigtable_v2.types.Type`
+ and producing :class:`google.cloud.bigtable.execute_query.QueryResultRow`.
+
+ Number of entries in each row is determined by number of columns in
+ :class:`google.cloud.bigtable.execute_query.Metadata` obtained from
+ :class:`google.cloud.bigtable.byte_cursor._ByteCursor` passed in the constructor.
+ """
+
+ def _parse_proto_rows(self, bytes_to_parse: bytes) -> Iterable[PBValue]:
+ proto_rows = ProtoRows.pb().FromString(bytes_to_parse)
+ return proto_rows.values
+
+ def _construct_query_result_row(
+ self,
+ values: Sequence[PBValue],
+ metadata: Metadata,
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+ ) -> QueryResultRow:
+ result = QueryResultRow()
+ columns = metadata.columns
+
+ assert len(values) == len(
+ columns
+ ), "This function should be called only when count of values matches count of columns."
+
+ for column, value in zip(columns, values):
+ parsed_value = _parse_pb_value_to_python_value(
+ value, column.column_type, column.column_name, column_info
+ )
+ result.add_field(column.column_name, parsed_value)
+ return result
+
+ def consume(
+ self,
+ batches_to_consume: List[bytes],
+ metadata: Metadata,
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+ ) -> Optional[Iterable[QueryResultRow]]:
+ num_columns = len(metadata.columns)
+ rows = []
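+ # each batch is one serialized ProtoRows message; its flat list of values is
+ # grouped into rows of num_columns entries each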
+ for batch_bytes in batches_to_consume:
+ values = self._parse_proto_rows(batch_bytes)
+ for row_data in batched(values, n=num_columns):
+ if len(row_data) == num_columns:
+ rows.append(
+ self._construct_query_result_row(
+ row_data, metadata, column_info
+ )
+ )
+ else:
+ raise ValueError(
+ "Unexpected error, recieved bad number of values. "
+ f"Expected {num_columns} got {len(row_data)}."
+ )
+
+ return rows
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py
new file mode 100644
index 000000000000..68594d0e867a
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py
@@ -0,0 +1,259 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# This file is automatically generated by CrossSync. Do not edit manually.
+
+from __future__ import annotations
+from typing import Any, Dict, Optional, Sequence, Tuple, TYPE_CHECKING
+from google.api_core import retry as retries
+from google.protobuf.message import Message
+from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper
+from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor
+from google.cloud.bigtable.data._helpers import (
+ _attempt_timeout_generator,
+ _retry_exception_factory,
+)
+from google.cloud.bigtable.data.exceptions import (
+ EarlyMetadataCallError,
+ InvalidExecuteQueryResponse,
+)
+from google.cloud.bigtable.data.execute_query.values import QueryResultRow
+from google.cloud.bigtable.data.execute_query.metadata import Metadata
+from google.cloud.bigtable.data.execute_query._reader import (
+ _QueryResultRowReader,
+ _Reader,
+)
+from google.cloud.bigtable_v2.types.bigtable import (
+ ExecuteQueryRequest as ExecuteQueryRequestPB,
+ ExecuteQueryResponse,
+)
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+if TYPE_CHECKING:
+ from google.cloud.bigtable.data import BigtableDataClient as DataClientType
+
+
+def _has_resume_token(response: ExecuteQueryResponse) -> bool:
+ response_pb = response._pb
+ if response_pb.HasField("results"):
+ results = response_pb.results
+ return len(results.resume_token) > 0
+ return False
+
+
+class ExecuteQueryIterator:
+ def __init__(
+ self,
+ client: DataClientType,
+ instance_id: str,
+ app_profile_id: Optional[str],
+ request_body: Dict[str, Any],
+ prepare_metadata: Metadata,
+ attempt_timeout: float | None,
+ operation_timeout: float,
+ req_metadata: Sequence[Tuple[str, str]] = (),
+ retryable_excs: Sequence[type[Exception]] = (),
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+ ) -> None:
+ """Collects responses from ExecuteQuery requests and parses them into QueryResultRows.
+
+ **Please Note** this is not meant to be constructed directly by applications. It should always
+ be created via the client. The constructor is subject to change.
+
+ It is **not thread-safe**. It should not be used by multiple threads.
+
+ Args:
+ client: bigtable client
+ instance_id: id of the instance on which the query is executed
+ request_body: dict representing the body of the ExecuteQueryRequest
+ attempt_timeout: the time budget for an individual network request, in seconds.
+ If it takes longer than this time to complete, the request will be cancelled with
+ a DeadlineExceeded exception, and a retry will be attempted.
+ operation_timeout: the time budget for the entire operation, in seconds.
+ Failed requests will be retried within the budget
+ req_metadata: metadata used while sending the gRPC request
+ retryable_excs: a list of errors that will be retried if encountered.
+ column_info: dict with mappings between column names and additional column information
+ for protobuf deserialization.
+ Raises:
+ None
+ :class:`ValueError` as a safeguard if data is processed in an unexpected state
+ """
+ self._table_name = None
+ self._app_profile_id = app_profile_id
+ self._client = client
+ self._instance_id = instance_id
+ self._prepare_metadata: Metadata = prepare_metadata
+ self._final_metadata: Metadata | None = None
+ self._byte_cursor = _ByteCursor()
+ self._reader: _Reader[QueryResultRow] = _QueryResultRowReader()
+ self.has_received_token = False
+ self._result_generator = self._next_impl()
+ self._register_instance_task = None
+ self._fully_consumed = False
+ self._is_closed = False
+ self._request_body = request_body
+ self._attempt_timeout_gen = _attempt_timeout_generator(
+ attempt_timeout, operation_timeout
+ )
+ self._stream = CrossSync._Sync_Impl.retry_target_stream(
+ self._make_request_with_resume_token,
+ retries.if_exception_type(*retryable_excs),
+ retries.exponential_sleep_generator(0.01, 60, multiplier=2),
+ operation_timeout,
+ exception_factory=_retry_exception_factory,
+ )
+ self._req_metadata = req_metadata
+ self._column_info = column_info
+ try:
+ self._register_instance_task = CrossSync._Sync_Impl.create_task(
+ self._client._register_instance,
+ self._instance_id,
+ self.app_profile_id,
+ id(self),
+ sync_executor=self._client._executor,
+ )
+ except RuntimeError as e:
+ raise RuntimeError(
+ f"{self.__class__.__name__} must be created within an async event loop context."
+ ) from e
+
+ @property
+ def is_closed(self) -> bool:
+ """Returns True if the iterator is closed, False otherwise."""
+ return self._is_closed
+
+ @property
+ def app_profile_id(self) -> Optional[str]:
+ """Returns the app_profile_id of the iterator."""
+ return self._app_profile_id
+
+ @property
+ def table_name(self) -> Optional[str]:
+ """Returns the table_name of the iterator."""
+ return self._table_name
+
+ def _make_request_with_resume_token(self):
+ """perfoms the rpc call using the correct resume token."""
+ resume_token = self._byte_cursor.prepare_for_new_request()
+ request = ExecuteQueryRequestPB(
+ {**self._request_body, "resume_token": resume_token}
+ )
+ return self._client._gapic_client.execute_query(
+ request,
+ timeout=next(self._attempt_timeout_gen),
+ metadata=self._req_metadata,
+ retry=None,
+ )
+
+ def _next_impl(self) -> CrossSync._Sync_Impl.Iterator[QueryResultRow]:
+ """Generator wrapping the response stream which parses the stream results
+ and returns full `QueryResultRow`s."""
+ try:
+ for response in self._stream:
+ try:
+ if self._final_metadata is None and _has_resume_token(response):
+ self._finalize_metadata()
+ batches_to_parse = self._byte_cursor.consume(response)
+ if not batches_to_parse:
+ continue
+ if not self.metadata:
+ raise ValueError(
+ "Error parsing response before finalizing metadata"
+ )
+ results = self._reader.consume(
+ batches_to_parse, self.metadata, self._column_info
+ )
+ if results is None:
+ continue
+ except ValueError as e:
+ raise InvalidExecuteQueryResponse(
+ "Invalid ExecuteQuery response received"
+ ) from e
+ for result in results:
+ yield result
+ if self._final_metadata is None:
+ self._finalize_metadata()
+ self._fully_consumed = True
+ finally:
+ self._close_internal()
+
+ def __next__(self) -> QueryResultRow:
+ """Yields QueryResultRows representing the results of the query.
+
+ :raises: :class:`ValueError` as a safeguard if data is processed in an unexpected state
+ """
+ if self._is_closed:
+ raise CrossSync._Sync_Impl.StopIteration
+ return self._result_generator.__next__()
+
+ def __iter__(self):
+ return self
+
+ def _finalize_metadata(self) -> None:
+ """Sets _final_metadata to the metadata of the latest prepare_response.
+ The iterator should call this after either the first resume token is received or the
+ stream completes successfully with no responses.
+
+ This can't be set on init because the metadata will be able to change due to plan refresh.
+ Plan refresh isn't implemented yet, but we want functionality to stay the same when it is.
+
+ For example the following scenario for query "SELECT * FROM table":
+ - Make a request, table has one column family 'cf'
+ - Return an incomplete batch
+ - request fails with transient error
+ - Meanwhile the table has had a second column family added 'cf2'
+ - Retry the request, get an error indicating the `prepared_query` has expired
+ - Refresh the prepared_query and retry the request, the new prepared_query
+ contains both 'cf' & 'cf2'
+ - It sends a new incomplete batch and resets the old outdated batch
+ - It sends the next chunk with a checksum and resume_token, closing the batch.
+ In this case we need to use the updated schema from the refreshed prepare request."""
+ self._final_metadata = self._prepare_metadata
+
+ @property
+ def metadata(self) -> Metadata:
+ """Returns query metadata from the server or None if the iterator has been closed
+ or if metadata has not been set yet.
+
+ Metadata will not be set until the first row has been yielded or response with no rows
+ completes.
+
+ raises: :class:`EarlyMetadataCallError` when called before the first row has been returned
+ or the iterator has completed with no rows in the response."""
+ if not self._final_metadata:
+ raise EarlyMetadataCallError()
+ return self._final_metadata
+
+ def close(self) -> None:
+ """Cancel all background tasks. Should be called after all rows were processed.
+
+ Called automatically by iterator
+
+ :raises: :class:`ValueError ` if called in an invalid state
+ """
+ self._close_internal()
+
+ def _close_internal(self) -> None:
+ if self._is_closed:
+ return
+ if self._fully_consumed and (not self._byte_cursor.empty()):
+ raise ValueError("Unexpected buffered data at end of executeQuery reqest")
+ self._is_closed = True
+ if self._register_instance_task is not None:
+ self._register_instance_task.cancel()
+ self._client._remove_instance_registration(
+ self._instance_id, self.app_profile_id, id(self)
+ )
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py
new file mode 100644
index 000000000000..74b6cb836688
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py
@@ -0,0 +1,425 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This module provides the SqlType class used for specifying types in
+ExecuteQuery and some utilities.
+
+The SqlTypes are used in Metadata returned by the ExecuteQuery operation as well
+as for specifying query parameter types explicitly.
+"""
+
+from collections import defaultdict
+import datetime
+from typing import Any, Dict, List, Optional, Set, Tuple, Type, Union
+
+from google.api_core.datetime_helpers import DatetimeWithNanoseconds
+from google.protobuf import timestamp_pb2 # type: ignore
+from google.type import date_pb2 # type: ignore
+
+from google.cloud.bigtable.data.execute_query.values import _NamedList
+from google.cloud.bigtable_v2 import ResultSetMetadata
+from google.cloud.bigtable_v2 import Type as PBType
+
+
+class SqlType:
+ """
+ Classes denoting types of values returned by Bigtable's ExecuteQuery operation.
+
+ Used in :class:`.Metadata`.
+ """
+
+ class Type:
+ expected_type: Optional[type] = None
+ value_pb_dict_field_name: Optional[str] = None
+ type_field_name: Optional[str] = None
+
+ @classmethod
+ def from_pb_type(cls, pb_type: Optional[PBType] = None):
+ return cls()
+
+ def _to_type_pb_dict(self) -> Dict[str, Any]:
+ if not self.type_field_name:
+ raise NotImplementedError(
+ "Fill in expected_type and value_pb_dict_field_name"
+ )
+
+ return {self.type_field_name: {}}
+
+ def _to_value_pb_dict(self, value: Any) -> Dict[str, Any]:
+ if self.expected_type is None or self.value_pb_dict_field_name is None:
+ raise NotImplementedError(
+ "Fill in expected_type and value_pb_dict_field_name"
+ )
+
+ if value is None:
+ return {}
+
+ if not isinstance(value, self.expected_type):
+ raise ValueError(
+ f"Expected query parameter of type {self.expected_type.__name__}, got {type(value).__name__}"
+ )
+
+ return {self.value_pb_dict_field_name: value}
+
+ def __eq__(self, other):
+ return isinstance(other, type(self))
+
+ def __str__(self) -> str:
+ return self.__class__.__name__
+
+ def __repr__(self) -> str:
+ return self.__str__()
+
+ class Struct(_NamedList[Type], Type):
+ """Struct SQL type."""
+
+ @classmethod
+ def from_pb_type(cls, type_pb: Optional[PBType] = None) -> "SqlType.Struct":
+ if type_pb is None:
+ raise ValueError("missing required argument type_pb")
+ fields: List[Tuple[Optional[str], SqlType.Type]] = []
+ for field in type_pb.struct_type.fields:
+ fields.append((field.field_name, _pb_type_to_metadata_type(field.type)))
+ return cls(fields)
+
+ def _to_value_pb_dict(self, value: Any):
+ raise NotImplementedError("Struct is not supported as a query parameter")
+
+ def _to_type_pb_dict(self) -> Dict[str, Any]:
+ raise NotImplementedError("Struct is not supported as a query parameter")
+
+ def __eq__(self, other: object):
+ # Cannot use super() here - we'd either have to:
+ # - call super() in these base classes, which would in turn call Object.__eq__
+ # to compare objects by identity and return a False, or
+ # - do not call super() in these base classes, which would result in calling only
+ # one of the __eq__ methods (a super() in the base class would be required to call the other one), or
+ # - call super() in only one of the base classes, but that would be error prone and changing
+ # the order of base classes would introduce unexpected behaviour.
+ # we also have to disable mypy because it doesn't see that SqlType.Struct == _NamedList[Type]
+ return SqlType.Type.__eq__(self, other) and _NamedList.__eq__(self, other) # type: ignore
+
+ def __str__(self):
+ return super(_NamedList, self).__str__()
+
+ class Array(Type):
+ """Array SQL type."""
+
+ def __init__(self, element_type: "SqlType.Type"):
+ if isinstance(element_type, SqlType.Array):
+ raise ValueError("Arrays of arrays are not supported.")
+ if isinstance(element_type, SqlType.Map):
+ raise ValueError("Arrays of Maps are not supported.")
+ self._element_type = element_type
+
+ @property
+ def element_type(self):
+ return self._element_type
+
+ @classmethod
+ def from_pb_type(cls, type_pb: Optional[PBType] = None) -> "SqlType.Array":
+ if type_pb is None:
+ raise ValueError("missing required argument type_pb")
+ return cls(_pb_type_to_metadata_type(type_pb.array_type.element_type))
+
+ def _to_value_pb_dict(self, value: Any):
+ if value is None:
+ return {}
+
+ return {
+ "array_value": {
+ "values": [
+ self.element_type._to_value_pb_dict(entry) for entry in value
+ ]
+ }
+ }
+
+ def _to_type_pb_dict(self) -> Dict[str, Any]:
+ return {
+ "array_type": {"element_type": self.element_type._to_type_pb_dict()}
+ }
+
+ def __eq__(self, other):
+ return super().__eq__(other) and self.element_type == other.element_type
+
+ def __str__(self) -> str:
+ return f"{self.__class__.__name__}<{str(self.element_type)}>"
+
+ class Map(Type):
+ """Map SQL type."""
+
+ def __init__(self, key_type: "SqlType.Type", value_type: "SqlType.Type"):
+ self._key_type = key_type
+ self._value_type = value_type
+
+ @property
+ def key_type(self):
+ return self._key_type
+
+ @property
+ def value_type(self):
+ return self._value_type
+
+ @classmethod
+ def from_pb_type(cls, type_pb: Optional[PBType] = None) -> "SqlType.Map":
+ if type_pb is None:
+ raise ValueError("missing required argument type_pb")
+ return cls(
+ _pb_type_to_metadata_type(type_pb.map_type.key_type),
+ _pb_type_to_metadata_type(type_pb.map_type.value_type),
+ )
+
+ def _to_type_pb_dict(self) -> Dict[str, Any]:
+ raise NotImplementedError("Map is not supported as a query parameter")
+
+ def _to_value_pb_dict(self, value: Any):
+ raise NotImplementedError("Map is not supported as a query parameter")
+
+ def __eq__(self, other):
+ return (
+ super().__eq__(other)
+ and self.key_type == other.key_type
+ and self.value_type == other.value_type
+ )
+
+ def __str__(self) -> str:
+ return (
+ f"{self.__class__.__name__}<"
+ f"{str(self._key_type)},{str(self._value_type)}>"
+ )
+
+ class Bytes(Type):
+ """Bytes SQL type."""
+
+ expected_type = bytes
+ value_pb_dict_field_name = "bytes_value"
+ type_field_name = "bytes_type"
+
+ class String(Type):
+ """String SQL type."""
+
+ expected_type = str
+ value_pb_dict_field_name = "string_value"
+ type_field_name = "string_type"
+
+ class Int64(Type):
+ """Int64 SQL type."""
+
+ expected_type = int
+ value_pb_dict_field_name = "int_value"
+ type_field_name = "int64_type"
+
+ class Float64(Type):
+ """Float64 SQL type."""
+
+ expected_type = float
+ value_pb_dict_field_name = "float_value"
+ type_field_name = "float64_type"
+
+ class Float32(Type):
+ """Float32 SQL type."""
+
+ expected_type = float
+ value_pb_dict_field_name = "float_value"
+ type_field_name = "float32_type"
+
+ class Bool(Type):
+ """Bool SQL type."""
+
+ expected_type = bool
+ value_pb_dict_field_name = "bool_value"
+ type_field_name = "bool_type"
+
+ class Timestamp(Type):
+ """
+ Timestamp SQL type.
+
+ Timestamp supports :class:`DatetimeWithNanoseconds` but Bigtable SQL does
+ not currently support nanosecond precision. We support this for potential
+ compatibility in the future. Nanoseconds are currently ignored.
+ """
+
+ type_field_name = "timestamp_type"
+ expected_types = (
+ datetime.datetime,
+ DatetimeWithNanoseconds,
+ )
+
+ def _to_value_pb_dict(self, value: Any) -> Dict[str, Any]:
+ if value is None:
+ return {}
+
+ if not isinstance(value, self.expected_types):
+ raise ValueError(
+ f"Expected one of {', '.join((_type.__name__ for _type in self.expected_types))}"
+ )
+
+ if isinstance(value, DatetimeWithNanoseconds):
+ return {"timestamp_value": value.timestamp_pb()}
+ else: # value must be an instance of datetime.datetime
+ ts = timestamp_pb2.Timestamp()
+ ts.FromDatetime(value)
+ return {"timestamp_value": ts}
+
+ class Date(Type):
+ """Date SQL type."""
+
+ type_field_name = "date_type"
+ expected_type = datetime.date
+
+ def _to_value_pb_dict(self, value: Any) -> Dict[str, Any]:
+ if value is None:
+ return {}
+
+ if not isinstance(value, self.expected_type):
+ raise ValueError(
+ f"Expected query parameter of type {self.expected_type.__name__}, got {type(value).__name__}"
+ )
+
+ return {
+ "date_value": date_pb2.Date(
+ year=value.year,
+ month=value.month,
+ day=value.day,
+ )
+ }
+
+ class Proto(Type):
+ """Proto SQL type."""
+
+ type_field_name = "proto_type"
+
+ def _to_value_pb_dict(self, value: Any):
+ raise NotImplementedError("Proto is not supported as a query parameter")
+
+ def _to_type_pb_dict(self) -> Dict[str, Any]:
+ raise NotImplementedError("Proto is not supported as a query parameter")
+
+ class Enum(Type):
+ """Enum SQL type."""
+
+ type_field_name = "enum_type"
+
+ def _to_value_pb_dict(self, value: Any):
+ raise NotImplementedError("Enum is not supported as a query parameter")
+
+ def _to_type_pb_dict(self) -> Dict[str, Any]:
+ raise NotImplementedError("Enum is not supported as a query parameter")
+
+
+class Metadata:
+ """
+ Metadata class for the ExecuteQuery operation.
+
+ Args:
+ columns (List[Tuple[Optional[str], SqlType.Type]]): List of column
+ metadata tuples. Each tuple contains the column name and the column
+ type.
+ """
+
+ class Column:
+ def __init__(self, column_name: Optional[str], column_type: SqlType.Type):
+ self._column_name = column_name
+ self._column_type = column_type
+
+ @property
+ def column_name(self) -> Optional[str]:
+ return self._column_name
+
+ @property
+ def column_type(self) -> SqlType.Type:
+ return self._column_type
+
+ @property
+ def columns(self) -> List[Column]:
+ return self._columns
+
+ def __init__(
+ self, columns: Optional[List[Tuple[Optional[str], SqlType.Type]]] = None
+ ):
+ self._columns: List[Metadata.Column] = []
+ self._column_indexes: Dict[str, List[int]] = defaultdict(list)
+ self._duplicate_names: Set[str] = set()
+
+ if columns:
+ for column_name, column_type in columns:
+ if column_name is not None:
+ if column_name in self._column_indexes:
+ self._duplicate_names.add(column_name)
+ self._column_indexes[column_name].append(len(self._columns))
+ self._columns.append(Metadata.Column(column_name, column_type))
+
+ def __getitem__(self, index_or_name: Union[str, int]) -> Column:
+ if isinstance(index_or_name, str):
+ if index_or_name in self._duplicate_names:
+ raise KeyError(
+ f"Ambigious column name: '{index_or_name}', use index instead."
+ f" Field present on indexes {', '.join(map(str, self._column_indexes[index_or_name]))}."
+ )
+ if index_or_name not in self._column_indexes:
+ raise KeyError(f"No such column: {index_or_name}")
+ index = self._column_indexes[index_or_name][0]
+ else:
+ index = index_or_name
+ return self._columns[index]
+
+ def __len__(self):
+ return len(self._columns)
+
+ def __str__(self) -> str:
+ columns_str = ", ".join([str(column) for column in self._columns])
+ return f"{self.__class__.__name__}([{columns_str}])"
+
+ def __repr__(self) -> str:
+ return self.__str__()
+
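+ # Editor's note: an illustrative sketch of column lookup on Metadata (not part
+ # of the library source). The column names are hypothetical, and the scalar
+ # SqlType constructors are assumed to take no arguments.
+ #
+ #     md = Metadata([("id", SqlType.Int64()), ("name", SqlType.String()), ("name", SqlType.String())])
+ #     md["id"].column_type    # SqlType.Int64 instance
+ #     md[1].column_name       # "name" (positional access always works)
+ #     md["name"]              # KeyError: ambiguous, "name" appears at indexes 1 and 2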
+
+def _pb_metadata_to_metadata_types(
+ metadata_pb: ResultSetMetadata,
+) -> Metadata:
+ if "proto_schema" in metadata_pb:
+ fields: List[Tuple[Optional[str], SqlType.Type]] = []
+ if not metadata_pb.proto_schema.columns:
+ raise ValueError("Invalid empty ResultSetMetadata received.")
+ for column_metadata in metadata_pb.proto_schema.columns:
+ fields.append(
+ (column_metadata.name, _pb_type_to_metadata_type(column_metadata.type))
+ )
+ return Metadata(fields)
+ raise ValueError("Invalid ResultSetMetadata object received.")
+
+
+_PROTO_TYPE_TO_METADATA_TYPE_FACTORY: Dict[str, Type[SqlType.Type]] = {
+ "bytes_type": SqlType.Bytes,
+ "string_type": SqlType.String,
+ "int64_type": SqlType.Int64,
+ "float32_type": SqlType.Float32,
+ "float64_type": SqlType.Float64,
+ "bool_type": SqlType.Bool,
+ "timestamp_type": SqlType.Timestamp,
+ "date_type": SqlType.Date,
+ "proto_type": SqlType.Proto,
+ "enum_type": SqlType.Enum,
+ "struct_type": SqlType.Struct,
+ "array_type": SqlType.Array,
+ "map_type": SqlType.Map,
+}
+
+
+def _pb_type_to_metadata_type(type_pb: PBType) -> SqlType.Type:
+ kind = PBType.pb(type_pb).WhichOneof("kind")
+ if kind in _PROTO_TYPE_TO_METADATA_TYPE_FACTORY:
+ return _PROTO_TYPE_TO_METADATA_TYPE_FACTORY[kind].from_pb_type(type_pb)
+ raise ValueError(f"Unrecognized response data type: {type_pb}")
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/values.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/values.py
new file mode 100644
index 000000000000..80a0bff6f7b9
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/values.py
@@ -0,0 +1,123 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections import defaultdict
+from typing import (
+ Optional,
+ List,
+ Dict,
+ Set,
+ Union,
+ TypeVar,
+ Generic,
+ Tuple,
+ Mapping,
+)
+from google.type import date_pb2 # type: ignore
+from google.api_core.datetime_helpers import DatetimeWithNanoseconds
+
+T = TypeVar("T")
+
+
+class _NamedList(Generic[T]):
+ """
+ A class designed to store a list of elements, which can be accessed by
+ name or index.
+ This class is different from namedtuple, because namedtuple has some
+ restrictions on names of fields and we do not want to have them.
+ """
+
+ _str_cls_name = "_NamedList"
+
+ def __init__(self, fields: Optional[List[Tuple[Optional[str], T]]] = None):
+ self._fields: List[Tuple[Optional[str], T]] = []
+ self._field_indexes: Dict[str, List[int]] = defaultdict(list)
+ self._duplicate_names: Set[str] = set()
+
+ if fields:
+ for field_name, field_type in fields:
+ self.add_field(field_name, field_type)
+
+ def add_field(self, name: Optional[str], value: T):
+ if name:
+ if name in self._field_indexes:
+ self._duplicate_names.add(name)
+ self._field_indexes[name].append(len(self._fields))
+ self._fields.append((name, value))
+
+ @property
+ def fields(self):
+ return self._fields
+
+ def __getitem__(self, index_or_name: Union[str, int]):
+ if isinstance(index_or_name, str):
+ if index_or_name in self._duplicate_names:
+ raise KeyError(
+ f"Ambigious field name: '{index_or_name}', use index instead."
+ f" Field present on indexes {', '.join(map(str, self._field_indexes[index_or_name]))}."
+ )
+ if index_or_name not in self._field_indexes:
+ raise KeyError(f"No such field: {index_or_name}")
+ index = self._field_indexes[index_or_name][0]
+ else:
+ index = index_or_name
+ return self._fields[index][1]
+
+ def __len__(self):
+ return len(self._fields)
+
+ def __eq__(self, other):
+ if not isinstance(other, _NamedList):
+ return False
+
+ return (
+ self._fields == other._fields
+ and self._field_indexes == other._field_indexes
+ )
+
+ def __str__(self) -> str:
+ fields_str = ", ".join([str(field) for field in self._fields])
+ return f"{self.__class__.__name__}([{fields_str}])"
+
+ def __repr__(self) -> str:
+ return self.__str__()
+
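+ # Editor's note: a small usage sketch for _NamedList (illustrative only; the
+ # field names and values are made up).
+ #
+ #     nl = _NamedList([("a", 1), (None, 2), ("a", 3)])
+ #     nl[1]      # 2, unnamed fields are still reachable by index
+ #     len(nl)    # 3
+ #     nl["a"]    # KeyError: ambiguous, "a" is present at indexes 0 and 2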
+
+ExecuteQueryValueType = Union[
+ int,
+ float,
+ bool,
+ bytes,
+ str,
+ # Note that Bigtable SQL does not currently support nanosecond precision,
+ # only microseconds. We use this for compatibility with potential future
+ # support
+ DatetimeWithNanoseconds,
+ date_pb2.Date,
+ "Struct",
+ List["ExecuteQueryValueType"],
+ Mapping[Union[str, int, bytes], "ExecuteQueryValueType"],
+]
+
+
+class QueryResultRow(_NamedList[ExecuteQueryValueType]):
+ """
+ Represents a single row of the result
+ """
+
+
+class Struct(_NamedList[ExecuteQueryValueType]):
+ """
+ Represents a struct value in the result
+ """
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py
new file mode 100644
index 000000000000..f19b1e49e862
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py
@@ -0,0 +1,457 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+from typing import Any
+import time
+from dataclasses import dataclass
+from abc import ABC, abstractmethod
+from sys import getsizeof
+
+import google.cloud.bigtable_v2.types.bigtable as types_pb
+import google.cloud.bigtable_v2.types.data as data_pb
+
+from google.cloud.bigtable.data.read_modify_write_rules import _MAX_INCREMENT_VALUE
+
+
+# special value for SetCell mutation timestamps. If set, server will assign a timestamp
+_SERVER_SIDE_TIMESTAMP = -1
+
+# mutation entries above this should be rejected
+_MUTATE_ROWS_REQUEST_MUTATION_LIMIT = 100_000
+
+
+class Mutation(ABC):
+ """
+ Abstract base class for mutations.
+
+ This class defines the interface for different types of mutations that can be
+ applied to Bigtable rows.
+ """
+
+ @abstractmethod
+ def _to_dict(self) -> dict[str, Any]:
+ """
+ Convert the mutation to a dictionary representation.
+
+ Returns:
+ dict[str, Any]: A dictionary representation of the mutation.
+ """
+ raise NotImplementedError
+
+ def _to_pb(self) -> data_pb.Mutation:
+ """
+ Convert the mutation to a protobuf representation.
+
+ Returns:
+ Mutation: A protobuf representation of the mutation.
+ """
+ return data_pb.Mutation(**self._to_dict())
+
+ def is_idempotent(self) -> bool:
+ """
+ Check if the mutation is idempotent
+
+ Idempotent mutations can be safely retried on failure.
+
+ Returns:
+ bool: True if the mutation is idempotent, False otherwise.
+ """
+ return True
+
+ def __str__(self) -> str:
+ """
+ Return a string representation of the mutation.
+
+ Returns:
+ str: A string representation of the mutation.
+ """
+ return str(self._to_dict())
+
+ def size(self) -> int:
+ """
+ Get the size of the mutation in bytes
+
+ Returns:
+ int: The size of the mutation in bytes.
+ """
+ return getsizeof(self._to_dict())
+
+ @classmethod
+ def _from_dict(cls, input_dict: dict[str, Any]) -> Mutation:
+ """
+ Create a `Mutation` instance from a dictionary representation.
+
+ Args:
+ input_dict: A dictionary representation of the mutation.
+ Returns:
+ Mutation: A Mutation instance created from the dictionary.
+ Raises:
+ ValueError: If the input dictionary is invalid or does not represent a valid mutation type.
+ """
+ instance: Mutation | None = None
+ try:
+ if "set_cell" in input_dict:
+ details = input_dict["set_cell"]
+ instance = SetCell(
+ details["family_name"],
+ details["column_qualifier"],
+ details["value"],
+ details["timestamp_micros"],
+ )
+ elif "delete_from_column" in input_dict:
+ details = input_dict["delete_from_column"]
+ time_range = details.get("time_range", {})
+ start = time_range.get("start_timestamp_micros", None)
+ end = time_range.get("end_timestamp_micros", None)
+ instance = DeleteRangeFromColumn(
+ details["family_name"], details["column_qualifier"], start, end
+ )
+ elif "delete_from_family" in input_dict:
+ details = input_dict["delete_from_family"]
+ instance = DeleteAllFromFamily(details["family_name"])
+ elif "delete_from_row" in input_dict:
+ instance = DeleteAllFromRow()
+ elif "add_to_cell" in input_dict:
+ details = input_dict["add_to_cell"]
+ instance = AddToCell(
+ details["family_name"],
+ details["column_qualifier"]["raw_value"],
+ details["input"]["int_value"],
+ details["timestamp"]["raw_timestamp_micros"],
+ )
+ except KeyError as e:
+ raise ValueError("Invalid mutation dictionary") from e
+ if instance is None:
+ raise ValueError("No valid mutation found")
+ if not issubclass(instance.__class__, cls):
+ raise ValueError("Mutation type mismatch")
+ return instance
+
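+ # Editor's note: a hedged round-trip sketch for the dict helpers above; the
+ # family name is hypothetical, and DeleteAllFromFamily is defined later in this
+ # module.
+ #
+ #     original = DeleteAllFromFamily("stats")
+ #     restored = Mutation._from_dict(original._to_dict())
+ #     isinstance(restored, DeleteAllFromFamily)   # True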
+
+class SetCell(Mutation):
+ """
+ Mutation to set the value of a cell.
+
+ Args:
+ family: The name of the column family to which the new cell belongs.
+ qualifier: The column qualifier of the new cell.
+ new_value: The value of the new cell.
+ timestamp_micros: The timestamp of the new cell. If `None`,
+ the current timestamp will be used. Timestamps will be sent with
+ millisecond precision. Extra precision will be truncated. If -1, the
+ server will assign a timestamp. Note that `SetCell` mutations with
+ server-side timestamps are non-idempotent operations and will not be retried.
+
+ Raises:
+ TypeError: If `qualifier` is not `bytes` or `str`.
+ TypeError: If `new_value` is not `bytes`, `str`, or `int`.
+ ValueError: If `timestamp_micros` is less than `_SERVER_SIDE_TIMESTAMP`.
+ """
+
+ def __init__(
+ self,
+ family: str,
+ qualifier: bytes | str,
+ new_value: bytes | str | int,
+ timestamp_micros: int | None = None,
+ ):
+ qualifier = qualifier.encode() if isinstance(qualifier, str) else qualifier
+ if not isinstance(qualifier, bytes):
+ raise TypeError("qualifier must be bytes or str")
+ if isinstance(new_value, str):
+ new_value = new_value.encode()
+ elif isinstance(new_value, int):
+ if abs(new_value) > _MAX_INCREMENT_VALUE:
+ raise ValueError(
+ "int values must be between -2**63 and 2**63 (64-bit signed int)"
+ )
+ new_value = new_value.to_bytes(8, "big", signed=True)
+ if not isinstance(new_value, bytes):
+ raise TypeError("new_value must be bytes, str, or int")
+ if timestamp_micros is None:
+ # use current timestamp, with millisecond precision
+ timestamp_micros = time.time_ns() // 1000
+ timestamp_micros = timestamp_micros - (timestamp_micros % 1000)
+ if timestamp_micros < _SERVER_SIDE_TIMESTAMP:
+ raise ValueError(
+ f"timestamp_micros must be positive (or {_SERVER_SIDE_TIMESTAMP} for server-side timestamp)"
+ )
+ self.family = family
+ self.qualifier = qualifier
+ self.new_value = new_value
+ self.timestamp_micros = timestamp_micros
+
+ def _to_dict(self) -> dict[str, Any]:
+ return {
+ "set_cell": {
+ "family_name": self.family,
+ "column_qualifier": self.qualifier,
+ "timestamp_micros": self.timestamp_micros,
+ "value": self.new_value,
+ }
+ }
+
+ def is_idempotent(self) -> bool:
+ return self.timestamp_micros != _SERVER_SIDE_TIMESTAMP
+
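+ # Editor's note: an illustrative sketch of how the timestamp affects retry
+ # behavior (not part of the library source); family/qualifier names are made up.
+ #
+ #     retriable = SetCell("stats", b"views", 1)
+ #     retriable.is_idempotent()   # True: client-assigned timestamp, safe to retry
+ #     server_ts = SetCell("stats", b"views", 1, timestamp_micros=_SERVER_SIDE_TIMESTAMP)
+ #     server_ts.is_idempotent()   # False: server-assigned timestamps are not retried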
+
+@dataclass
+class DeleteRangeFromColumn(Mutation):
+ """
+ Mutation to delete a range of cells from a column.
+
+ Args:
+ family: The name of the column family.
+ qualifier: The column qualifier.
+ start_timestamp_micros: The start timestamp of the range to
+ delete. `None` represents 0. Defaults to `None`.
+ end_timestamp_micros: The end timestamp of the range to
+ delete. `None` represents infinity. Defaults to `None`.
+ Raises:
+ ValueError: If `start_timestamp_micros` is greater than `end_timestamp_micros`.
+ """
+
+ family: str
+ qualifier: bytes
+ # None represents 0
+ start_timestamp_micros: int | None = None
+ # None represents infinity
+ end_timestamp_micros: int | None = None
+
+ def __post_init__(self):
+ if (
+ self.start_timestamp_micros is not None
+ and self.end_timestamp_micros is not None
+ and self.start_timestamp_micros > self.end_timestamp_micros
+ ):
+ raise ValueError("start_timestamp_micros must be <= end_timestamp_micros")
+
+ def _to_dict(self) -> dict[str, Any]:
+ timestamp_range = {}
+ if self.start_timestamp_micros is not None:
+ timestamp_range["start_timestamp_micros"] = self.start_timestamp_micros
+ if self.end_timestamp_micros is not None:
+ timestamp_range["end_timestamp_micros"] = self.end_timestamp_micros
+ return {
+ "delete_from_column": {
+ "family_name": self.family,
+ "column_qualifier": self.qualifier,
+ "time_range": timestamp_range,
+ }
+ }
+
+
+@dataclass
+class DeleteAllFromFamily(Mutation):
+ """
+ Mutation to delete all cells from a column family.
+
+ Args:
+ family_to_delete: The name of the column family to delete.
+ """
+
+ family_to_delete: str
+
+ def _to_dict(self) -> dict[str, Any]:
+ return {
+ "delete_from_family": {
+ "family_name": self.family_to_delete,
+ }
+ }
+
+
+@dataclass
+class DeleteAllFromRow(Mutation):
+ """
+ Mutation to delete all cells from a row.
+ """
+
+ def _to_dict(self) -> dict[str, Any]:
+ return {
+ "delete_from_row": {},
+ }
+
+
+@dataclass
+class AddToCell(Mutation):
+ """
+ Adds an int64 value to an aggregate cell. The column family must be an
+ aggregate family and have an "int64" input type or this mutation will be
+ rejected.
+
+ Note: The timestamp values are in microseconds but must match the
+ granularity of the table (defaults to `MILLIS`). Therefore, the given value
+ must be a multiple of 1000 (millisecond granularity). For example:
+ `1571902339435000`.
+
+ Args:
+ family: The name of the column family to which the cell belongs.
+ qualifier: The column qualifier of the cell.
+ value: The value to be accumulated into the cell.
+ timestamp_micros: The timestamp of the cell. Must be provided for
+ cell aggregation to work correctly.
+
+
+ Raises:
+ TypeError: If `qualifier` is not `bytes` or `str`.
+ TypeError: If `value` is not `int`.
+ TypeError: If `timestamp_micros` is not `int`.
+ ValueError: If `value` is out of bounds for a 64-bit signed int.
+ ValueError: If `timestamp_micros` is less than 0.
+ """
+
+ def __init__(
+ self,
+ family: str,
+ qualifier: bytes | str,
+ value: int,
+ timestamp_micros: int,
+ ):
+ qualifier = qualifier.encode() if isinstance(qualifier, str) else qualifier
+ if not isinstance(qualifier, bytes):
+ raise TypeError("qualifier must be bytes or str")
+ if not isinstance(value, int):
+ raise TypeError("value must be int")
+ if not isinstance(timestamp_micros, int):
+ raise TypeError("timestamp_micros must be int")
+ if abs(value) > _MAX_INCREMENT_VALUE:
+ raise ValueError(
+ "int values must be between -2**63 and 2**63 (64-bit signed int)"
+ )
+
+ if timestamp_micros < 0:
+ raise ValueError("timestamp must be non-negative")
+
+ self.family = family
+ self.qualifier = qualifier
+ self.value = value
+ self.timestamp = timestamp_micros
+
+ def _to_dict(self) -> dict[str, Any]:
+ return {
+ "add_to_cell": {
+ "family_name": self.family,
+ "column_qualifier": {"raw_value": self.qualifier},
+ "timestamp": {"raw_timestamp_micros": self.timestamp},
+ "input": {"int_value": self.value},
+ }
+ }
+
+ def is_idempotent(self) -> bool:
+ return False
+
+
+class RowMutationEntry:
+ """
+ A single entry in a `MutateRows` request.
+
+ This class represents a set of mutations to apply to a specific row in a
+ Bigtable table.
+
+ Args:
+ row_key: The key of the row to mutate.
+ mutations: The mutation or list of mutations to apply
+ to the row.
+
+ Raises:
+ ValueError: If `mutations` is empty or contains more than
+ `_MUTATE_ROWS_REQUEST_MUTATION_LIMIT` mutations.
+ """
+
+ def __init__(self, row_key: bytes | str, mutations: Mutation | list[Mutation]):
+ if isinstance(row_key, str):
+ row_key = row_key.encode("utf-8")
+ if isinstance(mutations, Mutation):
+ mutations = [mutations]
+ if len(mutations) == 0:
+ raise ValueError("mutations must not be empty")
+ elif len(mutations) > _MUTATE_ROWS_REQUEST_MUTATION_LIMIT:
+ raise ValueError(
+ f"entries must have <= {_MUTATE_ROWS_REQUEST_MUTATION_LIMIT} mutations"
+ )
+ self.row_key = row_key
+ self.mutations = tuple(mutations)
+
+ def _to_dict(self) -> dict[str, Any]:
+ """
+ Convert the mutation entry to a dictionary representation.
+
+ Returns:
+ dict[str, Any]: A dictionary representation of the mutation entry
+ """
+ return {
+ "row_key": self.row_key,
+ "mutations": [mutation._to_dict() for mutation in self.mutations],
+ }
+
+ def _to_pb(self) -> types_pb.MutateRowsRequest.Entry:
+ """
+ Convert the mutation entry to a protobuf representation.
+
+ Returns:
+ MutateRowsRequest.Entry: A protobuf representation of the mutation entry.
+ """
+ return types_pb.MutateRowsRequest.Entry(
+ row_key=self.row_key,
+ mutations=[mutation._to_pb() for mutation in self.mutations],
+ )
+
+ def is_idempotent(self) -> bool:
+ """
+ Check if all mutations in the entry are idempotent.
+
+ Returns:
+ bool: True if all mutations in the entry are idempotent, False otherwise.
+ """
+ return all(mutation.is_idempotent() for mutation in self.mutations)
+
+ def size(self) -> int:
+ """
+ Get the size of the mutation entry in bytes.
+
+ Returns:
+ int: The size of the mutation entry in bytes.
+ """
+ return getsizeof(self._to_dict())
+
+ @classmethod
+ def _from_dict(cls, input_dict: dict[str, Any]) -> RowMutationEntry:
+ """
+ Create a `RowMutationEntry` instance from a dictionary representation.
+
+ Args:
+ input_dict: A dictionary representation of the mutation entry.
+
+ Returns:
+ RowMutationEntry: A RowMutationEntry instance created from the dictionary.
+ """
+ return RowMutationEntry(
+ row_key=input_dict["row_key"],
+ mutations=[
+ Mutation._from_dict(mutation) for mutation in input_dict["mutations"]
+ ],
+ )
+
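+ # Editor's note: a hedged sketch of building an entry for a MutateRows call;
+ # the row key and column names are hypothetical.
+ #
+ #     entry = RowMutationEntry(
+ #         "user#123",
+ #         [SetCell("stats", b"views", 1), DeleteAllFromFamily("tmp")],
+ #     )
+ #     entry.is_idempotent()   # True: every mutation in the entry is retriable
+ #     entry._to_pb()          # MutateRowsRequest.Entry proto for the request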
+
+@dataclass
+class _EntryWithProto:
+ """
+ A dataclass to hold a RowMutationEntry and its corresponding proto representation.
+
+ Used in _MutateRowsOperation to avoid repeated conversion of RowMutationEntry to proto.
+ """
+
+ entry: RowMutationEntry
+ proto: types_pb.MutateRowsRequest.Entry
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_modify_write_rules.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_modify_write_rules.py
new file mode 100644
index 000000000000..e4446f755c00
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_modify_write_rules.py
@@ -0,0 +1,112 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+import abc
+
+import google.cloud.bigtable_v2.types.data as data_pb
+
+# value must fit in 64-bit signed integer
+_MAX_INCREMENT_VALUE = (1 << 63) - 1
+
+
+class ReadModifyWriteRule(abc.ABC):
+ """
+ Abstract base class for read-modify-write rules.
+ """
+
+ def __init__(self, family: str, qualifier: bytes | str):
+ qualifier = (
+ qualifier if isinstance(qualifier, bytes) else qualifier.encode("utf-8")
+ )
+ self.family = family
+ self.qualifier = qualifier
+
+ @abc.abstractmethod
+ def _to_dict(self) -> dict[str, str | bytes | int]:
+ raise NotImplementedError
+
+ def _to_pb(self) -> data_pb.ReadModifyWriteRule:
+ return data_pb.ReadModifyWriteRule(**self._to_dict())
+
+
+class IncrementRule(ReadModifyWriteRule):
+ """
+ Rule to increment a cell's value.
+
+ Args:
+ family:
+ The family name of the cell to increment.
+ qualifier:
+ The qualifier of the cell to increment.
+ increment_amount:
+ The amount to increment the cell's value. Must be between -2**63 and 2**63 (64-bit signed int).
+ Raises:
+ TypeError:
+ If increment_amount is not an integer.
+ ValueError:
+ If increment_amount is not between -2**63 and 2**63 (64-bit signed int).
+ """
+
+ def __init__(self, family: str, qualifier: bytes | str, increment_amount: int = 1):
+ if not isinstance(increment_amount, int):
+ raise TypeError("increment_amount must be an integer")
+ if abs(increment_amount) > _MAX_INCREMENT_VALUE:
+ raise ValueError(
+ "increment_amount must be between -2**63 and 2**63 (64-bit signed int)"
+ )
+ super().__init__(family, qualifier)
+ self.increment_amount = increment_amount
+
+ def _to_dict(self) -> dict[str, str | bytes | int]:
+ return {
+ "family_name": self.family,
+ "column_qualifier": self.qualifier,
+ "increment_amount": self.increment_amount,
+ }
+
+
+class AppendValueRule(ReadModifyWriteRule):
+ """
+ Rule to append a value to a cell's value.
+
+ Args:
+ family:
+ The family name of the cell to append to.
+ qualifier:
+ The qualifier of the cell to append to.
+ append_value:
+ The value to append to the cell's value.
+ Raises:
+ TypeError: If append_value is not bytes or str.
+ """
+
+ def __init__(self, family: str, qualifier: bytes | str, append_value: bytes | str):
+ append_value = (
+ append_value.encode("utf-8")
+ if isinstance(append_value, str)
+ else append_value
+ )
+ if not isinstance(append_value, bytes):
+ raise TypeError("append_value must be bytes or str")
+ super().__init__(family, qualifier)
+ self.append_value = append_value
+
+ def _to_dict(self) -> dict[str, str | bytes | int]:
+ return {
+ "family_name": self.family,
+ "column_qualifier": self.qualifier,
+ "append_value": self.append_value,
+ }
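+
+ # Editor's note: an illustrative sketch of composing rules for a
+ # read_modify_write_row call (hedged; family/qualifier names are made up).
+ #
+ #     rules = [
+ #         IncrementRule("stats", b"views", 1),
+ #         AppendValueRule("stats", b"tags", b",new"),
+ #     ]
+ #     [rule._to_pb() for rule in rules]   # ReadModifyWriteRule protos, applied in order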
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_rows_query.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_rows_query.py
new file mode 100644
index 000000000000..7652bfbb9af7
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_rows_query.py
@@ -0,0 +1,536 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+from typing import TYPE_CHECKING, Any
+from bisect import bisect_left
+from bisect import bisect_right
+from collections import defaultdict
+from google.cloud.bigtable.data.row_filters import RowFilter
+
+from google.cloud.bigtable_v2.types import RowRange as RowRangePB
+from google.cloud.bigtable_v2.types import RowSet as RowSetPB
+from google.cloud.bigtable_v2.types import ReadRowsRequest as ReadRowsRequestPB
+
+if TYPE_CHECKING:
+ from google.cloud.bigtable.data import RowKeySamples
+ from google.cloud.bigtable.data import ShardedQuery
+
+
+class RowRange:
+ """
+ Represents a range of keys in a ReadRowsQuery
+
+ Args:
+ start_key: The start key of the range. If empty, the range is unbounded on the left.
+ end_key: The end key of the range. If empty, the range is unbounded on the right.
+ start_is_inclusive: Whether the start key is inclusive. If None, the start key is
+ inclusive.
+ end_is_inclusive: Whether the end key is inclusive. If None, the end key is not inclusive.
+ Raises:
+ ValueError: if start_key is greater than end_key, or if start_is_inclusive
+ or end_is_inclusive is set when the corresponding key is None
+ ValueError: if start_key or end_key is not a string or bytes.
+ """
+
+ __slots__ = ("_pb",)
+
+ def __init__(
+ self,
+ start_key: str | bytes | None = None,
+ end_key: str | bytes | None = None,
+ start_is_inclusive: bool | None = None,
+ end_is_inclusive: bool | None = None,
+ ):
+ # convert empty key inputs to None for consistency
+ start_key = None if not start_key else start_key
+ end_key = None if not end_key else end_key
+ # apply default inclusivity when not specified
+ if start_is_inclusive is None:
+ start_is_inclusive = True
+
+ if end_is_inclusive is None:
+ end_is_inclusive = False
+ # ensure that start_key and end_key are bytes
+ if isinstance(start_key, str):
+ start_key = start_key.encode()
+ elif start_key is not None and not isinstance(start_key, bytes):
+ raise ValueError("start_key must be a string or bytes")
+ if isinstance(end_key, str):
+ end_key = end_key.encode()
+ elif end_key is not None and not isinstance(end_key, bytes):
+ raise ValueError("end_key must be a string or bytes")
+ # ensure that start_key is less than or equal to end_key
+ if start_key is not None and end_key is not None and start_key > end_key:
+ raise ValueError("start_key must be less than or equal to end_key")
+
+ init_dict = {}
+ if start_key is not None:
+ if start_is_inclusive:
+ init_dict["start_key_closed"] = start_key
+ else:
+ init_dict["start_key_open"] = start_key
+ if end_key is not None:
+ if end_is_inclusive:
+ init_dict["end_key_closed"] = end_key
+ else:
+ init_dict["end_key_open"] = end_key
+ self._pb = RowRangePB(**init_dict)
+
+ @property
+ def start_key(self) -> bytes | None:
+ """
+ Returns the start key of the range. If None, the range is unbounded on the left.
+ """
+ return self._pb.start_key_closed or self._pb.start_key_open or None
+
+ @property
+ def end_key(self) -> bytes | None:
+ """
+ Returns the end key of the range. If None, the range is unbounded on the right.
+
+ Returns:
+ bytes | None: The end key of the range, or None if the range is unbounded on the right.
+ """
+ return self._pb.end_key_closed or self._pb.end_key_open or None
+
+ @property
+ def start_is_inclusive(self) -> bool:
+ """
+ Indicates if the range is inclusive of the start key.
+
+ If the range is unbounded on the left, this will return True.
+
+ Returns:
+ bool: Whether the range is inclusive of the start key.
+ """
+ return not bool(self._pb.start_key_open)
+
+ @property
+ def end_is_inclusive(self) -> bool:
+ """
+ Indicates if the range is inclusive of the end key.
+
+ If the range is unbounded on the right, this will return True.
+
+ Returns:
+ bool: Whether the range is inclusive of the end key.
+ """
+ return not bool(self._pb.end_key_open)
+
+ def _to_pb(self) -> RowRangePB:
+ """
+ Converts this object to a protobuf
+
+ Returns:
+ RowRangePB: The protobuf representation of this object
+ """
+ return self._pb
+
+ @classmethod
+ def _from_pb(cls, data: RowRangePB) -> RowRange:
+ """
+ Creates a RowRange from a protobuf
+
+ Args:
+ data (RowRangePB): The protobuf to convert
+ Returns:
+ RowRange: The converted RowRange
+ """
+ instance = cls()
+ instance._pb = data
+ return instance
+
+ @classmethod
+ def _from_dict(cls, data: dict[str, bytes | str]) -> RowRange:
+ """
+ Creates a RowRange from a protobuf
+
+ Args:
+ data (dict[str, bytes | str]): The dictionary to convert
+ Returns:
+ RowRange: The converted RowRange
+ """
+ formatted_data = {
+ k: v.encode() if isinstance(v, str) else v for k, v in data.items()
+ }
+ instance = cls()
+ instance._pb = RowRangePB(**formatted_data)
+ return instance
+
+ def __bool__(self) -> bool:
+ """
+ Empty RowRanges (representing a full table scan) are falsy, because
+ they can be substituted with None. Non-empty RowRanges are truthy.
+
+ Returns:
+ bool: True if the RowRange is not empty, False otherwise
+ """
+ return bool(
+ self._pb.start_key_closed
+ or self._pb.start_key_open
+ or self._pb.end_key_closed
+ or self._pb.end_key_open
+ )
+
+ def __eq__(self, other: Any) -> bool:
+ if not isinstance(other, RowRange):
+ return NotImplemented
+ return self._pb == other._pb
+
+ def __str__(self) -> str:
+ """
+ Represent range as a string, e.g. "[b'a', b'z)"
+
+ Unbounded start or end keys are represented as "-inf" or "+inf"
+
+ Returns:
+ str: The string representation of the range
+ """
+ left = "[" if self.start_is_inclusive else "("
+ right = "]" if self.end_is_inclusive else ")"
+ start = repr(self.start_key) if self.start_key is not None else "-inf"
+ end = repr(self.end_key) if self.end_key is not None else "+inf"
+ return f"{left}{start}, {end}{right}"
+
+ def __repr__(self) -> str:
+ args_list = []
+ args_list.append(f"start_key={self.start_key!r}")
+ args_list.append(f"end_key={self.end_key!r}")
+ if self.start_is_inclusive is False:
+ # only show start_is_inclusive if it is different from the default
+ args_list.append(f"start_is_inclusive={self.start_is_inclusive}")
+ if self.end_is_inclusive is True and self.end_key is not None:
+ # only show end_is_inclusive if it is different from the default
+ args_list.append(f"end_is_inclusive={self.end_is_inclusive}")
+ return f"RowRange({', '.join(args_list)})"
+
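+ # Editor's note: an illustrative sketch of RowRange construction (the keys are
+ # arbitrary examples).
+ #
+ #     rr = RowRange(start_key="a", end_key="z", end_is_inclusive=True)
+ #     rr.start_key, rr.end_key   # (b"a", b"z")
+ #     str(rr)                    # "[b'a', b'z']"
+ #     bool(RowRange())           # False: an empty range stands for a full table scan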
+
+class ReadRowsQuery:
+ """
+ Class to encapsulate details of a read row request
+
+ Args:
+ row_keys: row keys to include in the query
+ a query can contain multiple keys, but ranges should be preferred
+ row_ranges: ranges of rows to include in the query
+ limit: the maximum number of rows to return. None or 0 means no limit
+ default: None (no limit)
+ row_filter: a RowFilter to apply to the query
+ """
+
+ slots = ("_limit", "_filter", "_row_set")
+
+ def __init__(
+ self,
+ row_keys: list[str | bytes] | str | bytes | None = None,
+ row_ranges: list[RowRange] | RowRange | None = None,
+ limit: int | None = None,
+ row_filter: RowFilter | None = None,
+ ):
+ if row_keys is None:
+ row_keys = []
+ if row_ranges is None:
+ row_ranges = []
+ if not isinstance(row_ranges, list):
+ row_ranges = [row_ranges]
+ if not isinstance(row_keys, list):
+ row_keys = [row_keys]
+ row_keys = [key.encode() if isinstance(key, str) else key for key in row_keys]
+ self._row_set = RowSetPB(
+ row_keys=row_keys, row_ranges=[r._pb for r in row_ranges]
+ )
+ self.limit = limit or None
+ self.filter = row_filter
+
+ @property
+ def row_keys(self) -> list[bytes]:
+ """
+ Return the row keys in this query
+
+ Returns:
+ list[bytes]: the row keys in this query
+ """
+ return list(self._row_set.row_keys)
+
+ @property
+ def row_ranges(self) -> list[RowRange]:
+ """
+ Return the row ranges in this query
+
+ Returns:
+ list[RowRange]: the row ranges in this query
+ """
+ return [RowRange._from_pb(r) for r in self._row_set.row_ranges]
+
+ @property
+ def limit(self) -> int | None:
+ """
+ Return the maximum number of rows to return by this query
+
+ None or 0 means no limit
+
+ Returns:
+ int | None: the maximum number of rows to return by this query
+ """
+ return self._limit or None
+
+ @limit.setter
+ def limit(self, new_limit: int | None):
+ """
+ Set the maximum number of rows to return by this query.
+
+ None or 0 means no limit
+
+ Args:
+ new_limit: the new limit to apply to this query
+ Raises:
+ ValueError: if new_limit is < 0
+ """
+ if new_limit is not None and new_limit < 0:
+ raise ValueError("limit must be >= 0")
+ self._limit = new_limit
+
+ @property
+ def filter(self) -> RowFilter | None:
+ """
+ Return the RowFilter applied to this query
+
+ Returns:
+ RowFilter | None: the RowFilter applied to this query
+ """
+ return self._filter
+
+ @filter.setter
+ def filter(self, row_filter: RowFilter | None):
+ """
+ Set a RowFilter to apply to this query
+
+ Args:
+ row_filter: a RowFilter to apply to this query
+ """
+ self._filter = row_filter
+
+ def add_key(self, row_key: str | bytes):
+ """
+ Add a row key to this query
+
+ A query can contain multiple keys, but ranges should be preferred
+
+ Args:
+ row_key: a key to add to this query
+ Raises:
+ ValueError: if an input is not a string or bytes
+ """
+ if isinstance(row_key, str):
+ row_key = row_key.encode()
+ elif not isinstance(row_key, bytes):
+ raise ValueError("row_key must be string or bytes")
+ if row_key not in self._row_set.row_keys:
+ self._row_set.row_keys.append(row_key)
+
+ def add_range(
+ self,
+ row_range: RowRange,
+ ):
+ """
+ Add a range of row keys to this query.
+
+ Args:
+ row_range: a range of row keys to add to this query
+ """
+ if row_range not in self.row_ranges:
+ self._row_set.row_ranges.append(row_range._pb)
+
+ def shard(self, shard_keys: RowKeySamples) -> ShardedQuery:
+ """
+ Split this query into multiple queries that can be evenly distributed
+ across nodes and run in parallel
+
+ Args:
+ shard_keys: a list of row keys that define the boundaries of segments.
+ Returns:
+ ShardedQuery: a ShardedQuery that can be used in sharded_read_rows calls
+ Raises:
+ AttributeError: if the query contains a limit
+ """
+ if self.limit is not None:
+ raise AttributeError("Cannot shard query with a limit")
+ if len(self.row_keys) == 0 and len(self.row_ranges) == 0:
+ # empty query represents full scan
+ # ensure that we have at least one key or range
+ full_scan_query = ReadRowsQuery(
+ row_ranges=RowRange(), row_filter=self.filter
+ )
+ return full_scan_query.shard(shard_keys)
+
+ sharded_queries: dict[int, ReadRowsQuery] = defaultdict(
+ lambda: ReadRowsQuery(row_filter=self.filter)
+ )
+ # the split_points divide our key space into segments
+ # each split_point defines the last key that belongs to a segment
+ # our goal is to break up the query into subqueries that each operate in a single segment
+ split_points = [sample[0] for sample in shard_keys if sample[0]]
+
+ # handle row_keys
+ # use binary search to find the segment that each key belongs to
+ for this_key in list(self.row_keys):
+ # bisect_left: in case of exact match, pick left side (keys are inclusive ends)
+ segment_index = bisect_left(split_points, this_key)
+ sharded_queries[segment_index].add_key(this_key)
+
+ # handle row_ranges
+ for this_range in self.row_ranges:
+ # defer to _shard_range helper
+ for segment_index, added_range in self._shard_range(
+ this_range, split_points
+ ):
+ sharded_queries[segment_index].add_range(added_range)
+ # return list of queries ordered by segment index
+ # pull populated segments out of sharded_queries dict
+ keys = sorted(list(sharded_queries.keys()))
+ # return list of queries
+ return [sharded_queries[k] for k in keys]
+
+ @staticmethod
+ def _shard_range(
+ orig_range: RowRange, split_points: list[bytes]
+ ) -> list[tuple[int, RowRange]]:
+ """
+ Helper function for sharding row_range into subranges that fit into
+ segments of the key-space, determined by split_points
+
+ Args:
+ orig_range: a row range to split
+ split_points: a list of row keys that define the boundaries of segments.
+ each point represents the inclusive end of a segment
+ Returns:
+ list[tuple[int, RowRange]]: a list of tuples, containing a segment index and a new sub-range.
+ """
+ # 1. find the index of the segment the start key belongs to
+ if orig_range.start_key is None:
+ # if range is open on the left, include first segment
+ start_segment = 0
+ else:
+ # use binary search to find the segment the start key belongs to
+ # bisect method determines how we break ties when the start key matches a split point
+ # if the start is inclusive, bisect_left places it in the left segment; otherwise bisect_right places it in the right segment
+ bisect = bisect_left if orig_range.start_is_inclusive else bisect_right
+ start_segment = bisect(split_points, orig_range.start_key)
+
+ # 2. find the index of the segment the end key belongs to
+ if orig_range.end_key is None:
+ # if range is open on the right, include final segment
+ end_segment = len(split_points)
+ else:
+ # use binary search to find the segment the end key belongs to.
+ end_segment = bisect_left(
+ split_points, orig_range.end_key, lo=start_segment
+ )
+ # note: end_segment always uses bisect_left, because split points represent inclusive ends
+ # whether or not the end_key includes the split point, the result is the same segment
+ # 3. create new range definitions for each segment this_range spans
+ if start_segment == end_segment:
+ # this_range is contained in a single segment.
+ # Add this_range to that segment's query only
+ return [(start_segment, orig_range)]
+ else:
+ results: list[tuple[int, RowRange]] = []
+ # this_range spans multiple segments. Create a new range for each segment's query
+ # 3a. add new range for first segment this_range spans
+ # first range spans from start_key to the split_point representing the last key in the segment
+ last_key_in_first_segment = split_points[start_segment]
+ start_range = RowRange(
+ start_key=orig_range.start_key,
+ start_is_inclusive=orig_range.start_is_inclusive,
+ end_key=last_key_in_first_segment,
+ end_is_inclusive=True,
+ )
+ results.append((start_segment, start_range))
+ # 3b. add new range for last segment this_range spans
+ # we start the final range at the end key of the previous segment, with start_is_inclusive=False
+ previous_segment = end_segment - 1
+ last_key_before_segment = split_points[previous_segment]
+ end_range = RowRange(
+ start_key=last_key_before_segment,
+ start_is_inclusive=False,
+ end_key=orig_range.end_key,
+ end_is_inclusive=orig_range.end_is_inclusive,
+ )
+ results.append((end_segment, end_range))
+ # 3c. add new spanning range to all segments other than the first and last
+ for this_segment in range(start_segment + 1, end_segment):
+ prev_segment = this_segment - 1
+ prev_end_key = split_points[prev_segment]
+ this_end_key = split_points[prev_segment + 1]
+ new_range = RowRange(
+ start_key=prev_end_key,
+ start_is_inclusive=False,
+ end_key=this_end_key,
+ end_is_inclusive=True,
+ )
+ results.append((this_segment, new_range))
+ return results
+
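+ # Editor's note: a hedged sketch of sharding with sampled keys; it assumes
+ # RowKeySamples is a list of (key, offset) tuples, as shard() above implies,
+ # and the keys are made up.
+ #
+ #     query = ReadRowsQuery(row_ranges=RowRange("a", "z"))
+ #     shards = query.shard([(b"g", 0), (b"m", 0)])
+ #     len(shards)   # 3: one sub-query per key-space segment the range spans
+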
+ def _to_pb(self, table) -> ReadRowsRequestPB:
+ """
+ Convert this query into a ReadRowsRequest protobuf, scoped to the given table
+ """
+ return ReadRowsRequestPB(
+ app_profile_id=table.app_profile_id,
+ filter=self.filter._to_pb() if self.filter else None,
+ rows_limit=self.limit or 0,
+ rows=self._row_set,
+ **table._request_path,
+ )
+
+ def __eq__(self, other):
+ """
+ Queries are equal if they have the same row keys, row ranges,
+ filter and limit, or if they both represent a full scan with the
+ same filter and limit
+
+ Args:
+ other: the object to compare to
+ Returns:
+ bool: True if the objects are equal, False otherwise
+ """
+ if not isinstance(other, ReadRowsQuery):
+ return False
+ # empty queries are equal
+ if len(self.row_keys) == 0 and len(other.row_keys) == 0:
+ this_range_empty = len(self.row_ranges) == 0 or all(
+ [bool(r) is False for r in self.row_ranges]
+ )
+ other_range_empty = len(other.row_ranges) == 0 or all(
+ [bool(r) is False for r in other.row_ranges]
+ )
+ if this_range_empty and other_range_empty:
+ return self.filter == other.filter and self.limit == other.limit
+ # otherwise, sets should have same sizes
+ if len(self.row_keys) != len(other.row_keys):
+ return False
+ if len(self.row_ranges) != len(other.row_ranges):
+ return False
+ ranges_match = all([row in other.row_ranges for row in self.row_ranges])
+ return (
+ self.row_keys == other.row_keys
+ and ranges_match
+ and self.filter == other.filter
+ and self.limit == other.limit
+ )
+
+ def __repr__(self):
+ return f"ReadRowsQuery(row_keys={list(self.row_keys)}, row_ranges={list(self.row_ranges)}, row_filter={self.filter}, limit={self.limit})"
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py
new file mode 100644
index 000000000000..50e65a958c51
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py
@@ -0,0 +1,535 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+from collections import OrderedDict
+from typing import Generator, overload, Any
+from functools import total_ordering
+
+from google.cloud.bigtable_v2.types import Row as RowPB
+
+# Type aliases used internally for readability.
+_family_type = str
+_qualifier_type = bytes
+
+
+class Row:
+ """
+ Model class for row data returned from server
+
+ Does not represent all data contained in the row, only data returned by a
+ query.
+ Expected to be read-only to users, and written by backend
+
+ Can be indexed by family and qualifier to get cells in the row::
+
+ cells = row["family", "qualifier"]
+
+ Args:
+ key: Row key
+ cells: List of cells in the row
+ """
+
+ __slots__ = ("row_key", "cells", "_index_data")
+
+ def __init__(
+ self,
+ key: bytes,
+ cells: list[Cell],
+ ):
+ """
+ Row objects are not intended to be created by users.
+ They are returned by the Bigtable backend.
+ """
+ self.row_key = key
+ self.cells: list[Cell] = cells
+ # index is lazily created when needed
+ self._index_data: OrderedDict[
+ _family_type, OrderedDict[_qualifier_type, list[Cell]]
+ ] | None = None
+
+ @property
+ def _index(
+ self,
+ ) -> OrderedDict[_family_type, OrderedDict[_qualifier_type, list[Cell]]]:
+ """
+ Returns an index of cells associated with each family and qualifier.
+
+ The index is lazily created when needed
+
+ Returns:
+ OrderedDict: Index of cells
+ """
+ if self._index_data is None:
+ self._index_data = OrderedDict()
+ for cell in self.cells:
+ self._index_data.setdefault(cell.family, OrderedDict()).setdefault(
+ cell.qualifier, []
+ ).append(cell)
+ return self._index_data
+
+ @classmethod
+ def _from_pb(cls, row_pb: RowPB) -> Row:
+ """
+ Creates a row from a protobuf representation
+
+ Row objects are not intended to be created by users.
+ They are returned by the Bigtable backend.
+
+ Args:
+ row_pb (RowPB): Protobuf representation of the row
+ Returns:
+ Row: Row object created from the protobuf representation
+ """
+ row_key: bytes = row_pb.key
+ cell_list: list[Cell] = []
+ for family in row_pb.families:
+ for column in family.columns:
+ for cell in column.cells:
+ new_cell = Cell(
+ value=cell.value,
+ row_key=row_key,
+ family=family.name,
+ qualifier=column.qualifier,
+ timestamp_micros=cell.timestamp_micros,
+ labels=list(cell.labels) if cell.labels else None,
+ )
+ cell_list.append(new_cell)
+ return cls(row_key, cells=cell_list)
+
+ def get_cells(
+ self, family: str | None = None, qualifier: str | bytes | None = None
+ ) -> list[Cell]:
+ """
+ Returns cells sorted in Bigtable native order:
+ - Family lexicographically ascending
+ - Qualifier ascending
+ - Timestamp in reverse chronological order
+
+ If family or qualifier not passed, will include all
+
+ Can also be accessed through indexing::
+ cells = row["family", "qualifier"]
+ cells = row["family"]
+
+ Args:
+ family: family to filter cells by
+ qualifier: qualifier to filter cells by
+ Returns:
+ list[Cell]: List of cells in the row matching the filter
+ Raises:
+ ValueError: If family or qualifier is not found in the row
+ """
+ if family is None:
+ if qualifier is not None:
+ # get_cells(None, "qualifier") is not allowed
+ raise ValueError("Qualifier passed without family")
+ else:
+ # return all cells on get_cells()
+ return self.cells
+ if qualifier is None:
+ # return all cells in family on get_cells(family)
+ return list(self._get_all_from_family(family))
+ if isinstance(qualifier, str):
+ qualifier = qualifier.encode("utf-8")
+ # return cells in family and qualifier on get_cells(family, qualifier)
+ if family not in self._index:
+ raise ValueError(f"Family '{family}' not found in row '{self.row_key!r}'")
+ if qualifier not in self._index[family]:
+ raise ValueError(
+ f"Qualifier '{qualifier!r}' not found in family '{family}' in row '{self.row_key!r}'"
+ )
+ return self._index[family][qualifier]
+
+ def _get_all_from_family(self, family: str) -> Generator[Cell, None, None]:
+ """
+ Returns all cells in the row for the family_id
+
+ Args:
+ family: family to filter cells by
+ Yields:
+ Cell: cells in the row for the family_id
+ Raises:
+ ValueError: If family is not found in the row
+ """
+ if family not in self._index:
+ raise ValueError(f"Family '{family}' not found in row '{self.row_key!r}'")
+ for qualifier in self._index[family]:
+ yield from self._index[family][qualifier]
+
+ def __str__(self) -> str:
+ """
+ Human-readable string representation::
+
+ {
+ (family='fam', qualifier=b'col'): [b'value', (+1 more),],
+ (family='fam', qualifier=b'col2'): [b'other'],
+ }
+
+ Returns:
+ str: Human-readable string representation of the row
+ """
+ output = ["{"]
+ for family, qualifier in self._get_column_components():
+ cell_list = self[family, qualifier]
+ line = [f" (family={family!r}, qualifier={qualifier!r}): "]
+ if len(cell_list) == 0:
+ line.append("[],")
+ elif len(cell_list) == 1:
+ line.append(f"[{cell_list[0]}],")
+ else:
+ line.append(f"[{cell_list[0]}, (+{len(cell_list) - 1} more)],")
+ output.append("".join(line))
+ output.append("}")
+ return "\n".join(output)
+
+ def __repr__(self):
+ cell_str_buffer = ["{"]
+ for family, qualifier in self._get_column_components():
+ cell_list = self[family, qualifier]
+ repr_list = [cell._to_dict() for cell in cell_list]
+ cell_str_buffer.append(f" ('{family}', {qualifier!r}): {repr_list},")
+ cell_str_buffer.append("}")
+ cell_str = "\n".join(cell_str_buffer)
+ output = f"Row(key={self.row_key!r}, cells={cell_str})"
+ return output
+
+ def _to_dict(self) -> dict[str, Any]:
+ """
+ Returns a dictionary representation of the cell in the Bigtable Row
+ proto format
+
+ https://cloud.google.com/bigtable/docs/reference/data/rpc/google.bigtable.v2#row
+ """
+ family_list = []
+ for family_name, qualifier_dict in self._index.items():
+ qualifier_list = []
+ for qualifier_name, cell_list in qualifier_dict.items():
+ cell_dicts = [cell._to_dict() for cell in cell_list]
+ qualifier_list.append(
+ {"qualifier": qualifier_name, "cells": cell_dicts}
+ )
+ family_list.append({"name": family_name, "columns": qualifier_list})
+ return {"key": self.row_key, "families": family_list}
+
+ # Sequence and Mapping methods
+ def __iter__(self):
+ """
+ Allow iterating over all cells in the row
+
+ Returns:
+ Iterator: Iterator over the cells in the row
+ """
+ return iter(self.cells)
+
+ def __contains__(self, item):
+ """
+ Implements `in` operator
+
+ Works for both cells in the internal list, and `family` or
+ `(family, qualifier)` pairs associated with the cells
+
+ Args:
+ item: item to check for in the row
+ Returns:
+ bool: True if item is in the row, False otherwise
+ """
+ if isinstance(item, _family_type):
+ return item in self._index
+ elif (
+ isinstance(item, tuple)
+ and isinstance(item[0], _family_type)
+ and isinstance(item[1], (bytes, str))
+ ):
+ q = item[1] if isinstance(item[1], bytes) else item[1].encode("utf-8")
+ return item[0] in self._index and q in self._index[item[0]]
+ # check if Cell is in Row
+ return item in self.cells
+
+ @overload
+ def __getitem__(
+ self,
+ index: str | tuple[str, bytes | str],
+ ) -> list[Cell]:
+ # overload signature for type checking
+ pass
+
+ @overload
+ def __getitem__(self, index: int) -> Cell:
+ # overload signature for type checking
+ pass
+
+ @overload
+ def __getitem__(self, index: slice) -> list[Cell]:
+ # overload signature for type checking
+ pass
+
+ def __getitem__(self, index):
+ """
+ Implements [] indexing
+
+ Supports indexing by family, (family, qualifier) pair,
+ numerical index, and index slicing
+ """
+ if isinstance(index, _family_type):
+ return self.get_cells(family=index)
+ elif (
+ isinstance(index, tuple)
+ and isinstance(index[0], _family_type)
+ and isinstance(index[1], (bytes, str))
+ ):
+ return self.get_cells(family=index[0], qualifier=index[1])
+ elif isinstance(index, int) or isinstance(index, slice):
+ # index is int or slice
+ return self.cells[index]
+ else:
+ raise TypeError(
+ "Index must be family_id, (family_id, qualifier), int, or slice"
+ )
+
+ def __len__(self):
+ """
+ Returns the number of cells in the row
+
+ Returns:
+ int: Number of cells in the row
+ """
+ return len(self.cells)
+
+ def _get_column_components(self) -> list[tuple[str, bytes]]:
+ """
+ Returns a list of (family, qualifier) pairs associated with the cells
+
+ Pairs can be used for indexing
+
+ Returns:
+ list[tuple[str, bytes]]: List of (family, qualifier) pairs
+ """
+ return [(f, q) for f in self._index for q in self._index[f]]
+
+ def __eq__(self, other):
+ """
+ Implements `==` operator
+
+ Returns:
+ bool: True if rows are equal, False otherwise
+ """
+ # for performance reasons, check row metadata
+ # before checking individual cells
+ if not isinstance(other, Row):
+ return False
+ if self.row_key != other.row_key:
+ return False
+ if len(self.cells) != len(other.cells):
+ return False
+ components = self._get_column_components()
+ other_components = other._get_column_components()
+ if len(components) != len(other_components):
+ return False
+ if components != other_components:
+ return False
+ for family, qualifier in components:
+ if len(self[family, qualifier]) != len(other[family, qualifier]):
+ return False
+ # compare individual cell lists
+ if self.cells != other.cells:
+ return False
+ return True
+
+ def __ne__(self, other) -> bool:
+ """
+ Implements `!=` operator
+
+ Returns:
+ bool: True if rows are not equal, False otherwise
+ """
+ return not self == other
+
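+ # Editor's note: an illustrative sketch of cell access on a Row (the row key and
+ # column names are hypothetical; Cell is defined just below).
+ #
+ #     cell = Cell(
+ #         value=(100).to_bytes(8, "big", signed=True),
+ #         row_key=b"user#123",
+ #         family="stats",
+ #         qualifier=b"views",
+ #         timestamp_micros=1700000000000000,
+ #     )
+ #     row = Row(b"user#123", [cell])
+ #     row["stats"]             # all cells in the 'stats' family
+ #     row["stats", b"views"]   # cells for the (family, qualifier) pair
+ #     int(row[0])              # 100, decoded as a 64-bit big-endian signed int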
+
+@total_ordering
+class Cell:
+ """
+ Model class for cell data
+
+ Does not represent all data contained in the cell, only data returned by a
+ query.
+ Expected to be read-only to users, and written by backend
+
+ Args:
+ value: the byte string value of the cell
+ row_key: the row key of the cell
+ family: the family associated with the cell
+ qualifier: the column qualifier associated with the cell
+ timestamp_micros: the timestamp of the cell in microseconds
+ labels: the list of labels associated with the cell
+ """
+
+ __slots__ = (
+ "value",
+ "row_key",
+ "family",
+ "qualifier",
+ "timestamp_micros",
+ "labels",
+ )
+
+ def __init__(
+ self,
+ value: bytes,
+ row_key: bytes,
+ family: str,
+ qualifier: bytes | str,
+ timestamp_micros: int,
+ labels: list[str] | None = None,
+ ):
+ # Cell objects are not intended to be constructed by users.
+ # They are returned by the Bigtable backend.
+ self.value = value
+ self.row_key = row_key
+ self.family = family
+ if isinstance(qualifier, str):
+ qualifier = qualifier.encode()
+ self.qualifier = qualifier
+ self.timestamp_micros = timestamp_micros
+ self.labels = labels if labels is not None else []
+
+ def __int__(self) -> int:
+ """
+ Allows casting cell to int
+ Interprets value as a 64-bit big-endian signed integer, as expected by
+ ReadModifyWrite increment rule
+
+ Returns:
+ int: Value of the cell as a 64-bit big-endian signed integer
+ """
+ return int.from_bytes(self.value, byteorder="big", signed=True)
+
+ def _to_dict(self) -> dict[str, Any]:
+ """
+ Returns a dictionary representation of the cell in the Bigtable Cell
+ proto format
+
+ https://cloud.google.com/bigtable/docs/reference/data/rpc/google.bigtable.v2#cell
+
+ Returns:
+ dict: Dictionary representation of the cell
+ """
+ cell_dict: dict[str, Any] = {
+ "value": self.value,
+ }
+ cell_dict["timestamp_micros"] = self.timestamp_micros
+ if self.labels:
+ cell_dict["labels"] = self.labels
+ return cell_dict
+
+ def __str__(self) -> str:
+ """
+ Allows casting cell to str
+ Prints encoded byte string, same as printing value directly.
+
+ Returns:
+ str: Encoded byte string of the value
+ """
+ return str(self.value)
+
+ def __repr__(self):
+ """
+ Returns a string representation of the cell
+
+ Returns:
+ str: String representation of the cell
+ """
+ return f"Cell(value={self.value!r}, row_key={self.row_key!r}, family='{self.family}', qualifier={self.qualifier!r}, timestamp_micros={self.timestamp_micros}, labels={self.labels})"
+
+ """For Bigtable native ordering"""
+
+ def __lt__(self, other) -> bool:
+ """
+ Implements `<` operator
+
+ Args:
+ other: Cell to compare with
+ Returns:
+ bool: True if this cell is less than the other cell, False otherwise
+ Raises:
+ NotImplementedError: If other is not a Cell
+ """
+ if not isinstance(other, Cell):
+ raise NotImplementedError
+ this_ordering = (
+ self.family,
+ self.qualifier,
+ -self.timestamp_micros,
+ self.value,
+ self.labels,
+ )
+ other_ordering = (
+ other.family,
+ other.qualifier,
+ -other.timestamp_micros,
+ other.value,
+ other.labels,
+ )
+ return this_ordering < other_ordering
+
+ def __eq__(self, other) -> bool:
+ """
+ Implements `==` operator
+
+ Args:
+ other: Cell to compare with
+ Returns:
+ bool: True if cells are equal, False otherwise
+ """
+ if not isinstance(other, Cell):
+ return False
+ return (
+ self.row_key == other.row_key
+ and self.family == other.family
+ and self.qualifier == other.qualifier
+ and self.value == other.value
+ and self.timestamp_micros == other.timestamp_micros
+ and len(self.labels) == len(other.labels)
+ and all([label in other.labels for label in self.labels])
+ )
+
+ def __ne__(self, other) -> bool:
+ """
+ Implements `!=` operator
+
+ Args:
+ other: Cell to compare with
+ Returns:
+ bool: True if cells are not equal, False otherwise
+ """
+ return not self == other
+
+ def __hash__(self):
+ """
+ Implements `hash()` function to fingerprint cell
+
+ Returns:
+ int: hash value of the cell
+ """
+ return hash(
+ (
+ self.row_key,
+ self.family,
+ self.qualifier,
+ self.value,
+ self.timestamp_micros,
+ tuple(self.labels),
+ )
+ )
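To make the comparison and conversion helpers above concrete, here is a minimal usage sketch. It assumes the Cell class is exposed from `google.cloud.bigtable.data.row` (consistent with the neighboring file paths in this diff) and constructs cells by hand purely for illustration; as the constructor comment notes, real cells are returned by the Bigtable backend.

```python
from google.cloud.bigtable.data.row import Cell

# Hypothetical cells; in practice these come back from a read_rows call.
newer = Cell(b"\x00" * 7 + b"\x02", b"row-1", "stats", b"count", timestamp_micros=2_000_000)
older = Cell(b"\x00" * 7 + b"\x01", b"row-1", "stats", b"count", timestamp_micros=1_000_000)

# __int__ interprets the value as a 64-bit big-endian signed integer.
assert int(newer) == 2

# Native ordering sorts by (family, qualifier, -timestamp, ...), so newer cells sort first.
assert sorted([older, newer]) == [newer, older]
```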
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/row_filters.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/row_filters.py
new file mode 100644
index 000000000000..9f09133d533d
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/row_filters.py
@@ -0,0 +1,968 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Filters for Google Cloud Bigtable Row classes."""
+from __future__ import annotations
+
+import struct
+
+from typing import Any, Sequence, TYPE_CHECKING, overload
+from abc import ABC, abstractmethod
+
+from google.cloud._helpers import _microseconds_from_datetime # type: ignore
+from google.cloud._helpers import _to_bytes # type: ignore
+from google.cloud.bigtable_v2.types import data as data_v2_pb2
+
+if TYPE_CHECKING:
+ # import dependencies when type checking
+ from datetime import datetime
+
+_PACK_I64 = struct.Struct(">q").pack
+
+
+class RowFilter(ABC):
+ """Basic filter to apply to cells in a row.
+
+ These values can be combined via :class:`RowFilterChain`,
+ :class:`RowFilterUnion` and :class:`ConditionalRowFilter`.
+
+ .. note::
+
+ This class is a do-nothing base class for all row filters.
+ """
+
+ def _to_pb(self) -> data_v2_pb2.RowFilter:
+ """Converts the row filter to a protobuf.
+
+ Returns: The converted current object.
+ """
+ return data_v2_pb2.RowFilter(**self._to_dict())
+
+ @abstractmethod
+ def _to_dict(self) -> dict[str, Any]:
+ """Converts the row filter to a dict representation."""
+ pass
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}()"
+
+
+class _BoolFilter(RowFilter, ABC):
+ """Row filter that uses a boolean flag.
+
+ :type flag: bool
+ :param flag: An indicator if a setting is turned on or off.
+ """
+
+ def __init__(self, flag: bool):
+ self.flag = flag
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.flag == self.flag
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}(flag={self.flag})"
+
+
+class SinkFilter(_BoolFilter):
+ """Advanced row filter to skip parent filters.
+
+ :type flag: bool
+ :param flag: ADVANCED USE ONLY. Hook for introspection into the row filter.
+ Outputs all cells directly to the output of the read rather
+ than to any parent filter. Cannot be used within the
+ ``predicate_filter``, ``true_filter``, or ``false_filter``
+ of a :class:`ConditionalRowFilter`.
+ """
+
+ def _to_dict(self) -> dict[str, Any]:
+ """Converts the row filter to a dict representation."""
+ return {"sink": self.flag}
+
+
+class PassAllFilter(_BoolFilter):
+ """Row filter equivalent to not filtering at all.
+
+ :type flag: bool
+ :param flag: Matches all cells, regardless of input. Functionally
+ equivalent to leaving ``filter`` unset, but included for
+ completeness.
+ """
+
+ def _to_dict(self) -> dict[str, Any]:
+ """Converts the row filter to a dict representation."""
+ return {"pass_all_filter": self.flag}
+
+
+class BlockAllFilter(_BoolFilter):
+ """Row filter that doesn't match any cells.
+
+ :type flag: bool
+ :param flag: Does not match any cells, regardless of input. Useful for
+ temporarily disabling just part of a filter.
+ """
+
+ def _to_dict(self) -> dict[str, Any]:
+ """Converts the row filter to a dict representation."""
+ return {"block_all_filter": self.flag}
+
+
+class _RegexFilter(RowFilter, ABC):
+ """Row filter that uses a regular expression.
+
+ The ``regex`` must be a valid RE2 pattern. See Google's
+ `RE2 reference`_ for the accepted syntax.
+
+ .. _RE2 reference: https://github.com/google/re2/wiki/Syntax
+
+ :type regex: bytes or str
+ :param regex:
+ A regular expression (RE2) for some row filter. String values
+ will be encoded as ASCII.
+ """
+
+ def __init__(self, regex: str | bytes):
+ self.regex: bytes = _to_bytes(regex)
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.regex == self.regex
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}(regex={self.regex!r})"
+
+
+class RowKeyRegexFilter(_RegexFilter):
+ """Row filter for a row key regular expression.
+
+ The ``regex`` must be a valid RE2 pattern. See Google's
+ `RE2 reference`_ for the accepted syntax.
+
+ .. _RE2 reference: https://github.com/google/re2/wiki/Syntax
+
+ .. note::
+
+ Special care must be taken with the expression used. Since
+ each of these properties can contain arbitrary bytes, the ``\\C``
+ escape sequence must be used if a true wildcard is desired. The ``.``
+ character will not match the new line character ``\\n``, which may be
+ present in a binary value.
+
+ :type regex: bytes
+ :param regex: A regular expression (RE2) to match cells from rows with row
+ keys that satisfy this regex. For a
+ ``CheckAndMutateRowRequest``, this filter is unnecessary
+ since the row key is already specified.
+ """
+
+ def _to_dict(self) -> dict[str, Any]:
+ """Converts the row filter to a dict representation."""
+ return {"row_key_regex_filter": self.regex}
+
+
+class RowSampleFilter(RowFilter):
+ """Matches all cells from a row with probability p.
+
+ :type sample: float
+ :param sample: The probability of matching a cell (must be in the
+ interval ``(0, 1)``; the end points are excluded).
+ """
+
+ def __init__(self, sample: float):
+ self.sample: float = sample
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.sample == self.sample
+
+ def __ne__(self, other):
+ return not self == other
+
+ def _to_dict(self) -> dict[str, Any]:
+ """Converts the row filter to a dict representation."""
+ return {"row_sample_filter": self.sample}
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}(sample={self.sample})"
+
+
+class FamilyNameRegexFilter(_RegexFilter):
+ """Row filter for a family name regular expression.
+
+ The ``regex`` must be a valid RE2 pattern. See Google's
+ `RE2 reference`_ for the accepted syntax.
+
+ .. _RE2 reference: https://github.com/google/re2/wiki/Syntax
+
+ :type regex: str
+ :param regex: A regular expression (RE2) to match cells from columns in a
+ given column family. For technical reasons, the regex must
+ not contain the ``':'`` character, even if it is not being
+ used as a literal.
+ """
+
+ def _to_dict(self) -> dict[str, Any]:
+ """Converts the row filter to a dict representation."""
+ return {"family_name_regex_filter": self.regex}
+
+
+class ColumnQualifierRegexFilter(_RegexFilter):
+ """Row filter for a column qualifier regular expression.
+
+ The ``regex`` must be a valid RE2 pattern. See Google's
+ `RE2 reference`_ for the accepted syntax.
+
+ .. _RE2 reference: https://github.com/google/re2/wiki/Syntax
+
+ .. note::
+
+ Special care must be taken with the expression used. Since
+ each of these properties can contain arbitrary bytes, the ``\\C``
+ escape sequence must be used if a true wildcard is desired. The ``.``
+ character will not match the new line character ``\\n``, which may be
+ present in a binary value.
+
+ :type regex: bytes
+ :param regex: A regular expression (RE2) to match cells from columns that
+ match this regex (irrespective of column family).
+ """
+
+ def _to_dict(self) -> dict[str, Any]:
+ """Converts the row filter to a dict representation."""
+ return {"column_qualifier_regex_filter": self.regex}
+
+
+class TimestampRange(object):
+ """Range of time with inclusive lower and exclusive upper bounds.
+
+ :type start: :class:`datetime.datetime`
+ :param start: (Optional) The (inclusive) lower bound of the timestamp
+ range. If omitted, defaults to Unix epoch.
+
+ :type end: :class:`datetime.datetime`
+ :param end: (Optional) The (exclusive) upper bound of the timestamp
+ range. If omitted, no upper bound is used.
+ """
+
+ def __init__(self, start: "datetime" | None = None, end: "datetime" | None = None):
+ self.start: "datetime" | None = start
+ self.end: "datetime" | None = end
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.start == self.start and other.end == self.end
+
+ def __ne__(self, other):
+ return not self == other
+
+ def _to_pb(self) -> data_v2_pb2.TimestampRange:
+ """Converts the :class:`TimestampRange` to a protobuf.
+
+ Returns: The converted current object.
+ """
+ return data_v2_pb2.TimestampRange(**self._to_dict())
+
+ def _to_dict(self) -> dict[str, int]:
+ """Converts the timestamp range to a dict representation."""
+ timestamp_range_kwargs = {}
+ if self.start is not None:
+ start_time = _microseconds_from_datetime(self.start) // 1000 * 1000
+ timestamp_range_kwargs["start_timestamp_micros"] = start_time
+ if self.end is not None:
+ end_time = _microseconds_from_datetime(self.end)
+ if end_time % 1000 != 0:
+ # if not a whole millisecond value, round up
+ end_time = end_time // 1000 * 1000 + 1000
+ timestamp_range_kwargs["end_timestamp_micros"] = end_time
+ return timestamp_range_kwargs
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}(start={self.start}, end={self.end})"
+
+
+class TimestampRangeFilter(RowFilter):
+ """Row filter that limits cells to a range of time.
+
+ :type start: :class:`datetime.datetime`
+ :param start: (Optional) The (inclusive) lower bound of the timestamp range.
+
+ :type end: :class:`datetime.datetime`
+ :param end: (Optional) The (exclusive) upper bound of the timestamp range.
+ """
+
+ def __init__(self, start: "datetime" | None = None, end: "datetime" | None = None):
+ self.range_: TimestampRange = TimestampRange(start, end)
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.range_ == self.range_
+
+ def __ne__(self, other):
+ return not self == other
+
+ def _to_pb(self) -> data_v2_pb2.RowFilter:
+ """Converts the row filter to a protobuf.
+
+ First converts the ``range_`` on the current object to a protobuf and
+ then uses it in the ``timestamp_range_filter`` field.
+
+ Returns: The converted current object.
+ """
+ return data_v2_pb2.RowFilter(timestamp_range_filter=self.range_._to_pb())
+
+ def _to_dict(self) -> dict[str, Any]:
+ """Converts the row filter to a dict representation."""
+ return {"timestamp_range_filter": self.range_._to_dict()}
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}(start={self.range_.start!r}, end={self.range_.end!r})"
+
+
+class ColumnRangeFilter(RowFilter):
+ """A row filter to restrict to a range of columns.
+
+ Both the start and end column can be included or excluded in the range.
+ By default, we include them both, but this can be changed with optional
+ flags.
+
+ :type family_id: str
+ :param family_id: The column family that contains the columns. Must
+ be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+ :type start_qualifier: bytes
+ :param start_qualifier: The start of the range of columns. If no value is
+ used, the backend applies no lower bound to the
+ values.
+
+ :type end_qualifier: bytes
+ :param end_qualifier: The end of the range of columns. If no value is used,
+ the backend applies no upper bound to the values.
+
+ :type inclusive_start: bool
+ :param inclusive_start: Boolean indicating if the start column should be
+ included in the range (or excluded). Defaults
+ to :data:`True` if ``start_qualifier`` is passed and
+ no ``inclusive_start`` was given.
+
+ :type inclusive_end: bool
+ :param inclusive_end: Boolean indicating if the end column should be
+ included in the range (or excluded). Defaults
+ to :data:`True` if ``end_qualifier`` is passed and
+ no ``inclusive_end`` was given.
+
+ :raises: :class:`ValueError` if ``inclusive_start``
+ is set but no ``start_qualifier`` is given or if ``inclusive_end``
+ is set but no ``end_qualifier`` is given
+ """
+
+ def __init__(
+ self,
+ family_id: str,
+ start_qualifier: bytes | None = None,
+ end_qualifier: bytes | None = None,
+ inclusive_start: bool | None = None,
+ inclusive_end: bool | None = None,
+ ):
+ if inclusive_start is None:
+ inclusive_start = True
+ elif start_qualifier is None:
+ raise ValueError(
+ "inclusive_start was specified but no start_qualifier was given."
+ )
+ if inclusive_end is None:
+ inclusive_end = True
+ elif end_qualifier is None:
+ raise ValueError(
+ "inclusive_end was specified but no end_qualifier was given."
+ )
+
+ self.family_id = family_id
+
+ self.start_qualifier = start_qualifier
+ self.inclusive_start = inclusive_start
+
+ self.end_qualifier = end_qualifier
+ self.inclusive_end = inclusive_end
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return (
+ other.family_id == self.family_id
+ and other.start_qualifier == self.start_qualifier
+ and other.end_qualifier == self.end_qualifier
+ and other.inclusive_start == self.inclusive_start
+ and other.inclusive_end == self.inclusive_end
+ )
+
+ def __ne__(self, other):
+ return not self == other
+
+ def _to_pb(self) -> data_v2_pb2.RowFilter:
+ """Converts the row filter to a protobuf.
+
+ First converts to a :class:`.data_v2_pb2.ColumnRange` and then uses it
+ in the ``column_range_filter`` field.
+
+ Returns: The converted current object.
+ """
+ column_range = data_v2_pb2.ColumnRange(**self._range_to_dict())
+ return data_v2_pb2.RowFilter(column_range_filter=column_range)
+
+ def _range_to_dict(self) -> dict[str, str | bytes]:
+ """Converts the column range range to a dict representation."""
+ column_range_kwargs: dict[str, str | bytes] = {}
+ column_range_kwargs["family_name"] = self.family_id
+ if self.start_qualifier is not None:
+ if self.inclusive_start:
+ key = "start_qualifier_closed"
+ else:
+ key = "start_qualifier_open"
+ column_range_kwargs[key] = _to_bytes(self.start_qualifier)
+ if self.end_qualifier is not None:
+ if self.inclusive_end:
+ key = "end_qualifier_closed"
+ else:
+ key = "end_qualifier_open"
+ column_range_kwargs[key] = _to_bytes(self.end_qualifier)
+ return column_range_kwargs
+
+ def _to_dict(self) -> dict[str, Any]:
+ """Converts the row filter to a dict representation."""
+ return {"column_range_filter": self._range_to_dict()}
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}(family_id='{self.family_id}', start_qualifier={self.start_qualifier!r}, end_qualifier={self.end_qualifier!r}, inclusive_start={self.inclusive_start}, inclusive_end={self.inclusive_end})"
+
+
+class ValueRegexFilter(_RegexFilter):
+ """Row filter for a value regular expression.
+
+ The ``regex`` must be a valid RE2 pattern. See Google's
+ `RE2 reference`_ for the accepted syntax.
+
+ .. _RE2 reference: https://github.com/google/re2/wiki/Syntax
+
+ .. note::
+
+ Special care must be taken with the expression used. Since
+ each of these properties can contain arbitrary bytes, the ``\\C``
+ escape sequence must be used if a true wildcard is desired. The ``.``
+ character will not match the new line character ``\\n``, which may be
+ present in a binary value.
+
+ :type regex: bytes or str
+ :param regex: A regular expression (RE2) to match cells with values that
+ match this regex. String values will be encoded as ASCII.
+ """
+
+ def _to_dict(self) -> dict[str, bytes]:
+ """Converts the row filter to a dict representation."""
+ return {"value_regex_filter": self.regex}
+
+
+class LiteralValueFilter(ValueRegexFilter):
+ """Row filter for an exact value.
+
+ :type value: bytes or str or int
+ :param value:
+ a literal string, integer, or the equivalent bytes.
+ Integer values will be packed as signed 8-byte big-endian integers.
+ """
+
+ def __init__(self, value: bytes | str | int):
+ if isinstance(value, int):
+ value = _PACK_I64(value)
+ elif isinstance(value, str):
+ value = value.encode("utf-8")
+ value = self._write_literal_regex(value)
+ super(LiteralValueFilter, self).__init__(value)
+
+ @staticmethod
+ def _write_literal_regex(input_bytes: bytes) -> bytes:
+ """
+ Escape re2 special characters from literal bytes.
+
+ Extracted from: re2 QuoteMeta:
+ https://github.com/google/re2/blob/70f66454c255080a54a8da806c52d1f618707f8a/re2/re2.cc#L456
+ """
+ result = bytearray()
+ for byte in input_bytes:
+ # If this is part of a UTF-8 or Latin-1 character, we need
+ # to copy this byte without escaping. Experimentally this is
+ # what works correctly with the regexp library.
+ utf8_latin1_check = (byte & 128) == 0
+ if (
+ (byte < ord("a") or byte > ord("z"))
+ and (byte < ord("A") or byte > ord("Z"))
+ and (byte < ord("0") or byte > ord("9"))
+ and byte != ord("_")
+ and utf8_latin1_check
+ ):
+ if byte == 0:
+ # Special handling for null chars.
+ # Note that this special handling is not strictly required for RE2,
+ # but this quoting is required for other regexp libraries such as
+ # PCRE.
+ # Can't use "\\0" since the next character might be a digit.
+ result.extend([ord("\\"), ord("x"), ord("0"), ord("0")])
+ continue
+ result.append(ord(b"\\"))
+ result.append(byte)
+ return bytes(result)
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}(value={self.regex!r})"
+
+
+class ValueRangeFilter(RowFilter):
+ """A range of values to restrict to in a row filter.
+
+ Will only match cells that have values in this range.
+
+ Both the start and end value can be included or excluded in the range.
+ By default, we include them both, but this can be changed with optional
+ flags.
+
+ :type start_value: bytes
+ :param start_value: The start of the range of values. If no value is used,
+ the backend applies no lower bound to the values.
+
+ :type end_value: bytes
+ :param end_value: The end of the range of values. If no value is used,
+ the backend applies no upper bound to the values.
+
+ :type inclusive_start: bool
+ :param inclusive_start: Boolean indicating if the start value should be
+ included in the range (or excluded). Defaults
+ to :data:`True` if ``start_value`` is passed and
+ no ``inclusive_start`` was given.
+
+ :type inclusive_end: bool
+ :param inclusive_end: Boolean indicating if the end value should be
+ included in the range (or excluded). Defaults
+ to :data:`True` if ``end_value`` is passed and
+ no ``inclusive_end`` was given.
+
+ :raises: :class:`ValueError` if ``inclusive_start``
+ is set but no ``start_value`` is given or if ``inclusive_end``
+ is set but no ``end_value`` is given
+ """
+
+ def __init__(
+ self,
+ start_value: bytes | int | None = None,
+ end_value: bytes | int | None = None,
+ inclusive_start: bool | None = None,
+ inclusive_end: bool | None = None,
+ ):
+ if inclusive_start is None:
+ inclusive_start = True
+ elif start_value is None:
+ raise ValueError(
+ "inclusive_start was specified but no start_value was given."
+ )
+ if inclusive_end is None:
+ inclusive_end = True
+ elif end_value is None:
+ raise ValueError(
+ "inclusive_end was specified but no end_qualifier was given."
+ )
+ if isinstance(start_value, int):
+ start_value = _PACK_I64(start_value)
+ self.start_value = start_value
+ self.inclusive_start = inclusive_start
+
+ if isinstance(end_value, int):
+ end_value = _PACK_I64(end_value)
+ self.end_value = end_value
+ self.inclusive_end = inclusive_end
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return (
+ other.start_value == self.start_value
+ and other.end_value == self.end_value
+ and other.inclusive_start == self.inclusive_start
+ and other.inclusive_end == self.inclusive_end
+ )
+
+ def __ne__(self, other):
+ return not self == other
+
+ def _to_pb(self) -> data_v2_pb2.RowFilter:
+ """Converts the row filter to a protobuf.
+
+ First converts to a :class:`.data_v2_pb2.ValueRange` and then uses
+ it to create a row filter protobuf.
+
+ Returns: The converted current object.
+ """
+ value_range = data_v2_pb2.ValueRange(**self._range_to_dict())
+ return data_v2_pb2.RowFilter(value_range_filter=value_range)
+
+ def _range_to_dict(self) -> dict[str, bytes]:
+ """Converts the value range range to a dict representation."""
+ value_range_kwargs = {}
+ if self.start_value is not None:
+ if self.inclusive_start:
+ key = "start_value_closed"
+ else:
+ key = "start_value_open"
+ value_range_kwargs[key] = _to_bytes(self.start_value)
+ if self.end_value is not None:
+ if self.inclusive_end:
+ key = "end_value_closed"
+ else:
+ key = "end_value_open"
+ value_range_kwargs[key] = _to_bytes(self.end_value)
+ return value_range_kwargs
+
+ def _to_dict(self) -> dict[str, Any]:
+ """Converts the row filter to a dict representation."""
+ return {"value_range_filter": self._range_to_dict()}
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}(start_value={self.start_value!r}, end_value={self.end_value!r}, inclusive_start={self.inclusive_start}, inclusive_end={self.inclusive_end})"
+
+
+class _CellCountFilter(RowFilter, ABC):
+ """Row filter that uses an integer count of cells.
+
+ The cell count is used as an offset or a limit for the number
+ of results returned.
+
+ :type num_cells: int
+ :param num_cells: An integer count / offset / limit.
+ """
+
+ def __init__(self, num_cells: int):
+ self.num_cells = num_cells
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.num_cells == self.num_cells
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}(num_cells={self.num_cells})"
+
+
+class CellsRowOffsetFilter(_CellCountFilter):
+ """Row filter to skip cells in a row.
+
+ :type num_cells: int
+ :param num_cells: Skips the first N cells of the row.
+ """
+
+ def _to_dict(self) -> dict[str, int]:
+ """Converts the row filter to a dict representation."""
+ return {"cells_per_row_offset_filter": self.num_cells}
+
+
+class CellsRowLimitFilter(_CellCountFilter):
+ """Row filter to limit cells in a row.
+
+ :type num_cells: int
+ :param num_cells: Matches only the first N cells of the row.
+ """
+
+ def _to_dict(self) -> dict[str, int]:
+ """Converts the row filter to a dict representation."""
+ return {"cells_per_row_limit_filter": self.num_cells}
+
+
+class CellsColumnLimitFilter(_CellCountFilter):
+ """Row filter to limit cells in a column.
+
+ :type num_cells: int
+ :param num_cells: Matches only the most recent N cells within each column.
+ This filters a (family name, column) pair, based on
+ timestamps of each cell.
+ """
+
+ def _to_dict(self) -> dict[str, int]:
+ """Converts the row filter to a dict representation."""
+ return {"cells_per_column_limit_filter": self.num_cells}
+
+
+class StripValueTransformerFilter(_BoolFilter):
+ """Row filter that transforms cells into empty string (0 bytes).
+
+ :type flag: bool
+ :param flag: If :data:`True`, replaces each cell's value with the empty
+ string. As the name indicates, this is more useful as a
+ transformer than a generic query / filter.
+ """
+
+ def _to_dict(self) -> dict[str, Any]:
+ """Converts the row filter to a dict representation."""
+ return {"strip_value_transformer": self.flag}
+
+
+class ApplyLabelFilter(RowFilter):
+ """Filter to apply labels to cells.
+
+ Intended to be used as an intermediate filter on a pre-existing filtered
+ result set. This way, if two sets are combined, the label can tell where
+ the cell(s) originated. This allows the client to determine which results
+ were produced from which part of the filter.
+
+ .. note::
+
+ Due to a technical limitation of the backend, it is not currently
+ possible to apply multiple labels to a cell.
+
+ :type label: str
+ :param label: Label to apply to cells in the output row. Values must be
+ at most 15 characters long, and match the pattern
+ ``[a-z0-9\\-]+``.
+ """
+
+ def __init__(self, label: str):
+ self.label = label
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.label == self.label
+
+ def __ne__(self, other):
+ return not self == other
+
+ def _to_dict(self) -> dict[str, str]:
+ """Converts the row filter to a dict representation."""
+ return {"apply_label_transformer": self.label}
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}(label={self.label})"
+
+
+class _FilterCombination(RowFilter, Sequence[RowFilter], ABC):
+ """Chain of row filters.
+
+ Sends rows through several filters in sequence. The filters are "chained"
+ together to process a row. After the first filter is applied, the second
+ is applied to the filtered output and so on for subsequent filters.
+
+ :type filters: list
+ :param filters: List of :class:`RowFilter`
+ """
+
+ def __init__(self, filters: list[RowFilter] | None = None):
+ if filters is None:
+ filters = []
+ self.filters: list[RowFilter] = filters
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.filters == self.filters
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __len__(self) -> int:
+ return len(self.filters)
+
+ @overload
+ def __getitem__(self, index: int) -> RowFilter:
+ # overload signature for type checking
+ pass
+
+ @overload
+ def __getitem__(self, index: slice) -> list[RowFilter]:
+ # overload signature for type checking
+ pass
+
+ def __getitem__(self, index):
+ return self.filters[index]
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}(filters={self.filters})"
+
+ def __str__(self) -> str:
+ """
+ Returns a string representation of the filter chain.
+
+ Adds line breaks between each sub-filter for readability.
+ """
+ output = [f"{self.__class__.__name__}(["]
+ for filter_ in self.filters:
+ filter_lines = f"{filter_},".splitlines()
+ output.extend([f" {line}" for line in filter_lines])
+ output.append("])")
+ return "\n".join(output)
+
+
+class RowFilterChain(_FilterCombination):
+ """Chain of row filters.
+
+ Sends rows through several filters in sequence. The filters are "chained"
+ together to process a row. After the first filter is applied, the second
+ is applied to the filtered output and so on for subsequent filters.
+
+ :type filters: list
+ :param filters: List of :class:`RowFilter`
+ """
+
+ def _to_pb(self) -> data_v2_pb2.RowFilter:
+ """Converts the row filter to a protobuf.
+
+ Returns: The converted current object.
+ """
+ chain = data_v2_pb2.RowFilter.Chain(
+ filters=[row_filter._to_pb() for row_filter in self.filters]
+ )
+ return data_v2_pb2.RowFilter(chain=chain)
+
+ def _to_dict(self) -> dict[str, Any]:
+ """Converts the row filter to a dict representation."""
+ return {"chain": {"filters": [f._to_dict() for f in self.filters]}}
+
+
+class RowFilterUnion(_FilterCombination):
+ """Union of row filters.
+
+ Sends rows through several filters simultaneously, then
+ merges / interleaves all the filtered results together.
+
+ If multiple cells are produced with the same column and timestamp,
+ they will all appear in the output row in an unspecified mutual order.
+
+ :type filters: list
+ :param filters: List of :class:`RowFilter`
+ """
+
+ def _to_pb(self) -> data_v2_pb2.RowFilter:
+ """Converts the row filter to a protobuf.
+
+ Returns: The converted current object.
+ """
+ interleave = data_v2_pb2.RowFilter.Interleave(
+ filters=[row_filter._to_pb() for row_filter in self.filters]
+ )
+ return data_v2_pb2.RowFilter(interleave=interleave)
+
+ def _to_dict(self) -> dict[str, Any]:
+ """Converts the row filter to a dict representation."""
+ return {"interleave": {"filters": [f._to_dict() for f in self.filters]}}
+
+
+class ConditionalRowFilter(RowFilter):
+ """Conditional row filter which exhibits ternary behavior.
+
+ Executes one of two filters based on another filter. If the ``predicate_filter``
+ returns any cells in the row, then ``true_filter`` is executed. If not,
+ then ``false_filter`` is executed.
+
+ .. note::
+
+ The ``predicate_filter`` does not execute atomically with the true and false
+ filters, which may lead to inconsistent or unexpected results.
+
+ Additionally, executing a :class:`ConditionalRowFilter` has poor
+ performance on the server, especially when ``false_filter`` is set.
+
+ :type predicate_filter: :class:`RowFilter`
+ :param predicate_filter: The filter to condition on before executing the
+ true/false filters.
+
+ :type true_filter: :class:`RowFilter`
+ :param true_filter: (Optional) The filter to execute if there are any cells
+ matching ``predicate_filter``. If not provided, no results
+ will be returned in the true case.
+
+ :type false_filter: :class:`RowFilter`
+ :param false_filter: (Optional) The filter to execute if there are no cells
+ matching ``predicate_filter``. If not provided, no results
+ will be returned in the false case.
+ """
+
+ def __init__(
+ self,
+ predicate_filter: RowFilter,
+ true_filter: RowFilter | None = None,
+ false_filter: RowFilter | None = None,
+ ):
+ self.predicate_filter = predicate_filter
+ self.true_filter = true_filter
+ self.false_filter = false_filter
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return (
+ other.predicate_filter == self.predicate_filter
+ and other.true_filter == self.true_filter
+ and other.false_filter == self.false_filter
+ )
+
+ def __ne__(self, other):
+ return not self == other
+
+ def _to_pb(self) -> data_v2_pb2.RowFilter:
+ """Converts the row filter to a protobuf.
+
+ Returns: The converted current object.
+ """
+ condition_kwargs = {"predicate_filter": self.predicate_filter._to_pb()}
+ if self.true_filter is not None:
+ condition_kwargs["true_filter"] = self.true_filter._to_pb()
+ if self.false_filter is not None:
+ condition_kwargs["false_filter"] = self.false_filter._to_pb()
+ condition = data_v2_pb2.RowFilter.Condition(**condition_kwargs)
+ return data_v2_pb2.RowFilter(condition=condition)
+
+ def _condition_to_dict(self) -> dict[str, Any]:
+ """Converts the condition to a dict representation."""
+ condition_kwargs = {"predicate_filter": self.predicate_filter._to_dict()}
+ if self.true_filter is not None:
+ condition_kwargs["true_filter"] = self.true_filter._to_dict()
+ if self.false_filter is not None:
+ condition_kwargs["false_filter"] = self.false_filter._to_dict()
+ return condition_kwargs
+
+ def _to_dict(self) -> dict[str, Any]:
+ """Converts the row filter to a dict representation."""
+ return {"condition": self._condition_to_dict()}
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}(predicate_filter={self.predicate_filter!r}, true_filter={self.true_filter!r}, false_filter={self.false_filter!r})"
+
+ def __str__(self) -> str:
+ output = [f"{self.__class__.__name__}("]
+ for filter_type in ("predicate_filter", "true_filter", "false_filter"):
+ filter_ = getattr(self, filter_type)
+ if filter_ is None:
+ continue
+ # add the new filter set, adding indentations for readability
+ filter_lines = f"{filter_type}={filter_},".splitlines()
+ output.extend(f" {line}" for line in filter_lines)
+ output.append(")")
+ return "\n".join(output)
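A brief sketch of how the filter classes defined in this module compose. The `_to_dict()` call is a private helper, used here only to show the nested structure that feeds the RowFilter proto; in normal use the filter object is simply passed to a read request.

```python
from google.cloud.bigtable.data.row_filters import (
    CellsColumnLimitFilter,
    FamilyNameRegexFilter,
    RowFilterChain,
    RowFilterUnion,
)

# Latest cell per column, restricted to two column families.
latest = CellsColumnLimitFilter(1)
families = RowFilterUnion(
    filters=[FamilyNameRegexFilter("stats"), FamilyNameRegexFilter("meta")]
)
combined = RowFilterChain(filters=[families, latest])

# The combination classes behave like sequences ...
assert len(combined) == 2 and combined[0] is families

# ... and render to the nested dict structure used to build the RowFilter proto.
as_dict = combined._to_dict()
assert "chain" in as_dict and len(as_dict["chain"]["filters"]) == 2
```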
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/encryption_info.py b/packages/google-cloud-bigtable/google/cloud/bigtable/encryption_info.py
new file mode 100644
index 000000000000..1757297bcbeb
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/encryption_info.py
@@ -0,0 +1,64 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Class for encryption info for tables and backups."""
+
+from google.cloud.bigtable.error import Status
+
+
+class EncryptionInfo:
+ """Encryption information for a given resource.
+
+ If this resource is protected with customer managed encryption, the in-use Google
+ Cloud Key Management Service (KMS) key versions will be specified along with their
+ status.
+
+ :type encryption_type: int
+ :param encryption_type: See :class:`enums.EncryptionInfo.EncryptionType`
+
+ :type encryption_status: google.cloud.bigtable.error.Status
+ :param encryption_status: The encryption status.
+
+ :type kms_key_version: str
+ :param kms_key_version: The key version used for encryption.
+ """
+
+ @classmethod
+ def _from_pb(cls, info_pb):
+ return cls(
+ info_pb.encryption_type,
+ Status(info_pb.encryption_status),
+ info_pb.kms_key_version,
+ )
+
+ def __init__(self, encryption_type, encryption_status, kms_key_version):
+ self.encryption_type = encryption_type
+ self.encryption_status = encryption_status
+ self.kms_key_version = kms_key_version
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+
+ if not isinstance(other, type(self)):
+ return NotImplemented
+
+ return (
+ self.encryption_type == other.encryption_type
+ and self.encryption_status == other.encryption_status
+ and self.kms_key_version == other.kms_key_version
+ )
+
+ def __ne__(self, other):
+ return not self == other
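A small illustrative sketch of EncryptionInfo equality. The inputs are built by hand here (a `google.rpc.status_pb2.Status` message and an enum constant from the `enums` module later in this diff); in practice instances are produced from admin API responses via `_from_pb`.

```python
from google.rpc import status_pb2

from google.cloud.bigtable import enums
from google.cloud.bigtable.encryption_info import EncryptionInfo
from google.cloud.bigtable.error import Status

ok = Status(status_pb2.Status(code=0, message="OK"))
info_a = EncryptionInfo(
    enums.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, ok, ""
)
info_b = EncryptionInfo(
    enums.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, ok, ""
)

# Equality is field-by-field, so two separately-built values compare equal.
assert info_a == info_b
```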
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py
new file mode 100644
index 000000000000..327b2f828c3b
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py
@@ -0,0 +1,223 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Wrappers for gapic enum types."""
+
+from google.cloud.bigtable_admin_v2.types import common
+from google.cloud.bigtable_admin_v2.types import instance
+from google.cloud.bigtable_admin_v2.types import table
+
+
+class StorageType(object):
+ """
+ Storage media types for persisting Bigtable data.
+
+ Attributes:
+ UNSPECIFIED (int): The user did not specify a storage type.
+ SSD (int): Flash (SSD) storage should be used.
+ HDD (int): Magnetic drive (HDD) storage should be used.
+ """
+
+ UNSPECIFIED = common.StorageType.STORAGE_TYPE_UNSPECIFIED
+ SSD = common.StorageType.SSD
+ HDD = common.StorageType.HDD
+
+
+class Instance(object):
+ class State(object):
+ """
+ Possible states of an instance.
+
+ Attributes:
+ NOT_KNOWN (int): The state of the instance could not be
+ determined.
+ READY (int): The instance has been successfully created and can
+ serve requests to its tables.
+ CREATING (int): The instance is currently being created, and may be
+ destroyed if the creation process encounters an error.
+ """
+
+ NOT_KNOWN = instance.Instance.State.STATE_NOT_KNOWN
+ READY = instance.Instance.State.READY
+ CREATING = instance.Instance.State.CREATING
+
+ class Type(object):
+ """
+ The type of the instance.
+
+ Attributes:
+ UNSPECIFIED (int): The type of the instance is unspecified.
+ If set when creating an instance, a ``PRODUCTION`` instance will
+ be created. If set when updating an instance, the type will be
+ left unchanged.
+ PRODUCTION (int): An instance meant for production use.
+ ``serve_nodes`` must be set on the cluster.
+ DEVELOPMENT (int): The instance is meant for development and testing
+ purposes only; it has no performance or uptime guarantees and is not
+ covered by SLA.
+ After a development instance is created, it can be upgraded by
+ updating the instance to type ``PRODUCTION``. An instance created
+ as a production instance cannot be changed to a development instance.
+ When creating a development instance, ``serve_nodes`` on the cluster
+ must not be set.
+ """
+
+ UNSPECIFIED = instance.Instance.Type.TYPE_UNSPECIFIED
+ PRODUCTION = instance.Instance.Type.PRODUCTION
+ DEVELOPMENT = instance.Instance.Type.DEVELOPMENT
+
+
+class Cluster(object):
+ class State(object):
+ """
+ Possible states of a cluster.
+
+ Attributes:
+ NOT_KNOWN (int): The state of the cluster could not be determined.
+ READY (int): The cluster has been successfully created and is ready
+ to serve requests.
+ CREATING (int): The cluster is currently being created, and may be
+ destroyed if the creation process encounters an error.
+ A cluster may not be able to serve requests while being created.
+ RESIZING (int): The cluster is currently being resized, and may
+ revert to its previous node count if the process encounters an error.
+ A cluster is still capable of serving requests while being resized,
+ but may exhibit performance as if its number of allocated nodes is
+ between the starting and requested states.
+ DISABLED (int): The cluster has no backing nodes. The data (tables)
+ still exist, but no operations can be performed on the cluster.
+ """
+
+ NOT_KNOWN = instance.Cluster.State.STATE_NOT_KNOWN
+ READY = instance.Cluster.State.READY
+ CREATING = instance.Cluster.State.CREATING
+ RESIZING = instance.Cluster.State.RESIZING
+ DISABLED = instance.Cluster.State.DISABLED
+
+
+class RoutingPolicyType(object):
+ """
+ The type of the routing policy for app_profile.
+
+ Attributes:
+ ANY (int): Read/write requests may be routed to any cluster in the
+ instance, and will fail over to another cluster in the event of
+ transient errors or delays.
+ Choosing this option sacrifices read-your-writes consistency to
+ improve availability.
+ See
+ https://cloud.google.com/bigtable/docs/reference/admin/rpc/google.bigtable.admin.v2#google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny
+
+ SINGLE (int): Unconditionally routes all read/write requests to a
+ specific cluster.
+ This option preserves read-your-writes consistency, but does not improve
+ availability.
+ See
+ https://cloud.google.com/bigtable/docs/reference/admin/rpc/google.bigtable.admin.v2#google.bigtable.admin.v2.AppProfile.SingleClusterRouting
+ """
+
+ ANY = 1
+ SINGLE = 2
+
+
+class Table(object):
+ class View(object):
+ """
+ Defines a view over a table's fields.
+
+ Attributes:
+ VIEW_UNSPECIFIED (int): Uses the default view for each method
+ as documented in its request.
+ NAME_ONLY (int): Only populates ``name``.
+ SCHEMA_VIEW (int): Only populates ``name`` and fields related
+ to the table's schema.
+ REPLICATION_VIEW (int): This is a private alpha release of
+ Cloud Bigtable replication. This feature is not currently available
+ to most Cloud Bigtable customers. This feature might be changed in
+ backward-incompatible ways and is not recommended for production use.
+ It is not subject to any SLA or deprecation policy.
+
+ Only populates ``name`` and fields related to the table's
+ replication state.
+ FULL (int): Populates all fields.
+ """
+
+ VIEW_UNSPECIFIED = table.Table.View.VIEW_UNSPECIFIED
+ NAME_ONLY = table.Table.View.NAME_ONLY
+ SCHEMA_VIEW = table.Table.View.SCHEMA_VIEW
+ REPLICATION_VIEW = table.Table.View.REPLICATION_VIEW
+ ENCRYPTION_VIEW = table.Table.View.ENCRYPTION_VIEW
+ FULL = table.Table.View.FULL
+
+ class ReplicationState(object):
+ """
+ Table replication states.
+
+ Attributes:
+ STATE_NOT_KNOWN (int): The replication state of the table is unknown
+ in this cluster.
+ INITIALIZING (int): The cluster was recently created, and the table
+ must finish copying
+ over pre-existing data from other clusters before it can begin
+ receiving live replication updates and serving
+ ``Data API`` requests.
+ PLANNED_MAINTENANCE (int): The table is temporarily unable to serve
+ ``Data API`` requests from this
+ cluster due to planned internal maintenance.
+ UNPLANNED_MAINTENANCE (int): The table is temporarily unable to serve
+ ``Data API`` requests from this
+ cluster due to unplanned or emergency maintenance.
+ READY (int): The table can serve
+ ``Data API`` requests from this
+ cluster. Depending on replication delay, reads may not immediately
+ reflect the state of the table in other clusters.
+ """
+
+ STATE_NOT_KNOWN = table.Table.ClusterState.ReplicationState.STATE_NOT_KNOWN
+ INITIALIZING = table.Table.ClusterState.ReplicationState.INITIALIZING
+ PLANNED_MAINTENANCE = (
+ table.Table.ClusterState.ReplicationState.PLANNED_MAINTENANCE
+ )
+ UNPLANNED_MAINTENANCE = (
+ table.Table.ClusterState.ReplicationState.UNPLANNED_MAINTENANCE
+ )
+ READY = table.Table.ClusterState.ReplicationState.READY
+
+
+class EncryptionInfo:
+ class EncryptionType:
+ """Possible encryption types for a resource.
+
+ Attributes:
+ ENCRYPTION_TYPE_UNSPECIFIED (int): Encryption type was not specified, though
+ data at rest remains encrypted.
+ GOOGLE_DEFAULT_ENCRYPTION (int): The data backing this resource is encrypted
+ at rest with a key that is fully managed by Google. No key version or
+ status will be populated. This is the default state.
+ CUSTOMER_MANAGED_ENCRYPTION (int): The data backing this resource is
+ encrypted at rest with a key that is managed by the customer. The in-use
+ version of the key and its status are populated for CMEK-protected
+ tables. CMEK-protected backups are pinned to the key version that was in
+ use at the time the backup was taken. This key version is populated but
+ its status is not tracked and is reported as `UNKNOWN`.
+ """
+
+ ENCRYPTION_TYPE_UNSPECIFIED = (
+ table.EncryptionInfo.EncryptionType.ENCRYPTION_TYPE_UNSPECIFIED
+ )
+ GOOGLE_DEFAULT_ENCRYPTION = (
+ table.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION
+ )
+ CUSTOMER_MANAGED_ENCRYPTION = (
+ table.EncryptionInfo.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION
+ )
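The wrapper classes above re-export the gapic enum members, so the constants compare equal to the underlying proto-plus values; a quick illustrative check:

```python
from google.cloud.bigtable import enums
from google.cloud.bigtable_admin_v2.types import common, instance

# The wrapper attributes are the gapic enum members themselves.
assert enums.StorageType.SSD == common.StorageType.SSD
assert enums.Instance.Type.PRODUCTION == instance.Instance.Type.PRODUCTION

# They can be passed anywhere the admin API expects the enum's integer value.
assert int(enums.StorageType.SSD) == common.StorageType.SSD.value
```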
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/error.py b/packages/google-cloud-bigtable/google/cloud/bigtable/error.py
new file mode 100644
index 000000000000..075bb01ccd04
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/error.py
@@ -0,0 +1,64 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Class for error status."""
+
+
+class Status:
+ """A status, comprising a code and a message.
+
+ See: `Cloud APIs Errors `_
+
+ This is a thin wrapper for ``google.rpc.status_pb2.Status``.
+
+ :type status_pb: google.rpc.status_pb2.Status
+ :param status_pb: The status protocol buffer.
+ """
+
+ def __init__(self, status_pb):
+ self.status_pb = status_pb
+
+ @property
+ def code(self):
+ """The status code.
+
+ Values are defined in ``google.rpc.code_pb2.Code``.
+
+ See: `google.rpc.Code
+ `_
+
+ :rtype: int
+ :returns: The status code.
+ """
+ return self.status_pb.code
+
+ @property
+ def message(self):
+ """A human readable status message.
+
+ :rtype: str
+ :returns: The status message.
+ """
+ return self.status_pb.message
+
+ def __repr__(self):
+ return repr(self.status_pb)
+
+ def __eq__(self, other):
+ if isinstance(other, type(self)):
+ return self.status_pb == other.status_pb
+ return NotImplemented
+
+ def __ne__(self, other):
+ return not self == other
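A short usage sketch of the Status wrapper, with a hand-built `google.rpc.status_pb2.Status` message standing in for one returned by the API:

```python
from google.rpc import status_pb2

from google.cloud.bigtable.error import Status

pb = status_pb2.Status(code=5, message="Requested resource was not found")
status = Status(pb)

# The wrapper exposes the code and message of the underlying proto ...
assert status.code == 5
assert status.message == "Requested resource was not found"

# ... and compares by the wrapped proto, not by identity.
assert status == Status(
    status_pb2.Status(code=5, message="Requested resource was not found")
)
```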
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py
new file mode 100644
index 000000000000..4800b05591a5
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+__version__ = "2.34.0" # {x-release-please-version}
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/helpers.py b/packages/google-cloud-bigtable/google/cloud/bigtable/helpers.py
new file mode 100644
index 000000000000..78af430892fc
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/helpers.py
@@ -0,0 +1,31 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TypeVar, Iterable, Generator, Tuple
+
+from itertools import islice
+
+T = TypeVar("T")
+
+
+# batched landed in standard library in Python 3.11.
+def batched(iterable: Iterable[T], n: int) -> Generator[Tuple[T, ...], None, None]:
+ # batched('ABCDEFG', 3) → ABC DEF G
+ if n < 1:
+ raise ValueError("n must be at least one")
+ it = iter(iterable)
+ batch = tuple(islice(it, n))
+ while batch:
+ yield batch
+ batch = tuple(islice(it, n))
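A quick usage sketch of the `batched` helper; the row keys are invented for illustration:

```python
from google.cloud.bigtable.helpers import batched

row_keys = [f"row-{i}".encode() for i in range(7)]

# Splits the iterable into tuples of at most n items; the last batch may be short.
chunks = list(batched(row_keys, 3))
assert chunks == [
    (b"row-0", b"row-1", b"row-2"),
    (b"row-3", b"row-4", b"row-5"),
    (b"row-6",),
]
```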
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py
new file mode 100644
index 000000000000..23fb1c95dece
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py
@@ -0,0 +1,789 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""User-friendly container for Google Cloud Bigtable Instance."""
+
+import re
+
+from google.cloud.bigtable.app_profile import AppProfile
+from google.cloud.bigtable.cluster import Cluster
+from google.cloud.bigtable.table import Table
+
+from google.protobuf import field_mask_pb2
+
+from google.cloud.bigtable_admin_v2.types import instance
+
+from google.iam.v1 import options_pb2 # type: ignore
+
+from google.api_core.exceptions import NotFound
+
+from google.cloud.bigtable.policy import Policy
+
+import warnings
+
+
+_INSTANCE_NAME_RE = re.compile(
+ r"^projects/(?P[^/]+)/" r"instances/(?P[a-z][-a-z0-9]*)$"
+)
+
+_INSTANCE_CREATE_WARNING = """
+Use of `instance.create({0}, {1}, {2})` will be deprecated.
+Please replace with
+`cluster = instance.cluster({0}, {1}, {2})`
+`instance.create(clusters=[cluster])`."""
+
+
+class Instance(object):
+ """Representation of a Google Cloud Bigtable Instance.
+
+ We can use an :class:`Instance` to:
+
+ * :meth:`reload` itself
+ * :meth:`create` itself
+ * :meth:`update` itself
+ * :meth:`delete` itself
+
+ .. note::
+
+ For now, we leave out the ``default_storage_type`` (an enum)
+ which if not sent will end up as :data:`.data_v2_pb2.STORAGE_SSD`.
+
+ :type instance_id: str
+ :param instance_id: The ID of the instance.
+
+ :type client: :class:`Client`
+ :param client: The client that owns the instance. Provides
+ authorization and a project ID.
+
+ :type display_name: str
+ :param display_name: (Optional) The display name for the instance in the
+ Cloud Console UI. (Must be between 4 and 30
+ characters.) If this value is not set in the
+ constructor, will fall back to the instance ID.
+
+ :type instance_type: int
+ :param instance_type: (Optional) The type of the instance.
+ Possible values are represented
+ by the following constants:
+ :data:`google.cloud.bigtable.enums.Instance.Type.PRODUCTION`,
+ :data:`google.cloud.bigtable.enums.Instance.Type.DEVELOPMENT`.
+ Defaults to
+ :data:`google.cloud.bigtable.enums.Instance.Type.UNSPECIFIED`.
+
+ :type labels: dict
+ :param labels: (Optional) Labels are a flexible and lightweight
+ mechanism for organizing cloud resources into groups
+ that reflect a customer's organizational needs and
+ deployment strategies. They can be used to filter
+ resources and aggregate metrics. Label keys must be
+ between 1 and 63 characters long. Maximum 64 labels can
+ be associated with a given resource. Label values must
+ be between 0 and 63 characters long. Keys and values
+ must both be under 128 bytes.
+
+ :type _state: int
+ :param _state: (`OutputOnly`)
+ The current state of the instance.
+ Possible values are represented by the following constants:
+ :data:`google.cloud.bigtable.enums.Instance.State.STATE_NOT_KNOWN`.
+ :data:`google.cloud.bigtable.enums.Instance.State.READY`.
+ :data:`google.cloud.bigtable.enums.Instance.State.CREATING`.
+ """
+
+ def __init__(
+ self,
+ instance_id,
+ client,
+ display_name=None,
+ instance_type=None,
+ labels=None,
+ _state=None,
+ ):
+ self.instance_id = instance_id
+ self._client = client
+ self.display_name = display_name or instance_id
+ self.type_ = instance_type
+ self.labels = labels
+ self._state = _state
+
+ def _update_from_pb(self, instance_pb):
+ """Refresh self from the server-provided protobuf.
+ Helper for :meth:`from_pb` and :meth:`reload`.
+ """
+ if not instance_pb.display_name: # Simple field (string)
+ raise ValueError("Instance protobuf does not contain display_name")
+ self.display_name = instance_pb.display_name
+ self.type_ = instance_pb.type_
+ self.labels = dict(instance_pb.labels)
+ self._state = instance_pb.state
+
+ @classmethod
+ def from_pb(cls, instance_pb, client):
+ """Creates an instance instance from a protobuf.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_instance_from_pb]
+ :end-before: [END bigtable_api_instance_from_pb]
+ :dedent: 4
+
+ :type instance_pb: :class:`instance.Instance`
+ :param instance_pb: An instance protobuf object.
+
+ :type client: :class:`Client`
+ :param client: The client that owns the instance.
+
+ :rtype: :class:`Instance`
+ :returns: The instance parsed from the protobuf response.
+ :raises: :class:`ValueError` if the instance
+ name does not match
+ ``projects/{project}/instances/{instance_id}``
+ or if the parsed project ID does not match the project ID
+ on the client.
+ """
+ match = _INSTANCE_NAME_RE.match(instance_pb.name)
+ if match is None:
+ raise ValueError(
+ "Instance protobuf name was not in the " "expected format.",
+ instance_pb.name,
+ )
+ if match.group("project") != client.project:
+ raise ValueError(
+ "Project ID on instance does not match the " "project ID on the client"
+ )
+ instance_id = match.group("instance_id")
+
+ result = cls(instance_id, client)
+ result._update_from_pb(instance_pb)
+ return result
+
+ @property
+ def name(self):
+ """Instance name used in requests.
+
+ .. note::
+ This property will not change if ``instance_id`` does not,
+ but the return value is not cached.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_instance_name]
+ :end-before: [END bigtable_api_instance_name]
+ :dedent: 4
+
+ The instance name is of the form
+
+ ``"projects/{project}/instances/{instance_id}"``
+
+ :rtype: str
+ :returns: Return a fully-qualified instance string.
+ """
+ return self._client.instance_admin_client.instance_path(
+ project=self._client.project, instance=self.instance_id
+ )
+
+ @property
+ def state(self):
+ """google.cloud.bigtable.enums.Instance.State: state of Instance.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_instance_state]
+ :end-before: [END bigtable_api_instance_state]
+ :dedent: 4
+
+ """
+ return self._state
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ # NOTE: This does not compare the configuration values, such as
+ # the display_name. Instead, it only compares
+ # identifying values instance ID and client. This is
+ # intentional, since the same instance can be in different states
+ # if not synchronized. Instances with similar instance
+ # settings but different clients can't be used in the same way.
+ return other.instance_id == self.instance_id and other._client == self._client
+
+ def __ne__(self, other):
+ return not self == other
+
+ def create(
+ self,
+ location_id=None,
+ serve_nodes=None,
+ default_storage_type=None,
+ clusters=None,
+ min_serve_nodes=None,
+ max_serve_nodes=None,
+ cpu_utilization_percent=None,
+ ):
+ """Create this instance.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_create_prod_instance]
+ :end-before: [END bigtable_api_create_prod_instance]
+ :dedent: 4
+
+ .. note::
+
+ Uses the ``project`` and ``instance_id`` on the current
+ :class:`Instance` in addition to the ``display_name``.
+ To change them before creating, reset the values via
+
+ .. code:: python
+
+ instance.display_name = 'New display name'
+ instance.instance_id = 'i-changed-my-mind'
+
+ before calling :meth:`create`.
+
+ :type location_id: str
+ :param location_id: (Creation Only) The location where nodes and
+ storage of the cluster owned by this instance
+ reside. For best performance, clients should be
+ located as close as possible to cluster's location.
+ For list of supported locations refer to
+ https://cloud.google.com/bigtable/docs/locations
+
+
+ :type serve_nodes: int
+ :param serve_nodes: (Optional) The number of nodes in the instance's
+ cluster; used to set up the instance's cluster.
+
+ :type default_storage_type: int
+ :param default_storage_type: (Optional) The storage media type for
+ persisting Bigtable data.
+ Possible values are represented
+ by the following constants:
+ :data:`google.cloud.bigtable.enums.StorageType.SSD`,
+ :data:`google.cloud.bigtable.enums.StorageType.HDD`.
+ Defaults to
+ :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`.
+
+ :type clusters: class:`~[~google.cloud.bigtable.cluster.Cluster]`
+ :param clusters: List of clusters to be created.
+
+ :rtype: :class:`~google.api_core.operation.Operation`
+ :returns: The long-running operation corresponding to the create
+ operation.
+
+ :raises: :class:`ValueError` if both
+ ``clusters`` and one of ``location_id``, ``serve_nodes``
+ and ``default_storage_type`` are set.
+ """
+
+ if clusters is None:
+ warnings.warn(
+ _INSTANCE_CREATE_WARNING.format(
+ "location_id", "serve_nodes", "default_storage_type"
+ ),
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ cluster_id = "{}-cluster".format(self.instance_id)
+
+ clusters = [
+ self.cluster(
+ cluster_id,
+ location_id=location_id,
+ serve_nodes=serve_nodes,
+ default_storage_type=default_storage_type,
+ min_serve_nodes=None,
+ max_serve_nodes=None,
+ cpu_utilization_percent=None,
+ )
+ ]
+ elif (
+ location_id is not None
+ or serve_nodes is not None
+ or default_storage_type is not None
+ or min_serve_nodes is not None
+ or max_serve_nodes is not None
+ or cpu_utilization_percent is not None
+ ):
+ raise ValueError(
+ "clusters and one of location_id, serve_nodes, \
+ default_storage_type can not be set \
+ simultaneously."
+ )
+
+ instance_pb = instance.Instance(
+ display_name=self.display_name, type_=self.type_, labels=self.labels
+ )
+
+ parent = self._client.project_path
+
+ return self._client.instance_admin_client.create_instance(
+ request={
+ "parent": parent,
+ "instance_id": self.instance_id,
+ "instance": instance_pb,
+ "clusters": {c.cluster_id: c._to_pb() for c in clusters},
+ }
+ )
+
+ def exists(self):
+ """Check whether the instance already exists.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_check_instance_exists]
+ :end-before: [END bigtable_api_check_instance_exists]
+ :dedent: 4
+
+ :rtype: bool
+ :returns: True if the instance exists, else False.
+ """
+ try:
+ self._client.instance_admin_client.get_instance(request={"name": self.name})
+ return True
+ # NOTE: There could be other exceptions that are returned to the user.
+ except NotFound:
+ return False
+
+ def reload(self):
+ """Reload the metadata for this instance.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_reload_instance]
+ :end-before: [END bigtable_api_reload_instance]
+ :dedent: 4
+ """
+ instance_pb = self._client.instance_admin_client.get_instance(
+ request={"name": self.name}
+ )
+
+ # NOTE: _update_from_pb does not check that the project and
+ # instance ID on the response match the request.
+ self._update_from_pb(instance_pb)
+
+ def update(self):
+ """Updates an instance within a project.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_update_instance]
+ :end-before: [END bigtable_api_update_instance]
+ :dedent: 4
+
+ .. note::
+
+ Updates any or all of the following values:
+ ``display_name``
+ ``type``
+ ``labels``
+ To change a value before updating,
+ assign the new value via
+
+ .. code:: python
+
+ instance.display_name = 'New display name'
+
+ before calling :meth:`update`.
+
+ :rtype: :class:`~google.api_core.operation.Operation`
+ :returns: The long-running operation corresponding to the update
+ operation.
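+
+ A short sketch of the typical call pattern; waiting on the returned
+ operation is optional, and the timeout below is illustrative:
+
+ .. code:: python
+
+ instance.display_name = 'New display name'
+ operation = instance.update()
+ operation.result(timeout=60)  # wait for the update to complete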
+ """
+ update_mask_pb = field_mask_pb2.FieldMask()
+ if self.display_name is not None:
+ update_mask_pb.paths.append("display_name")
+ if self.type_ is not None:
+ update_mask_pb.paths.append("type")
+ if self.labels is not None:
+ update_mask_pb.paths.append("labels")
+ instance_pb = instance.Instance(
+ name=self.name,
+ display_name=self.display_name,
+ type_=self.type_,
+ labels=self.labels,
+ )
+
+ return self._client.instance_admin_client.partial_update_instance(
+ request={"instance": instance_pb, "update_mask": update_mask_pb}
+ )
+
+ def delete(self):
+ """Delete this instance.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_delete_instance]
+ :end-before: [END bigtable_api_delete_instance]
+ :dedent: 4
+
+ Marks an instance and all of its tables for permanent deletion
+ in 7 days.
+
+ Immediately upon completion of the request:
+
+ * Billing will cease for all of the instance's reserved resources.
+ * The instance's ``delete_time`` field will be set 7 days in
+ the future.
+
+ Soon afterward:
+
+ * All tables within the instance will become unavailable.
+
+ At the instance's ``delete_time``:
+
+ * The instance and **all of its tables** will immediately and
+ irrevocably disappear from the API, and their data will be
+ permanently deleted.
+ """
+ self._client.instance_admin_client.delete_instance(request={"name": self.name})
+
+ def get_iam_policy(self, requested_policy_version=None):
+ """Gets the access control policy for an instance resource.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_get_iam_policy]
+ :end-before: [END bigtable_api_get_iam_policy]
+ :dedent: 4
+
+ :type requested_policy_version: int or ``NoneType``
+ :param requested_policy_version: Optional. The version of IAM policies to request.
+ If a policy with a condition is requested without
+ setting this, the server will return an error.
+ This must be set to a value of 3 to retrieve IAM
+ policies containing conditions. This is to prevent
+ client code that isn't aware of IAM conditions from
+ interpreting and modifying policies incorrectly.
+ The service might return a policy with a version lower
+ than the one that was requested, based on the
+ feature syntax in the policy fetched.
+
+ :rtype: :class:`google.cloud.bigtable.policy.Policy`
+ :returns: The current IAM policy of this instance
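+
+ As a sketch, requesting version 3 so that conditional bindings (if
+ any) are returned, then reading roles from the raw bindings:
+
+ .. code:: python
+
+ policy = instance.get_iam_policy(requested_policy_version=3)
+ roles = [binding["role"] for binding in policy.bindings]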
+ """
+ args = {"resource": self.name}
+ if requested_policy_version is not None:
+ args["options_"] = options_pb2.GetPolicyOptions(
+ requested_policy_version=requested_policy_version
+ )
+
+ instance_admin_client = self._client.instance_admin_client
+
+ resp = instance_admin_client.get_iam_policy(request=args)
+ return Policy.from_pb(resp)
+
+ def set_iam_policy(self, policy):
+ """Sets the access control policy on an instance resource. Replaces any
+ existing policy.
+
+ For more information about policies, please see the documentation for
+ :class:`google.cloud.bigtable.policy.Policy`.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_set_iam_policy]
+ :end-before: [END bigtable_api_set_iam_policy]
+ :dedent: 4
+
+ :type policy: :class:`google.cloud.bigtable.policy.Policy`
+ :param policy: A new IAM policy to replace the current IAM policy
+ of this instance
+
+ :rtype: :class:`google.cloud.bigtable.policy.Policy`
+ :returns: The current IAM policy of this instance.
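+
+ A minimal read-modify-write sketch; the member shown is a placeholder,
+ and the dict-style assignment is only valid for version 1 policies
+ without conditions:
+
+ .. code:: python
+
+ policy = instance.get_iam_policy()
+ policy["roles/bigtable.reader"] = ["user:reader@example.com"]
+ updated = instance.set_iam_policy(policy)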
+ """
+ instance_admin_client = self._client.instance_admin_client
+ resp = instance_admin_client.set_iam_policy(
+ request={"resource": self.name, "policy": policy.to_pb()}
+ )
+ return Policy.from_pb(resp)
+
+ def test_iam_permissions(self, permissions):
+ """Returns permissions that the caller has on the specified instance
+ resource.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_test_iam_permissions]
+ :end-before: [END bigtable_api_test_iam_permissions]
+ :dedent: 4
+
+ :type permissions: list
+ :param permissions: The set of permissions to check for
+ the ``resource``. Permissions with wildcards (such as '*'
+ or 'storage.*') are not allowed. For more information see
+ `IAM Overview
+ `_.
+ `Bigtable Permissions
+ `_.
+
+ :rtype: list
+ :returns: A list of permission strings allowed on the instance.
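+
+ As a sketch (the permission names below are illustrative examples,
+ not an exhaustive list):
+
+ .. code:: python
+
+ to_check = ["bigtable.clusters.create", "bigtable.tables.create"]
+ allowed = instance.test_iam_permissions(to_check)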
+ """
+ instance_admin_client = self._client.instance_admin_client
+ resp = instance_admin_client.test_iam_permissions(
+ request={"resource": self.name, "permissions": permissions}
+ )
+ return list(resp.permissions)
+
+ def cluster(
+ self,
+ cluster_id,
+ location_id=None,
+ serve_nodes=None,
+ default_storage_type=None,
+ kms_key_name=None,
+ min_serve_nodes=None,
+ max_serve_nodes=None,
+ cpu_utilization_percent=None,
+ ):
+ """Factory to create a cluster associated with this instance.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_create_cluster]
+ :end-before: [END bigtable_api_create_cluster]
+ :dedent: 4
+
+ :type cluster_id: str
+ :param cluster_id: The ID of the cluster.
+
+ :type location_id: str
+ :param location_id: (Creation Only) The location where this cluster's
+ nodes and storage reside. For best performance,
+ clients should be located as close as possible to
+ this cluster.
+ For a list of supported locations, refer to
+ https://cloud.google.com/bigtable/docs/locations
+
+ :type serve_nodes: int
+ :param serve_nodes: (Optional) The number of nodes in the cluster.
+
+ :type default_storage_type: int
+ :param default_storage_type: (Optional) The type of storage
+ Possible values are represented by the
+ following constants:
+ :data:`google.cloud.bigtable.enums.StorageType.SSD`,
+ :data:`google.cloud.bigtable.enums.StorageType.HDD`.
+ Defaults to
+ :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`.
+
+ :rtype: :class:`~google.cloud.bigtable.cluster.Cluster`
+ :returns: A cluster owned by this instance.
+
+ :type kms_key_name: str
+ :param kms_key_name: (Optional, Creation Only) The name of the KMS customer
+ managed encryption key (CMEK) to use for at-rest encryption
+ of data in this cluster. If omitted, Google's default
+ encryption will be used. If specified, the requirements for
+ this key are:
+
+ 1) The Cloud Bigtable service account associated with the
+ project that contains the cluster must be granted the
+ ``cloudkms.cryptoKeyEncrypterDecrypter`` role on the
+ CMEK.
+ 2) Only regional keys can be used and the region of the
+ CMEK key must match the region of the cluster.
+ 3) All clusters within an instance must use the same CMEK
+ key.
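+
+ A brief sketch of the factory call with a CMEK key; all names are
+ placeholders. Note that this call only builds the local object; the
+ cluster still has to be created on the server, for example by passing
+ it to this instance's :meth:`create`:
+
+ .. code:: python
+
+ cluster = instance.cluster(
+ "my-cmek-cluster",
+ location_id="us-east1-b",
+ serve_nodes=3,
+ kms_key_name="projects/my-project/locations/us-east1/keyRings/my-ring/cryptoKeys/my-key",
+ )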
+ """
+ return Cluster(
+ cluster_id,
+ self,
+ location_id=location_id,
+ serve_nodes=serve_nodes,
+ default_storage_type=default_storage_type,
+ kms_key_name=kms_key_name,
+ min_serve_nodes=min_serve_nodes,
+ max_serve_nodes=max_serve_nodes,
+ cpu_utilization_percent=cpu_utilization_percent,
+ )
+
+ def list_clusters(self):
+ """List the clusters in this instance.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_list_clusters_on_instance]
+ :end-before: [END bigtable_api_list_clusters_on_instance]
+ :dedent: 4
+
+ :rtype: tuple
+ :returns:
+ (clusters, failed_locations), where 'clusters' is list of
+ :class:`google.cloud.bigtable.instance.Cluster`, and
+ 'failed_locations' is a list of locations which could not
+ be resolved.
+ """
+ resp = self._client.instance_admin_client.list_clusters(
+ request={"parent": self.name}
+ )
+ clusters = [Cluster.from_pb(cluster, self) for cluster in resp.clusters]
+ return clusters, resp.failed_locations
+
+ def table(self, table_id, mutation_timeout=None, app_profile_id=None):
+ """Factory to create a table associated with this instance.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_create_table]
+ :end-before: [END bigtable_api_create_table]
+ :dedent: 4
+
+ :type table_id: str
+ :param table_id: The ID of the table.
+
+ :type mutation_timeout: int
+ :param mutation_timeout: (Optional) The overriding mutation timeout.
+
+ :type app_profile_id: str
+ :param app_profile_id: (Optional) The unique name of the AppProfile.
+
+ :rtype: :class:`Table `
+ :returns: The table owned by this instance.
+ """
+ return Table(
+ table_id,
+ self,
+ app_profile_id=app_profile_id,
+ mutation_timeout=mutation_timeout,
+ )
+
+ def list_tables(self):
+ """List the tables in this instance.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_list_tables]
+ :end-before: [END bigtable_api_list_tables]
+ :dedent: 4
+
+ :rtype: list of :class:`Table `
+ :returns: The list of tables owned by the instance.
+ :raises: :class:`ValueError ` if one of the
+ returned tables has a name that is not of the expected format.
+ """
+ table_list_pb = self._client.table_admin_client.list_tables(
+ request={"parent": self.name}
+ )
+
+ result = []
+ for table_pb in table_list_pb.tables:
+ table_prefix = self.name + "/tables/"
+ if not table_pb.name.startswith(table_prefix):
+ raise ValueError(
+ "Table name {} not of expected format".format(table_pb.name)
+ )
+ table_id = table_pb.name[len(table_prefix) :]
+ result.append(self.table(table_id))
+
+ return result
+
+ def app_profile(
+ self,
+ app_profile_id,
+ routing_policy_type=None,
+ description=None,
+ cluster_id=None,
+ multi_cluster_ids=None,
+ allow_transactional_writes=None,
+ ):
+ """Factory to create AppProfile associated with this instance.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_create_app_profile]
+ :end-before: [END bigtable_api_create_app_profile]
+ :dedent: 4
+
+ :type app_profile_id: str
+ :param app_profile_id: The ID of the AppProfile. Must be of the form
+ ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+ :type routing_policy_type: int
+ :param routing_policy_type: The type of the routing policy.
+ Possible values are represented
+ by the following constants:
+ :data:`google.cloud.bigtable.enums.RoutingPolicyType.ANY`
+ :data:`google.cloud.bigtable.enums.RoutingPolicyType.SINGLE`
+
+ :type description: str
+ :param description: (Optional) Long form description of the use
+ case for this AppProfile.
+
+ :type cluster_id: str
+ :param cluster_id: (Optional) Unique cluster_id which is only required
+ when routing_policy_type is
+ ROUTING_POLICY_TYPE_SINGLE.
+
+ :type multi_cluster_ids: list
+ :param multi_cluster_ids: (Optional) The set of clusters to route to.
+ The order is ignored; clusters will be tried in order of distance.
+ If left empty, all clusters are eligible.
+
+ :type allow_transactional_writes: bool
+ :param allow_transactional_writes: (Optional) If true, allow
+ transactional writes for
+ ROUTING_POLICY_TYPE_SINGLE.
+
+ :rtype: :class:`~google.cloud.bigtable.app_profile.AppProfile`
+ :returns: AppProfile for this instance.
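+
+ A short sketch using single-cluster routing; it assumes
+ ``from google.cloud.bigtable import enums`` and placeholder IDs:
+
+ .. code:: python
+
+ app_profile = instance.app_profile(
+ "my-app-profile",
+ routing_policy_type=enums.RoutingPolicyType.SINGLE,
+ cluster_id="my-cluster-id",
+ allow_transactional_writes=False,
+ )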
+ """
+ return AppProfile(
+ app_profile_id,
+ self,
+ routing_policy_type=routing_policy_type,
+ description=description,
+ cluster_id=cluster_id,
+ multi_cluster_ids=multi_cluster_ids,
+ allow_transactional_writes=allow_transactional_writes,
+ )
+
+ def list_app_profiles(self):
+ """Lists information about AppProfiles in an instance.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_list_app_profiles]
+ :end-before: [END bigtable_api_list_app_profiles]
+ :dedent: 4
+
+ :rtype: list of :class:`~google.cloud.bigtable.app_profile.AppProfile`
+ :returns: A list of
+ :class:`~google.cloud.bigtable.app_profile.AppProfile`
+ instances.
+ """
+ resp = self._client.instance_admin_client.list_app_profiles(
+ request={"parent": self.name}
+ )
+ return [AppProfile.from_pb(app_profile, self) for app_profile in resp]
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py b/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py
new file mode 100644
index 000000000000..8396642fb23c
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py
@@ -0,0 +1,255 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+
+from google.api_core.iam import Policy as BasePolicy
+from google.cloud._helpers import _to_bytes # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+
+"""IAM roles supported by Bigtable Instance resource"""
+BIGTABLE_ADMIN_ROLE = "roles/bigtable.admin"
+"""Administers all instances within a project, including the data stored
+within tables. Can create new instances. Intended for project administrators.
+"""
+BIGTABLE_USER_ROLE = "roles/bigtable.user"
+"""Provides read-write access to the data stored within tables. Intended for
+application developers or service accounts.
+"""
+BIGTABLE_READER_ROLE = "roles/bigtable.reader"
+"""Provides read-only access to the data stored within tables. Intended for
+data scientists, dashboard generators, and other data-analysis scenarios.
+"""
+BIGTABLE_VIEWER_ROLE = "roles/bigtable.viewer"
+"""Provides no data access. Intended as a minimal set of permissions to access
+the GCP Console for Cloud Bigtable.
+"""
+"""For detailed information
+See
+https://cloud.google.com/bigtable/docs/access-control#roles
+"""
+
+
+class Policy(BasePolicy):
+ """IAM Policy
+
+ See
+ https://cloud.google.com/bigtable/docs/reference/admin/rpc/google.iam.v1#policy
+
+ A Policy consists of a list of bindings. A binding binds a list of
+ members to a role, where the members can be user accounts, Google
+ groups, Google domains, and service accounts. A role is a named list
+ of permissions defined by IAM.
+ For more information about predefined roles currently supported
+ by Bigtable Instance please see
+ `Predefined roles
+ `_.
+ For more information about custom roles please see
+ `Custom roles
+ `_.
+
+ :type etag: str
+ :param etag: etag is used for optimistic concurrency control as a way to
+ help prevent simultaneous updates of a policy from overwriting
+ each other. It is strongly suggested that systems make use
+ of the etag in the read-modify-write cycle to perform policy
+ updates in order to avoid race conditions:
+ An etag is returned in the response to getIamPolicy, and
+ systems are expected to put that etag in the request to
+ setIamPolicy to ensure that their change will be applied to
+ the same version of the policy.
+
+ If no etag is provided in the call to setIamPolicy, then the
+ existing policy is overwritten blindly.
+ :type version: int
+ :param version: The syntax schema version of the policy.
+
+ Note:
+ Using conditions in bindings requires the policy's version to be set
+ to `3` or greater, depending on the versions that are currently supported.
+
+ Accessing the policy using dict operations will raise InvalidOperationException
+ when the policy's version is set to 3.
+
+ Use the policy.bindings getter/setter to retrieve and modify the policy's bindings.
+
+ See:
+ IAM Policy https://cloud.google.com/iam/reference/rest/v1/Policy
+ Policy versions https://cloud.google.com/iam/docs/policies#versions
+ Conditions overview https://cloud.google.com/iam/docs/conditions-overview.
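+
+ As a sketch of the ``bindings`` accessor recommended above (the role
+ constant is defined in this module; the member is a placeholder):
+
+ .. code:: python
+
+ policy = Policy(version=3)
+ policy.bindings = [
+ {"role": BIGTABLE_USER_ROLE, "members": {"user:dev@example.com"}}
+ ]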
+ """
+
+ def __init__(self, etag=None, version=None):
+ BasePolicy.__init__(
+ self, etag=etag if etag is None else _to_bytes(etag), version=version
+ )
+
+ @property
+ def bigtable_admins(self):
+ """Access to bigtable.admin role memebers
+
+ Raise InvalidOperationException if version is greater than 1 or policy contains conditions.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_admins_policy]
+ :end-before: [END bigtable_api_admins_policy]
+ :dedent: 4
+ """
+ result = set()
+ for member in self.get(BIGTABLE_ADMIN_ROLE, ()):
+ result.add(member)
+ return frozenset(result)
+
+ @property
+ def bigtable_readers(self):
+ """Access to bigtable.reader role memebers
+
+ Raise InvalidOperationException if version is greater than 1 or policy contains conditions.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_readers_policy]
+ :end-before: [END bigtable_api_readers_policy]
+ :dedent: 4
+ """
+ result = set()
+ for member in self.get(BIGTABLE_READER_ROLE, ()):
+ result.add(member)
+ return frozenset(result)
+
+ @property
+ def bigtable_users(self):
+ """Access to bigtable.user role memebers
+
+ Raise InvalidOperationException if version is greater than 1 or policy contains conditions.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_users_policy]
+ :end-before: [END bigtable_api_users_policy]
+ :dedent: 4
+ """
+ result = set()
+ for member in self.get(BIGTABLE_USER_ROLE, ()):
+ result.add(member)
+ return frozenset(result)
+
+ @property
+ def bigtable_viewers(self):
+ """Access to bigtable.viewer role memebers
+
+ Raise InvalidOperationException if version is greater than 1 or policy contains conditions.
+
+ For example:
+
+ .. literalinclude:: snippets.py
+ :start-after: [START bigtable_api_viewers_policy]
+ :end-before: [END bigtable_api_viewers_policy]
+ :dedent: 4
+ """
+ result = set()
+ for member in self.get(BIGTABLE_VIEWER_ROLE, ()):
+ result.add(member)
+ return frozenset(result)
+
+ @classmethod
+ def from_pb(cls, policy_pb):
+ """Factory: create a policy from a protobuf message.
+
+ Args:
+ policy_pb (google.iam.policy_pb2.Policy): message returned by
+ ``get_iam_policy`` gRPC API.
+
+ Returns:
+ :class:`Policy`: the parsed policy
+ """
+ policy = cls(policy_pb.etag, policy_pb.version)
+
+ policy.bindings = bindings = []
+ for binding_pb in policy_pb.bindings:
+ binding = {"role": binding_pb.role, "members": set(binding_pb.members)}
+ condition = binding_pb.condition
+ if condition and condition.expression:
+ binding["condition"] = {
+ "title": condition.title,
+ "description": condition.description,
+ "expression": condition.expression,
+ }
+ bindings.append(binding)
+
+ return policy
+
+ def to_pb(self):
+ """Render a protobuf message.
+
+ Returns:
+ google.iam.policy_pb2.Policy: a message to be passed to the
+ ``set_iam_policy`` gRPC API.
+ """
+
+ return policy_pb2.Policy(
+ etag=self.etag,
+ version=self.version or 0,
+ bindings=[
+ policy_pb2.Binding(
+ role=binding["role"],
+ members=sorted(binding["members"]),
+ condition=binding.get("condition"),
+ )
+ for binding in self.bindings
+ if binding["members"]
+ ],
+ )
+
+ @classmethod
+ def from_api_repr(cls, resource):
+ """Factory: create a policy from a JSON resource.
+
+ Overrides the base class version to store :attr:`etag` as bytes.
+
+ Args:
+ resource (dict): JSON policy resource returned by the
+ ``getIamPolicy`` REST API.
+
+ Returns:
+ :class:`Policy`: the parsed policy
+ """
+ etag = resource.get("etag")
+
+ if etag is not None:
+ resource = resource.copy()
+ resource["etag"] = base64.b64decode(etag.encode("ascii"))
+
+ return super(Policy, cls).from_api_repr(resource)
+
+ def to_api_repr(self):
+ """Render a JSON policy resource.
+
+ Overrides the base class version to convert :attr:`etag` from bytes
+ to JSON-compatible base64-encoded text.
+
+ Returns:
+ dict: a JSON resource to be passed to the
+ ``setIamPolicy`` REST API.
+ """
+ resource = super(Policy, self).to_api_repr()
+
+ if self.etag is not None:
+ resource["etag"] = base64.b64encode(self.etag).decode("ascii")
+
+ return resource
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/py.typed b/packages/google-cloud-bigtable/google/cloud/bigtable/py.typed
new file mode 100644
index 000000000000..889d34043118
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561.
+# The google-cloud-bigtable package uses inline types.
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py
new file mode 100644
index 000000000000..752458a08a79
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py
@@ -0,0 +1,1267 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""User-friendly container for Google Cloud Bigtable Row."""
+
+
+import struct
+
+from google.cloud._helpers import _datetime_from_microseconds # type: ignore
+from google.cloud._helpers import _microseconds_from_datetime # type: ignore
+from google.cloud._helpers import _to_bytes # type: ignore
+from google.cloud.bigtable_v2.types import data as data_v2_pb2
+
+
+_PACK_I64 = struct.Struct(">q").pack
+
+MAX_MUTATIONS = 100000
+"""The maximum number of mutations that a row can accumulate."""
+
+_MISSING_COLUMN_FAMILY = "Column family {} is not among the cells stored in this row."
+_MISSING_COLUMN = (
+ "Column {} is not among the cells stored in this row in the column family {}."
+)
+_MISSING_INDEX = (
+ "Index {!r} is not valid for the cells stored in this row for column {} "
+ "in the column family {}. There are {} such cells."
+)
+
+
+class Row(object):
+ """Base representation of a Google Cloud Bigtable Row.
+
+ This class has three subclasses corresponding to the three
+ RPC methods for sending row mutations:
+
+ * :class:`DirectRow` for ``MutateRow``
+ * :class:`ConditionalRow` for ``CheckAndMutateRow``
+ * :class:`AppendRow` for ``ReadModifyWriteRow``
+
+ :type row_key: bytes
+ :param row_key: The key for the current row.
+
+ :type table: :class:`Table `
+ :param table: (Optional) The table that owns the row.
+ """
+
+ def __init__(self, row_key, table=None):
+ self._row_key = _to_bytes(row_key)
+ self._table = table
+
+ @property
+ def row_key(self):
+ """Row key.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_row_key]
+ :end-before: [END bigtable_api_row_row_key]
+ :dedent: 4
+
+ :rtype: bytes
+ :returns: The key for the current row.
+ """
+ return self._row_key
+
+ @property
+ def table(self):
+ """Row table.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_table]
+ :end-before: [END bigtable_api_row_table]
+ :dedent: 4
+
+ :rtype: table: :class:`Table `
+ :returns: table: The table that owns the row.
+ """
+ return self._table
+
+
+class _SetDeleteRow(Row):
+ """Row helper for setting or deleting cell values.
+
+ Implements helper methods to add mutations to set or delete cell contents:
+
+ * :meth:`set_cell`
+ * :meth:`delete`
+ * :meth:`delete_cell`
+ * :meth:`delete_cells`
+
+ :type row_key: bytes
+ :param row_key: The key for the current row.
+
+ :type table: :class:`Table `
+ :param table: The table that owns the row.
+ """
+
+ ALL_COLUMNS = object()
+ """Sentinel value used to indicate all columns in a column family."""
+
+ def _get_mutations(self, state=None):
+ """Gets the list of mutations for a given state.
+
+ This method is intended to be implemented by subclasses.
+
+ ``state`` may not need to be used by all subclasses.
+
+ :type state: bool
+ :param state: The state that the mutation should be
+ applied in.
+
+ :raises: :class:`NotImplementedError `
+ always.
+ """
+ raise NotImplementedError
+
+ def _set_cell(self, column_family_id, column, value, timestamp=None, state=None):
+ """Helper for :meth:`set_cell`
+
+ Adds a mutation to set the value in a specific cell.
+
+ ``state`` is unused by :class:`DirectRow` but is used by
+ subclasses.
+
+ :type column_family_id: str
+ :param column_family_id: The column family that contains the column.
+ Must be of the form
+ ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+ :type column: bytes
+ :param column: The column within the column family where the cell
+ is located.
+
+ :type value: bytes or :class:`int`
+ :param value: The value to set in the cell. If an integer is used,
+ will be interpreted as a 64-bit big-endian signed
+ integer (8 bytes).
+
+ :type timestamp: :class:`datetime.datetime`
+ :param timestamp: (Optional) The timestamp of the operation.
+
+ :type state: bool
+ :param state: (Optional) The state that is passed along to
+ :meth:`_get_mutations`.
+ """
+ column = _to_bytes(column)
+ if isinstance(value, int):
+ value = _PACK_I64(value)
+ value = _to_bytes(value)
+ if timestamp is None:
+ # Use -1 for current Bigtable server time.
+ timestamp_micros = -1
+ else:
+ timestamp_micros = _microseconds_from_datetime(timestamp)
+ # Truncate to millisecond granularity.
+ timestamp_micros -= timestamp_micros % 1000
+
+ mutation_val = data_v2_pb2.Mutation.SetCell(
+ family_name=column_family_id,
+ column_qualifier=column,
+ timestamp_micros=timestamp_micros,
+ value=value,
+ )
+ mutation_pb = data_v2_pb2.Mutation(set_cell=mutation_val)
+ self._get_mutations(state).append(mutation_pb)
+
+ def _delete(self, state=None):
+ """Helper for :meth:`delete`
+
+ Adds a delete mutation (for the entire row) to the accumulated
+ mutations.
+
+ ``state`` is unused by :class:`DirectRow` but is used by
+ subclasses.
+
+ :type state: bool
+ :param state: (Optional) The state that is passed along to
+ :meth:`_get_mutations`.
+ """
+ mutation_val = data_v2_pb2.Mutation.DeleteFromRow()
+ mutation_pb = data_v2_pb2.Mutation(delete_from_row=mutation_val)
+ self._get_mutations(state).append(mutation_pb)
+
+ def _delete_cells(self, column_family_id, columns, time_range=None, state=None):
+ """Helper for :meth:`delete_cell` and :meth:`delete_cells`.
+
+ ``state`` is unused by :class:`DirectRow` but is used by
+ subclasses.
+
+ :type column_family_id: str
+ :param column_family_id: The column family that contains the column
+ or columns with cells being deleted. Must be
+ of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+ :type columns: :class:`list` of :class:`str` /
+ :func:`unicode `, or :class:`object`
+ :param columns: The columns within the column family that will have
+ cells deleted. If :attr:`ALL_COLUMNS` is used then
+ the entire column family will be deleted from the row.
+
+ :type time_range: :class:`TimestampRange`
+ :param time_range: (Optional) The range of time within which cells
+ should be deleted.
+
+ :type state: bool
+ :param state: (Optional) The state that is passed along to
+ :meth:`_get_mutations`.
+ """
+ mutations_list = self._get_mutations(state)
+ if columns is self.ALL_COLUMNS:
+ mutation_val = data_v2_pb2.Mutation.DeleteFromFamily(
+ family_name=column_family_id
+ )
+ mutation_pb = data_v2_pb2.Mutation(delete_from_family=mutation_val)
+ mutations_list.append(mutation_pb)
+ else:
+ delete_kwargs = {}
+ if time_range is not None:
+ delete_kwargs["time_range"] = time_range.to_pb()
+
+ to_append = []
+ for column in columns:
+ column = _to_bytes(column)
+ # time_range will never change if present, but the rest of
+ # delete_kwargs will
+ delete_kwargs.update(
+ family_name=column_family_id, column_qualifier=column
+ )
+ mutation_val = data_v2_pb2.Mutation.DeleteFromColumn(**delete_kwargs)
+ mutation_pb = data_v2_pb2.Mutation(delete_from_column=mutation_val)
+ to_append.append(mutation_pb)
+
+ # We don't add the mutations until all columns have been
+ # processed without error.
+ mutations_list.extend(to_append)
+
+
+class DirectRow(_SetDeleteRow):
+ """Google Cloud Bigtable Row for sending "direct" mutations.
+
+ These mutations directly set or delete cell contents:
+
+ * :meth:`set_cell`
+ * :meth:`delete`
+ * :meth:`delete_cell`
+ * :meth:`delete_cells`
+
+ These methods can be used directly::
+
+ >>> row = table.row(b'row-key1')
+ >>> row.set_cell(u'fam', b'col1', b'cell-val')
+ >>> row.delete_cell(u'fam', b'col2')
+
+ .. note::
+
+ A :class:`DirectRow` accumulates mutations locally via the
+ :meth:`set_cell`, :meth:`delete`, :meth:`delete_cell` and
+ :meth:`delete_cells` methods. To actually send these mutations to the
+ Google Cloud Bigtable API, you must call :meth:`commit`.
+
+ :type row_key: bytes
+ :param row_key: The key for the current row.
+
+ :type table: :class:`Table `
+ :param table: (Optional) The table that owns the row. This is
+ used for :meth:`commit` only. Alternatively,
+ DirectRows can be persisted via
+ :meth:`~google.cloud.bigtable.table.Table.mutate_rows`.
+ """
+
+ def __init__(self, row_key, table=None):
+ super(DirectRow, self).__init__(row_key, table)
+ self._pb_mutations = []
+
+ def _get_mutations(self, state=None): # pylint: disable=unused-argument
+ """Gets the list of mutations for a given state.
+
+ ``state`` is unused by :class:`DirectRow` but is used by
+ subclasses.
+
+ :type state: bool
+ :param state: The state that the mutation should be
+ applied in.
+
+ :rtype: list
+ :returns: The list to add new mutations to (for the current state).
+ """
+ return self._pb_mutations
+
+ def get_mutations_size(self):
+ """Gets the total mutations size for current row
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_get_mutations_size]
+ :end-before: [END bigtable_api_row_get_mutations_size]
+ :dedent: 4
+ """
+
+ mutation_size = 0
+ for mutation in self._get_mutations():
+ mutation_size += mutation._pb.ByteSize()
+
+ return mutation_size
+
+ def set_cell(self, column_family_id, column, value, timestamp=None):
+ """Sets a value in this row.
+
+ The cell is determined by the ``row_key`` of this :class:`DirectRow`
+ and the ``column``. The ``column`` must be in an existing
+ :class:`.ColumnFamily` (as determined by ``column_family_id``).
+
+ .. note::
+
+ This method adds a mutation to the accumulated mutations on this
+ row, but does not make an API request. To actually
+ send an API request (with the mutations) to the Google Cloud
+ Bigtable API, call :meth:`commit`.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_set_cell]
+ :end-before: [END bigtable_api_row_set_cell]
+ :dedent: 4
+
+ :type column_family_id: str
+ :param column_family_id: The column family that contains the column.
+ Must be of the form
+ ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+ :type column: bytes
+ :param column: The column within the column family where the cell
+ is located.
+
+ :type value: bytes or :class:`int`
+ :param value: The value to set in the cell. If an integer is used,
+ will be interpreted as a 64-bit big-endian signed
+ integer (8 bytes).
+
+ :type timestamp: :class:`datetime.datetime`
+ :param timestamp: (Optional) The timestamp of the operation.
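+
+ A small sketch showing how integer and bytes values are handled; the
+ column family ``stats`` and the qualifiers are placeholders, and
+ ``table`` is assumed to be an existing table object:
+
+ .. code:: python
+
+ row = table.row(b'row-key-1')
+ row.set_cell('stats', b'views', 1)  # packed as a 64-bit big-endian int
+ row.set_cell('stats', b'note', b'hello')  # bytes are stored as-is
+ row.commit()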
+ """
+ self._set_cell(column_family_id, column, value, timestamp=timestamp, state=None)
+
+ def delete(self):
+ """Deletes this row from the table.
+
+ .. note::
+
+ This method adds a mutation to the accumulated mutations on this
+ row, but does not make an API request. To actually
+ send an API request (with the mutations) to the Google Cloud
+ Bigtable API, call :meth:`commit`.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_delete]
+ :end-before: [END bigtable_api_row_delete]
+ :dedent: 4
+ """
+ self._delete(state=None)
+
+ def delete_cell(self, column_family_id, column, time_range=None):
+ """Deletes cell in this row.
+
+ .. note::
+
+ This method adds a mutation to the accumulated mutations on this
+ row, but does not make an API request. To actually
+ send an API request (with the mutations) to the Google Cloud
+ Bigtable API, call :meth:`commit`.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_delete_cell]
+ :end-before: [END bigtable_api_row_delete_cell]
+ :dedent: 4
+
+ :type column_family_id: str
+ :param column_family_id: The column family that contains the column
+ or columns with cells being deleted. Must be
+ of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+ :type column: bytes
+ :param column: The column within the column family that will have a
+ cell deleted.
+
+ :type time_range: :class:`TimestampRange`
+ :param time_range: (Optional) The range of time within which cells
+ should be deleted.
+ """
+ self._delete_cells(
+ column_family_id, [column], time_range=time_range, state=None
+ )
+
+ def delete_cells(self, column_family_id, columns, time_range=None):
+ """Deletes cells in this row.
+
+ .. note::
+
+ This method adds a mutation to the accumulated mutations on this
+ row, but does not make an API request. To actually
+ send an API request (with the mutations) to the Google Cloud
+ Bigtable API, call :meth:`commit`.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_delete_cells]
+ :end-before: [END bigtable_api_row_delete_cells]
+ :dedent: 4
+
+ :type column_family_id: str
+ :param column_family_id: The column family that contains the column
+ or columns with cells being deleted. Must be
+ of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+ :type columns: :class:`list` of :class:`str` /
+ :func:`unicode `, or :class:`object`
+ :param columns: The columns within the column family that will have
+ cells deleted. If :attr:`ALL_COLUMNS` is used then
+ the entire column family will be deleted from the row.
+
+ :type time_range: :class:`TimestampRange`
+ :param time_range: (Optional) The range of time within which cells
+ should be deleted.
+ """
+ self._delete_cells(column_family_id, columns, time_range=time_range, state=None)
+
+ def commit(self):
+ """Makes a ``MutateRow`` API request.
+
+ If no mutations have been created in the row, no request is made.
+
+ Mutations are applied atomically and in order, meaning that earlier
+ mutations can be masked / negated by later ones. Cells already present
+ in the row are left unchanged unless explicitly changed by a mutation.
+
+ After committing the accumulated mutations, resets the local
+ mutations to an empty list.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_commit]
+ :end-before: [END bigtable_api_row_commit]
+ :dedent: 4
+
+ :rtype: :class:`~google.rpc.status_pb2.Status`
+ :returns: A response status (`google.rpc.status_pb2.Status`)
+ representing success or failure of the row committed.
+ :raises: :exc:`~.table.TooManyMutationsError` if the number of
+ mutations is greater than 100,000.
+ """
+ response = self._table.mutate_rows([self])
+
+ self.clear()
+
+ return response[0]
+
+ def clear(self):
+ """Removes all currently accumulated mutations on the current row.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_clear]
+ :end-before: [END bigtable_api_row_clear]
+ :dedent: 4
+ """
+ del self._pb_mutations[:]
+
+
+class ConditionalRow(_SetDeleteRow):
+ """Google Cloud Bigtable Row for sending mutations conditionally.
+
+ Each mutation has an associated state: :data:`True` or :data:`False`.
+ When :meth:`commit`-ed, the mutations for the :data:`True`
+ state will be applied if the filter matches any cells in
+ the row, otherwise the :data:`False` state will be applied.
+
+ A :class:`ConditionalRow` accumulates mutations in the same way a
+ :class:`DirectRow` does:
+
+ * :meth:`set_cell`
+ * :meth:`delete`
+ * :meth:`delete_cell`
+ * :meth:`delete_cells`
+
+ with the only difference being the extra ``state`` parameter::
+
+ >>> row_cond = table.row(b'row-key2', filter_=row_filter)
+ >>> row_cond.set_cell(u'fam', b'col', b'cell-val', state=True)
+ >>> row_cond.delete_cell(u'fam', b'col', state=False)
+
+ .. note::
+
+ As with :class:`DirectRow`, to actually send these mutations to the
+ Google Cloud Bigtable API, you must call :meth:`commit`.
+
+ :type row_key: bytes
+ :param row_key: The key for the current row.
+
+ :type table: :class:`Table `
+ :param table: The table that owns the row.
+
+ :type filter_: :class:`.RowFilter`
+ :param filter_: Filter to be used for conditional mutations.
+ """
+
+ def __init__(self, row_key, table, filter_):
+ super(ConditionalRow, self).__init__(row_key, table)
+ self._filter = filter_
+ self._true_pb_mutations = []
+ self._false_pb_mutations = []
+
+ def _get_mutations(self, state=None):
+ """Gets the list of mutations for a given state.
+
+ Over-ridden so that the state can be used in:
+
+ * :meth:`set_cell`
+ * :meth:`delete`
+ * :meth:`delete_cell`
+ * :meth:`delete_cells`
+
+ :type state: bool
+ :param state: The state that the mutation should be
+ applied in.
+
+ :rtype: list
+ :returns: The list to add new mutations to (for the current state).
+ """
+ if state:
+ return self._true_pb_mutations
+ else:
+ return self._false_pb_mutations
+
+ def commit(self):
+ """Makes a ``CheckAndMutateRow`` API request.
+
+ If no mutations have been created in the row, no request is made.
+
+ The mutations will be applied conditionally, based on whether the
+ filter matches any cells in the :class:`ConditionalRow` or not. (Each
+ method which adds a mutation has a ``state`` parameter for this
+ purpose.)
+
+ Mutations are applied atomically and in order, meaning that earlier
+ mutations can be masked / negated by later ones. Cells already present
+ in the row are left unchanged unless explicitly changed by a mutation.
+
+ After committing the accumulated mutations, resets the local
+ mutations.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_commit]
+ :end-before: [END bigtable_api_row_commit]
+ :dedent: 4
+
+ :rtype: bool
+ :returns: Flag indicating if the filter was matched (which also
+ indicates which set of mutations were applied by the server).
+ :raises: :class:`ValueError ` if the number of
+ mutations exceeds the :data:`MAX_MUTATIONS`.
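+
+ A short sketch of using the returned flag; ``row_filter`` and the
+ family/column names are placeholders:
+
+ .. code:: python
+
+ row_cond = table.row(b'row-key-2', filter_=row_filter)
+ row_cond.set_cell(u'fam', b'col', b'updated', state=True)
+ row_cond.set_cell(u'fam', b'col', b'created', state=False)
+ matched = row_cond.commit()  # True if the filter matched any cells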
+ """
+ true_mutations = self._get_mutations(state=True)
+ false_mutations = self._get_mutations(state=False)
+ num_true_mutations = len(true_mutations)
+ num_false_mutations = len(false_mutations)
+ if num_true_mutations == 0 and num_false_mutations == 0:
+ return
+ if num_true_mutations > MAX_MUTATIONS or num_false_mutations > MAX_MUTATIONS:
+ raise ValueError(
+ "Exceed the maximum allowable mutations (%d). Had %s true "
+ "mutations and %d false mutations."
+ % (MAX_MUTATIONS, num_true_mutations, num_false_mutations)
+ )
+
+ data_client = self._table._instance._client.table_data_client
+ resp = data_client.check_and_mutate_row(
+ table_name=self._table.name,
+ row_key=self._row_key,
+ predicate_filter=self._filter.to_pb(),
+ app_profile_id=self._table._app_profile_id,
+ true_mutations=true_mutations,
+ false_mutations=false_mutations,
+ )
+ self.clear()
+ return resp.predicate_matched
+
+ # pylint: disable=arguments-differ
+ def set_cell(self, column_family_id, column, value, timestamp=None, state=True):
+ """Sets a value in this row.
+
+ The cell is determined by the ``row_key`` of this
+ :class:`ConditionalRow` and the ``column``. The ``column`` must be in
+ an existing :class:`.ColumnFamily` (as determined by
+ ``column_family_id``).
+
+ .. note::
+
+ This method adds a mutation to the accumulated mutations on this
+ row, but does not make an API request. To actually
+ send an API request (with the mutations) to the Google Cloud
+ Bigtable API, call :meth:`commit`.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_set_cell]
+ :end-before: [END bigtable_api_row_set_cell]
+ :dedent: 4
+
+ :type column_family_id: str
+ :param column_family_id: The column family that contains the column.
+ Must be of the form
+ ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+ :type column: bytes
+ :param column: The column within the column family where the cell
+ is located.
+
+ :type value: bytes or :class:`int`
+ :param value: The value to set in the cell. If an integer is used,
+ will be interpreted as a 64-bit big-endian signed
+ integer (8 bytes).
+
+ :type timestamp: :class:`datetime.datetime`
+ :param timestamp: (Optional) The timestamp of the operation.
+
+ :type state: bool
+ :param state: (Optional) The state that the mutation should be
+ applied in. Defaults to :data:`True`.
+ """
+ self._set_cell(
+ column_family_id, column, value, timestamp=timestamp, state=state
+ )
+
+ def delete(self, state=True):
+ """Deletes this row from the table.
+
+ .. note::
+
+ This method adds a mutation to the accumulated mutations on this
+ row, but does not make an API request. To actually
+ send an API request (with the mutations) to the Google Cloud
+ Bigtable API, call :meth:`commit`.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_delete]
+ :end-before: [END bigtable_api_row_delete]
+ :dedent: 4
+
+ :type state: bool
+ :param state: (Optional) The state that the mutation should be
+ applied in. Defaults to :data:`True`.
+ """
+ self._delete(state=state)
+
+ def delete_cell(self, column_family_id, column, time_range=None, state=True):
+ """Deletes cell in this row.
+
+ .. note::
+
+ This method adds a mutation to the accumulated mutations on this
+ row, but does not make an API request. To actually
+ send an API request (with the mutations) to the Google Cloud
+ Bigtable API, call :meth:`commit`.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_delete_cell]
+ :end-before: [END bigtable_api_row_delete_cell]
+ :dedent: 4
+
+ :type column_family_id: str
+ :param column_family_id: The column family that contains the column
+ or columns with cells being deleted. Must be
+ of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+ :type column: bytes
+ :param column: The column within the column family that will have a
+ cell deleted.
+
+ :type time_range: :class:`TimestampRange`
+ :param time_range: (Optional) The range of time within which cells
+ should be deleted.
+
+ :type state: bool
+ :param state: (Optional) The state that the mutation should be
+ applied in. Defaults to :data:`True`.
+ """
+ self._delete_cells(
+ column_family_id, [column], time_range=time_range, state=state
+ )
+
+ def delete_cells(self, column_family_id, columns, time_range=None, state=True):
+ """Deletes cells in this row.
+
+ .. note::
+
+ This method adds a mutation to the accumulated mutations on this
+ row, but does not make an API request. To actually
+ send an API request (with the mutations) to the Google Cloud
+ Bigtable API, call :meth:`commit`.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_delete_cells]
+ :end-before: [END bigtable_api_row_delete_cells]
+ :dedent: 4
+
+ :type column_family_id: str
+ :param column_family_id: The column family that contains the column
+ or columns with cells being deleted. Must be
+ of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+ :type columns: :class:`list` of :class:`str` /
+ :func:`unicode `, or :class:`object`
+ :param columns: The columns within the column family that will have
+ cells deleted. If :attr:`ALL_COLUMNS` is used then the
+ entire column family will be deleted from the row.
+
+ :type time_range: :class:`TimestampRange`
+ :param time_range: (Optional) The range of time within which cells
+ should be deleted.
+
+ :type state: bool
+ :param state: (Optional) The state that the mutation should be
+ applied in. Defaults to :data:`True`.
+ """
+ self._delete_cells(
+ column_family_id, columns, time_range=time_range, state=state
+ )
+
+ # pylint: enable=arguments-differ
+
+ def clear(self):
+ """Removes all currently accumulated mutations on the current row.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_clear]
+ :end-before: [END bigtable_api_row_clear]
+ :dedent: 4
+ """
+ del self._true_pb_mutations[:]
+ del self._false_pb_mutations[:]
+
+
+class AppendRow(Row):
+ """Google Cloud Bigtable Row for sending append mutations.
+
+ These mutations are intended to augment the value of an existing cell
+ and use the methods:
+
+ * :meth:`append_cell_value`
+ * :meth:`increment_cell_value`
+
+ The first works by appending bytes and the second by incrementing an
+ integer (stored in the cell as 8 bytes). In either case, if the
+ cell is empty, the default empty value is assumed (an empty string
+ for bytes or 0 for an integer).
+
+ :type row_key: bytes
+ :param row_key: The key for the current row.
+
+ :type table: :class:`Table `
+ :param table: The table that owns the row.
+ """
+
+ def __init__(self, row_key, table):
+ super(AppendRow, self).__init__(row_key, table)
+ self._rule_pb_list = []
+
+ def clear(self):
+ """Removes all currently accumulated modifications on current row.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_clear]
+ :end-before: [END bigtable_api_row_clear]
+ :dedent: 4
+ """
+ del self._rule_pb_list[:]
+
+ def append_cell_value(self, column_family_id, column, value):
+ """Appends a value to an existing cell.
+
+ .. note::
+
+ This method adds a read-modify rule protobuf to the accumulated
+ read-modify rules on this row, but does not make an API
+ request. To actually send an API request (with the rules) to the
+ Google Cloud Bigtable API, call :meth:`commit`.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_append_cell_value]
+ :end-before: [END bigtable_api_row_append_cell_value]
+ :dedent: 4
+
+ :type column_family_id: str
+ :param column_family_id: The column family that contains the column.
+ Must be of the form
+ ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+ :type column: bytes
+ :param column: The column within the column family where the cell
+ is located.
+
+ :type value: bytes
+ :param value: The value to append to the existing value in the cell. If
+ the targeted cell is unset, it will be treated as
+ containing the empty string.
+ """
+ column = _to_bytes(column)
+ value = _to_bytes(value)
+ rule_pb = data_v2_pb2.ReadModifyWriteRule(
+ family_name=column_family_id, column_qualifier=column, append_value=value
+ )
+ self._rule_pb_list.append(rule_pb)
+
+ def increment_cell_value(self, column_family_id, column, int_value):
+ """Increments a value in an existing cell.
+
+ Assumes the value in the cell is stored as a 64 bit integer
+ serialized to bytes.
+
+ .. note::
+
+ This method adds a read-modify rule protobuf to the accumulated
+ read-modify rules on this row, but does not make an API
+ request. To actually send an API request (with the rules) to the
+ Google Cloud Bigtable API, call :meth:`commit`.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_increment_cell_value]
+ :end-before: [END bigtable_api_row_increment_cell_value]
+ :dedent: 4
+
+ :type column_family_id: str
+ :param column_family_id: The column family that contains the column.
+ Must be of the form
+ ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+ :type column: bytes
+ :param column: The column within the column family where the cell
+ is located.
+
+ :type int_value: int
+ :param int_value: The value to increment the existing value in the cell
+ by. If the targeted cell is unset, it will be treated
+ as containing a zero. Otherwise, the targeted cell
+ must contain an 8-byte value (interpreted as a 64-bit
+ big-endian signed integer), or the entire request
+ will fail.
+ """
+ column = _to_bytes(column)
+ rule_pb = data_v2_pb2.ReadModifyWriteRule(
+ family_name=column_family_id,
+ column_qualifier=column,
+ increment_amount=int_value,
+ )
+ self._rule_pb_list.append(rule_pb)
+
+ def commit(self):
+ """Makes a ``ReadModifyWriteRow`` API request.
+
+ This commits modifications made by :meth:`append_cell_value` and
+ :meth:`increment_cell_value`. If no modifications were made, makes
+ no API request and just returns ``{}``.
+
+ Modifies a row atomically, reading the latest existing
+ timestamp / value from the specified columns and writing a new value by
+ appending / incrementing. The new cell created uses either the current
+ server time or the highest timestamp of a cell in that column (if it
+ exceeds the server time).
+
+ After committing the accumulated mutations, resets the local mutations.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_commit]
+ :end-before: [END bigtable_api_row_commit]
+ :dedent: 4
+
+ :rtype: dict
+ :returns: The new contents of all modified cells. Returned as a
+ dictionary of column families, each of which holds a
+ dictionary of columns. Each column contains a list of cells
+ modified. Each cell is represented with a two-tuple with the
+ value (in bytes) and the timestamp for the cell.
+ :raises: :class:`ValueError ` if the number of
+ mutations exceeds the :data:`MAX_MUTATIONS`.
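+
+ A minimal sketch of reading the returned dictionary after an
+ increment; the family and column names are placeholders:
+
+ .. code:: python
+
+ append_row = table.row(b'row-key-3', append=True)
+ append_row.increment_cell_value('stats', b'views', 1)
+ output = append_row.commit()
+ new_value, timestamp = output['stats'][b'views'][0]  # 8-byte packed counter and its timestamp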
+ """
+ num_mutations = len(self._rule_pb_list)
+ if num_mutations == 0:
+ return {}
+ if num_mutations > MAX_MUTATIONS:
+ raise ValueError(
+ "%d total append mutations exceed the maximum "
+ "allowable %d." % (num_mutations, MAX_MUTATIONS)
+ )
+
+ data_client = self._table._instance._client.table_data_client
+ row_response = data_client.read_modify_write_row(
+ table_name=self._table.name,
+ row_key=self._row_key,
+ rules=self._rule_pb_list,
+ app_profile_id=self._table._app_profile_id,
+ )
+
+ # Reset modifications after commit-ing request.
+ self.clear()
+
+ # NOTE: We expect row_response.key == self._row_key but don't check.
+ return _parse_rmw_row_response(row_response)
+
+
+def _parse_rmw_row_response(row_response):
+ """Parses the response to a ``ReadModifyWriteRow`` request.
+
+ :type row_response: :class:`.data_v2_pb2.Row`
+ :param row_response: The response row (with only modified cells) from a
+ ``ReadModifyWriteRow`` request.
+
+ :rtype: dict
+ :returns: The new contents of all modified cells. Returned as a
+ dictionary of column families, each of which holds a
+ dictionary of columns. Each column contains a list of cells
+ modified. Each cell is represented with a two-tuple with the
+ value (in bytes) and the timestamp for the cell. For example:
+
+ .. code:: python
+
+ {
+ u'col-fam-id': {
+ b'col-name1': [
+ (b'cell-val', datetime.datetime(...)),
+ (b'cell-val-newer', datetime.datetime(...)),
+ ],
+ b'col-name2': [
+ (b'altcol-cell-val', datetime.datetime(...)),
+ ],
+ },
+ u'col-fam-id2': {
+ b'col-name3-but-other-fam': [
+ (b'foo', datetime.datetime(...)),
+ ],
+ },
+ }
+ """
+ result = {}
+ for column_family in row_response.row.families:
+ column_family_id, curr_family = _parse_family_pb(column_family)
+ result[column_family_id] = curr_family
+ return result
+
+
+def _parse_family_pb(family_pb):
+ """Parses a Family protobuf into a dictionary.
+
+ :type family_pb: :class:`._generated.data_pb2.Family`
+ :param family_pb: A protobuf
+
+ :rtype: tuple
+ :returns: A string and dictionary. The string is the name of the
+ column family and the dictionary has column names (within the
+ family) as keys and cell lists as values. Each cell is
+ represented with a two-tuple with the value (in bytes) and the
+ timestamp for the cell. For example:
+
+ .. code:: python
+
+ {
+ b'col-name1': [
+ (b'cell-val', datetime.datetime(...)),
+ (b'cell-val-newer', datetime.datetime(...)),
+ ],
+ b'col-name2': [
+ (b'altcol-cell-val', datetime.datetime(...)),
+ ],
+ }
+ """
+ result = {}
+ for column in family_pb.columns:
+ result[column.qualifier] = cells = []
+ for cell in column.cells:
+ val_pair = (cell.value, _datetime_from_microseconds(cell.timestamp_micros))
+ cells.append(val_pair)
+
+ return family_pb.name, result
+
+
+class PartialRowData(object):
+ """Representation of partial row in a Google Cloud Bigtable Table.
+
+ These are expected to be updated directly from a
+ :class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse`
+
+ :type row_key: bytes
+ :param row_key: The key for the row holding the (partial) data.
+ """
+
+ def __init__(self, row_key):
+ self._row_key = row_key
+ self._cells = {}
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other._row_key == self._row_key and other._cells == self._cells
+
+ def __ne__(self, other):
+ return not self == other
+
+ def to_dict(self):
+ """Convert the cells to a dictionary.
+
+ This is intended to be used with HappyBase, so the column family and
+ column qualifiers are combined (with ``:``).
+
+ :rtype: dict
+ :returns: Dictionary containing all the data in the cells of this row.
+ """
+ result = {}
+ for column_family_id, columns in self._cells.items():
+ for column_qual, cells in columns.items():
+ key = _to_bytes(column_family_id) + b":" + _to_bytes(column_qual)
+ result[key] = cells
+ return result
+
+ @property
+ def cells(self):
+ """Property returning all the cells accumulated on this partial row.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_data_cells]
+ :end-before: [END bigtable_api_row_data_cells]
+ :dedent: 4
+
+ :rtype: dict
+ :returns: Dictionary of the :class:`Cell` objects accumulated. This
+ dictionary has two-levels of keys (first for column families
+ and second for column names/qualifiers within a family). For
+ a given column, a list of :class:`Cell` objects is stored.
+ """
+ return self._cells
+
+ @property
+ def row_key(self):
+ """Getter for the current (partial) row's key.
+
+ :rtype: bytes
+ :returns: The current (partial) row's key.
+ """
+ return self._row_key
+
+ def find_cells(self, column_family_id, column):
+ """Get a time series of cells stored on this instance.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_find_cells]
+ :end-before: [END bigtable_api_row_find_cells]
+ :dedent: 4
+
+ Args:
+ column_family_id (str): The ID of the column family. Must be of the
+ form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+ column (bytes): The column within the column family where the cells
+ are located.
+
+ Returns:
+ List[~google.cloud.bigtable.row_data.Cell]: The cells stored in the
+ specified column.
+
+ Raises:
+ KeyError: If ``column_family_id`` is not among the cells stored
+ in this row.
+ KeyError: If ``column`` is not among the cells stored in this row
+ for the given ``column_family_id``.
+ """
+ try:
+ column_family = self._cells[column_family_id]
+ except KeyError:
+ raise KeyError(_MISSING_COLUMN_FAMILY.format(column_family_id))
+
+ try:
+ cells = column_family[column]
+ except KeyError:
+ raise KeyError(_MISSING_COLUMN.format(column, column_family_id))
+
+ return cells
+
+ def cell_value(self, column_family_id, column, index=0):
+ """Get a single cell value stored on this instance.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_cell_value]
+ :end-before: [END bigtable_api_row_cell_value]
+ :dedent: 4
+
+ Args:
+ column_family_id (str): The ID of the column family. Must be of the
+ form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+ column (bytes): The column within the column family where the cell
+ is located.
+ index (Optional[int]): The offset within the series of values. If
+ not specified, will return the first cell.
+
+ Returns:
+ ~google.cloud.bigtable.row_data.Cell value: The cell value stored
+ in the specified column and specified index.
+
+ Raises:
+ KeyError: If ``column_family_id`` is not among the cells stored
+ in this row.
+ KeyError: If ``column`` is not among the cells stored in this row
+ for the given ``column_family_id``.
+ IndexError: If ``index`` cannot be found within the cells stored
+ in this row for the given ``column_family_id``, ``column``
+ pair.
+ """
+ cells = self.find_cells(column_family_id, column)
+
+ try:
+ cell = cells[index]
+ except (TypeError, IndexError):
+ num_cells = len(cells)
+ msg = _MISSING_INDEX.format(index, column, column_family_id, num_cells)
+ raise IndexError(msg)
+
+ return cell.value
+
+ def cell_values(self, column_family_id, column, max_count=None):
+ """Get a time series of cells stored on this instance.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_cell_values]
+ :end-before: [END bigtable_api_row_cell_values]
+ :dedent: 4
+
+ Args:
+ column_family_id (str): The ID of the column family. Must be of the
+ form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+ column (bytes): The column within the column family where the cells
+ are located.
+ max_count (int): The maximum number of cells to use.
+
+ Returns:
+ A generator which provides: cell.value, cell.timestamp_micros
+ for each cell in the list of cells
+
+ Raises:
+ KeyError: If ``column_family_id`` is not among the cells stored
+ in this row.
+ KeyError: If ``column`` is not among the cells stored in this row
+ for the given ``column_family_id``.
+ """
+ cells = self.find_cells(column_family_id, column)
+ if max_count is None:
+ max_count = len(cells)
+
+ for index, cell in enumerate(cells):
+ if index == max_count:
+ break
+
+ yield cell.value, cell.timestamp_micros
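+
+ # Usage sketch (hypothetical data): iterate over up to the two most
+ # recent values stored in a column:
+ #
+ #     for value, timestamp_micros in row.cell_values("cf1", b"col", max_count=2):
+ #         ...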
+
+
+class Cell(object):
+ """Representation of a Google Cloud Bigtable Cell.
+
+ :type value: bytes
+ :param value: The value stored in the cell.
+
+ :type timestamp_micros: int
+ :param timestamp_micros: The timestamp_micros when the cell was stored.
+
+ :type labels: list
+ :param labels: (Optional) List of strings. Labels applied to the cell.
+ """
+
+ def __init__(self, value, timestamp_micros, labels=None):
+ self.value = value
+ self.timestamp_micros = timestamp_micros
+ self.labels = list(labels) if labels is not None else []
+
+ @classmethod
+ def from_pb(cls, cell_pb):
+ """Create a new cell from a Cell protobuf.
+
+ :type cell_pb: :class:`._generated.data_pb2.Cell`
+ :param cell_pb: The protobuf to convert.
+
+ :rtype: :class:`Cell`
+ :returns: The cell corresponding to the protobuf.
+ """
+ if cell_pb.labels:
+ return cls(cell_pb.value, cell_pb.timestamp_micros, labels=cell_pb.labels)
+ else:
+ return cls(cell_pb.value, cell_pb.timestamp_micros)
+
+ @property
+ def timestamp(self):
+ return _datetime_from_microseconds(self.timestamp_micros)
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return (
+ other.value == self.value
+ and other.timestamp_micros == self.timestamp_micros
+ and other.labels == self.labels
+ )
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __repr__(self):
+ return "<{name} value={value!r} timestamp={timestamp}>".format(
+ name=self.__class__.__name__, value=self.value, timestamp=self.timestamp
+ )
+
+
+class InvalidChunk(RuntimeError):
+ """Exception raised to invalid chunk data from back-end."""
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py
new file mode 100644
index 000000000000..e11379108c4f
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py
@@ -0,0 +1,380 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Container for Google Cloud Bigtable Cells and Streaming Row Contents."""
+
+
+import copy
+
+import grpc # type: ignore
+import warnings
+from google.api_core import exceptions
+from google.api_core import retry
+from google.cloud._helpers import _to_bytes # type: ignore
+
+from google.cloud.bigtable.row_merger import _RowMerger, _State
+from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2
+from google.cloud.bigtable_v2.types import data as data_v2_pb2
+from google.cloud.bigtable.row import Cell, InvalidChunk, PartialRowData
+
+
+# Some classes need to be re-exported here to keep backwards
+# compatibility. Those classes were moved to row_merger, but we don't want to
+# break end users' imports. This hack ensures they don't get marked as unused.
+_ = (Cell, InvalidChunk, PartialRowData)
+
+
+class PartialCellData(object): # pragma: NO COVER
+ """This class is no longer used and will be removed in the future"""
+
+ def __init__(
+ self, row_key, family_name, qualifier, timestamp_micros, labels=(), value=b""
+ ):
+ self.row_key = row_key
+ self.family_name = family_name
+ self.qualifier = qualifier
+ self.timestamp_micros = timestamp_micros
+ self.labels = labels
+ self.value = value
+
+ def append_value(self, value):
+ self.value += value
+
+
+class InvalidReadRowsResponse(RuntimeError):
+ """Exception raised to invalid response data from back-end."""
+
+
+class InvalidRetryRequest(RuntimeError):
+ """Exception raised when retry request is invalid."""
+
+
+RETRYABLE_INTERNAL_ERROR_MESSAGES = (
+ "rst_stream",
+ "rst stream",
+ "received unexpected eos on data frame from server",
+)
+"""Internal error messages that can be retried during read row and mutation."""
+
+
+def _retriable_internal_server_error(exc):
+ """
+ Return True if the internal server error is retriable.
+ """
+ return isinstance(exc, exceptions.InternalServerError) and any(
+ retryable_message in exc.message.lower()
+ for retryable_message in RETRYABLE_INTERNAL_ERROR_MESSAGES
+ )
+
+
+def _retry_read_rows_exception(exc):
+ """Return True if the exception is retriable for read row requests."""
+ if isinstance(exc, grpc.RpcError):
+ exc = exceptions.from_grpc_error(exc)
+
+ return _retriable_internal_server_error(exc) or isinstance(
+ exc, (exceptions.ServiceUnavailable, exceptions.DeadlineExceeded)
+ )
+
+
+DEFAULT_RETRY_READ_ROWS = retry.Retry(
+ predicate=_retry_read_rows_exception,
+ initial=1.0,
+ maximum=15.0,
+ multiplier=2.0,
+ deadline=60.0, # 60 seconds
+)
+"""The default retry strategy to be used on retry-able errors.
+
+Used by
+:meth:`~google.cloud.bigtable.row_data.PartialRowsData._read_next_response`.
+"""
+
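+# Illustrative sketch (not part of the public surface shown here): the default
+# policy can be copied with a different overall deadline before being passed to
+# a read call, e.g.:
+#
+#     custom_retry = DEFAULT_RETRY_READ_ROWS.with_deadline(120.0)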
+
+class PartialRowsData(object):
+ """Convenience wrapper for consuming a ``ReadRows`` streaming response.
+
+ :type read_method: :class:`client._table_data_client.read_rows`
+ :param read_method: ``ReadRows`` method.
+
+ :type request: :class:`data_messages_v2_pb2.ReadRowsRequest`
+ :param request: The ``ReadRowsRequest`` message used to create a
+ ReadRowsResponse iterator. If the iterator fails, a new
+ iterator is created, allowing the scan to continue from
+ the point just beyond the last successfully read row,
+ identified by self.last_scanned_row_key. The retry happens
+ inside of the Retry class, using a predicate for the
+ expected exceptions during iteration.
+
+ :type retry: :class:`~google.api_core.retry.Retry`
+ :param retry: (Optional) Retry delay and deadline arguments. To override,
+ the default value :attr:`DEFAULT_RETRY_READ_ROWS` can be
+ used and modified with the
+ :meth:`~google.api_core.retry.Retry.with_delay` method
+ or the
+ :meth:`~google.api_core.retry.Retry.with_deadline` method.
+ """
+
+ NEW_ROW = "New row" # No cells yet complete for row
+ ROW_IN_PROGRESS = "Row in progress" # Some cells complete for row
+ CELL_IN_PROGRESS = "Cell in progress" # Incomplete cell for row
+
+ STATE_NEW_ROW = 1
+ STATE_ROW_IN_PROGRESS = 2
+ STATE_CELL_IN_PROGRESS = 3
+
+ read_states = {
+ STATE_NEW_ROW: NEW_ROW,
+ STATE_ROW_IN_PROGRESS: ROW_IN_PROGRESS,
+ STATE_CELL_IN_PROGRESS: CELL_IN_PROGRESS,
+ }
+
+ def __init__(self, read_method, request, retry=DEFAULT_RETRY_READ_ROWS):
+ # Counter for rows returned to the user
+ self._counter = 0
+ self._row_merger = _RowMerger()
+
+ # May be cached from previous response
+ self.last_scanned_row_key = None
+ self.read_method = read_method
+ self.request = request
+ self.retry = retry
+
+ # The `timeout` parameter must be somewhat greater than the value
+ # contained in `self.retry`, in order to avoid a race-like condition
+ # and to allow the first deadline error to be registered before the
+ # retry is invoked. Otherwise there is a risk of entering an infinite
+ # loop that resets the timeout counter just before it triggers. The
+ # one-second increment used here is customary and should not be made
+ # much smaller.
+ self.response_iterator = read_method(
+ request, timeout=self.retry._deadline + 1, retry=self.retry
+ )
+
+ self.rows = {}
+
+ # Flag to stop iteration, for any reason not related to self.retry()
+ self._cancelled = False
+
+ @property
+ def state(self): # pragma: NO COVER
+ """
+ DEPRECATED: this property is deprecated and will be removed in the
+ future.
+ """
+ warnings.warn(
+ "`PartialRowsData#state()` is deprecated and will be removed in the future",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ # Best effort: try to map internal RowMerger states to old strings for
+ # backwards compatibility
+ internal_state = self._row_merger.state
+ if internal_state == _State.ROW_START:
+ return self.NEW_ROW
+ # note: _State.CELL_START, _State.CELL_COMPLETE are transient states
+ # and will not be visible in between chunks
+ elif internal_state == _State.CELL_IN_PROGRESS:
+ return self.CELL_IN_PROGRESS
+ elif internal_state == _State.ROW_COMPLETE:
+ return self.NEW_ROW
+ else:
+ raise RuntimeError("unexpected internal state: " + self._)
+
+ def cancel(self):
+ """Cancels the iterator, closing the stream."""
+ self._cancelled = True
+ self.response_iterator.cancel()
+
+ def consume_all(self, max_loops=None):
+ """Consume the streamed responses until there are no more.
+
+ .. warning::
+ This method will be removed in future releases. Please use this
+ class as a generator instead.
+
+ :type max_loops: int
+ :param max_loops: (Optional) Maximum number of times to try to consume
+ an additional ``ReadRowsResponse``. You can use this
+ to avoid long wait times.
+ """
+ for row in self:
+ self.rows[row.row_key] = row
+
+ def _create_retry_request(self):
+ """Helper for :meth:`__iter__`."""
+ req_manager = _ReadRowsRequestManager(
+ self.request, self.last_scanned_row_key, self._counter
+ )
+ return req_manager.build_updated_request()
+
+ def _on_error(self, exc):
+ """Helper for :meth:`__iter__`."""
+ # restart the read scan from AFTER the last successfully read row
+ retry_request = self.request
+ if self.last_scanned_row_key:
+ retry_request = self._create_retry_request()
+
+ self._row_merger = _RowMerger(self._row_merger.last_seen_row_key)
+ self.response_iterator = self.read_method(retry_request)
+
+ def _read_next(self):
+ """Helper for :meth:`__iter__`."""
+ return next(self.response_iterator)
+
+ def _read_next_response(self):
+ """Helper for :meth:`__iter__`."""
+ resp_protoplus = self.retry(self._read_next, on_error=self._on_error)()
+ # unwrap the underlying protobuf, there is a significant amount of
+ # overhead that protoplus imposes for very little gain. The protos
+ # are not user visible, so we just use the raw protos for merging.
+ return data_messages_v2_pb2.ReadRowsResponse.pb(resp_protoplus)
+
+ def __iter__(self):
+ """Consume the ``ReadRowsResponse`` s from the stream.
+ Read the rows and yield each to the reader
+
+ Parse the response and its chunks into a new/existing row in
+ :attr:`_rows`. Rows are returned in order by row key.
+ """
+ while not self._cancelled:
+ try:
+ response = self._read_next_response()
+ except StopIteration:
+ self._row_merger.finalize()
+ break
+ except InvalidRetryRequest:
+ self._cancelled = True
+ break
+
+ for row in self._row_merger.process_chunks(response):
+ self.last_scanned_row_key = self._row_merger.last_seen_row_key
+ self._counter += 1
+
+ yield row
+
+ if self._cancelled:
+ break
+ # The last response might not have generated any rows, but it
+ # could've updated last_scanned_row_key
+ self.last_scanned_row_key = self._row_merger.last_seen_row_key
+
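+# Usage sketch (hypothetical names), assuming ``table`` is a
+# :class:`~google.cloud.bigtable.table.Table`: the object returned by
+# ``table.read_rows(...)`` can be consumed directly as a generator:
+#
+#     rows_data = table.read_rows(start_key=b"a", end_key=b"z")
+#     for row in rows_data:
+#         process(row.row_key, row.to_dict())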
+
+class _ReadRowsRequestManager(object):
+ """Update the ReadRowsRequest message in case of failures by
+ filtering the already read keys.
+
+ :type message: :class:`data_messages_v2_pb2.ReadRowsRequest`
+ :param message: Original ReadRowsRequest containing all of the parameters
+ of the API call.
+
+ :type last_scanned_key: bytes
+ :param last_scanned_key: The last successfully scanned key.
+
+ :type rows_read_so_far: int
+ :param rows_read_so_far: Total number of rows successfully read so far;
+ used to update ``rows_limit``.
+
+ """
+
+ def __init__(self, message, last_scanned_key, rows_read_so_far):
+ self.message = message
+ self.last_scanned_key = last_scanned_key
+ self.rows_read_so_far = rows_read_so_far
+
+ def build_updated_request(self):
+ """Updates the given message request as per last scanned key"""
+
+ resume_request = data_messages_v2_pb2.ReadRowsRequest()
+ data_messages_v2_pb2.ReadRowsRequest.copy_from(resume_request, self.message)
+
+ if self.message.rows_limit != 0:
+ row_limit_remaining = self.message.rows_limit - self.rows_read_so_far
+ if row_limit_remaining > 0:
+ resume_request.rows_limit = row_limit_remaining
+ else:
+ raise InvalidRetryRequest
+
+ # if neither RowSet.row_keys nor RowSet.row_ranges currently exist,
+ # add row_range that starts with last_scanned_key as start_key_open
+ # to request only rows that have not been returned yet
+ if "rows" not in self.message:
+ row_range = data_v2_pb2.RowRange(start_key_open=self.last_scanned_key)
+ resume_request.rows = data_v2_pb2.RowSet(row_ranges=[row_range])
+ else:
+ row_keys = self._filter_rows_keys()
+ row_ranges = self._filter_row_ranges()
+
+ if len(row_keys) == 0 and len(row_ranges) == 0:
+ # Avoid sending empty row_keys and row_ranges
+ # if that was not the intention
+ raise InvalidRetryRequest
+
+ resume_request.rows = data_v2_pb2.RowSet(
+ row_keys=row_keys, row_ranges=row_ranges
+ )
+ return resume_request
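+
+ # Resume sketch (hypothetical values): with an original request using
+ # rows_limit=10, rows_read_so_far=4 and last_scanned_key=b"row-004" (and
+ # no explicit row set), build_updated_request() produces roughly:
+ #
+ #     rows_limit: 6
+ #     rows { row_ranges { start_key_open: "row-004" } }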
+
+ def _filter_rows_keys(self):
+ """Helper for :meth:`build_updated_request`"""
+ return [
+ row_key
+ for row_key in self.message.rows.row_keys
+ if row_key > self.last_scanned_key
+ ]
+
+ def _filter_row_ranges(self):
+ """Helper for :meth:`build_updated_request`"""
+ new_row_ranges = []
+
+ for row_range in self.message.rows.row_ranges:
+ # if current end_key (open or closed) is set, return its value,
+ # if not, set to empty string ('').
+ # NOTE: Empty string in end_key means "end of table"
+ end_key = self._end_key_set(row_range)
+ # if end_key is already read, skip to the next row_range
+ if end_key and self._key_already_read(end_key):
+ continue
+
+ # if current start_key (open or closed) is set, return its value,
+ # if not, then set to empty string ('')
+ # NOTE: Empty string in start_key means "beginning of table"
+ start_key = self._start_key_set(row_range)
+
+ # if start_key was already read or doesn't exist,
+ # create a row_range with last_scanned_key as start_key_open
+ # to be passed to retry request
+ retry_row_range = row_range
+ if self._key_already_read(start_key):
+ retry_row_range = copy.deepcopy(row_range)
+ retry_row_range.start_key_closed = _to_bytes("")
+ retry_row_range.start_key_open = self.last_scanned_key
+
+ new_row_ranges.append(retry_row_range)
+
+ return new_row_ranges
+
+ def _key_already_read(self, key):
+ """Helper for :meth:`_filter_row_ranges`"""
+ return key <= self.last_scanned_key
+
+ @staticmethod
+ def _start_key_set(row_range):
+ """Helper for :meth:`_filter_row_ranges`"""
+ return row_range.start_key_open or row_range.start_key_closed
+
+ @staticmethod
+ def _end_key_set(row_range):
+ """Helper for :meth:`_filter_row_ranges`"""
+ return row_range.end_key_open or row_range.end_key_closed
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py
new file mode 100644
index 000000000000..53192acc86d0
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py
@@ -0,0 +1,838 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Filters for Google Cloud Bigtable Row classes."""
+
+import struct
+
+
+from google.cloud._helpers import _microseconds_from_datetime # type: ignore
+from google.cloud._helpers import _to_bytes # type: ignore
+from google.cloud.bigtable_v2.types import data as data_v2_pb2
+
+_PACK_I64 = struct.Struct(">q").pack
+
+
+class RowFilter(object):
+ """Basic filter to apply to cells in a row.
+
+ These values can be combined via :class:`RowFilterChain`,
+ :class:`RowFilterUnion` and :class:`ConditionalRowFilter`.
+
+ .. note::
+
+ This class is a do-nothing base class for all row filters.
+ """
+
+
+class _BoolFilter(RowFilter):
+ """Row filter that uses a boolean flag.
+
+ :type flag: bool
+ :param flag: An indicator if a setting is turned on or off.
+ """
+
+ def __init__(self, flag):
+ self.flag = flag
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.flag == self.flag
+
+ def __ne__(self, other):
+ return not self == other
+
+
+class SinkFilter(_BoolFilter):
+ """Advanced row filter to skip parent filters.
+
+ :type flag: bool
+ :param flag: ADVANCED USE ONLY. Hook for introspection into the row filter.
+ Outputs all cells directly to the output of the read rather
+ than to any parent filter. Cannot be used within the
+ ``predicate_filter``, ``true_filter``, or ``false_filter``
+ of a :class:`ConditionalRowFilter`.
+ """
+
+ def to_pb(self):
+ """Converts the row filter to a protobuf.
+
+ :rtype: :class:`.data_v2_pb2.RowFilter`
+ :returns: The converted current object.
+ """
+ return data_v2_pb2.RowFilter(sink=self.flag)
+
+
+class PassAllFilter(_BoolFilter):
+ """Row filter equivalent to not filtering at all.
+
+ :type flag: bool
+ :param flag: Matches all cells, regardless of input. Functionally
+ equivalent to leaving ``filter`` unset, but included for
+ completeness.
+ """
+
+ def to_pb(self):
+ """Converts the row filter to a protobuf.
+
+ :rtype: :class:`.data_v2_pb2.RowFilter`
+ :returns: The converted current object.
+ """
+ return data_v2_pb2.RowFilter(pass_all_filter=self.flag)
+
+
+class BlockAllFilter(_BoolFilter):
+ """Row filter that doesn't match any cells.
+
+ :type flag: bool
+ :param flag: Does not match any cells, regardless of input. Useful for
+ temporarily disabling just part of a filter.
+ """
+
+ def to_pb(self):
+ """Converts the row filter to a protobuf.
+
+ :rtype: :class:`.data_v2_pb2.RowFilter`
+ :returns: The converted current object.
+ """
+ return data_v2_pb2.RowFilter(block_all_filter=self.flag)
+
+
+class _RegexFilter(RowFilter):
+ """Row filter that uses a regular expression.
+
+ The ``regex`` must be a valid RE2 pattern. See Google's
+ `RE2 reference`_ for the accepted syntax.
+
+ .. _RE2 reference: https://github.com/google/re2/wiki/Syntax
+
+ :type regex: bytes or str
+ :param regex:
+ A regular expression (RE2) for some row filter. String values
+ will be encoded as ASCII.
+ """
+
+ def __init__(self, regex):
+ self.regex = _to_bytes(regex)
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.regex == self.regex
+
+ def __ne__(self, other):
+ return not self == other
+
+
+class RowKeyRegexFilter(_RegexFilter):
+ """Row filter for a row key regular expression.
+
+ The ``regex`` must be a valid RE2 pattern. See Google's
+ `RE2 reference`_ for the accepted syntax.
+
+ .. _RE2 reference: https://github.com/google/re2/wiki/Syntax
+
+ .. note::
+
+ Special care must be taken with the expression used. Since
+ each of these properties can contain arbitrary bytes, the ``\\C``
+ escape sequence must be used if a true wildcard is desired. The ``.``
+ character will not match the new line character ``\\n``, which may be
+ present in a binary value.
+
+ :type regex: bytes
+ :param regex: A regular expression (RE2) to match cells from rows with row
+ keys that satisfy this regex. For a
+ ``CheckAndMutateRowRequest``, this filter is unnecessary
+ since the row key is already specified.
+ """
+
+ def to_pb(self):
+ """Converts the row filter to a protobuf.
+
+ :rtype: :class:`.data_v2_pb2.RowFilter`
+ :returns: The converted current object.
+ """
+ return data_v2_pb2.RowFilter(row_key_regex_filter=self.regex)
+
+
+class RowSampleFilter(RowFilter):
+ """Matches all cells from a row with probability p.
+
+ :type sample: float
+ :param sample: The probability of matching a cell (must be in the open
+ interval ``(0, 1)``; the end points are excluded).
+ """
+
+ def __init__(self, sample):
+ self.sample = sample
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.sample == self.sample
+
+ def __ne__(self, other):
+ return not self == other
+
+ def to_pb(self):
+ """Converts the row filter to a protobuf.
+
+ :rtype: :class:`.data_v2_pb2.RowFilter`
+ :returns: The converted current object.
+ """
+ return data_v2_pb2.RowFilter(row_sample_filter=self.sample)
+
+
+class FamilyNameRegexFilter(_RegexFilter):
+ """Row filter for a family name regular expression.
+
+ The ``regex`` must be a valid RE2 pattern. See Google's
+ `RE2 reference`_ for the accepted syntax.
+
+ .. _RE2 reference: https://github.com/google/re2/wiki/Syntax
+
+ :type regex: str
+ :param regex: A regular expression (RE2) to match cells from columns in a
+ given column family. For technical reasons, the regex must
+ not contain the ``':'`` character, even if it is not being
+ used as a literal.
+ """
+
+ def to_pb(self):
+ """Converts the row filter to a protobuf.
+
+ :rtype: :class:`.data_v2_pb2.RowFilter`
+ :returns: The converted current object.
+ """
+ return data_v2_pb2.RowFilter(family_name_regex_filter=self.regex)
+
+
+class ColumnQualifierRegexFilter(_RegexFilter):
+ """Row filter for a column qualifier regular expression.
+
+ The ``regex`` must be a valid RE2 pattern. See Google's
+ `RE2 reference`_ for the accepted syntax.
+
+ .. _RE2 reference: https://github.com/google/re2/wiki/Syntax
+
+ .. note::
+
+ Special care must be taken with the expression used. Since
+ each of these properties can contain arbitrary bytes, the ``\\C``
+ escape sequence must be used if a true wildcard is desired. The ``.``
+ character will not match the new line character ``\\n``, which may be
+ present in a binary value.
+
+ :type regex: bytes
+ :param regex: A regular expression (RE2) to match cells from column that
+ match this regex (irrespective of column family).
+ """
+
+ def to_pb(self):
+ """Converts the row filter to a protobuf.
+
+ :rtype: :class:`.data_v2_pb2.RowFilter`
+ :returns: The converted current object.
+ """
+ return data_v2_pb2.RowFilter(column_qualifier_regex_filter=self.regex)
+
+
+class TimestampRange(object):
+ """Range of time with inclusive lower and exclusive upper bounds.
+
+ :type start: :class:`datetime.datetime`
+ :param start: (Optional) The (inclusive) lower bound of the timestamp
+ range. If omitted, defaults to Unix epoch.
+
+ :type end: :class:`datetime.datetime`
+ :param end: (Optional) The (exclusive) upper bound of the timestamp
+ range. If omitted, no upper bound is used.
+ """
+
+ def __init__(self, start=None, end=None):
+ self.start = start
+ self.end = end
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.start == self.start and other.end == self.end
+
+ def __ne__(self, other):
+ return not self == other
+
+ def to_pb(self):
+ """Converts the :class:`TimestampRange` to a protobuf.
+
+ :rtype: :class:`.data_v2_pb2.TimestampRange`
+ :returns: The converted current object.
+ """
+ timestamp_range_kwargs = {}
+ if self.start is not None:
+ timestamp_range_kwargs["start_timestamp_micros"] = (
+ _microseconds_from_datetime(self.start) // 1000 * 1000
+ )
+ if self.end is not None:
+ end_time = _microseconds_from_datetime(self.end)
+ if end_time % 1000 != 0:
+ end_time = end_time // 1000 * 1000 + 1000
+ timestamp_range_kwargs["end_timestamp_micros"] = end_time
+ return data_v2_pb2.TimestampRange(**timestamp_range_kwargs)
+
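+# Rounding sketch (hypothetical values): Bigtable timestamps have millisecond
+# granularity, so ``to_pb`` truncates the start down and rounds the end up to
+# the nearest millisecond:
+#
+#     1_000_001 // 1000 * 1000           # start -> 1_000_000
+#     1_000_001 // 1000 * 1000 + 1000    # end   -> 1_001_000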
+
+class TimestampRangeFilter(RowFilter):
+ """Row filter that limits cells to a range of time.
+
+ :type range_: :class:`TimestampRange`
+ :param range_: Range of time that cells should match against.
+ """
+
+ def __init__(self, range_):
+ self.range_ = range_
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.range_ == self.range_
+
+ def __ne__(self, other):
+ return not self == other
+
+ def to_pb(self):
+ """Converts the row filter to a protobuf.
+
+ First converts the ``range_`` on the current object to a protobuf and
+ then uses it in the ``timestamp_range_filter`` field.
+
+ :rtype: :class:`.data_v2_pb2.RowFilter`
+ :returns: The converted current object.
+ """
+ return data_v2_pb2.RowFilter(timestamp_range_filter=self.range_.to_pb())
+
+
+class ColumnRangeFilter(RowFilter):
+ """A row filter to restrict to a range of columns.
+
+ Both the start and end column can be included or excluded in the range.
+ By default, we include them both, but this can be changed with optional
+ flags.
+
+ :type column_family_id: str
+ :param column_family_id: The column family that contains the columns. Must
+ be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+ :type start_column: bytes
+ :param start_column: The start of the range of columns. If no value is
+ used, the backend applies no lower bound to the
+ values.
+
+ :type end_column: bytes
+ :param end_column: The end of the range of columns. If no value is used,
+ the backend applies no upper bound to the values.
+
+ :type inclusive_start: bool
+ :param inclusive_start: Boolean indicating if the start column should be
+ included in the range (or excluded). Defaults
+ to :data:`True` if ``start_column`` is passed and
+ no ``inclusive_start`` was given.
+
+ :type inclusive_end: bool
+ :param inclusive_end: Boolean indicating if the end column should be
+ included in the range (or excluded). Defaults
+ to :data:`True` if ``end_column`` is passed and
+ no ``inclusive_end`` was given.
+
+ :raises: :class:`ValueError` if ``inclusive_start``
+ is set but no ``start_column`` is given or if ``inclusive_end``
+ is set but no ``end_column`` is given
+ """
+
+ def __init__(
+ self,
+ column_family_id,
+ start_column=None,
+ end_column=None,
+ inclusive_start=None,
+ inclusive_end=None,
+ ):
+ self.column_family_id = column_family_id
+
+ if inclusive_start is None:
+ inclusive_start = True
+ elif start_column is None:
+ raise ValueError(
+ "Inclusive start was specified but no " "start column was given."
+ )
+ self.start_column = start_column
+ self.inclusive_start = inclusive_start
+
+ if inclusive_end is None:
+ inclusive_end = True
+ elif end_column is None:
+ raise ValueError(
+ "Inclusive end was specified but no " "end column was given."
+ )
+ self.end_column = end_column
+ self.inclusive_end = inclusive_end
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return (
+ other.column_family_id == self.column_family_id
+ and other.start_column == self.start_column
+ and other.end_column == self.end_column
+ and other.inclusive_start == self.inclusive_start
+ and other.inclusive_end == self.inclusive_end
+ )
+
+ def __ne__(self, other):
+ return not self == other
+
+ def to_pb(self):
+ """Converts the row filter to a protobuf.
+
+ First converts to a :class:`.data_v2_pb2.ColumnRange` and then uses it
+ in the ``column_range_filter`` field.
+
+ :rtype: :class:`.data_v2_pb2.RowFilter`
+ :returns: The converted current object.
+ """
+ column_range_kwargs = {"family_name": self.column_family_id}
+ if self.start_column is not None:
+ if self.inclusive_start:
+ key = "start_qualifier_closed"
+ else:
+ key = "start_qualifier_open"
+ column_range_kwargs[key] = _to_bytes(self.start_column)
+ if self.end_column is not None:
+ if self.inclusive_end:
+ key = "end_qualifier_closed"
+ else:
+ key = "end_qualifier_open"
+ column_range_kwargs[key] = _to_bytes(self.end_column)
+
+ column_range = data_v2_pb2.ColumnRange(**column_range_kwargs)
+ return data_v2_pb2.RowFilter(column_range_filter=column_range)
+
+
+class ValueRegexFilter(_RegexFilter):
+ """Row filter for a value regular expression.
+
+ The ``regex`` must be a valid RE2 pattern. See Google's
+ `RE2 reference`_ for the accepted syntax.
+
+ .. _RE2 reference: https://github.com/google/re2/wiki/Syntax
+
+ .. note::
+
+ Special care must be taken with the expression used. Since
+ each of these properties can contain arbitrary bytes, the ``\\C``
+ escape sequence must be used if a true wildcard is desired. The ``.``
+ character will not match the new line character ``\\n``, which may be
+ present in a binary value.
+
+ :type regex: bytes or str
+ :param regex: A regular expression (RE2) to match cells with values that
+ match this regex. String values will be encoded as ASCII.
+ """
+
+ def to_pb(self):
+ """Converts the row filter to a protobuf.
+
+ :rtype: :class:`.data_v2_pb2.RowFilter`
+ :returns: The converted current object.
+ """
+ return data_v2_pb2.RowFilter(value_regex_filter=self.regex)
+
+
+class ExactValueFilter(ValueRegexFilter):
+ """Row filter for an exact value.
+
+
+ :type value: bytes or str or int
+ :param value:
+ A literal string encodable as ASCII, the equivalent bytes, or an
+ integer (which will be packed into 8 bytes).
+ """
+
+ def __init__(self, value):
+ if isinstance(value, int):
+ value = _PACK_I64(value)
+ super(ExactValueFilter, self).__init__(value)
+
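+# Packing sketch (hypothetical value): integers are converted with
+# ``struct.Struct(">q")``, i.e. an 8-byte big-endian signed integer:
+#
+#     ExactValueFilter(1).regex == b"\x00\x00\x00\x00\x00\x00\x00\x01"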
+
+class ValueRangeFilter(RowFilter):
+ """A range of values to restrict to in a row filter.
+
+ Will only match cells that have values in this range.
+
+ Both the start and end value can be included or excluded in the range.
+ By default, we include them both, but this can be changed with optional
+ flags.
+
+ :type start_value: bytes
+ :param start_value: The start of the range of values. If no value is used,
+ the backend applies no lower bound to the values.
+
+ :type end_value: bytes
+ :param end_value: The end of the range of values. If no value is used,
+ the backend applies no upper bound to the values.
+
+ :type inclusive_start: bool
+ :param inclusive_start: Boolean indicating if the start value should be
+ included in the range (or excluded). Defaults
+ to :data:`True` if ``start_value`` is passed and
+ no ``inclusive_start`` was given.
+
+ :type inclusive_end: bool
+ :param inclusive_end: Boolean indicating if the end value should be
+ included in the range (or excluded). Defaults
+ to :data:`True` if ``end_value`` is passed and
+ no ``inclusive_end`` was given.
+
+ :raises: :class:`ValueError` if ``inclusive_start``
+ is set but no ``start_value`` is given or if ``inclusive_end``
+ is set but no ``end_value`` is given
+ """
+
+ def __init__(
+ self, start_value=None, end_value=None, inclusive_start=None, inclusive_end=None
+ ):
+ if inclusive_start is None:
+ inclusive_start = True
+ elif start_value is None:
+ raise ValueError(
+ "Inclusive start was specified but no " "start value was given."
+ )
+ if isinstance(start_value, int):
+ start_value = _PACK_I64(start_value)
+ self.start_value = start_value
+ self.inclusive_start = inclusive_start
+
+ if inclusive_end is None:
+ inclusive_end = True
+ elif end_value is None:
+ raise ValueError(
+ "Inclusive end was specified but no " "end value was given."
+ )
+ if isinstance(end_value, int):
+ end_value = _PACK_I64(end_value)
+ self.end_value = end_value
+ self.inclusive_end = inclusive_end
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return (
+ other.start_value == self.start_value
+ and other.end_value == self.end_value
+ and other.inclusive_start == self.inclusive_start
+ and other.inclusive_end == self.inclusive_end
+ )
+
+ def __ne__(self, other):
+ return not self == other
+
+ def to_pb(self):
+ """Converts the row filter to a protobuf.
+
+ First converts to a :class:`.data_v2_pb2.ValueRange` and then uses
+ it to create a row filter protobuf.
+
+ :rtype: :class:`.data_v2_pb2.RowFilter`
+ :returns: The converted current object.
+ """
+ value_range_kwargs = {}
+ if self.start_value is not None:
+ if self.inclusive_start:
+ key = "start_value_closed"
+ else:
+ key = "start_value_open"
+ value_range_kwargs[key] = _to_bytes(self.start_value)
+ if self.end_value is not None:
+ if self.inclusive_end:
+ key = "end_value_closed"
+ else:
+ key = "end_value_open"
+ value_range_kwargs[key] = _to_bytes(self.end_value)
+
+ value_range = data_v2_pb2.ValueRange(**value_range_kwargs)
+ return data_v2_pb2.RowFilter(value_range_filter=value_range)
+
+
+class _CellCountFilter(RowFilter):
+ """Row filter that uses an integer count of cells.
+
+ The cell count is used as an offset or a limit for the number
+ of results returned.
+
+ :type num_cells: int
+ :param num_cells: An integer count / offset / limit.
+ """
+
+ def __init__(self, num_cells):
+ self.num_cells = num_cells
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.num_cells == self.num_cells
+
+ def __ne__(self, other):
+ return not self == other
+
+
+class CellsRowOffsetFilter(_CellCountFilter):
+ """Row filter to skip cells in a row.
+
+ :type num_cells: int
+ :param num_cells: Skips the first N cells of the row.
+ """
+
+ def to_pb(self):
+ """Converts the row filter to a protobuf.
+
+ :rtype: :class:`.data_v2_pb2.RowFilter`
+ :returns: The converted current object.
+ """
+ return data_v2_pb2.RowFilter(cells_per_row_offset_filter=self.num_cells)
+
+
+class CellsRowLimitFilter(_CellCountFilter):
+ """Row filter to limit cells in a row.
+
+ :type num_cells: int
+ :param num_cells: Matches only the first N cells of the row.
+ """
+
+ def to_pb(self):
+ """Converts the row filter to a protobuf.
+
+ :rtype: :class:`.data_v2_pb2.RowFilter`
+ :returns: The converted current object.
+ """
+ return data_v2_pb2.RowFilter(cells_per_row_limit_filter=self.num_cells)
+
+
+class CellsColumnLimitFilter(_CellCountFilter):
+ """Row filter to limit cells in a column.
+
+ :type num_cells: int
+ :param num_cells: Matches only the most recent N cells within each column.
+ This filters a (family name, column) pair, based on
+ timestamps of each cell.
+ """
+
+ def to_pb(self):
+ """Converts the row filter to a protobuf.
+
+ :rtype: :class:`.data_v2_pb2.RowFilter`
+ :returns: The converted current object.
+ """
+ return data_v2_pb2.RowFilter(cells_per_column_limit_filter=self.num_cells)
+
+
+class StripValueTransformerFilter(_BoolFilter):
+ """Row filter that transforms cells into empty string (0 bytes).
+
+ :type flag: bool
+ :param flag: If :data:`True`, replaces each cell's value with the empty
+ string. As the name indicates, this is more useful as a
+ transformer than a generic query / filter.
+ """
+
+ def to_pb(self):
+ """Converts the row filter to a protobuf.
+
+ :rtype: :class:`.data_v2_pb2.RowFilter`
+ :returns: The converted current object.
+ """
+ return data_v2_pb2.RowFilter(strip_value_transformer=self.flag)
+
+
+class ApplyLabelFilter(RowFilter):
+ """Filter to apply labels to cells.
+
+ Intended to be used as an intermediate filter on a pre-existing filtered
+ result set. This way, if two sets are combined, the label can tell where
+ the cell(s) originated. This allows the client to determine which results
+ were produced from which part of the filter.
+
+ .. note::
+
+ Due to a technical limitation of the backend, it is not currently
+ possible to apply multiple labels to a cell.
+
+ :type label: str
+ :param label: Label to apply to cells in the output row. Values must be
+ at most 15 characters long, and match the pattern
+ ``[a-z0-9\\-]+``.
+ """
+
+ def __init__(self, label):
+ self.label = label
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.label == self.label
+
+ def __ne__(self, other):
+ return not self == other
+
+ def to_pb(self):
+ """Converts the row filter to a protobuf.
+
+ :rtype: :class:`.data_v2_pb2.RowFilter`
+ :returns: The converted current object.
+ """
+ return data_v2_pb2.RowFilter(apply_label_transformer=self.label)
+
+
+class _FilterCombination(RowFilter):
+ """Chain of row filters.
+
+ Sends rows through several filters in sequence. The filters are "chained"
+ together to process a row. After the first filter is applied, the second
+ is applied to the filtered output and so on for subsequent filters.
+
+ :type filters: list
+ :param filters: List of :class:`RowFilter`
+ """
+
+ def __init__(self, filters=None):
+ if filters is None:
+ filters = []
+ self.filters = filters
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.filters == self.filters
+
+ def __ne__(self, other):
+ return not self == other
+
+
+class RowFilterChain(_FilterCombination):
+ """Chain of row filters.
+
+ Sends rows through several filters in sequence. The filters are "chained"
+ together to process a row. After the first filter is applied, the second
+ is applied to the filtered output and so on for subsequent filters.
+
+ :type filters: list
+ :param filters: List of :class:`RowFilter`
+ """
+
+ def to_pb(self):
+ """Converts the row filter to a protobuf.
+
+ :rtype: :class:`.data_v2_pb2.RowFilter`
+ :returns: The converted current object.
+ """
+ chain = data_v2_pb2.RowFilter.Chain(
+ filters=[row_filter.to_pb() for row_filter in self.filters]
+ )
+ return data_v2_pb2.RowFilter(chain=chain)
+
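+# Composition sketch (hypothetical filters): restrict to column family "cf1"
+# and keep only the latest cell per column:
+#
+#     chain = RowFilterChain(
+#         filters=[FamilyNameRegexFilter("cf1"), CellsColumnLimitFilter(1)]
+#     )
+#     row_filter_pb = chain.to_pb()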
+
+class RowFilterUnion(_FilterCombination):
+ """Union of row filters.
+
+ Sends rows through several filters simultaneously, then
+ merges / interleaves all the filtered results together.
+
+ If multiple cells are produced with the same column and timestamp,
+ they will all appear in the output row in an unspecified mutual order.
+
+ :type filters: list
+ :param filters: List of :class:`RowFilter`
+ """
+
+ def to_pb(self):
+ """Converts the row filter to a protobuf.
+
+ :rtype: :class:`.data_v2_pb2.RowFilter`
+ :returns: The converted current object.
+ """
+ interleave = data_v2_pb2.RowFilter.Interleave(
+ filters=[row_filter.to_pb() for row_filter in self.filters]
+ )
+ return data_v2_pb2.RowFilter(interleave=interleave)
+
+
+class ConditionalRowFilter(RowFilter):
+ """Conditional row filter which exhibits ternary behavior.
+
+ Executes one of two filters based on another filter. If the ``base_filter``
+ returns any cells in the row, then ``true_filter`` is executed. If not,
+ then ``false_filter`` is executed.
+
+ .. note::
+
+ The ``base_filter`` does not execute atomically with the true and false
+ filters, which may lead to inconsistent or unexpected results.
+
+ Additionally, executing a :class:`ConditionalRowFilter` has poor
+ performance on the server, especially when ``false_filter`` is set.
+
+ :type base_filter: :class:`RowFilter`
+ :param base_filter: The filter to condition on before executing the
+ true/false filters.
+
+ :type true_filter: :class:`RowFilter`
+ :param true_filter: (Optional) The filter to execute if there are any cells
+ matching ``base_filter``. If not provided, no results
+ will be returned in the true case.
+
+ :type false_filter: :class:`RowFilter`
+ :param false_filter: (Optional) The filter to execute if there are no cells
+ matching ``base_filter``. If not provided, no results
+ will be returned in the false case.
+ """
+
+ def __init__(self, base_filter, true_filter=None, false_filter=None):
+ self.base_filter = base_filter
+ self.true_filter = true_filter
+ self.false_filter = false_filter
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return (
+ other.base_filter == self.base_filter
+ and other.true_filter == self.true_filter
+ and other.false_filter == self.false_filter
+ )
+
+ def __ne__(self, other):
+ return not self == other
+
+ def to_pb(self):
+ """Converts the row filter to a protobuf.
+
+ :rtype: :class:`.data_v2_pb2.RowFilter`
+ :returns: The converted current object.
+ """
+ condition_kwargs = {"predicate_filter": self.base_filter.to_pb()}
+ if self.true_filter is not None:
+ condition_kwargs["true_filter"] = self.true_filter.to_pb()
+ if self.false_filter is not None:
+ condition_kwargs["false_filter"] = self.false_filter.to_pb()
+ condition = data_v2_pb2.RowFilter.Condition(**condition_kwargs)
+ return data_v2_pb2.RowFilter(condition=condition)
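+
+
+# Composition sketch (hypothetical filters): strip values when the row has any
+# cell in column family "cf1", otherwise pass the row through unchanged:
+#
+#     cond = ConditionalRowFilter(
+#         base_filter=FamilyNameRegexFilter("cf1"),
+#         true_filter=StripValueTransformerFilter(True),
+#         false_filter=PassAllFilter(True),
+#     )
+#     row_filter_pb = cond.to_pb()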
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_merger.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_merger.py
new file mode 100644
index 000000000000..515b91df7ef2
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_merger.py
@@ -0,0 +1,250 @@
+from enum import Enum
+from collections import OrderedDict
+from google.cloud.bigtable.row import Cell, PartialRowData, InvalidChunk
+
+_MISSING_COLUMN_FAMILY = "Column family {} is not among the cells stored in this row."
+_MISSING_COLUMN = (
+ "Column {} is not among the cells stored in this row in the column family {}."
+)
+_MISSING_INDEX = (
+ "Index {!r} is not valid for the cells stored in this row for column {} "
+ "in the column family {}. There are {} such cells."
+)
+
+
+class _State(Enum):
+ ROW_START = "ROW_START"
+ CELL_START = "CELL_START"
+ CELL_IN_PROGRESS = "CELL_IN_PROGRESS"
+ CELL_COMPLETE = "CELL_COMPLETE"
+ ROW_COMPLETE = "ROW_COMPLETE"
+
+
+class _PartialRow(object):
+ __slots__ = [
+ "row_key",
+ "cells",
+ "last_family",
+ "last_family_cells",
+ "last_qualifier",
+ "last_qualifier_cells",
+ "cell",
+ ]
+
+ def __init__(self, row_key):
+ self.row_key = row_key
+ self.cells = OrderedDict()
+
+ self.last_family = None
+ self.last_family_cells = OrderedDict()
+ self.last_qualifier = None
+ self.last_qualifier_cells = []
+
+ self.cell = None
+
+
+class _PartialCell(object):
+ __slots__ = ["family", "qualifier", "timestamp", "labels", "value", "value_index"]
+
+ def __init__(self):
+ self.family = None
+ self.qualifier = None
+ self.timestamp = None
+ self.labels = None
+ self.value = None
+ self.value_index = 0
+
+
+class _RowMerger(object):
+ """
+ State machine to merge chunks from a response stream into logical rows.
+
+ The implementation is a fairly linear state machine that is implemented as
+ a method for every state in the _State enum. In general the states flow
+ from top to bottom with some repetition. Each state handler will do some
+ sanity checks, update in progress data and set the next state.
+
+ There can be multiple state transitions for each chunk, i.e. a single chunk
+ row will flow from ROW_START -> CELL_START -> CELL_COMPLETE -> ROW_COMPLETE
+ in a single iteration.
+ """
+
+ __slots__ = ["state", "last_seen_row_key", "row"]
+
+ def __init__(self, last_seen_row=b""):
+ self.last_seen_row_key = last_seen_row
+ self.state = _State.ROW_START
+ self.row = None
+
+ def process_chunks(self, response):
+ """
+ Process the chunks in the given response and yield logical rows.
+ This class will maintain state across multiple response protos.
+ """
+ if response.last_scanned_row_key:
+ if self.last_seen_row_key >= response.last_scanned_row_key:
+ raise InvalidChunk("Last scanned row key is out of order")
+ self.last_seen_row_key = response.last_scanned_row_key
+
+ for chunk in response.chunks:
+ if chunk.reset_row:
+ self._handle_reset(chunk)
+ continue
+
+ if self.state == _State.ROW_START:
+ self._handle_row_start(chunk)
+
+ if self.state == _State.CELL_START:
+ self._handle_cell_start(chunk)
+
+ if self.state == _State.CELL_IN_PROGRESS:
+ self._handle_cell_in_progress(chunk)
+
+ if self.state == _State.CELL_COMPLETE:
+ self._handle_cell_complete(chunk)
+
+ if self.state == _State.ROW_COMPLETE:
+ yield self._handle_row_complete(chunk)
+ elif chunk.commit_row:
+ raise InvalidChunk(
+ f"Chunk tried to commit row in wrong state (${self.state})"
+ )
+
+ def _handle_reset(self, chunk):
+ if self.state == _State.ROW_START:
+ raise InvalidChunk("Bare reset")
+ if chunk.row_key:
+ raise InvalidChunk("Reset chunk has a row key")
+ if chunk.HasField("family_name"):
+ raise InvalidChunk("Reset chunk has family_name")
+ if chunk.HasField("qualifier"):
+ raise InvalidChunk("Reset chunk has qualifier")
+ if chunk.timestamp_micros:
+ raise InvalidChunk("Reset chunk has a timestamp")
+ if chunk.labels:
+ raise InvalidChunk("Reset chunk has labels")
+ if chunk.value:
+ raise InvalidChunk("Reset chunk has a value")
+
+ self.state = _State.ROW_START
+ self.row = None
+
+ def _handle_row_start(self, chunk):
+ if not chunk.row_key:
+ raise InvalidChunk("New row is missing a row key")
+ if self.last_seen_row_key and self.last_seen_row_key >= chunk.row_key:
+ raise InvalidChunk("Out of order row keys")
+
+ self.row = _PartialRow(chunk.row_key)
+ self.state = _State.CELL_START
+
+ def _handle_cell_start(self, chunk):
+ # Ensure that all chunks after the first one either are missing a row
+ # key or the row is the same
+ if self.row.cells and chunk.row_key and chunk.row_key != self.row.row_key:
+ raise InvalidChunk("row key changed mid row")
+
+ if not self.row.cell:
+ self.row.cell = _PartialCell()
+
+ # Cells can inherit family/qualifier from previous cells
+ # However if the family changes, then qualifier must be specified as well
+ if chunk.HasField("family_name"):
+ self.row.cell.family = chunk.family_name.value
+ self.row.cell.qualifier = None
+ if not self.row.cell.family:
+ raise InvalidChunk("missing family for a new cell")
+
+ if chunk.HasField("qualifier"):
+ self.row.cell.qualifier = chunk.qualifier.value
+ if self.row.cell.qualifier is None:
+ raise InvalidChunk("missing qualifier for a new cell")
+
+ self.row.cell.timestamp = chunk.timestamp_micros
+ self.row.cell.labels = chunk.labels
+
+ if chunk.value_size > 0:
+ # explicitly avoid pre-allocation as it seems that bytearray
+ # concatenation performs better than slice copies.
+ self.row.cell.value = bytearray()
+ self.state = _State.CELL_IN_PROGRESS
+ else:
+ self.row.cell.value = chunk.value
+ self.state = _State.CELL_COMPLETE
+
+ def _handle_cell_in_progress(self, chunk):
+ # if this isn't the first cell chunk, make sure that everything except
+ # the value stayed constant.
+ if self.row.cell.value_index > 0:
+ if chunk.row_key:
+ raise InvalidChunk("found row key mid cell")
+ if chunk.HasField("family_name"):
+ raise InvalidChunk("In progress cell had a family name")
+ if chunk.HasField("qualifier"):
+ raise InvalidChunk("In progress cell had a qualifier")
+ if chunk.timestamp_micros:
+ raise InvalidChunk("In progress cell had a timestamp")
+ if chunk.labels:
+ raise InvalidChunk("In progress cell had labels")
+
+ self.row.cell.value += chunk.value
+ self.row.cell.value_index += len(chunk.value)
+
+ if chunk.value_size > 0:
+ self.state = _State.CELL_IN_PROGRESS
+ else:
+ self.row.cell.value = bytes(self.row.cell.value)
+ self.state = _State.CELL_COMPLETE
+
+ def _handle_cell_complete(self, chunk):
+ # since we are guaranteed that all family & qualifier cells are
+ # contiguous, we can optimize away the dict lookup by caching the last
+ # family/qualifier and simply comparing and appending
+ family_changed = False
+ if self.row.last_family != self.row.cell.family:
+ family_changed = True
+ self.row.last_family = self.row.cell.family
+ self.row.cells[
+ self.row.cell.family
+ ] = self.row.last_family_cells = OrderedDict()
+
+ if family_changed or self.row.last_qualifier != self.row.cell.qualifier:
+ self.row.last_qualifier = self.row.cell.qualifier
+ self.row.last_family_cells[
+ self.row.cell.qualifier
+ ] = self.row.last_qualifier_cells = []
+
+ self.row.last_qualifier_cells.append(
+ Cell(
+ self.row.cell.value,
+ self.row.cell.timestamp,
+ self.row.cell.labels,
+ )
+ )
+
+ self.row.cell.timestamp = 0
+ self.row.cell.value = None
+ self.row.cell.value_index = 0
+
+ if not chunk.commit_row:
+ self.state = _State.CELL_START
+ else:
+ self.state = _State.ROW_COMPLETE
+
+ def _handle_row_complete(self, chunk):
+ new_row = PartialRowData(self.row.row_key)
+ new_row._cells = self.row.cells
+
+ self.last_seen_row_key = new_row.row_key
+ self.row = None
+ self.state = _State.ROW_START
+
+ return new_row
+
+ def finalize(self):
+ """
+ Must be called at the end of the stream to ensure there are no unmerged
+ rows.
+ """
+ if self.row or self.state != _State.ROW_START:
+ raise ValueError("The row remains partial / is not committed.")
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py
new file mode 100644
index 000000000000..2bc436d54c0c
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py
@@ -0,0 +1,213 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""User-friendly container for Google Cloud Bigtable RowSet """
+
+
+from google.cloud._helpers import _to_bytes # type: ignore
+
+
+class RowSet(object):
+ """Convenience wrapper of google.bigtable.v2.RowSet
+
+ Useful for creating a set of row keys and row ranges, which can
+ be passed to the :meth:`.Table.read_rows` method.
+ """
+
+ def __init__(self):
+ self.row_keys = []
+ self.row_ranges = []
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+
+ if len(other.row_keys) != len(self.row_keys):
+ return False
+
+ if len(other.row_ranges) != len(self.row_ranges):
+ return False
+
+ if not set(other.row_keys) == set(self.row_keys):
+ return False
+
+ if not set(other.row_ranges) == set(self.row_ranges):
+ return False
+
+ return True
+
+ def __ne__(self, other):
+ return not self == other
+
+ def add_row_key(self, row_key):
+ """Add row key to row_keys list.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_add_row_key]
+ :end-before: [END bigtable_api_add_row_key]
+ :dedent: 4
+
+ :type row_key: bytes
+ :param row_key: The key of a row to read
+ """
+ self.row_keys.append(row_key)
+
+ def add_row_range(self, row_range):
+ """Add row_range to row_ranges list.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_add_row_range]
+ :end-before: [END bigtable_api_add_row_range]
+ :dedent: 4
+
+ :type row_range: :class:`RowRange`
+ :param row_range: The row range object having start and end keys.
+ """
+ self.row_ranges.append(row_range)
+
+ def add_row_range_from_keys(
+ self, start_key=None, end_key=None, start_inclusive=True, end_inclusive=False
+ ):
+ """Add row range to row_ranges list from the row keys
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_row_range_from_keys]
+ :end-before: [END bigtable_api_row_range_from_keys]
+ :dedent: 4
+
+ :type start_key: bytes
+ :param start_key: (Optional) Start key of the row range. If left empty,
+ will be interpreted as the empty string.
+
+ :type end_key: bytes
+ :param end_key: (Optional) End key of the row range. If left empty,
+ will be interpreted as the empty string and range will
+ be unbounded on the high end.
+
+ :type start_inclusive: bool
+ :param start_inclusive: (Optional) Whether the ``start_key`` should be
+ considered inclusive. The default is True (inclusive).
+
+ :type end_inclusive: bool
+ :param end_inclusive: (Optional) Whether the ``end_key`` should be
+ considered inclusive. The default is False (exclusive).
+ """
+ row_range = RowRange(start_key, end_key, start_inclusive, end_inclusive)
+ self.row_ranges.append(row_range)
+
+ def add_row_range_with_prefix(self, row_key_prefix):
+ """Add row range to row_ranges list that start with the row_key_prefix from the row keys
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_add_row_range_with_prefix]
+ :end-before: [END bigtable_api_add_row_range_with_prefix]
+
+ :type row_key_prefix: str
+ :param row_key_prefix: Row key prefix; all rows starting with this prefix
+ will be retrieved. The prefix cannot be zero length."""
+
+ end_key = row_key_prefix[:-1] + chr(ord(row_key_prefix[-1]) + 1)
+ self.add_row_range_from_keys(
+ row_key_prefix.encode("utf-8"), end_key.encode("utf-8")
+ )
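+
+ # Prefix sketch (hypothetical prefix): add_row_range_with_prefix("row")
+ # computes the end key "rox" (last character incremented by one) and adds
+ # the half-open range [b"row", b"rox").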
+
+ def _update_message_request(self, message):
+ """Add row keys and row range to given request message
+
+ :type message: class:`data_messages_v2_pb2.ReadRowsRequest`
+ :param message: The ``ReadRowsRequest`` protobuf
+ """
+ for each in self.row_keys:
+ message.rows.row_keys._pb.append(_to_bytes(each))
+
+ for each in self.row_ranges:
+ r_kwargs = each.get_range_kwargs()
+ message.rows.row_ranges.append(r_kwargs)
+
+
+class RowRange(object):
+ """Convenience wrapper of google.bigtable.v2.RowRange
+
+ :type start_key: bytes
+ :param start_key: (Optional) Start key of the row range. If left empty,
+ will be interpreted as the empty string.
+
+ :type end_key: bytes
+ :param end_key: (Optional) End key of the row range. If left empty,
+ will be interpreted as the empty string and range will
+ be unbounded on the high end.
+
+ :type start_inclusive: bool
+ :param start_inclusive: (Optional) Whether the ``start_key`` should be
+ considered inclusive. The default is True (inclusive).
+
+ :type end_inclusive: bool
+ :param end_inclusive: (Optional) Whether the ``end_key`` should be
+ considered inclusive. The default is False (exclusive).
+ """
+
+ def __init__(
+ self, start_key=None, end_key=None, start_inclusive=True, end_inclusive=False
+ ):
+ self.start_key = start_key
+ self.start_inclusive = start_inclusive
+ self.end_key = end_key
+ self.end_inclusive = end_inclusive
+
+ def _key(self):
+ """A tuple key that uniquely describes this field.
+
+ Used to compute this instance's hashcode and evaluate equality.
+
+ Returns:
+ Tuple[str]: The contents of this :class:`.RowRange`.
+ """
+ return (self.start_key, self.start_inclusive, self.end_key, self.end_inclusive)
+
+ def __hash__(self):
+ return hash(self._key())
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return self._key() == other._key()
+
+ def __ne__(self, other):
+ return not self == other
+
+ def get_range_kwargs(self):
+ """Convert row range object to dict which can be passed to
+ google.bigtable.v2.RowRange add method.
+ """
+ range_kwargs = {}
+ if self.start_key is not None:
+ start_key_key = "start_key_open"
+ if self.start_inclusive:
+ start_key_key = "start_key_closed"
+ range_kwargs[start_key_key] = _to_bytes(self.start_key)
+
+ if self.end_key is not None:
+ end_key_key = "end_key_open"
+ if self.end_inclusive:
+ end_key_key = "end_key_closed"
+ range_kwargs[end_key_key] = _to_bytes(self.end_key)
+ return range_kwargs
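+
+
+# Usage sketch (hypothetical keys): the default inclusive start / exclusive end
+# maps onto the ``start_key_closed`` / ``end_key_open`` proto fields:
+#
+#     RowRange(b"a", b"m").get_range_kwargs()
+#     # -> {"start_key_closed": b"a", "end_key_open": b"m"}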
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py
new file mode 100644
index 000000000000..0009f287ef85
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py
@@ -0,0 +1,1409 @@
+# Copyright 2015 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""User-friendly container for Google Cloud Bigtable Table."""
+
+from typing import Set
+import warnings
+
+from google.api_core import timeout
+from google.api_core.exceptions import Aborted
+from google.api_core.exceptions import DeadlineExceeded
+from google.api_core.exceptions import NotFound
+from google.api_core.exceptions import RetryError
+from google.api_core.exceptions import ServiceUnavailable
+from google.api_core.exceptions import InternalServerError
+from google.api_core.gapic_v1.method import DEFAULT
+from google.api_core.retry import if_exception_type
+from google.api_core.retry import Retry
+from google.cloud._helpers import _to_bytes # type: ignore
+from google.cloud.bigtable.backup import Backup
+from google.cloud.bigtable.column_family import _gc_rule_from_pb
+from google.cloud.bigtable.column_family import ColumnFamily
+from google.cloud.bigtable.batcher import MutationsBatcher
+from google.cloud.bigtable.batcher import FLUSH_COUNT, MAX_MUTATION_SIZE
+from google.cloud.bigtable.encryption_info import EncryptionInfo
+from google.cloud.bigtable.policy import Policy
+from google.cloud.bigtable.row import AppendRow
+from google.cloud.bigtable.row import ConditionalRow
+from google.cloud.bigtable.row import DirectRow
+from google.cloud.bigtable.row_data import (
+ PartialRowsData,
+ _retriable_internal_server_error,
+)
+from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS
+from google.cloud.bigtable.row_set import RowSet
+from google.cloud.bigtable.row_set import RowRange
+from google.cloud.bigtable import enums
+from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2
+from google.cloud.bigtable_admin_v2 import BaseBigtableTableAdminClient
+from google.cloud.bigtable_admin_v2.types import table as admin_messages_v2_pb2
+from google.cloud.bigtable_admin_v2.types import (
+ bigtable_table_admin as table_admin_messages_v2_pb2,
+)
+
+# Maximum number of mutations in bulk (MutateRowsRequest message):
+# (https://cloud.google.com/bigtable/docs/reference/data/rpc/
+# google.bigtable.v2#google.bigtable.v2.MutateRowRequest)
+_MAX_BULK_MUTATIONS = 100000
+VIEW_NAME_ONLY = enums.Table.View.NAME_ONLY
+
+RETRYABLE_MUTATION_ERRORS = (
+ Aborted,
+ DeadlineExceeded,
+ ServiceUnavailable,
+ InternalServerError,
+)
+"""Errors which can be retried during row mutation."""
+
+
+RETRYABLE_CODES: Set[int] = set()
+
+for retryable in RETRYABLE_MUTATION_ERRORS:
+ if retryable.grpc_status_code is not None: # pragma: NO COVER
+ RETRYABLE_CODES.add(retryable.grpc_status_code.value[0])
+
+
+class _BigtableRetryableError(Exception):
+ """Retry-able error expected by the default retry strategy."""
+
+
+DEFAULT_RETRY = Retry(
+ predicate=if_exception_type(_BigtableRetryableError),
+ initial=1.0,
+ maximum=15.0,
+ multiplier=2.0,
+ deadline=120.0, # 2 minutes
+)
+"""The default retry strategy to be used on retry-able errors.
+
+Used by :meth:`~google.cloud.bigtable.table.Table.mutate_rows`.
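+
+To use a different deadline (``table`` and ``rows`` here are illustrative),
+pass a modified copy of this strategy::
+
+    table.mutate_rows(rows, retry=DEFAULT_RETRY.with_deadline(30.0))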
+"""
+
+
+class TableMismatchError(ValueError):
+ """Row from another table."""
+
+
+class TooManyMutationsError(ValueError):
+ """The number of mutations for bulk request is too big."""
+
+
+class Table(object):
+ """Representation of a Google Cloud Bigtable Table.
+
+ .. note::
+
+ We don't define any properties on a table other than the name.
+ The only other fields are ``column_families`` and ``granularity``:
+ the ``column_families`` are not stored locally and
+ ``granularity`` is an enum with only one value.
+
+ We can use a :class:`Table` to:
+
+ * :meth:`create` the table
+ * :meth:`delete` the table
+ * :meth:`list_column_families` in the table
+
+ :type table_id: str
+ :param table_id: The ID of the table.
+
+ :type instance: :class:`~google.cloud.bigtable.instance.Instance`
+ :param instance: The instance that owns the table.
+
+ :type mutation_timeout: float
+ :param mutation_timeout: (Optional) The default timeout, in seconds, used
+ for ``mutate_rows`` requests when no explicit
+ timeout is passed.
+
+ :type app_profile_id: str
+ :param app_profile_id: (Optional) The unique name of the AppProfile.
+ """
+
+ def __init__(self, table_id, instance, mutation_timeout=None, app_profile_id=None):
+ self.table_id = table_id
+ self._instance = instance
+ self._app_profile_id = app_profile_id
+ self.mutation_timeout = mutation_timeout
+
+ @property
+ def name(self):
+ """Table name used in requests.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_table_name]
+ :end-before: [END bigtable_api_table_name]
+ :dedent: 4
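+
+ A minimal sketch (the project, instance, and table IDs are illustrative)::
+
+     from google.cloud.bigtable import Client
+
+     client = Client(project="my-project", admin=True)
+     table = client.instance("my-instance").table("my-table")
+     print(table.name)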
+
+ .. note::
+
+ This property will not change if ``table_id`` does not, but the
+ return value is not cached.
+
+ The table name is of the form
+
+ ``"projects/../instances/../tables/{table_id}"``
+
+ :rtype: str
+ :returns: The table name.
+ """
+ project = self._instance._client.project
+ instance_id = self._instance.instance_id
+ table_client = self._instance._client.table_data_client
+ return table_client.table_path(
+ project=project, instance=instance_id, table=self.table_id
+ )
+
+ def get_iam_policy(self):
+ """Gets the IAM access control policy for this table.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_table_get_iam_policy]
+ :end-before: [END bigtable_api_table_get_iam_policy]
+ :dedent: 4
+
+ :rtype: :class:`google.cloud.bigtable.policy.Policy`
+ :returns: The current IAM policy of this table.
+ """
+ table_client = self._instance._client.table_admin_client
+ resp = table_client.get_iam_policy(request={"resource": self.name})
+ return Policy.from_pb(resp)
+
+ def set_iam_policy(self, policy):
+ """Sets the IAM access control policy for this table. Replaces any
+ existing policy.
+
+ For more information about policies, please see the documentation for
+ the :class:`google.cloud.bigtable.policy.Policy` class.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_table_set_iam_policy]
+ :end-before: [END bigtable_api_table_set_iam_policy]
+ :dedent: 4
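+
+ A minimal sketch (the role and member below are illustrative)::
+
+     policy = table.get_iam_policy()
+     policy["roles/bigtable.reader"] = ["user:alice@example.com"]
+     table.set_iam_policy(policy)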
+
+ :type policy: :class:`google.cloud.bigtable.policy.Policy`
+ :param policy: A new IAM policy to replace the current IAM policy
+ of this table.
+
+ :rtype: :class:`google.cloud.bigtable.policy.Policy`
+ :returns: The current IAM policy of this table.
+ """
+ table_client = self._instance._client.table_admin_client
+ resp = table_client.set_iam_policy(
+ request={"resource": self.name, "policy": policy.to_pb()}
+ )
+ return Policy.from_pb(resp)
+
+ def test_iam_permissions(self, permissions):
+ """Tests whether the caller has the given permissions for this table.
+ Returns the permissions that the caller has.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_table_test_iam_permissions]
+ :end-before: [END bigtable_api_table_test_iam_permissions]
+ :dedent: 4
+
+ :type permissions: list
+ :param permissions: The set of permissions to check for
+ the ``resource``. Permissions with wildcards (such as '*'
+ or 'storage.*') are not allowed. For more information see
+ `IAM Overview
+ `_.
+ `Bigtable Permissions
+ `_.
+
+ :rtype: list
+ :returns: A list of permission strings allowed on the table.
+ """
+ table_client = self._instance._client.table_admin_client
+ resp = table_client.test_iam_permissions(
+ request={"resource": self.name, "permissions": permissions}
+ )
+ return list(resp.permissions)
+
+ def column_family(self, column_family_id, gc_rule=None):
+ """Factory to create a column family associated with this table.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_table_column_family]
+ :end-before: [END bigtable_api_table_column_family]
+ :dedent: 4
+
+ :type column_family_id: str
+ :param column_family_id: The ID of the column family. Must be of the
+ form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+ :type gc_rule: :class:`.GarbageCollectionRule`
+ :param gc_rule: (Optional) The garbage collection settings for this
+ column family.
+
+ :rtype: :class:`.ColumnFamily`
+ :returns: A column family owned by this table.
+ """
+ return ColumnFamily(column_family_id, self, gc_rule=gc_rule)
+
+ def row(self, row_key, filter_=None, append=False):
+ """Factory to create a row associated with this table.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_table_row]
+ :end-before: [END bigtable_api_table_row]
+ :dedent: 4
+
+ .. warning::
+
+ At most one of ``filter_`` and ``append`` can be used in a
+ :class:`~google.cloud.bigtable.row.Row`.
+
+ :type row_key: bytes
+ :param row_key: The key for the row being created.
+
+ :type filter_: :class:`.RowFilter`
+ :param filter_: (Optional) Filter to be used for conditional mutations.
+ See :class:`.ConditionalRow` for more details.
+
+ :type append: bool
+ :param append: (Optional) Flag to determine if the row should be used
+ for append mutations.
+
+ :rtype: :class:`~google.cloud.bigtable.row.Row`
+ :returns: A row owned by this table.
+ :raises: :class:`ValueError ` if both
+ ``filter_`` and ``append`` are used.
+ """
+ warnings.warn(
+ "This method will be deprecated in future versions. Please "
+ "use Table.append_row(), Table.conditional_row() "
+ "and Table.direct_row() methods instead.",
+ PendingDeprecationWarning,
+ stacklevel=2,
+ )
+
+ if append and filter_ is not None:
+ raise ValueError("At most one of filter_ and append can be set")
+ if append:
+ return AppendRow(row_key, self)
+ elif filter_ is not None:
+ return ConditionalRow(row_key, self, filter_=filter_)
+ else:
+ return DirectRow(row_key, self)
+
+ def append_row(self, row_key):
+ """Create a :class:`~google.cloud.bigtable.row.AppendRow` associated with this table.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_table_append_row]
+ :end-before: [END bigtable_api_table_append_row]
+ :dedent: 4
+
+ Args:
+ row_key (bytes): The key for the row being created.
+
+ Returns:
+ A row owned by this table.
+ """
+ return AppendRow(row_key, self)
+
+ def direct_row(self, row_key):
+ """Create a :class:`~google.cloud.bigtable.row.DirectRow` associated with this table.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_table_direct_row]
+ :end-before: [END bigtable_api_table_direct_row]
+ :dedent: 4
+
+ Args:
+ row_key (bytes): The key for the row being created.
+
+ Returns:
+ A row owned by this table.
+ """
+ return DirectRow(row_key, self)
+
+ def conditional_row(self, row_key, filter_):
+ """Create a :class:`~google.cloud.bigtable.row.ConditionalRow` associated with this table.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_table_conditional_row]
+ :end-before: [END bigtable_api_table_conditional_row]
+ :dedent: 4
+
+ Args:
+ row_key (bytes): The key for the row being created.
+
+ filter_ (:class:`.RowFilter`): (Optional) Filter to be used for
+ conditional mutations. See :class:`.ConditionalRow` for more details.
+
+ Returns:
+ A row owned by this table.
+ """
+ return ConditionalRow(row_key, self, filter_=filter_)
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return other.table_id == self.table_id and other._instance == self._instance
+
+ def __ne__(self, other):
+ return not self == other
+
+ def create(self, initial_split_keys=[], column_families={}):
+ """Creates this table.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_create_table]
+ :end-before: [END bigtable_api_create_table]
+ :dedent: 4
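+
+ A minimal sketch (the column family ID and GC rule are illustrative)::
+
+     from google.cloud.bigtable.column_family import MaxVersionsGCRule
+
+     table.create(column_families={"stats": MaxVersionsGCRule(2)})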
+
+ .. note::
+
+ A create request returns a
+ :class:`._generated.table.Table` but we don't use
+ this response.
+
+ :type initial_split_keys: list
+ :param initial_split_keys: (Optional) list of row keys in bytes that
+ will be used to initially split the table
+ into several tablets.
+
+ :type column_families: dict
+ :param column_families: (Optional) A map of column families to create.
+ Keys are column family IDs (str) and values are
+ :class:`GarbageCollectionRule` instances.
+ """
+ table_client = self._instance._client.table_admin_client
+ instance_name = self._instance.name
+
+ families = {
+ id: ColumnFamily(id, self, rule).to_pb()
+ for (id, rule) in column_families.items()
+ }
+ table = admin_messages_v2_pb2.Table(column_families=families)
+
+ split = table_admin_messages_v2_pb2.CreateTableRequest.Split
+ splits = [split(key=_to_bytes(key)) for key in initial_split_keys]
+
+ table_client.create_table(
+ request={
+ "parent": instance_name,
+ "table_id": self.table_id,
+ "table": table,
+ "initial_splits": splits,
+ }
+ )
+
+ def exists(self):
+ """Check whether the table exists.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_check_table_exists]
+ :end-before: [END bigtable_api_check_table_exists]
+ :dedent: 4
+
+ :rtype: bool
+ :returns: True if the table exists, else False.
+ """
+ table_client = self._instance._client.table_admin_client
+ try:
+ table_client.get_table(request={"name": self.name, "view": VIEW_NAME_ONLY})
+ return True
+ except NotFound:
+ return False
+
+ def delete(self):
+ """Delete this table.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_delete_table]
+ :end-before: [END bigtable_api_delete_table]
+ :dedent: 4
+ """
+ table_client = self._instance._client.table_admin_client
+ table_client.delete_table(request={"name": self.name})
+
+ def list_column_families(self):
+ """List the column families owned by this table.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_list_column_families]
+ :end-before: [END bigtable_api_list_column_families]
+ :dedent: 4
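+
+ A minimal sketch, assuming ``table`` is an existing :class:`Table`::
+
+     for family_id, family in table.list_column_families().items():
+         print(family_id, family.gc_rule)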
+
+ :rtype: dict
+ :returns: Dictionary of column families attached to this table. Keys
+ are strings (column family names) and values are
+ :class:`.ColumnFamily` instances.
+ :raises: :class:`ValueError ` if the column
+ family name from the response does not agree with the computed
+ name from the column family ID.
+ """
+ table_client = self._instance._client.table_admin_client
+ table_pb = table_client.get_table(request={"name": self.name})
+
+ result = {}
+ for column_family_id, value_pb in table_pb.column_families.items():
+ gc_rule = _gc_rule_from_pb(value_pb.gc_rule)
+ column_family = self.column_family(column_family_id, gc_rule=gc_rule)
+ result[column_family_id] = column_family
+ return result
+
+ def get_cluster_states(self):
+ """List the cluster states owned by this table.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_get_cluster_states]
+ :end-before: [END bigtable_api_get_cluster_states]
+ :dedent: 4
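+
+ A minimal sketch, assuming ``table`` is an existing :class:`Table`::
+
+     for cluster_id, state in table.get_cluster_states().items():
+         print(cluster_id, state)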
+
+ :rtype: dict
+ :returns: Dictionary of cluster states for this table. Keys are
+ cluster IDs and values are :class:`ClusterState` instances.
+ """
+
+ REPLICATION_VIEW = enums.Table.View.REPLICATION_VIEW
+ table_client = self._instance._client.table_admin_client
+ table_pb = table_client.get_table(
+ request={"name": self.name, "view": REPLICATION_VIEW}
+ )
+
+ return {
+ cluster_id: ClusterState(value_pb.replication_state)
+ for cluster_id, value_pb in table_pb.cluster_states.items()
+ }
+
+ def get_encryption_info(self):
+ """List the encryption info for each cluster owned by this table.
+
+ Gets the current encryption info for the table across all of the clusters. The
+ returned dict will be keyed by cluster id and contain a status for all of the
+ keys in use.
+
+ :rtype: dict
+ :returns: Dictionary of encryption info for this table. Keys are cluster ids and
+ values are tuples of :class:`google.cloud.bigtable.encryption_info.EncryptionInfo` instances.
+ """
+ ENCRYPTION_VIEW = enums.Table.View.ENCRYPTION_VIEW
+ table_client = self._instance._client.table_admin_client
+ table_pb = table_client.get_table(
+ request={"name": self.name, "view": ENCRYPTION_VIEW}
+ )
+
+ return {
+ cluster_id: tuple(
+ (
+ EncryptionInfo._from_pb(info_pb)
+ for info_pb in value_pb.encryption_info
+ )
+ )
+ for cluster_id, value_pb in table_pb.cluster_states.items()
+ }
+
+ def read_row(self, row_key, filter_=None, retry=DEFAULT_RETRY_READ_ROWS):
+ """Read a single row from this table.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_read_row]
+ :end-before: [END bigtable_api_read_row]
+ :dedent: 4
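+
+ A minimal sketch, assuming ``table`` is an existing :class:`Table` (the
+ row key and filter are illustrative)::
+
+     from google.cloud.bigtable.row_filters import CellsColumnLimitFilter
+
+     row = table.read_row(b"user#123", filter_=CellsColumnLimitFilter(1))
+     if row is not None:
+         print(row.cells)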
+
+ :type row_key: bytes
+ :param row_key: The key of the row to read from.
+
+ :type filter_: :class:`.RowFilter`
+ :param filter_: (Optional) The filter to apply to the contents of the
+ row. If unset, returns the entire row.
+
+ :type retry: :class:`~google.api_core.retry.Retry`
+ :param retry:
+ (Optional) Retry delay and deadline arguments. To override, the
+ default value :attr:`DEFAULT_RETRY_READ_ROWS` can be used and
+ modified with the :meth:`~google.api_core.retry.Retry.with_delay`
+ method or the :meth:`~google.api_core.retry.Retry.with_deadline`
+ method.
+
+ :rtype: :class:`.PartialRowData`, :data:`NoneType `
+ :returns: The contents of the row if any chunks were returned in
+ the response, otherwise :data:`None`.
+ :raises: :class:`ValueError ` if a commit row
+ chunk is never encountered.
+ """
+ row_set = RowSet()
+ row_set.add_row_key(row_key)
+ result_iter = iter(
+ self.read_rows(filter_=filter_, row_set=row_set, retry=retry)
+ )
+ row = next(result_iter, None)
+ if next(result_iter, None) is not None:
+ raise ValueError("More than one row was returned.")
+ return row
+
+ def read_rows(
+ self,
+ start_key=None,
+ end_key=None,
+ limit=None,
+ filter_=None,
+ end_inclusive=False,
+ row_set=None,
+ retry=DEFAULT_RETRY_READ_ROWS,
+ ):
+ """Read rows from this table.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_read_rows]
+ :end-before: [END bigtable_api_read_rows]
+ :dedent: 4
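+
+ A minimal sketch, assuming ``table`` is an existing :class:`Table` (the
+ key range is illustrative)::
+
+     rows = table.read_rows(start_key=b"user#", end_key=b"user$")
+     for row in rows:
+         print(row.row_key)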
+
+ :type start_key: bytes
+ :param start_key: (Optional) The beginning of a range of row keys to
+ read from. The range will include ``start_key``. If
+ left empty, will be interpreted as the empty string.
+
+ :type end_key: bytes
+ :param end_key: (Optional) The end of a range of row keys to read from.
+ The range will not include ``end_key``. If left empty,
+ the range is unbounded on the high end.
+
+ :type limit: int
+ :param limit: (Optional) The read will terminate after committing to N
+ rows' worth of results. The default (zero) is to return
+ all results.
+
+ :type filter_: :class:`.RowFilter`
+ :param filter_: (Optional) The filter to apply to the contents of the
+ specified row(s). If unset, reads every column in
+ each row.
+
+ :type end_inclusive: bool
+ :param end_inclusive: (Optional) Whether the ``end_key`` should be
+ considered inclusive. The default is False (exclusive).
+
+ :type row_set: :class:`.RowSet`
+ :param row_set: (Optional) The row set containing multiple row keys and
+ row_ranges.
+
+ :type retry: :class:`~google.api_core.retry.Retry`
+ :param retry:
+ (Optional) Retry delay and deadline arguments. To override, the
+ default value :attr:`DEFAULT_RETRY_READ_ROWS` can be used and
+ modified with the :meth:`~google.api_core.retry.Retry.with_delay`
+ method or the :meth:`~google.api_core.retry.Retry.with_deadline`
+ method.
+
+ :rtype: :class:`.PartialRowsData`
+ :returns: A :class:`.PartialRowsData` instance, a generator-like object
+ for consuming the streamed results.
+ """
+ request_pb = _create_row_request(
+ self.name,
+ start_key=start_key,
+ end_key=end_key,
+ filter_=filter_,
+ limit=limit,
+ end_inclusive=end_inclusive,
+ app_profile_id=self._app_profile_id,
+ row_set=row_set,
+ )
+ data_client = self._instance._client.table_data_client
+ return PartialRowsData(data_client.read_rows, request_pb, retry)
+
+ def yield_rows(self, **kwargs):
+ """Read rows from this table.
+
+ .. warning::
+ This method will be removed in future releases. Please use
+ ``read_rows`` instead.
+
+ :type start_key: bytes
+ :param start_key: (Optional) The beginning of a range of row keys to
+ read from. The range will include ``start_key``. If
+ left empty, will be interpreted as the empty string.
+
+ :type end_key: bytes
+ :param end_key: (Optional) The end of a range of row keys to read from.
+ The range will not include ``end_key``. If left empty,
+ the range is unbounded on the high end.
+
+ :type limit: int
+ :param limit: (Optional) The read will terminate after committing to N
+ rows' worth of results. The default (zero) is to return
+ all results.
+
+ :type filter_: :class:`.RowFilter`
+ :param filter_: (Optional) The filter to apply to the contents of the
+ specified row(s). If unset, reads every column in
+ each row.
+
+ :type row_set: :class:`.RowSet`
+ :param row_set: (Optional) The row set containing multiple row keys and
+ row_ranges.
+
+ :rtype: :class:`.PartialRowData`
+ :returns: A :class:`.PartialRowData` for each row returned
+ """
+ warnings.warn(
+ "`yield_rows()` is deprecated; use `read_rows()` instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return self.read_rows(**kwargs)
+
+ def mutate_rows(self, rows, retry=DEFAULT_RETRY, timeout=DEFAULT):
+ """Mutates multiple rows in bulk.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_mutate_rows]
+ :end-before: [END bigtable_api_mutate_rows]
+ :dedent: 4
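+
+ A minimal sketch, assuming ``table`` is an existing :class:`Table` (row
+ keys, families, and values are illustrative)::
+
+     row = table.direct_row(b"user#123")
+     row.set_cell("stats", b"visits", b"1")
+     statuses = table.mutate_rows([row])
+     failed = [status for status in statuses if status.code != 0]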
+
+ The method tries to update all specified rows. Mutations on rows that
+ fail to update are left in place, so they can be retried or applied to
+ those rows individually; mutations on rows that update successfully are
+ cleared.
+
+ Optionally, a ``retry`` strategy can be specified to re-attempt
+ mutations on rows that return transient errors. This method will retry
+ until all rows succeed or until the request deadline is reached. To
+ specify a ``retry`` strategy of "do-nothing", a deadline of ``0.0``
+ can be specified.
+
+ :type rows: list
+ :param rows: List or other iterable of :class:`.DirectRow` instances.
+
+ :type retry: :class:`~google.api_core.retry.Retry`
+ :param retry:
+ (Optional) Retry delay and deadline arguments. To override, the
+ default value :attr:`DEFAULT_RETRY` can be used and modified with
+ the :meth:`~google.api_core.retry.Retry.with_delay` method or the
+ :meth:`~google.api_core.retry.Retry.with_deadline` method.
+
+ :type timeout: float
+ :param timeout: number of seconds bounding retries for the call
+
+ :rtype: list
+ :returns: A list of response statuses (`google.rpc.status_pb2.Status`)
+ corresponding to success or failure of each row mutation
+ sent. These will be in the same order as the `rows`.
+ """
+ if timeout is DEFAULT:
+ timeout = self.mutation_timeout
+
+ retryable_mutate_rows = _RetryableMutateRowsWorker(
+ self._instance._client,
+ self.name,
+ rows,
+ app_profile_id=self._app_profile_id,
+ timeout=timeout,
+ )
+ return retryable_mutate_rows(retry=retry)
+
+ def sample_row_keys(self):
+ """Read a sample of row keys in the table.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_sample_row_keys]
+ :end-before: [END bigtable_api_sample_row_keys]
+ :dedent: 4
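+
+ A minimal sketch, assuming ``table`` is an existing :class:`Table`::
+
+     for sample in table.sample_row_keys():
+         print(sample.row_key, sample.offset_bytes)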
+
+ The returned row keys will delimit contiguous sections of the table of
+ approximately equal size, which can be used to break up the data for
+ distributed tasks like mapreduces.
+
+ The elements in the iterator are ``SampleRowKeys`` responses, and they have
+ the properties ``offset_bytes`` and ``row_key``. They occur in sorted
+ order. The table might have contents before the first row key in the
+ list and after the last one, but a key containing the empty string
+ indicates "end of table" and will be the last response given, if
+ present.
+
+ .. note::
+
+ Row keys in this list may not have ever been written to or read
+ from, and users should therefore not make any assumptions about the
+ row key structure that are specific to their use case.
+
+ The ``offset_bytes`` field on a response indicates the approximate
+ total storage space used by all rows in the table which precede
+ ``row_key``. Buffering the contents of all rows between two subsequent
+ samples would require space roughly equal to the difference in their
+ ``offset_bytes`` fields.
+
+ :rtype: :class:`~google.cloud.exceptions.GrpcRendezvous`
+ :returns: A cancel-able iterator. Can be consumed by calling ``next()``
+ or by casting to a :class:`list` and can be cancelled by
+ calling ``cancel()``.
+ """
+ data_client = self._instance._client.table_data_client
+ response_iterator = data_client.sample_row_keys(
+ request={"table_name": self.name, "app_profile_id": self._app_profile_id}
+ )
+
+ return response_iterator
+
+ def truncate(self, timeout=None):
+ """Truncate the table
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_truncate_table]
+ :end-before: [END bigtable_api_truncate_table]
+ :dedent: 4
+
+ :type timeout: float
+ :param timeout: (Optional) The amount of time, in seconds, to wait
+ for the request to complete.
+
+ :raise: google.api_core.exceptions.GoogleAPICallError: If the
+ request failed for any reason.
+ google.api_core.exceptions.RetryError: If the request failed
+ due to a retryable error and retry attempts failed.
+ ValueError: If the parameters are invalid.
+ """
+ client = self._instance._client
+ table_admin_client = client.table_admin_client
+ if timeout:
+ table_admin_client.drop_row_range(
+ request={"name": self.name, "delete_all_data_from_table": True},
+ timeout=timeout,
+ )
+ else:
+ table_admin_client.drop_row_range(
+ request={"name": self.name, "delete_all_data_from_table": True}
+ )
+
+ def drop_by_prefix(self, row_key_prefix, timeout=None):
+ """
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_drop_by_prefix]
+ :end-before: [END bigtable_api_drop_by_prefix]
+ :dedent: 4
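+
+ A minimal sketch (the prefix and timeout are illustrative)::
+
+     table.drop_by_prefix(b"user#", timeout=300)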
+
+ :type row_key_prefix: bytes
+ :param row_key_prefix: Delete all rows that start with this row key
+ prefix. Prefix cannot be zero length.
+
+ :type timeout: float
+ :param timeout: (Optional) The amount of time, in seconds, to wait
+ for the request to complete.
+
+ :raise: google.api_core.exceptions.GoogleAPICallError: If the
+ request failed for any reason.
+ google.api_core.exceptions.RetryError: If the request failed
+ due to a retryable error and retry attempts failed.
+ ValueError: If the parameters are invalid.
+ """
+ client = self._instance._client
+ table_admin_client = client.table_admin_client
+ if timeout:
+ table_admin_client.drop_row_range(
+ request={
+ "name": self.name,
+ "row_key_prefix": _to_bytes(row_key_prefix),
+ },
+ timeout=timeout,
+ )
+ else:
+ table_admin_client.drop_row_range(
+ request={"name": self.name, "row_key_prefix": _to_bytes(row_key_prefix)}
+ )
+
+ def mutations_batcher(
+ self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_MUTATION_SIZE
+ ):
+ """Factory to create a mutation batcher associated with this instance.
+
+ For example:
+
+ .. literalinclude:: snippets_table.py
+ :start-after: [START bigtable_api_mutations_batcher]
+ :end-before: [END bigtable_api_mutations_batcher]
+ :dedent: 4
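+
+ A minimal sketch, assuming ``table`` is an existing :class:`Table` (row
+ keys, families, and values are illustrative)::
+
+     batcher = table.mutations_batcher(flush_count=100)
+     row = table.direct_row(b"user#456")
+     row.set_cell("stats", b"visits", b"2")
+     batcher.mutate(row)
+     batcher.flush()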
+
+ :type flush_count: int
+ :param flush_count: (Optional) Maximum number of rows per batch. When
+ the batch reaches this many rows, it is flushed and the queued
+ mutations are sent. Default is FLUSH_COUNT (1000 rows).
+
+ :type max_row_bytes: int
+ :param max_row_bytes: (Optional) Maximum total size, in bytes, of the
+ queued row mutations. When the batch reaches this size, it is
+ flushed and the queued mutations are sent.
+ Default is MAX_MUTATION_SIZE (5 MB).
+ """
+ return MutationsBatcher(self, flush_count, max_row_bytes)
+
+ def backup(self, backup_id, cluster_id=None, expire_time=None):
+ """Factory to create a Backup linked to this Table.
+
+ :type backup_id: str
+ :param backup_id: The ID of the Backup to be created.
+
+ :type cluster_id: str
+ :param cluster_id: (Optional) The ID of the Cluster. Required when
+ calling methods such as ``delete`` or ``exists`` on the
+ returned Backup.
+
+ :type expire_time: :class:`datetime.datetime`
+ :param expire_time: (Optional) The expiration time of this new Backup.
+ Required if the ``create`` method will be called.
+ :rtype: :class:`.Backup`
+ :returns: A backup linked to this table.
+ """
+ return Backup(
+ backup_id,
+ self._instance,
+ cluster_id=cluster_id,
+ table_id=self.table_id,
+ expire_time=expire_time,
+ )
+
+ def list_backups(self, cluster_id=None, filter_=None, order_by=None, page_size=0):
+ """List Backups for this Table.
+
+ :type cluster_id: str
+ :param cluster_id: (Optional) Specifies a single cluster to list
+ Backups from. If none is specified, the returned list
+ contains all the Backups in this Instance.
+
+ :type filter_: str
+ :param filter_: (Optional) A filter expression that filters backups
+ listed in the response. The expression must specify
+ the field name, a comparison operator, and the value
+ that you want to use for filtering. The value must be
+ a string, a number, or a boolean. The comparison
+ operator must be <, >, <=, >=, !=, =, or :. Colon ':'
+ represents a HAS operator which is roughly synonymous
+ with equality. Filter rules are case insensitive.
+
+ The fields eligible for filtering are:
+
+ - ``name``
+ - ``source_table``
+ - ``state``
+ - ``start_time`` (values of the format YYYY-MM-DDTHH:MM:SSZ)
+ - ``end_time`` (values of the format YYYY-MM-DDTHH:MM:SSZ)
+ - ``expire_time`` (values of the format YYYY-MM-DDTHH:MM:SSZ)
+ - ``size_bytes``
+
+ To filter on multiple expressions, provide each
+ separate expression within parentheses. By default,
+ each expression is an AND expression. However, you can
+ include AND, OR, and NOT expressions explicitly.
+
+ Some examples of using filters are:
+
+ - ``name:"exact"`` --> The Backup name is the string "exact".
+ - ``name:howl`` --> The Backup name contains the string "howl"
+ - ``source_table:prod`` --> The source table's name contains
+ the string "prod".
+ - ``state:CREATING`` --> The Backup is pending creation.
+ - ``state:READY`` --> The Backup is created and ready for use.
+ - ``(name:howl) AND (start_time < \"2020-05-28T14:50:00Z\")``
+ --> The Backup name contains the string "howl" and
+ the Backup start time is before 2020-05-28T14:50:00Z.
+ - ``size_bytes > 10000000000`` --> The Backup size is greater
+ than 10GB
+
+ :type order_by: str
+ :param order_by: (Optional) An expression for specifying the sort order
+ of the results of the request. The string value should
+ specify one or more fields in ``Backup``. The full
+ syntax is described at https://aip.dev/132#ordering.
+
+ Fields supported are: \\* name \\* source_table \\*
+ expire_time \\* start_time \\* end_time \\*
+ size_bytes \\* state
+
+ For example, "start_time". The default sorting order
+ is ascending. To specify descending order for the
+ field, a suffix " desc" should be appended to the
+ field name. For example, "start_time desc". Redundant
+ space characters in the syntax are insignificant. If
+ order_by is empty, results will be sorted by
+ ``start_time`` in descending order starting from
+ the most recently created backup.
+
+ :type page_size: int
+ :param page_size: (Optional) The maximum number of resources contained
+ in the underlying API response. If page streaming is
+ performed per-resource, this parameter does not
+ affect the return value. If page streaming is
+ performed per-page, this determines the maximum
+ number of resources in a page.
+
+ :rtype: :class:`~google.api_core.page_iterator.Iterator`
+ :returns: Iterator of :class:`~google.cloud.bigtable.backup.Backup`
+ resources within the current Instance.
+ :raises: :class:`ValueError ` if one of the
+ returned Backups' name is not of the expected format.
+ """
+ cluster_id = cluster_id or "-"
+
+ backups_filter = "source_table:{}".format(self.name)
+ if filter_:
+ backups_filter = "({}) AND ({})".format(backups_filter, filter_)
+
+ parent = BaseBigtableTableAdminClient.cluster_path(
+ project=self._instance._client.project,
+ instance=self._instance.instance_id,
+ cluster=cluster_id,
+ )
+ client = self._instance._client.table_admin_client
+ backup_list_pb = client.list_backups(
+ request={
+ "parent": parent,
+ "filter": backups_filter,
+ "order_by": order_by,
+ "page_size": page_size,
+ }
+ )
+
+ result = []
+ for backup_pb in backup_list_pb.backups:
+ result.append(Backup.from_pb(backup_pb, self._instance))
+
+ return result
+
+ def restore(self, new_table_id, cluster_id=None, backup_id=None, backup_name=None):
+ """Creates a new Table by restoring from the Backup specified by either
+ `backup_id` or `backup_name`. The returned ``long-running operation``
+ can be used to track the progress of the operation and to cancel it.
+ The ``response`` type is ``Table``, if successful.
+
+ :type new_table_id: str
+ :param new_table_id: The ID of the Table to create and restore to.
+ This Table must not already exist.
+
+ :type cluster_id: str
+ :param cluster_id: The ID of the Cluster containing the Backup.
+ This parameter is overridden by `backup_name`, if
+ the latter is provided.
+
+ :type backup_id: str
+ :param backup_id: The ID of the Backup to restore the Table from.
+ This parameter is overridden by `backup_name`, if
+ the latter is provided.
+
+ :type backup_name: str
+ :param backup_name: (Optional) The full name of the Backup to restore
+ from. If specified, it overrides the `cluster_id`
+ and `backup_id` parameters even if they are specified.
+
+ :return: An instance of
+ :class:`~google.api_core.operation.Operation`.
+
+ :raises: google.api_core.exceptions.AlreadyExists: If the table
+ already exists.
+ :raises: google.api_core.exceptions.GoogleAPICallError: If the request
+ failed for any reason.
+ :raises: google.api_core.exceptions.RetryError: If the request failed
+ due to a retryable error and retry attempts failed.
+ :raises: ValueError: If the parameters are invalid.
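+
+ A minimal sketch (the table, cluster, and backup IDs are illustrative)::
+
+     operation = table.restore(
+         "restored-table", cluster_id="my-cluster", backup_id="my-backup"
+     )
+     restored_table_pb = operation.result(timeout=600)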
+ """
+ api = self._instance._client.table_admin_client
+ if not backup_name:
+ backup_name = BaseBigtableTableAdminClient.backup_path(
+ project=self._instance._client.project,
+ instance=self._instance.instance_id,
+ cluster=cluster_id,
+ backup=backup_id,
+ )
+ return api._restore_table(
+ request={
+ "parent": self._instance.name,
+ "table_id": new_table_id,
+ "backup": backup_name,
+ }
+ )
+
+
+class _RetryableMutateRowsWorker(object):
+ """A callable worker that can retry to mutate rows with transient errors.
+
+ This class is a callable that can retry mutating rows that result in
+ transient errors. After all rows are successful or none of the rows
+ are retryable, any subsequent call on this callable will be a no-op.
+ """
+
+ def __init__(self, client, table_name, rows, app_profile_id=None, timeout=None):
+ self.client = client
+ self.table_name = table_name
+ self.rows = rows
+ self.app_profile_id = app_profile_id
+ self.responses_statuses = [None] * len(self.rows)
+ self.timeout = timeout
+
+ def __call__(self, retry=DEFAULT_RETRY):
+ """Attempt to mutate all rows and retry rows with transient errors.
+
+ Will retry the rows with transient errors until all rows succeed or
+ ``deadline`` specified in the `retry` is reached.
+
+ :rtype: list
+ :returns: A list of response statuses (`google.rpc.status_pb2.Status`)
+ corresponding to success or failure of each row mutation
+ sent. These will be in the same order as the ``rows``.
+ """
+ mutate_rows = self._do_mutate_retryable_rows
+ if retry:
+ mutate_rows = retry(self._do_mutate_retryable_rows)
+
+ try:
+ mutate_rows()
+ except (_BigtableRetryableError, RetryError):
+ # - _BigtableRetryableError raised when no retry strategy is used
+ # and a retryable error on a mutation occurred.
+ # - RetryError raised when retry deadline is reached.
+ # In both cases, just return current `responses_statuses`.
+ pass
+
+ return self.responses_statuses
+
+ @staticmethod
+ def _is_retryable(status):
+ return status is None or status.code in RETRYABLE_CODES
+
+ def _do_mutate_retryable_rows(self):
+ """Mutate all the rows that are eligible for retry.
+
+ A row is eligible for retry if it has not been tried or if it resulted
+ in a transient error in a previous call.
+
+ :rtype: list
+ :return: The responses statuses, which is a list of
+ :class:`~google.rpc.status_pb2.Status`.
+ :raises: One of the following:
+
+ * :exc:`~.table._BigtableRetryableError` if any
+ row returned a transient error.
+ * :exc:`RuntimeError` if the number of responses doesn't
+ match the number of rows that were retried
+ """
+ retryable_rows = []
+ index_into_all_rows = []
+ for index, status in enumerate(self.responses_statuses):
+ if self._is_retryable(status):
+ retryable_rows.append(self.rows[index])
+ index_into_all_rows.append(index)
+
+ if not retryable_rows:
+ # All mutations are either successful or non-retryable now.
+ return self.responses_statuses
+
+ entries = _compile_mutation_entries(self.table_name, retryable_rows)
+ data_client = self.client.table_data_client
+
+ kwargs = {}
+ if self.timeout is not None:
+ kwargs["timeout"] = timeout.ExponentialTimeout(deadline=self.timeout)
+
+ try:
+ responses = data_client.mutate_rows(
+ table_name=self.table_name,
+ entries=entries,
+ app_profile_id=self.app_profile_id,
+ retry=None,
+ **kwargs
+ )
+ except RETRYABLE_MUTATION_ERRORS as exc:
+ # If the call raised an exception listed in RETRYABLE_MUTATION_ERRORS,
+ # treat it as retryable and wrap it as a _BigtableRetryableError.
+ # An InternalServerError is only retryable when its message indicates
+ # an RST stream error.
+ if _retriable_internal_server_error(exc) or not isinstance(
+ exc, InternalServerError
+ ):
+ raise _BigtableRetryableError
+ else:
+ # re-raise the original exception
+ raise
+
+ num_responses = 0
+ num_retryable_responses = 0
+ for response in responses:
+ for entry in response.entries:
+ num_responses += 1
+ index = index_into_all_rows[entry.index]
+ self.responses_statuses[index] = entry.status
+ if self._is_retryable(entry.status):
+ num_retryable_responses += 1
+ if entry.status.code == 0:
+ self.rows[index].clear()
+
+ if len(retryable_rows) != num_responses:
+ raise RuntimeError(
+ "Unexpected number of responses",
+ num_responses,
+ "Expected",
+ len(retryable_rows),
+ )
+
+ if num_retryable_responses:
+ raise _BigtableRetryableError
+
+ return self.responses_statuses
+
+
+class ClusterState(object):
+ """Representation of a Cluster State.
+
+ :type replication_state: int
+ :param replication_state: Enum value for the cluster's replication state.
+ Possible values are:
+
+ 0 (STATE_NOT_KNOWN): The replication state of the table is unknown
+ in this cluster.
+ 1 (INITIALIZING): The cluster was recently created, and the table
+ must finish copying over pre-existing data from other clusters
+ before it can begin receiving live replication updates and
+ serving ``Data API`` requests.
+ 2 (PLANNED_MAINTENANCE): The table is temporarily unable to serve
+ ``Data API`` requests from this cluster due to planned internal
+ maintenance.
+ 3 (UNPLANNED_MAINTENANCE): The table is temporarily unable to serve
+ ``Data API`` requests from this cluster due to unplanned or
+ emergency maintenance.
+ 4 (READY): The table can serve ``Data API`` requests from this
+ cluster. Depending on replication delay, reads may not
+ immediately reflect the state of the table in other clusters.
+ """
+
+ def __init__(self, replication_state):
+ self.replication_state = replication_state
+
+ def __repr__(self):
+ """Representation of cluster state instance as string value
+ for cluster state.
+
+ :rtype: ClusterState instance
+ :returns: ClusterState instance as representation of string
+ value for cluster state.
+ """
+ replication_dict = {
+ enums.Table.ReplicationState.STATE_NOT_KNOWN: "STATE_NOT_KNOWN",
+ enums.Table.ReplicationState.INITIALIZING: "INITIALIZING",
+ enums.Table.ReplicationState.PLANNED_MAINTENANCE: "PLANNED_MAINTENANCE",
+ enums.Table.ReplicationState.UNPLANNED_MAINTENANCE: "UNPLANNED_MAINTENANCE",
+ enums.Table.ReplicationState.READY: "READY",
+ }
+ return replication_dict[self.replication_state]
+
+ def __eq__(self, other):
+ """Checks if two ClusterState instances(self and other) are
+ equal on the basis of instance variable 'replication_state'.
+
+ :type other: ClusterState
+ :param other: ClusterState instance to compare with.
+
+ :rtype: Boolean value
+ :returns: True if two cluster state instances have same
+ replication_state.
+ """
+ if not isinstance(other, self.__class__):
+ return False
+ return self.replication_state == other.replication_state
+
+ def __ne__(self, other):
+ """Checks if two ClusterState instances(self and other) are
+ not equal.
+
+ :type other: ClusterState.
+ :param other: ClusterState instance to compare with.
+
+ :rtype: Boolean value.
+ :returns: True if two cluster state instances are not equal.
+ """
+ return not self == other
+
+
+def _create_row_request(
+ table_name,
+ start_key=None,
+ end_key=None,
+ filter_=None,
+ limit=None,
+ end_inclusive=False,
+ app_profile_id=None,
+ row_set=None,
+):
+ """Creates a request to read rows in a table.
+
+ :type table_name: str
+ :param table_name: The name of the table to read from.
+
+ :type start_key: bytes
+ :param start_key: (Optional) The beginning of a range of row keys to
+ read from. The range will include ``start_key``. If
+ left empty, will be interpreted as the empty string.
+
+ :type end_key: bytes
+ :param end_key: (Optional) The end of a range of row keys to read from.
+ The range will not include ``end_key``. If left empty,
+ the range is unbounded on the high end.
+
+ :type filter_: :class:`.RowFilter`
+ :param filter_: (Optional) The filter to apply to the contents of the
+ specified row(s). If unset, reads the entire table.
+
+ :type limit: int
+ :param limit: (Optional) The read will terminate after committing to N
+ rows' worth of results. The default (zero) is to return
+ all results.
+
+ :type end_inclusive: bool
+ :param end_inclusive: (Optional) Whether the ``end_key`` should be
+ considered inclusive. The default is False (exclusive).
+
+ :type app_profile_id: str
+ :param app_profile_id: (Optional) The unique name of the AppProfile.
+
+ :type row_set: :class:`.RowSet`
+ :param row_set: (Optional) The row set containing multiple row keys and
+ row_ranges.
+
+ :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest`
+ :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs.
+ :raises: :class:`ValueError ` if both
+ ``row_set`` and one of ``start_key`` or ``end_key`` are set
+ """
+ request_kwargs = {"table_name": table_name}
+ if (start_key is not None or end_key is not None) and row_set is not None:
+ raise ValueError("Row range and row set cannot be " "set simultaneously")
+
+ if filter_ is not None:
+ request_kwargs["filter"] = filter_.to_pb()
+ if limit is not None:
+ request_kwargs["rows_limit"] = limit
+ if app_profile_id is not None:
+ request_kwargs["app_profile_id"] = app_profile_id
+
+ message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs)
+
+ if start_key is not None or end_key is not None:
+ row_set = RowSet()
+ row_set.add_row_range(RowRange(start_key, end_key, end_inclusive=end_inclusive))
+
+ if row_set is not None:
+ row_set._update_message_request(message)
+
+ return message
+
+
+def _compile_mutation_entries(table_name, rows):
+ """Create list of mutation entries
+
+ :type table_name: str
+ :param table_name: The name of the table to write to.
+
+ :type rows: list
+ :param rows: List or other iterable of :class:`.DirectRow` instances.
+
+ :rtype: List[:class:`data_messages_v2_pb2.MutateRowsRequest.Entry`]
+ :returns: entries corresponding to the inputs.
+ :raises: :exc:`~.table.TooManyMutationsError` if the number of mutations is
+ greater than the max ({})
+ """.format(
+ _MAX_BULK_MUTATIONS
+ )
+ entries = []
+ mutations_count = 0
+ entry_klass = data_messages_v2_pb2.MutateRowsRequest.Entry
+
+ for row in rows:
+ _check_row_table_name(table_name, row)
+ _check_row_type(row)
+ mutations = row._get_mutations()
+ entries.append(entry_klass(row_key=row.row_key, mutations=mutations))
+ mutations_count += len(mutations)
+
+ if mutations_count > _MAX_BULK_MUTATIONS:
+ raise TooManyMutationsError(
+ "Maximum number of mutations is %s" % (_MAX_BULK_MUTATIONS,)
+ )
+ return entries
+
+
+def _check_row_table_name(table_name, row):
+ """Checks that a row belongs to a table.
+
+ :type table_name: str
+ :param table_name: The name of the table.
+
+ :type row: :class:`~google.cloud.bigtable.row.Row`
+ :param row: An instance of :class:`~google.cloud.bigtable.row.Row`
+ subclasses.
+
+ :raises: :exc:`~.table.TableMismatchError` if the row does not belong to
+ the table.
+ """
+ if row.table is not None and row.table.name != table_name:
+ raise TableMismatchError(
+ "Row %s is a part of %s table. Current table: %s"
+ % (row.row_key, row.table.name, table_name)
+ )
+
+
+def _check_row_type(row):
+ """Checks that a row is an instance of :class:`.DirectRow`.
+
+ :type row: :class:`~google.cloud.bigtable.row.Row`
+ :param row: An instance of :class:`~google.cloud.bigtable.row.Row`
+ subclasses.
+
+ :raises: :class:`TypeError ` if the row is not an
+ instance of DirectRow.
+ """
+ if not isinstance(row, DirectRow):
+ raise TypeError(
+ "Bulk processing can not be applied for " "conditional or append mutations."
+ )
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py
new file mode 100644
index 000000000000..00353ea96958
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py
@@ -0,0 +1,451 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from google.cloud.bigtable_admin import gapic_version as package_version
+
+__version__ = package_version.__version__
+
+
+from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.client import (
+ BigtableInstanceAdminClient,
+)
+from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.async_client import (
+ BigtableInstanceAdminAsyncClient,
+)
+from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.client import (
+ BaseBigtableTableAdminClient,
+)
+from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.async_client import (
+ BaseBigtableTableAdminAsyncClient,
+)
+
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ CreateAppProfileRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ CreateClusterMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ CreateClusterRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ CreateInstanceMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ CreateInstanceRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ CreateLogicalViewMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ CreateLogicalViewRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ CreateMaterializedViewMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ CreateMaterializedViewRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ DeleteAppProfileRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ DeleteClusterRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ DeleteInstanceRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ DeleteLogicalViewRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ DeleteMaterializedViewRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ GetAppProfileRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ GetClusterRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ GetInstanceRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ GetLogicalViewRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ GetMaterializedViewRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ ListAppProfilesRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ ListAppProfilesResponse,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ ListClustersRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ ListClustersResponse,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ ListHotTabletsRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ ListHotTabletsResponse,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ ListInstancesRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ ListInstancesResponse,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ ListLogicalViewsRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ ListLogicalViewsResponse,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ ListMaterializedViewsRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ ListMaterializedViewsResponse,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ PartialUpdateClusterMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ PartialUpdateClusterRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ PartialUpdateInstanceRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ UpdateAppProfileMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ UpdateAppProfileRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ UpdateClusterMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ UpdateInstanceMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ UpdateLogicalViewMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ UpdateLogicalViewRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ UpdateMaterializedViewMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
+ UpdateMaterializedViewRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ CheckConsistencyRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ CheckConsistencyResponse,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupMetadata
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ CreateAuthorizedViewMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ CreateAuthorizedViewRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ CreateBackupMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ CreateBackupRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ CreateSchemaBundleMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ CreateSchemaBundleRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ CreateTableFromSnapshotMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ CreateTableFromSnapshotRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CreateTableRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ DataBoostReadLocalWrites,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ DeleteAuthorizedViewRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ DeleteBackupRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ DeleteSchemaBundleRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ DeleteSnapshotRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import DeleteTableRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ DropRowRangeRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ GenerateConsistencyTokenRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ GenerateConsistencyTokenResponse,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ GetAuthorizedViewRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetBackupRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ GetSchemaBundleRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetSnapshotRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetTableRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ ListAuthorizedViewsRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ ListAuthorizedViewsResponse,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListBackupsRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ ListBackupsResponse,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ ListSchemaBundlesRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ ListSchemaBundlesResponse,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ ListSnapshotsRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ ListSnapshotsResponse,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListTablesRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListTablesResponse
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ ModifyColumnFamiliesRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ OptimizeRestoredTableMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ RestoreTableMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ RestoreTableRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ SnapshotTableMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ SnapshotTableRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ StandardReadRemoteWrites,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ UndeleteTableMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ UndeleteTableRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ UpdateAuthorizedViewMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ UpdateAuthorizedViewRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ UpdateBackupRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ UpdateSchemaBundleMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ UpdateSchemaBundleRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ UpdateTableMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import UpdateTableRequest
+from google.cloud.bigtable_admin_v2.types.common import OperationProgress
+from google.cloud.bigtable_admin_v2.types.common import StorageType
+from google.cloud.bigtable_admin_v2.types.instance import AppProfile
+from google.cloud.bigtable_admin_v2.types.instance import AutoscalingLimits
+from google.cloud.bigtable_admin_v2.types.instance import AutoscalingTargets
+from google.cloud.bigtable_admin_v2.types.instance import Cluster
+from google.cloud.bigtable_admin_v2.types.instance import HotTablet
+from google.cloud.bigtable_admin_v2.types.instance import Instance
+from google.cloud.bigtable_admin_v2.types.instance import LogicalView
+from google.cloud.bigtable_admin_v2.types.instance import MaterializedView
+from google.cloud.bigtable_admin_v2.types.table import AuthorizedView
+from google.cloud.bigtable_admin_v2.types.table import Backup
+from google.cloud.bigtable_admin_v2.types.table import BackupInfo
+from google.cloud.bigtable_admin_v2.types.table import ChangeStreamConfig
+from google.cloud.bigtable_admin_v2.types.table import ColumnFamily
+from google.cloud.bigtable_admin_v2.types.table import EncryptionInfo
+from google.cloud.bigtable_admin_v2.types.table import GcRule
+from google.cloud.bigtable_admin_v2.types.table import ProtoSchema
+from google.cloud.bigtable_admin_v2.types.table import RestoreInfo
+from google.cloud.bigtable_admin_v2.types.table import SchemaBundle
+from google.cloud.bigtable_admin_v2.types.table import Snapshot
+from google.cloud.bigtable_admin_v2.types.table import Table
+from google.cloud.bigtable_admin_v2.types.table import RestoreSourceType
+from google.cloud.bigtable_admin_v2.types.types import Type
+
+__all__ = (
+ "BigtableInstanceAdminClient",
+ "BigtableInstanceAdminAsyncClient",
+ "BaseBigtableTableAdminClient",
+ "BaseBigtableTableAdminAsyncClient",
+ "CreateAppProfileRequest",
+ "CreateClusterMetadata",
+ "CreateClusterRequest",
+ "CreateInstanceMetadata",
+ "CreateInstanceRequest",
+ "CreateLogicalViewMetadata",
+ "CreateLogicalViewRequest",
+ "CreateMaterializedViewMetadata",
+ "CreateMaterializedViewRequest",
+ "DeleteAppProfileRequest",
+ "DeleteClusterRequest",
+ "DeleteInstanceRequest",
+ "DeleteLogicalViewRequest",
+ "DeleteMaterializedViewRequest",
+ "GetAppProfileRequest",
+ "GetClusterRequest",
+ "GetInstanceRequest",
+ "GetLogicalViewRequest",
+ "GetMaterializedViewRequest",
+ "ListAppProfilesRequest",
+ "ListAppProfilesResponse",
+ "ListClustersRequest",
+ "ListClustersResponse",
+ "ListHotTabletsRequest",
+ "ListHotTabletsResponse",
+ "ListInstancesRequest",
+ "ListInstancesResponse",
+ "ListLogicalViewsRequest",
+ "ListLogicalViewsResponse",
+ "ListMaterializedViewsRequest",
+ "ListMaterializedViewsResponse",
+ "PartialUpdateClusterMetadata",
+ "PartialUpdateClusterRequest",
+ "PartialUpdateInstanceRequest",
+ "UpdateAppProfileMetadata",
+ "UpdateAppProfileRequest",
+ "UpdateClusterMetadata",
+ "UpdateInstanceMetadata",
+ "UpdateLogicalViewMetadata",
+ "UpdateLogicalViewRequest",
+ "UpdateMaterializedViewMetadata",
+ "UpdateMaterializedViewRequest",
+ "CheckConsistencyRequest",
+ "CheckConsistencyResponse",
+ "CopyBackupMetadata",
+ "CopyBackupRequest",
+ "CreateAuthorizedViewMetadata",
+ "CreateAuthorizedViewRequest",
+ "CreateBackupMetadata",
+ "CreateBackupRequest",
+ "CreateSchemaBundleMetadata",
+ "CreateSchemaBundleRequest",
+ "CreateTableFromSnapshotMetadata",
+ "CreateTableFromSnapshotRequest",
+ "CreateTableRequest",
+ "DataBoostReadLocalWrites",
+ "DeleteAuthorizedViewRequest",
+ "DeleteBackupRequest",
+ "DeleteSchemaBundleRequest",
+ "DeleteSnapshotRequest",
+ "DeleteTableRequest",
+ "DropRowRangeRequest",
+ "GenerateConsistencyTokenRequest",
+ "GenerateConsistencyTokenResponse",
+ "GetAuthorizedViewRequest",
+ "GetBackupRequest",
+ "GetSchemaBundleRequest",
+ "GetSnapshotRequest",
+ "GetTableRequest",
+ "ListAuthorizedViewsRequest",
+ "ListAuthorizedViewsResponse",
+ "ListBackupsRequest",
+ "ListBackupsResponse",
+ "ListSchemaBundlesRequest",
+ "ListSchemaBundlesResponse",
+ "ListSnapshotsRequest",
+ "ListSnapshotsResponse",
+ "ListTablesRequest",
+ "ListTablesResponse",
+ "ModifyColumnFamiliesRequest",
+ "OptimizeRestoredTableMetadata",
+ "RestoreTableMetadata",
+ "RestoreTableRequest",
+ "SnapshotTableMetadata",
+ "SnapshotTableRequest",
+ "StandardReadRemoteWrites",
+ "UndeleteTableMetadata",
+ "UndeleteTableRequest",
+ "UpdateAuthorizedViewMetadata",
+ "UpdateAuthorizedViewRequest",
+ "UpdateBackupRequest",
+ "UpdateSchemaBundleMetadata",
+ "UpdateSchemaBundleRequest",
+ "UpdateTableMetadata",
+ "UpdateTableRequest",
+ "OperationProgress",
+ "StorageType",
+ "AppProfile",
+ "AutoscalingLimits",
+ "AutoscalingTargets",
+ "Cluster",
+ "HotTablet",
+ "Instance",
+ "LogicalView",
+ "MaterializedView",
+ "AuthorizedView",
+ "Backup",
+ "BackupInfo",
+ "ChangeStreamConfig",
+ "ColumnFamily",
+ "EncryptionInfo",
+ "GcRule",
+ "ProtoSchema",
+ "RestoreInfo",
+ "SchemaBundle",
+ "Snapshot",
+ "Table",
+ "RestoreSourceType",
+ "Type",
+)
+
+import google.cloud.bigtable_admin_v2.overlay # noqa: F401
+from google.cloud.bigtable_admin_v2.overlay import * # noqa: F401, F403
+
+__all__ += google.cloud.bigtable_admin_v2.overlay.__all__
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py
new file mode 100644
index 000000000000..b31b170e1e8f
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+__version__ = "2.34.0" # {x-release-please-version}
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/py.typed b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/py.typed
new file mode 100644
index 000000000000..bc26f20697c2
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561.
+# The google-cloud-bigtable-admin package uses inline types.
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py
new file mode 100644
index 000000000000..713b2408f2a6
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py
@@ -0,0 +1,274 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from google.cloud.bigtable_admin_v2 import gapic_version as package_version
+
+__version__ = package_version.__version__
+
+
+from .services.bigtable_instance_admin import BigtableInstanceAdminClient
+from .services.bigtable_instance_admin import BigtableInstanceAdminAsyncClient
+from .services.bigtable_table_admin import BaseBigtableTableAdminClient
+from .services.bigtable_table_admin import BaseBigtableTableAdminAsyncClient
+
+from .types.bigtable_instance_admin import CreateAppProfileRequest
+from .types.bigtable_instance_admin import CreateClusterMetadata
+from .types.bigtable_instance_admin import CreateClusterRequest
+from .types.bigtable_instance_admin import CreateInstanceMetadata
+from .types.bigtable_instance_admin import CreateInstanceRequest
+from .types.bigtable_instance_admin import CreateLogicalViewMetadata
+from .types.bigtable_instance_admin import CreateLogicalViewRequest
+from .types.bigtable_instance_admin import CreateMaterializedViewMetadata
+from .types.bigtable_instance_admin import CreateMaterializedViewRequest
+from .types.bigtable_instance_admin import DeleteAppProfileRequest
+from .types.bigtable_instance_admin import DeleteClusterRequest
+from .types.bigtable_instance_admin import DeleteInstanceRequest
+from .types.bigtable_instance_admin import DeleteLogicalViewRequest
+from .types.bigtable_instance_admin import DeleteMaterializedViewRequest
+from .types.bigtable_instance_admin import GetAppProfileRequest
+from .types.bigtable_instance_admin import GetClusterRequest
+from .types.bigtable_instance_admin import GetInstanceRequest
+from .types.bigtable_instance_admin import GetLogicalViewRequest
+from .types.bigtable_instance_admin import GetMaterializedViewRequest
+from .types.bigtable_instance_admin import ListAppProfilesRequest
+from .types.bigtable_instance_admin import ListAppProfilesResponse
+from .types.bigtable_instance_admin import ListClustersRequest
+from .types.bigtable_instance_admin import ListClustersResponse
+from .types.bigtable_instance_admin import ListHotTabletsRequest
+from .types.bigtable_instance_admin import ListHotTabletsResponse
+from .types.bigtable_instance_admin import ListInstancesRequest
+from .types.bigtable_instance_admin import ListInstancesResponse
+from .types.bigtable_instance_admin import ListLogicalViewsRequest
+from .types.bigtable_instance_admin import ListLogicalViewsResponse
+from .types.bigtable_instance_admin import ListMaterializedViewsRequest
+from .types.bigtable_instance_admin import ListMaterializedViewsResponse
+from .types.bigtable_instance_admin import PartialUpdateClusterMetadata
+from .types.bigtable_instance_admin import PartialUpdateClusterRequest
+from .types.bigtable_instance_admin import PartialUpdateInstanceRequest
+from .types.bigtable_instance_admin import UpdateAppProfileMetadata
+from .types.bigtable_instance_admin import UpdateAppProfileRequest
+from .types.bigtable_instance_admin import UpdateClusterMetadata
+from .types.bigtable_instance_admin import UpdateInstanceMetadata
+from .types.bigtable_instance_admin import UpdateLogicalViewMetadata
+from .types.bigtable_instance_admin import UpdateLogicalViewRequest
+from .types.bigtable_instance_admin import UpdateMaterializedViewMetadata
+from .types.bigtable_instance_admin import UpdateMaterializedViewRequest
+from .types.bigtable_table_admin import CheckConsistencyRequest
+from .types.bigtable_table_admin import CheckConsistencyResponse
+from .types.bigtable_table_admin import CopyBackupMetadata
+from .types.bigtable_table_admin import CopyBackupRequest
+from .types.bigtable_table_admin import CreateAuthorizedViewMetadata
+from .types.bigtable_table_admin import CreateAuthorizedViewRequest
+from .types.bigtable_table_admin import CreateBackupMetadata
+from .types.bigtable_table_admin import CreateBackupRequest
+from .types.bigtable_table_admin import CreateSchemaBundleMetadata
+from .types.bigtable_table_admin import CreateSchemaBundleRequest
+from .types.bigtable_table_admin import CreateTableFromSnapshotMetadata
+from .types.bigtable_table_admin import CreateTableFromSnapshotRequest
+from .types.bigtable_table_admin import CreateTableRequest
+from .types.bigtable_table_admin import DataBoostReadLocalWrites
+from .types.bigtable_table_admin import DeleteAuthorizedViewRequest
+from .types.bigtable_table_admin import DeleteBackupRequest
+from .types.bigtable_table_admin import DeleteSchemaBundleRequest
+from .types.bigtable_table_admin import DeleteSnapshotRequest
+from .types.bigtable_table_admin import DeleteTableRequest
+from .types.bigtable_table_admin import DropRowRangeRequest
+from .types.bigtable_table_admin import GenerateConsistencyTokenRequest
+from .types.bigtable_table_admin import GenerateConsistencyTokenResponse
+from .types.bigtable_table_admin import GetAuthorizedViewRequest
+from .types.bigtable_table_admin import GetBackupRequest
+from .types.bigtable_table_admin import GetSchemaBundleRequest
+from .types.bigtable_table_admin import GetSnapshotRequest
+from .types.bigtable_table_admin import GetTableRequest
+from .types.bigtable_table_admin import ListAuthorizedViewsRequest
+from .types.bigtable_table_admin import ListAuthorizedViewsResponse
+from .types.bigtable_table_admin import ListBackupsRequest
+from .types.bigtable_table_admin import ListBackupsResponse
+from .types.bigtable_table_admin import ListSchemaBundlesRequest
+from .types.bigtable_table_admin import ListSchemaBundlesResponse
+from .types.bigtable_table_admin import ListSnapshotsRequest
+from .types.bigtable_table_admin import ListSnapshotsResponse
+from .types.bigtable_table_admin import ListTablesRequest
+from .types.bigtable_table_admin import ListTablesResponse
+from .types.bigtable_table_admin import ModifyColumnFamiliesRequest
+from .types.bigtable_table_admin import OptimizeRestoredTableMetadata
+from .types.bigtable_table_admin import RestoreTableMetadata
+from .types.bigtable_table_admin import RestoreTableRequest
+from .types.bigtable_table_admin import SnapshotTableMetadata
+from .types.bigtable_table_admin import SnapshotTableRequest
+from .types.bigtable_table_admin import StandardReadRemoteWrites
+from .types.bigtable_table_admin import UndeleteTableMetadata
+from .types.bigtable_table_admin import UndeleteTableRequest
+from .types.bigtable_table_admin import UpdateAuthorizedViewMetadata
+from .types.bigtable_table_admin import UpdateAuthorizedViewRequest
+from .types.bigtable_table_admin import UpdateBackupRequest
+from .types.bigtable_table_admin import UpdateSchemaBundleMetadata
+from .types.bigtable_table_admin import UpdateSchemaBundleRequest
+from .types.bigtable_table_admin import UpdateTableMetadata
+from .types.bigtable_table_admin import UpdateTableRequest
+from .types.common import OperationProgress
+from .types.common import StorageType
+from .types.instance import AppProfile
+from .types.instance import AutoscalingLimits
+from .types.instance import AutoscalingTargets
+from .types.instance import Cluster
+from .types.instance import HotTablet
+from .types.instance import Instance
+from .types.instance import LogicalView
+from .types.instance import MaterializedView
+from .types.table import AuthorizedView
+from .types.table import Backup
+from .types.table import BackupInfo
+from .types.table import ChangeStreamConfig
+from .types.table import ColumnFamily
+from .types.table import EncryptionInfo
+from .types.table import GcRule
+from .types.table import ProtoSchema
+from .types.table import RestoreInfo
+from .types.table import SchemaBundle
+from .types.table import Snapshot
+from .types.table import Table
+from .types.table import RestoreSourceType
+from .types.types import Type
+
+__all__ = (
+ "BaseBigtableTableAdminAsyncClient",
+ "BigtableInstanceAdminAsyncClient",
+ "AppProfile",
+ "AuthorizedView",
+ "AutoscalingLimits",
+ "AutoscalingTargets",
+ "Backup",
+ "BackupInfo",
+ "BaseBigtableTableAdminClient",
+ "BigtableInstanceAdminClient",
+ "ChangeStreamConfig",
+ "CheckConsistencyRequest",
+ "CheckConsistencyResponse",
+ "Cluster",
+ "ColumnFamily",
+ "CopyBackupMetadata",
+ "CopyBackupRequest",
+ "CreateAppProfileRequest",
+ "CreateAuthorizedViewMetadata",
+ "CreateAuthorizedViewRequest",
+ "CreateBackupMetadata",
+ "CreateBackupRequest",
+ "CreateClusterMetadata",
+ "CreateClusterRequest",
+ "CreateInstanceMetadata",
+ "CreateInstanceRequest",
+ "CreateLogicalViewMetadata",
+ "CreateLogicalViewRequest",
+ "CreateMaterializedViewMetadata",
+ "CreateMaterializedViewRequest",
+ "CreateSchemaBundleMetadata",
+ "CreateSchemaBundleRequest",
+ "CreateTableFromSnapshotMetadata",
+ "CreateTableFromSnapshotRequest",
+ "CreateTableRequest",
+ "DataBoostReadLocalWrites",
+ "DeleteAppProfileRequest",
+ "DeleteAuthorizedViewRequest",
+ "DeleteBackupRequest",
+ "DeleteClusterRequest",
+ "DeleteInstanceRequest",
+ "DeleteLogicalViewRequest",
+ "DeleteMaterializedViewRequest",
+ "DeleteSchemaBundleRequest",
+ "DeleteSnapshotRequest",
+ "DeleteTableRequest",
+ "DropRowRangeRequest",
+ "EncryptionInfo",
+ "GcRule",
+ "GenerateConsistencyTokenRequest",
+ "GenerateConsistencyTokenResponse",
+ "GetAppProfileRequest",
+ "GetAuthorizedViewRequest",
+ "GetBackupRequest",
+ "GetClusterRequest",
+ "GetInstanceRequest",
+ "GetLogicalViewRequest",
+ "GetMaterializedViewRequest",
+ "GetSchemaBundleRequest",
+ "GetSnapshotRequest",
+ "GetTableRequest",
+ "HotTablet",
+ "Instance",
+ "ListAppProfilesRequest",
+ "ListAppProfilesResponse",
+ "ListAuthorizedViewsRequest",
+ "ListAuthorizedViewsResponse",
+ "ListBackupsRequest",
+ "ListBackupsResponse",
+ "ListClustersRequest",
+ "ListClustersResponse",
+ "ListHotTabletsRequest",
+ "ListHotTabletsResponse",
+ "ListInstancesRequest",
+ "ListInstancesResponse",
+ "ListLogicalViewsRequest",
+ "ListLogicalViewsResponse",
+ "ListMaterializedViewsRequest",
+ "ListMaterializedViewsResponse",
+ "ListSchemaBundlesRequest",
+ "ListSchemaBundlesResponse",
+ "ListSnapshotsRequest",
+ "ListSnapshotsResponse",
+ "ListTablesRequest",
+ "ListTablesResponse",
+ "LogicalView",
+ "MaterializedView",
+ "ModifyColumnFamiliesRequest",
+ "OperationProgress",
+ "OptimizeRestoredTableMetadata",
+ "PartialUpdateClusterMetadata",
+ "PartialUpdateClusterRequest",
+ "PartialUpdateInstanceRequest",
+ "ProtoSchema",
+ "RestoreInfo",
+ "RestoreSourceType",
+ "RestoreTableMetadata",
+ "RestoreTableRequest",
+ "SchemaBundle",
+ "Snapshot",
+ "SnapshotTableMetadata",
+ "SnapshotTableRequest",
+ "StandardReadRemoteWrites",
+ "StorageType",
+ "Table",
+ "Type",
+ "UndeleteTableMetadata",
+ "UndeleteTableRequest",
+ "UpdateAppProfileMetadata",
+ "UpdateAppProfileRequest",
+ "UpdateAuthorizedViewMetadata",
+ "UpdateAuthorizedViewRequest",
+ "UpdateBackupRequest",
+ "UpdateClusterMetadata",
+ "UpdateInstanceMetadata",
+ "UpdateLogicalViewMetadata",
+ "UpdateLogicalViewRequest",
+ "UpdateMaterializedViewMetadata",
+ "UpdateMaterializedViewRequest",
+ "UpdateSchemaBundleMetadata",
+ "UpdateSchemaBundleRequest",
+ "UpdateTableMetadata",
+ "UpdateTableRequest",
+)
+
+from .overlay import * # noqa: F403
+
+__all__ += overlay.__all__ # noqa: F405
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json
new file mode 100644
index 000000000000..9725d3384819
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json
@@ -0,0 +1,1037 @@
+ {
+ "comment": "This file maps proto services/RPCs to the corresponding library clients/methods",
+ "language": "python",
+ "libraryPackage": "google.cloud.bigtable_admin_v2",
+ "protoPackage": "google.bigtable.admin.v2",
+ "schema": "1.0",
+ "services": {
+ "BigtableInstanceAdmin": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "BigtableInstanceAdminClient",
+ "rpcs": {
+ "CreateAppProfile": {
+ "methods": [
+ "create_app_profile"
+ ]
+ },
+ "CreateCluster": {
+ "methods": [
+ "create_cluster"
+ ]
+ },
+ "CreateInstance": {
+ "methods": [
+ "create_instance"
+ ]
+ },
+ "CreateLogicalView": {
+ "methods": [
+ "create_logical_view"
+ ]
+ },
+ "CreateMaterializedView": {
+ "methods": [
+ "create_materialized_view"
+ ]
+ },
+ "DeleteAppProfile": {
+ "methods": [
+ "delete_app_profile"
+ ]
+ },
+ "DeleteCluster": {
+ "methods": [
+ "delete_cluster"
+ ]
+ },
+ "DeleteInstance": {
+ "methods": [
+ "delete_instance"
+ ]
+ },
+ "DeleteLogicalView": {
+ "methods": [
+ "delete_logical_view"
+ ]
+ },
+ "DeleteMaterializedView": {
+ "methods": [
+ "delete_materialized_view"
+ ]
+ },
+ "GetAppProfile": {
+ "methods": [
+ "get_app_profile"
+ ]
+ },
+ "GetCluster": {
+ "methods": [
+ "get_cluster"
+ ]
+ },
+ "GetIamPolicy": {
+ "methods": [
+ "get_iam_policy"
+ ]
+ },
+ "GetInstance": {
+ "methods": [
+ "get_instance"
+ ]
+ },
+ "GetLogicalView": {
+ "methods": [
+ "get_logical_view"
+ ]
+ },
+ "GetMaterializedView": {
+ "methods": [
+ "get_materialized_view"
+ ]
+ },
+ "ListAppProfiles": {
+ "methods": [
+ "list_app_profiles"
+ ]
+ },
+ "ListClusters": {
+ "methods": [
+ "list_clusters"
+ ]
+ },
+ "ListHotTablets": {
+ "methods": [
+ "list_hot_tablets"
+ ]
+ },
+ "ListInstances": {
+ "methods": [
+ "list_instances"
+ ]
+ },
+ "ListLogicalViews": {
+ "methods": [
+ "list_logical_views"
+ ]
+ },
+ "ListMaterializedViews": {
+ "methods": [
+ "list_materialized_views"
+ ]
+ },
+ "PartialUpdateCluster": {
+ "methods": [
+ "partial_update_cluster"
+ ]
+ },
+ "PartialUpdateInstance": {
+ "methods": [
+ "partial_update_instance"
+ ]
+ },
+ "SetIamPolicy": {
+ "methods": [
+ "set_iam_policy"
+ ]
+ },
+ "TestIamPermissions": {
+ "methods": [
+ "test_iam_permissions"
+ ]
+ },
+ "UpdateAppProfile": {
+ "methods": [
+ "update_app_profile"
+ ]
+ },
+ "UpdateCluster": {
+ "methods": [
+ "update_cluster"
+ ]
+ },
+ "UpdateInstance": {
+ "methods": [
+ "update_instance"
+ ]
+ },
+ "UpdateLogicalView": {
+ "methods": [
+ "update_logical_view"
+ ]
+ },
+ "UpdateMaterializedView": {
+ "methods": [
+ "update_materialized_view"
+ ]
+ }
+ }
+ },
+ "grpc-async": {
+ "libraryClient": "BigtableInstanceAdminAsyncClient",
+ "rpcs": {
+ "CreateAppProfile": {
+ "methods": [
+ "create_app_profile"
+ ]
+ },
+ "CreateCluster": {
+ "methods": [
+ "create_cluster"
+ ]
+ },
+ "CreateInstance": {
+ "methods": [
+ "create_instance"
+ ]
+ },
+ "CreateLogicalView": {
+ "methods": [
+ "create_logical_view"
+ ]
+ },
+ "CreateMaterializedView": {
+ "methods": [
+ "create_materialized_view"
+ ]
+ },
+ "DeleteAppProfile": {
+ "methods": [
+ "delete_app_profile"
+ ]
+ },
+ "DeleteCluster": {
+ "methods": [
+ "delete_cluster"
+ ]
+ },
+ "DeleteInstance": {
+ "methods": [
+ "delete_instance"
+ ]
+ },
+ "DeleteLogicalView": {
+ "methods": [
+ "delete_logical_view"
+ ]
+ },
+ "DeleteMaterializedView": {
+ "methods": [
+ "delete_materialized_view"
+ ]
+ },
+ "GetAppProfile": {
+ "methods": [
+ "get_app_profile"
+ ]
+ },
+ "GetCluster": {
+ "methods": [
+ "get_cluster"
+ ]
+ },
+ "GetIamPolicy": {
+ "methods": [
+ "get_iam_policy"
+ ]
+ },
+ "GetInstance": {
+ "methods": [
+ "get_instance"
+ ]
+ },
+ "GetLogicalView": {
+ "methods": [
+ "get_logical_view"
+ ]
+ },
+ "GetMaterializedView": {
+ "methods": [
+ "get_materialized_view"
+ ]
+ },
+ "ListAppProfiles": {
+ "methods": [
+ "list_app_profiles"
+ ]
+ },
+ "ListClusters": {
+ "methods": [
+ "list_clusters"
+ ]
+ },
+ "ListHotTablets": {
+ "methods": [
+ "list_hot_tablets"
+ ]
+ },
+ "ListInstances": {
+ "methods": [
+ "list_instances"
+ ]
+ },
+ "ListLogicalViews": {
+ "methods": [
+ "list_logical_views"
+ ]
+ },
+ "ListMaterializedViews": {
+ "methods": [
+ "list_materialized_views"
+ ]
+ },
+ "PartialUpdateCluster": {
+ "methods": [
+ "partial_update_cluster"
+ ]
+ },
+ "PartialUpdateInstance": {
+ "methods": [
+ "partial_update_instance"
+ ]
+ },
+ "SetIamPolicy": {
+ "methods": [
+ "set_iam_policy"
+ ]
+ },
+ "TestIamPermissions": {
+ "methods": [
+ "test_iam_permissions"
+ ]
+ },
+ "UpdateAppProfile": {
+ "methods": [
+ "update_app_profile"
+ ]
+ },
+ "UpdateCluster": {
+ "methods": [
+ "update_cluster"
+ ]
+ },
+ "UpdateInstance": {
+ "methods": [
+ "update_instance"
+ ]
+ },
+ "UpdateLogicalView": {
+ "methods": [
+ "update_logical_view"
+ ]
+ },
+ "UpdateMaterializedView": {
+ "methods": [
+ "update_materialized_view"
+ ]
+ }
+ }
+ },
+ "rest": {
+ "libraryClient": "BigtableInstanceAdminClient",
+ "rpcs": {
+ "CreateAppProfile": {
+ "methods": [
+ "create_app_profile"
+ ]
+ },
+ "CreateCluster": {
+ "methods": [
+ "create_cluster"
+ ]
+ },
+ "CreateInstance": {
+ "methods": [
+ "create_instance"
+ ]
+ },
+ "CreateLogicalView": {
+ "methods": [
+ "create_logical_view"
+ ]
+ },
+ "CreateMaterializedView": {
+ "methods": [
+ "create_materialized_view"
+ ]
+ },
+ "DeleteAppProfile": {
+ "methods": [
+ "delete_app_profile"
+ ]
+ },
+ "DeleteCluster": {
+ "methods": [
+ "delete_cluster"
+ ]
+ },
+ "DeleteInstance": {
+ "methods": [
+ "delete_instance"
+ ]
+ },
+ "DeleteLogicalView": {
+ "methods": [
+ "delete_logical_view"
+ ]
+ },
+ "DeleteMaterializedView": {
+ "methods": [
+ "delete_materialized_view"
+ ]
+ },
+ "GetAppProfile": {
+ "methods": [
+ "get_app_profile"
+ ]
+ },
+ "GetCluster": {
+ "methods": [
+ "get_cluster"
+ ]
+ },
+ "GetIamPolicy": {
+ "methods": [
+ "get_iam_policy"
+ ]
+ },
+ "GetInstance": {
+ "methods": [
+ "get_instance"
+ ]
+ },
+ "GetLogicalView": {
+ "methods": [
+ "get_logical_view"
+ ]
+ },
+ "GetMaterializedView": {
+ "methods": [
+ "get_materialized_view"
+ ]
+ },
+ "ListAppProfiles": {
+ "methods": [
+ "list_app_profiles"
+ ]
+ },
+ "ListClusters": {
+ "methods": [
+ "list_clusters"
+ ]
+ },
+ "ListHotTablets": {
+ "methods": [
+ "list_hot_tablets"
+ ]
+ },
+ "ListInstances": {
+ "methods": [
+ "list_instances"
+ ]
+ },
+ "ListLogicalViews": {
+ "methods": [
+ "list_logical_views"
+ ]
+ },
+ "ListMaterializedViews": {
+ "methods": [
+ "list_materialized_views"
+ ]
+ },
+ "PartialUpdateCluster": {
+ "methods": [
+ "partial_update_cluster"
+ ]
+ },
+ "PartialUpdateInstance": {
+ "methods": [
+ "partial_update_instance"
+ ]
+ },
+ "SetIamPolicy": {
+ "methods": [
+ "set_iam_policy"
+ ]
+ },
+ "TestIamPermissions": {
+ "methods": [
+ "test_iam_permissions"
+ ]
+ },
+ "UpdateAppProfile": {
+ "methods": [
+ "update_app_profile"
+ ]
+ },
+ "UpdateCluster": {
+ "methods": [
+ "update_cluster"
+ ]
+ },
+ "UpdateInstance": {
+ "methods": [
+ "update_instance"
+ ]
+ },
+ "UpdateLogicalView": {
+ "methods": [
+ "update_logical_view"
+ ]
+ },
+ "UpdateMaterializedView": {
+ "methods": [
+ "update_materialized_view"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "BigtableTableAdmin": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "BaseBigtableTableAdminClient",
+ "rpcs": {
+ "CheckConsistency": {
+ "methods": [
+ "check_consistency"
+ ]
+ },
+ "CopyBackup": {
+ "methods": [
+ "copy_backup"
+ ]
+ },
+ "CreateAuthorizedView": {
+ "methods": [
+ "create_authorized_view"
+ ]
+ },
+ "CreateBackup": {
+ "methods": [
+ "create_backup"
+ ]
+ },
+ "CreateSchemaBundle": {
+ "methods": [
+ "create_schema_bundle"
+ ]
+ },
+ "CreateTable": {
+ "methods": [
+ "create_table"
+ ]
+ },
+ "CreateTableFromSnapshot": {
+ "methods": [
+ "create_table_from_snapshot"
+ ]
+ },
+ "DeleteAuthorizedView": {
+ "methods": [
+ "delete_authorized_view"
+ ]
+ },
+ "DeleteBackup": {
+ "methods": [
+ "delete_backup"
+ ]
+ },
+ "DeleteSchemaBundle": {
+ "methods": [
+ "delete_schema_bundle"
+ ]
+ },
+ "DeleteSnapshot": {
+ "methods": [
+ "delete_snapshot"
+ ]
+ },
+ "DeleteTable": {
+ "methods": [
+ "delete_table"
+ ]
+ },
+ "DropRowRange": {
+ "methods": [
+ "drop_row_range"
+ ]
+ },
+ "GenerateConsistencyToken": {
+ "methods": [
+ "generate_consistency_token"
+ ]
+ },
+ "GetAuthorizedView": {
+ "methods": [
+ "get_authorized_view"
+ ]
+ },
+ "GetBackup": {
+ "methods": [
+ "get_backup"
+ ]
+ },
+ "GetIamPolicy": {
+ "methods": [
+ "get_iam_policy"
+ ]
+ },
+ "GetSchemaBundle": {
+ "methods": [
+ "get_schema_bundle"
+ ]
+ },
+ "GetSnapshot": {
+ "methods": [
+ "get_snapshot"
+ ]
+ },
+ "GetTable": {
+ "methods": [
+ "get_table"
+ ]
+ },
+ "ListAuthorizedViews": {
+ "methods": [
+ "list_authorized_views"
+ ]
+ },
+ "ListBackups": {
+ "methods": [
+ "list_backups"
+ ]
+ },
+ "ListSchemaBundles": {
+ "methods": [
+ "list_schema_bundles"
+ ]
+ },
+ "ListSnapshots": {
+ "methods": [
+ "list_snapshots"
+ ]
+ },
+ "ListTables": {
+ "methods": [
+ "list_tables"
+ ]
+ },
+ "ModifyColumnFamilies": {
+ "methods": [
+ "modify_column_families"
+ ]
+ },
+ "RestoreTable": {
+ "methods": [
+ "_restore_table"
+ ]
+ },
+ "SetIamPolicy": {
+ "methods": [
+ "set_iam_policy"
+ ]
+ },
+ "SnapshotTable": {
+ "methods": [
+ "snapshot_table"
+ ]
+ },
+ "TestIamPermissions": {
+ "methods": [
+ "test_iam_permissions"
+ ]
+ },
+ "UndeleteTable": {
+ "methods": [
+ "undelete_table"
+ ]
+ },
+ "UpdateAuthorizedView": {
+ "methods": [
+ "update_authorized_view"
+ ]
+ },
+ "UpdateBackup": {
+ "methods": [
+ "update_backup"
+ ]
+ },
+ "UpdateSchemaBundle": {
+ "methods": [
+ "update_schema_bundle"
+ ]
+ },
+ "UpdateTable": {
+ "methods": [
+ "update_table"
+ ]
+ }
+ }
+ },
+ "grpc-async": {
+ "libraryClient": "BaseBigtableTableAdminAsyncClient",
+ "rpcs": {
+ "CheckConsistency": {
+ "methods": [
+ "check_consistency"
+ ]
+ },
+ "CopyBackup": {
+ "methods": [
+ "copy_backup"
+ ]
+ },
+ "CreateAuthorizedView": {
+ "methods": [
+ "create_authorized_view"
+ ]
+ },
+ "CreateBackup": {
+ "methods": [
+ "create_backup"
+ ]
+ },
+ "CreateSchemaBundle": {
+ "methods": [
+ "create_schema_bundle"
+ ]
+ },
+ "CreateTable": {
+ "methods": [
+ "create_table"
+ ]
+ },
+ "CreateTableFromSnapshot": {
+ "methods": [
+ "create_table_from_snapshot"
+ ]
+ },
+ "DeleteAuthorizedView": {
+ "methods": [
+ "delete_authorized_view"
+ ]
+ },
+ "DeleteBackup": {
+ "methods": [
+ "delete_backup"
+ ]
+ },
+ "DeleteSchemaBundle": {
+ "methods": [
+ "delete_schema_bundle"
+ ]
+ },
+ "DeleteSnapshot": {
+ "methods": [
+ "delete_snapshot"
+ ]
+ },
+ "DeleteTable": {
+ "methods": [
+ "delete_table"
+ ]
+ },
+ "DropRowRange": {
+ "methods": [
+ "drop_row_range"
+ ]
+ },
+ "GenerateConsistencyToken": {
+ "methods": [
+ "generate_consistency_token"
+ ]
+ },
+ "GetAuthorizedView": {
+ "methods": [
+ "get_authorized_view"
+ ]
+ },
+ "GetBackup": {
+ "methods": [
+ "get_backup"
+ ]
+ },
+ "GetIamPolicy": {
+ "methods": [
+ "get_iam_policy"
+ ]
+ },
+ "GetSchemaBundle": {
+ "methods": [
+ "get_schema_bundle"
+ ]
+ },
+ "GetSnapshot": {
+ "methods": [
+ "get_snapshot"
+ ]
+ },
+ "GetTable": {
+ "methods": [
+ "get_table"
+ ]
+ },
+ "ListAuthorizedViews": {
+ "methods": [
+ "list_authorized_views"
+ ]
+ },
+ "ListBackups": {
+ "methods": [
+ "list_backups"
+ ]
+ },
+ "ListSchemaBundles": {
+ "methods": [
+ "list_schema_bundles"
+ ]
+ },
+ "ListSnapshots": {
+ "methods": [
+ "list_snapshots"
+ ]
+ },
+ "ListTables": {
+ "methods": [
+ "list_tables"
+ ]
+ },
+ "ModifyColumnFamilies": {
+ "methods": [
+ "modify_column_families"
+ ]
+ },
+ "RestoreTable": {
+ "methods": [
+ "_restore_table"
+ ]
+ },
+ "SetIamPolicy": {
+ "methods": [
+ "set_iam_policy"
+ ]
+ },
+ "SnapshotTable": {
+ "methods": [
+ "snapshot_table"
+ ]
+ },
+ "TestIamPermissions": {
+ "methods": [
+ "test_iam_permissions"
+ ]
+ },
+ "UndeleteTable": {
+ "methods": [
+ "undelete_table"
+ ]
+ },
+ "UpdateAuthorizedView": {
+ "methods": [
+ "update_authorized_view"
+ ]
+ },
+ "UpdateBackup": {
+ "methods": [
+ "update_backup"
+ ]
+ },
+ "UpdateSchemaBundle": {
+ "methods": [
+ "update_schema_bundle"
+ ]
+ },
+ "UpdateTable": {
+ "methods": [
+ "update_table"
+ ]
+ }
+ }
+ },
+ "rest": {
+ "libraryClient": "BaseBigtableTableAdminClient",
+ "rpcs": {
+ "CheckConsistency": {
+ "methods": [
+ "check_consistency"
+ ]
+ },
+ "CopyBackup": {
+ "methods": [
+ "copy_backup"
+ ]
+ },
+ "CreateAuthorizedView": {
+ "methods": [
+ "create_authorized_view"
+ ]
+ },
+ "CreateBackup": {
+ "methods": [
+ "create_backup"
+ ]
+ },
+ "CreateSchemaBundle": {
+ "methods": [
+ "create_schema_bundle"
+ ]
+ },
+ "CreateTable": {
+ "methods": [
+ "create_table"
+ ]
+ },
+ "CreateTableFromSnapshot": {
+ "methods": [
+ "create_table_from_snapshot"
+ ]
+ },
+ "DeleteAuthorizedView": {
+ "methods": [
+ "delete_authorized_view"
+ ]
+ },
+ "DeleteBackup": {
+ "methods": [
+ "delete_backup"
+ ]
+ },
+ "DeleteSchemaBundle": {
+ "methods": [
+ "delete_schema_bundle"
+ ]
+ },
+ "DeleteSnapshot": {
+ "methods": [
+ "delete_snapshot"
+ ]
+ },
+ "DeleteTable": {
+ "methods": [
+ "delete_table"
+ ]
+ },
+ "DropRowRange": {
+ "methods": [
+ "drop_row_range"
+ ]
+ },
+ "GenerateConsistencyToken": {
+ "methods": [
+ "generate_consistency_token"
+ ]
+ },
+ "GetAuthorizedView": {
+ "methods": [
+ "get_authorized_view"
+ ]
+ },
+ "GetBackup": {
+ "methods": [
+ "get_backup"
+ ]
+ },
+ "GetIamPolicy": {
+ "methods": [
+ "get_iam_policy"
+ ]
+ },
+ "GetSchemaBundle": {
+ "methods": [
+ "get_schema_bundle"
+ ]
+ },
+ "GetSnapshot": {
+ "methods": [
+ "get_snapshot"
+ ]
+ },
+ "GetTable": {
+ "methods": [
+ "get_table"
+ ]
+ },
+ "ListAuthorizedViews": {
+ "methods": [
+ "list_authorized_views"
+ ]
+ },
+ "ListBackups": {
+ "methods": [
+ "list_backups"
+ ]
+ },
+ "ListSchemaBundles": {
+ "methods": [
+ "list_schema_bundles"
+ ]
+ },
+ "ListSnapshots": {
+ "methods": [
+ "list_snapshots"
+ ]
+ },
+ "ListTables": {
+ "methods": [
+ "list_tables"
+ ]
+ },
+ "ModifyColumnFamilies": {
+ "methods": [
+ "modify_column_families"
+ ]
+ },
+ "RestoreTable": {
+ "methods": [
+ "_restore_table"
+ ]
+ },
+ "SetIamPolicy": {
+ "methods": [
+ "set_iam_policy"
+ ]
+ },
+ "SnapshotTable": {
+ "methods": [
+ "snapshot_table"
+ ]
+ },
+ "TestIamPermissions": {
+ "methods": [
+ "test_iam_permissions"
+ ]
+ },
+ "UndeleteTable": {
+ "methods": [
+ "undelete_table"
+ ]
+ },
+ "UpdateAuthorizedView": {
+ "methods": [
+ "update_authorized_view"
+ ]
+ },
+ "UpdateBackup": {
+ "methods": [
+ "update_backup"
+ ]
+ },
+ "UpdateSchemaBundle": {
+ "methods": [
+ "update_schema_bundle"
+ ]
+ },
+ "UpdateTable": {
+ "methods": [
+ "update_table"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py
new file mode 100644
index 000000000000..b31b170e1e8f
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+__version__ = "2.34.0" # {x-release-please-version}
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/__init__.py
new file mode 100644
index 000000000000..f66c7f8dd885
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/__init__.py
@@ -0,0 +1,49 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This directory and all its subdirectories are the only handwritten
+# components of the otherwise autogenerated google/cloud/bigtable/admin_v2.
+# The purpose of the overlay directory is to add additional functionality to
+# the autogenerated library while preserving its developer experience. These
+# handwritten additions currently consist of the following:
+#
+# 1. TODO: Document final GcRule design choice here
+# 2. An LRO class for restore_table that exposes an Operation for
+# OptimizeRestoreTable, if that LRO exists.
+# 3. New methods (wait_for_consistency and wait_for_replication) that block on
+#    a polling future which automatically polls check_consistency.
+#
+# This directory is structured to mirror that of a typical autogenerated library (e.g.
+# services/types subdirectories), and the aforementioned handwritten additions are
+# currently implemented as either types under overlay/types or in methods in an overwritten
+# client class under overlay/services.
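+#
+# A rough usage sketch of these additions (illustrative only; the actual
+# signatures live under overlay/services and overlay/types, and
+# `restore_request` below is a placeholder for a fully populated
+# RestoreTableRequest):
+#
+#     client = BigtableTableAdminClient()
+#     operation = client.restore_table(request=restore_request)
+#     table = operation.result()
+#     optimize_op = operation.optimize_restore_table_operation()
+#     if optimize_op is not None:
+#         optimize_op.result()
+#     client.wait_for_consistency(name="projects/p/instances/i/tables/t")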
+
+from .types import (
+ AsyncRestoreTableOperation,
+ RestoreTableOperation,
+ WaitForConsistencyRequest,
+)
+
+from .services.bigtable_table_admin import (
+ BigtableTableAdminAsyncClient,
+ BigtableTableAdminClient,
+)
+
+__all__ = (
+ "AsyncRestoreTableOperation",
+ "RestoreTableOperation",
+ "BigtableTableAdminAsyncClient",
+ "BigtableTableAdminClient",
+ "WaitForConsistencyRequest",
+)
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/__init__.py
new file mode 100644
index 000000000000..ab7686e260fc
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/__init__.py
new file mode 100644
index 000000000000..f80e3234f064
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/__init__.py
@@ -0,0 +1,23 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO: Add the async client after owlbot changes.
+
+from .async_client import BigtableTableAdminAsyncClient
+from .client import BigtableTableAdminClient
+
+__all__ = (
+ "BigtableTableAdminAsyncClient",
+ "BigtableTableAdminClient",
+)
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/async_client.py
new file mode 100644
index 000000000000..ee8e5757d23a
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/async_client.py
@@ -0,0 +1,375 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import functools
+
+from typing import Callable, Optional, Sequence, Tuple, Union
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+
+try:
+ OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore
+
+from google.api_core import client_options as client_options_lib
+from google.auth import credentials as ga_credentials # type: ignore
+
+from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
+
+from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+ async_client as base_client,
+)
+from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.base import (
+ BigtableTableAdminTransport,
+)
+from google.cloud.bigtable_admin_v2.overlay.types import (
+ async_consistency,
+ async_restore_table,
+ wait_for_consistency_request,
+)
+
+from google.cloud.bigtable.gapic_version import __version__ as bigtable_version
+
+
+DEFAULT_CLIENT_INFO = copy.copy(base_client.DEFAULT_CLIENT_INFO)
+DEFAULT_CLIENT_INFO.client_library_version = f"{bigtable_version}-admin-overlay-async"
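+# For example, with bigtable_version == "2.34.0" the version reported in the
+# user-agent becomes "2.34.0-admin-overlay-async", so requests made through this
+# handwritten overlay client are identifiable.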
+
+
+class BigtableTableAdminAsyncClient(base_client.BaseBigtableTableAdminAsyncClient):
+ def __init__(
+ self,
+ *,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ transport: Optional[
+ Union[
+ str,
+ BigtableTableAdminTransport,
+ Callable[..., BigtableTableAdminTransport],
+ ]
+ ] = "grpc_asyncio",
+ client_options: Optional[client_options_lib.ClientOptions] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiates the Bigtable table admin async client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Optional[Union[str,BigtableTableAdminTransport,Callable[..., BigtableTableAdminTransport]]]):
+ The transport to use, or a Callable that constructs and returns a new transport to use.
+ If a Callable is given, it will be called with the same set of initialization
+ arguments as used in the BigtableTableAdminTransport constructor.
+ If set to None, a transport is chosen automatically.
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
+ Custom options for the client.
+
+ 1. The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client when ``transport`` is
+ not explicitly provided. Only if this property is not set and
+ ``transport`` was not explicitly provided, the endpoint is
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
+                variable, which can have one of the following values:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto-switch to the
+ default mTLS endpoint if client certificate is present; this is
+ the default value).
+
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide a client certificate for mTLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+
+ 3. The ``universe_domain`` property can be used to override the
+ default "googleapis.com" universe. Note that ``api_endpoint``
+ property still takes precedence; and ``universe_domain`` is
+ currently not supported for mTLS.
+
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+ super(BigtableTableAdminAsyncClient, self).__init__(
+ credentials=credentials,
+ transport=transport,
+ client_options=client_options,
+ client_info=client_info,
+ )
+
+ async def restore_table(
+ self,
+ request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> async_restore_table.AsyncRestoreTableOperation:
+ r"""Create a new table by restoring from a completed backup. The
+ returned table :class:`long-running operation
+ `
+ can be used to track the progress of the operation, and to cancel it. The
+ :attr:`metadata ` field type is
+ :class:`RestoreTableMetadata `.
+ The :meth:`response ` type is
+ :class:`google.cloud.bigtable_admin_v2.types.Table`, if successful.
+
+ Additionally, the returned :class:`long-running-operation `
+ provides a method, :meth:`google.cloud.bigtable_admin_v2.overlay.types.async_restore_table.AsyncRestoreTableOperation.optimize_restore_table_operation` that
+ provides access to a :class:`google.api_core.operation_async.AsyncOperation` object representing the OptimizeRestoreTable long-running-operation
+ after the current one has completed.
+
+ .. code-block:: python
+
+ # This snippet should be regarded as a code template only.
+ #
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud.bigtable import admin_v2
+
+ async def sample_restore_table():
+ # Create a client
+ client = admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = admin_v2.RestoreTableRequest(
+ backup="backup_value",
+ parent="parent_value",
+ table_id="table_id_value",
+ )
+
+ # Make the request
+ operation = await client.restore_table(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = await operation.result()
+
+ # Handle the response
+ print(response)
+
+ # Handle LRO2
+ optimize_operation = await operation.optimize_restore_table_operation()
+
+ if optimize_operation:
+ print("Waiting for table optimization to complete...")
+
+ response = await optimize_operation.result()
+
+ Args:
+ request (Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]):
+ The request object. The request for
+ [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.overlay.types.async_restore_table.AsyncRestoreTableOperation:
+ An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table`, a collection of user data indexed by row, column, and timestamp.
+ Each table is served using the resources of its
+ parent cluster.
+ """
+ operation = await self._restore_table(
+ request=request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
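+        # Wrap the base long-running operation in AsyncRestoreTableOperation so callers
+        # can also retrieve the follow-up OptimizeRestoreTable operation once it exists.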
+ restore_table_operation = async_restore_table.AsyncRestoreTableOperation(
+ self._client._transport.operations_client, operation
+ )
+ return restore_table_operation
+
+ async def wait_for_consistency(
+ self,
+ request: Optional[
+ Union[wait_for_consistency_request.WaitForConsistencyRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> bool:
+ r"""Blocks until the mutations for the specified Table that have been
+ made before the call have been replicated or reads using an app profile with `DataBoostIsolationReadOnly`
+ can see all writes committed before the token was created. This is done by generating
+ a consistency token for the Table, then polling :meth:`check_consistency`
+ for the specified table until the call returns True.
+
+ .. code-block:: python
+
+ # This snippet should be regarded as a code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud.bigtable import admin_v2
+
+ async def sample_wait_for_consistency():
+ # Create a client
+ client = admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = admin_v2.WaitForConsistencyRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ print("Waiting for operation to complete...")
+
+                response = await client.wait_for_consistency(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.bigtable_admin_v2.overlay.types.WaitForConsistencyRequest, dict]):
+ The request object.
+ name (str):
+ Required. The unique name of the Table for which to
+ create a consistency token. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ bool:
+ If the `standard_read_remote_writes` mode is specified in the request object, returns
+ `True` after the mutations of the specified table have been fully replicated. If the
+ `data_boost_read_local_writes` mode is specified in the request object, returns `True`
+ after reads using an app profile with `DataBoostIsolationReadOnly` can see all writes
+ committed before the token was created.
+
+ Raises:
+ google.api_core.GoogleAPICallError: If the operation errors or if
+ the timeout is reached before the operation completes.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, wait_for_consistency_request.WaitForConsistencyRequest
+ ):
+ request = wait_for_consistency_request.WaitForConsistencyRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Generate the consistency token.
+ generate_consistency_token_request = (
+ bigtable_table_admin.GenerateConsistencyTokenRequest(
+ name=request.name,
+ )
+ )
+
+ generate_consistency_response = await self.generate_consistency_token(
+ generate_consistency_token_request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Create the CheckConsistencyRequest object.
+ check_consistency_request = bigtable_table_admin.CheckConsistencyRequest(
+ name=request.name,
+ consistency_token=generate_consistency_response.consistency_token,
+ )
+
+ # Since the default values of StandardReadRemoteWrites and DataBoostReadLocalWrites evaluate to
+ # False in proto plus, we cannot do a simple "if request.standard_read_remote_writes" to check
+ # whether or not that field is defined in the original request object.
+ mode_oneof_field = request._pb.WhichOneof("mode")
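+        # Copy whichever mode message (standard_read_remote_writes or
+        # data_boost_read_local_writes) was explicitly set onto the CheckConsistencyRequest.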
+ if mode_oneof_field:
+ setattr(
+ check_consistency_request,
+ mode_oneof_field,
+ getattr(request, mode_oneof_field),
+ )
+
+ check_consistency_call = functools.partial(
+ self.check_consistency,
+ check_consistency_request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Block and wait until the polling harness returns True.
+ check_consistency_future = (
+ async_consistency._AsyncCheckConsistencyPollingFuture(
+ check_consistency_call
+ )
+ )
+ return await check_consistency_future.result()
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/client.py
new file mode 100644
index 000000000000..1b6770b10195
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/client.py
@@ -0,0 +1,373 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import functools
+
+from typing import Callable, Optional, Sequence, Tuple, Union
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
+
+from google.api_core import client_options as client_options_lib
+from google.auth import credentials as ga_credentials # type: ignore
+
+from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
+
+from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+ client as base_client,
+)
+from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.base import (
+ BigtableTableAdminTransport,
+)
+from google.cloud.bigtable_admin_v2.overlay.types import (
+ consistency,
+ restore_table,
+ wait_for_consistency_request,
+)
+
+from google.cloud.bigtable.gapic_version import __version__ as bigtable_version
+
+
+DEFAULT_CLIENT_INFO = copy.copy(base_client.DEFAULT_CLIENT_INFO)
+DEFAULT_CLIENT_INFO.client_library_version = f"{bigtable_version}-admin-overlay"
+
+
+class BigtableTableAdminClient(base_client.BaseBigtableTableAdminClient):
+ def __init__(
+ self,
+ *,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ transport: Optional[
+ Union[
+ str,
+ BigtableTableAdminTransport,
+ Callable[..., BigtableTableAdminTransport],
+ ]
+ ] = None,
+ client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiates the Bigtable table admin client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Optional[Union[str,BigtableTableAdminTransport,Callable[..., BigtableTableAdminTransport]]]):
+ The transport to use, or a Callable that constructs and returns a new transport.
+ If a Callable is given, it will be called with the same set of initialization
+ arguments as used in the BigtableTableAdminTransport constructor.
+ If set to None, a transport is chosen automatically.
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
+ Custom options for the client.
+
+ 1. The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client when ``transport`` is
+ not explicitly provided. Only if this property is not set and
+ ``transport`` was not explicitly provided, the endpoint is
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
+ variable, which have one of the following values:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto-switch to the
+ default mTLS endpoint if client certificate is present; this is
+ the default value).
+
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide a client certificate for mTLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+
+ 3. The ``universe_domain`` property can be used to override the
+ default "googleapis.com" universe. Note that the ``api_endpoint``
+ property still takes precedence; and ``universe_domain`` is
+ currently not supported for mTLS.
+
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+ super(BigtableTableAdminClient, self).__init__(
+ credentials=credentials,
+ transport=transport,
+ client_options=client_options,
+ client_info=client_info,
+ )
+
+ def restore_table(
+ self,
+ request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> restore_table.RestoreTableOperation:
+        r"""Create a new table by restoring from a completed backup. The
+        returned :class:`RestoreTableOperation
+        <google.cloud.bigtable_admin_v2.overlay.types.restore_table.RestoreTableOperation>`
+        long-running operation can be used to track the progress of the operation, and to
+        cancel it. Its ``metadata`` field type is
+        :class:`google.cloud.bigtable_admin_v2.types.RestoreTableMetadata`, and its
+        ``response`` type is
+        :class:`google.cloud.bigtable_admin_v2.types.Table`, if successful.
+
+        Additionally, the returned operation provides an
+        :meth:`~google.cloud.bigtable_admin_v2.overlay.types.restore_table.RestoreTableOperation.optimize_restored_table_operation`
+        method that gives access to a :class:`google.api_core.operation.Operation` object
+        representing the OptimizeRestoredTable long-running operation after the current one has completed.
+
+ .. code-block:: python
+
+ # This snippet should be regarded as a code template only.
+ #
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud.bigtable import admin_v2
+
+ def sample_restore_table():
+ # Create a client
+ client = admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = admin_v2.RestoreTableRequest(
+ backup="backup_value",
+ parent="parent_value",
+ table_id="table_id_value",
+ )
+
+ # Make the request
+ operation = client.restore_table(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+ # Handle LRO2
+ optimize_operation = operation.optimize_restore_table_operation()
+
+ if optimize_operation:
+ print("Waiting for table optimization to complete...")
+
+ response = optimize_operation.result()
+
+ Args:
+ request (Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]):
+ The request object. The request for
+ [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.overlay.types.restore_table.RestoreTableOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp.
+ Each table is served using the resources of its
+ parent cluster.
+ """
+ operation = self._restore_table(
+ request=request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ restore_table_operation = restore_table.RestoreTableOperation(
+ self._transport.operations_client, operation
+ )
+ return restore_table_operation
+
+ def wait_for_consistency(
+ self,
+ request: Optional[
+ Union[wait_for_consistency_request.WaitForConsistencyRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> bool:
+        r"""Blocks until either the mutations made to the specified Table before this call
+        have been replicated, or reads using an app profile with ``DataBoostIsolationReadOnly``
+        can see all writes committed before the token was created, depending on the mode set
+        in the request. This is done by generating a consistency token for the Table, then
+        polling :meth:`check_consistency` for the specified table until the call returns ``True``.
+
+ .. code-block:: python
+
+ # This snippet should be regarded as a code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud.bigtable import admin_v2
+
+ def sample_wait_for_consistency():
+ # Create a client
+ client = admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = admin_v2.WaitForConsistencyRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ print("Waiting for operation to complete...")
+
+                response = client.wait_for_consistency(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.bigtable_admin_v2.overlay.types.WaitForConsistencyRequest, dict]):
+ The request object.
+ name (str):
+ Required. The unique name of the Table for which to
+ create a consistency token. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ bool:
+ If the `standard_read_remote_writes` mode is specified in the request object, returns
+ `True` after the mutations of the specified table have been fully replicated. If the
+ `data_boost_read_local_writes` mode is specified in the request object, returns `True`
+ after reads using an app profile with `DataBoostIsolationReadOnly` can see all writes
+ committed before the token was created.
+
+ Raises:
+ google.api_core.GoogleAPICallError: If the operation errors or if
+ the timeout is reached before the operation completes.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, wait_for_consistency_request.WaitForConsistencyRequest
+ ):
+ request = wait_for_consistency_request.WaitForConsistencyRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Generate the consistency token.
+ generate_consistency_token_request = (
+ bigtable_table_admin.GenerateConsistencyTokenRequest(
+ name=request.name,
+ )
+ )
+
+ generate_consistency_response = self.generate_consistency_token(
+ generate_consistency_token_request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Create the CheckConsistencyRequest object.
+ check_consistency_request = bigtable_table_admin.CheckConsistencyRequest(
+ name=request.name,
+ consistency_token=generate_consistency_response.consistency_token,
+ )
+
+ # Since the default values of StandardReadRemoteWrites and DataBoostReadLocalWrites evaluate to
+ # False in proto plus, we cannot do a simple "if request.standard_read_remote_writes" to check
+ # whether or not that field is defined in the original request object.
+ mode_oneof_field = request._pb.WhichOneof("mode")
+ if mode_oneof_field:
+ setattr(
+ check_consistency_request,
+ mode_oneof_field,
+ getattr(request, mode_oneof_field),
+ )
+
+ check_consistency_call = functools.partial(
+ self.check_consistency,
+ check_consistency_request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Block and wait until the polling harness returns True.
+ check_consistency_future = consistency._CheckConsistencyPollingFuture(
+ check_consistency_call
+ )
+ return check_consistency_future.result()
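
As a complement to the docstring sample above, the sketch below shows passing an explicit ``mode`` member when waiting on Data Boost read visibility instead of full replication. Resource names are placeholders, and the assumption that `DataBoostReadLocalWrites` is re-exported under the same `admin_v2` namespace may not hold.

```python
# Illustrative sketch only; resource names are placeholders and the
# DataBoostReadLocalWrites re-export under admin_v2 is an assumption.
from google.cloud.bigtable import admin_v2


def wait_for_data_boost_visibility() -> bool:
    client = admin_v2.BigtableTableAdminClient()

    request = admin_v2.WaitForConsistencyRequest(
        name="projects/my-project/instances/my-instance/tables/my-table",
        # A member of the "mode" oneof; it is forwarded onto the
        # CheckConsistencyRequest built by wait_for_consistency.
        data_boost_read_local_writes=admin_v2.DataBoostReadLocalWrites(),
    )
    return client.wait_for_consistency(request=request)
```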
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/__init__.py
new file mode 100644
index 000000000000..16b032ac4743
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/__init__.py
@@ -0,0 +1,31 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .async_restore_table import (
+ AsyncRestoreTableOperation,
+)
+
+from .restore_table import (
+ RestoreTableOperation,
+)
+
+from .wait_for_consistency_request import (
+ WaitForConsistencyRequest,
+)
+
+__all__ = (
+ "AsyncRestoreTableOperation",
+ "RestoreTableOperation",
+ "WaitForConsistencyRequest",
+)
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/async_consistency.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/async_consistency.py
new file mode 100644
index 000000000000..0703940d5138
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/async_consistency.py
@@ -0,0 +1,104 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Awaitable, Union, Callable
+
+from google.api_core.future import async_future
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
+
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
+
+
+# The consistency check could take a very long time, so we wait indefinitely.
+DEFAULT_RETRY = async_future.DEFAULT_RETRY.with_timeout(None)
+
+
+class _AsyncCheckConsistencyPollingFuture(async_future.AsyncFuture):
+ """A Future that polls an underlying `check_consistency` operation until it returns True.
+
+ **This class should not be instantiated by users** and should only be instantiated by the admin
+ client's
+ :meth:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.AsyncBigtableTableAdminClient.wait_for_consistency`
+ or
+ :meth:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.AsyncBigtableTableAdminClient.wait_for_replication`
+ methods.
+
+ Args:
+        check_consistency_call (Callable[[Optional[google.api_core.retry.AsyncRetry]],
+            Awaitable[google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse]]):
+            A ``check_consistency`` call from the admin client, with every user parameter
+            except ``retry`` already fixed (typically via :func:`functools.partial`).
+        retry (google.api_core.retry.AsyncRetry): The retry configuration used
+            when polling. This can be used to control how often :meth:`done`
+            is polled. Regardless of the retry's ``deadline``, it will be
+            overridden by the ``timeout`` argument to :meth:`result`.
+ """
+
+ def __init__(
+ self,
+ check_consistency_call: Callable[
+ [OptionalRetry], Awaitable[bigtable_table_admin.CheckConsistencyResponse]
+ ],
+ retry: retries.AsyncRetry = DEFAULT_RETRY,
+ **kwargs
+ ):
+ super(_AsyncCheckConsistencyPollingFuture, self).__init__(retry=retry, **kwargs)
+
+        # done() may be called either with or without an explicit retry, so
+        # check_consistency_call is expected to be a functools.partial that fixes
+        # every argument except retry.
+ self._check_consistency_call = check_consistency_call
+
+ async def done(self, retry: OptionalRetry = None):
+ """Polls the underlying `check_consistency` call to see if the future is complete.
+
+ Args:
+            retry (Optional[google.api_core.retry.AsyncRetry]): How to retry the
+                polling RPC (not to be confused with the polling configuration; see
+                the documentation for :meth:`result` for details).
+
+ Returns:
+ bool: True if the future is complete, False otherwise.
+ """
+ if self._future.done():
+ return True
+
+ try:
+ check_consistency_response = await self._check_consistency_call()
+ if check_consistency_response.consistent:
+ self.set_result(True)
+
+ return check_consistency_response.consistent
+ except Exception as e:
+ self.set_exception(e)
+
+ def cancel(self):
+ raise NotImplementedError("Cannot cancel consistency token operation")
+
+ def cancelled(self):
+ raise NotImplementedError("Cannot cancel consistency token operation")
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/async_restore_table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/async_restore_table.py
new file mode 100644
index 000000000000..9edfb4963cd3
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/async_restore_table.py
@@ -0,0 +1,99 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Optional
+
+from google.api_core import exceptions
+from google.api_core import operation_async
+from google.protobuf import empty_pb2
+
+from google.cloud.bigtable_admin_v2.types import OptimizeRestoredTableMetadata
+
+
+class AsyncRestoreTableOperation(operation_async.AsyncOperation):
+ """A Future for interacting with Bigtable Admin's RestoreTable Long-Running Operation.
+
+    This is needed to expose a potential follow-up long-running operation, OptimizeRestoredTable,
+    that might run after this operation finishes. It is exposed via the
+    :meth:`optimize_restored_table_operation` method.
+
+ **This class should not be instantiated by users** and should only be instantiated by the admin
+ client's :meth:`restore_table
+ `
+ method.
+
+ Args:
+ operations_client (google.api_core.operations_v1.AbstractOperationsClient): The operations
+ client from the admin client class's transport.
+ restore_table_operation (google.api_core.operation_async.AsyncOperation): A
+ :class:`google.api_core.operation_async.AsyncOperation`
+            instance representing a RestoreTable long-running operation.
+ """
+
+ def __init__(
+ self, operations_client, restore_table_operation: operation_async.AsyncOperation
+ ):
+ self._operations_client = operations_client
+ self._optimize_restored_table_operation = None
+ super().__init__(
+ restore_table_operation._operation,
+ restore_table_operation._refresh,
+ restore_table_operation._cancel,
+ restore_table_operation._result_type,
+ restore_table_operation._metadata_type,
+ retry=restore_table_operation._retry,
+ )
+
+ async def optimize_restored_table_operation(
+ self,
+ ) -> Optional[operation_async.AsyncOperation]:
+        """Gets the OptimizeRestoredTable long-running operation that runs after this operation finishes.
+
+        This method must not be called before the parent restore_table operation is complete.
+        The current operation might not trigger a follow-up OptimizeRestoredTable operation,
+        in which case this method returns `None`.
+
+        Returns:
+            Optional[google.api_core.operation_async.AsyncOperation]:
+                An object representing a long-running operation, or None if there is no
+                OptimizeRestoredTable operation after this one.
+
+        Raises:
+            google.api_core.GoogleAPIError: raised when accessed before the restore_table
+                operation is complete.
+        """
+ if not await self.done():
+ raise exceptions.GoogleAPIError(
+ "optimize_restored_table operation can't be accessed until the restore_table operation is complete"
+ )
+
+ if self._optimize_restored_table_operation is not None:
+ return self._optimize_restored_table_operation
+
+ operation_name = self.metadata.optimize_table_operation_name
+
+ # When the RestoreTable operation finishes, it might not necessarily trigger
+ # an optimize operation.
+ if operation_name:
+ gapic_operation = await self._operations_client.get_operation(
+ name=operation_name
+ )
+ self._optimize_restored_table_operation = operation_async.from_gapic(
+ gapic_operation,
+ self._operations_client,
+ empty_pb2.Empty,
+ metadata_type=OptimizeRestoredTableMetadata,
+ )
+ return self._optimize_restored_table_operation
+ else:
+ # no optimize operation found
+ return None
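
The sketch below illustrates how `AsyncRestoreTableOperation` is meant to be consumed end to end, assuming the async overlay client exposes a `restore_table` method mirroring the synchronous one shown in this diff; all resource names are placeholders.

```python
# Hedged sketch; assumes the async overlay client mirrors the synchronous
# restore_table overlay method. All resource names are placeholders.
from google.cloud.bigtable import admin_v2


async def restore_and_optimize() -> None:
    client = admin_v2.BigtableTableAdminAsyncClient()

    operation = await client.restore_table(
        request=admin_v2.RestoreTableRequest(
            parent="projects/my-project/instances/my-instance",
            table_id="restored-table",
            backup="projects/my-project/instances/my-instance/clusters/c/backups/b",
        )
    )
    table = await operation.result()
    print(f"restored: {table.name}")

    # The follow-up OptimizeRestoredTable operation may or may not exist.
    optimize = await operation.optimize_restored_table_operation()
    if optimize is not None:
        await optimize.result()
```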
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/consistency.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/consistency.py
new file mode 100644
index 000000000000..63a110975442
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/consistency.py
@@ -0,0 +1,101 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Union, Callable
+
+from google.api_core.future import polling
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
+
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
+
+
+# The consistency check could take a very long time, so we wait indefinitely.
+DEFAULT_RETRY = polling.DEFAULT_POLLING.with_timeout(None)
+
+
+class _CheckConsistencyPollingFuture(polling.PollingFuture):
+ """A Future that polls an underlying `check_consistency` operation until it returns True.
+
+ **This class should not be instantiated by users** and should only be instantiated by the admin
+ client's
+ :meth:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.BigtableTableAdminClient.wait_for_consistency`
+ or
+ :meth:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.BigtableTableAdminClient.wait_for_replication`
+ methods.
+
+ Args:
+        check_consistency_call (Callable[[Optional[google.api_core.retry.Retry]],
+            google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse]):
+            A ``check_consistency`` call from the admin client, with every user parameter
+            already fixed (typically via :func:`functools.partial`).
+ polling (google.api_core.retry.Retry): The configuration used for polling.
+ This parameter controls how often :meth:`done` is polled. If the
+ ``timeout`` argument is specified in the :meth:`result
+ ` method it will
+ override the ``polling.timeout`` property.
+ """
+
+ def __init__(
+ self,
+ check_consistency_call: Callable[
+ [OptionalRetry], bigtable_table_admin.CheckConsistencyResponse
+ ],
+ polling: retries.Retry = DEFAULT_RETRY,
+ **kwargs
+ ):
+ super(_CheckConsistencyPollingFuture, self).__init__(polling=polling, **kwargs)
+
+        # done() may be called either with or without an explicit retry, so
+        # check_consistency_call is expected to be a functools.partial that fixes
+        # every argument except retry.
+ self._check_consistency_call = check_consistency_call
+
+ def done(self, retry: OptionalRetry = None):
+ """Polls the underlying `check_consistency` call to see if the future is complete.
+
+ Args:
+            retry (Optional[google.api_core.retry.Retry]): How to retry the
+                polling RPC (not to be confused with the polling configuration; see
+                the documentation for :meth:`result` for details).
+
+ Returns:
+ bool: True if the future is complete, False otherwise.
+ """
+
+ if self._result_set:
+ return True
+
+ try:
+ check_consistency_response = self._check_consistency_call()
+ if check_consistency_response.consistent:
+ self.set_result(True)
+
+ return check_consistency_response.consistent
+ except Exception as e:
+ self.set_exception(e)
+
+ def cancel(self):
+ raise NotImplementedError("Cannot cancel consistency token operation")
+
+ def cancelled(self):
+ raise NotImplementedError("Cannot cancel consistency token operation")
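
To make the polling contract concrete, here is a small test-style sketch (an assumption for illustration, not part of the library) that drives `_CheckConsistencyPollingFuture` with a stub in place of the real `check_consistency` partial: `done()` keeps invoking the callable until a response reports `consistent=True`, at which point `result()` returns.

```python
# Test-style sketch (assumption, for illustration only): drive the private
# polling future with a stub standing in for the check_consistency partial.
import itertools

from google.cloud.bigtable_admin_v2.overlay.types import consistency
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin

# First poll reports "not yet consistent"; every later poll reports consistent.
_flags = itertools.chain([False], itertools.repeat(True))


def fake_check_consistency(retry=None):
    # Stands in for functools.partial(client.check_consistency, request, ...).
    return bigtable_table_admin.CheckConsistencyResponse(consistent=next(_flags))


future = consistency._CheckConsistencyPollingFuture(fake_check_consistency)
print(future.result())  # True, once the polling loop observes a consistent response
```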
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/restore_table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/restore_table.py
new file mode 100644
index 000000000000..84c9c5d91644
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/restore_table.py
@@ -0,0 +1,102 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Optional
+
+from google.api_core import exceptions
+from google.api_core import operation
+from google.protobuf import empty_pb2
+
+from google.cloud.bigtable_admin_v2.types import OptimizeRestoredTableMetadata
+
+
+class RestoreTableOperation(operation.Operation):
+ """A Future for interacting with Bigtable Admin's RestoreTable Long-Running Operation.
+
+    This is needed to expose a potential follow-up long-running operation, OptimizeRestoredTable,
+    that might run after this operation finishes. It is exposed via the
+    :meth:`optimize_restored_table_operation` method.
+
+ **This class should not be instantiated by users** and should only be instantiated by the admin
+ client's :meth:`restore_table
+ `
+ method.
+
+ Args:
+ operations_client (google.api_core.operations_v1.AbstractOperationsClient): The operations
+ client from the admin client class's transport.
+ restore_table_operation (google.api_core.operation.Operation): A :class:`google.api_core.operation.Operation`
+            instance representing a RestoreTable long-running operation.
+ """
+
+ def __init__(self, operations_client, restore_table_operation: operation.Operation):
+ self._operations_client = operations_client
+ self._optimize_restored_table_operation = None
+ super().__init__(
+ restore_table_operation._operation,
+ restore_table_operation._refresh,
+ restore_table_operation._cancel,
+ restore_table_operation._result_type,
+ restore_table_operation._metadata_type,
+ polling=restore_table_operation._polling,
+ )
+
+ def optimize_restored_table_operation(self) -> Optional[operation.Operation]:
+ """Gets the OptimizeRestoredTable long-running operation that runs after this operation finishes.
+
+ This must not be called before the parent restore_table operation is complete. You can guarantee
+ this happening by calling this function after this class's :meth:`google.api_core.operation.Operation.result`
+ method.
+
+        The follow-up operation has a ``metadata`` field of type
+        :class:`OptimizeRestoredTableMetadata
+        <google.cloud.bigtable_admin_v2.types.OptimizeRestoredTableMetadata>`
+        and no return value, but it can be waited for with ``result``.
+
+ The current operation might not trigger a follow-up OptimizeRestoredTable operation, in which case, this
+ method will return `None`.
+
+ Returns:
+ Optional[google.api_core.operation.Operation]:
+ An object representing a long-running operation, or None if there is no OptimizeRestoredTable operation
+ after this one.
+
+ Raises:
+ google.api_core.GoogleAPIError: raised when accessed before the restore_table operation is complete
+ """
+ if not self.done():
+ raise exceptions.GoogleAPIError(
+ "optimize_restored_table operation can't be accessed until the restore_table operation is complete"
+ )
+
+ if self._optimize_restored_table_operation is not None:
+ return self._optimize_restored_table_operation
+
+ operation_name = self.metadata.optimize_table_operation_name
+
+ # When the RestoreTable operation finishes, it might not necessarily trigger
+ # an optimize operation.
+ if operation_name:
+ gapic_operation = self._operations_client.get_operation(name=operation_name)
+ self._optimize_restored_table_operation = operation.from_gapic(
+ gapic_operation,
+ self._operations_client,
+ empty_pb2.Empty,
+ metadata_type=OptimizeRestoredTableMetadata,
+ )
+ return self._optimize_restored_table_operation
+ else:
+ # no optimize operation found
+ return None
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/wait_for_consistency_request.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/wait_for_consistency_request.py
new file mode 100644
index 000000000000..51070230a857
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/wait_for_consistency_request.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto
+
+from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
+
+__protobuf__ = proto.module(
+ package="google.bigtable.admin.v2",
+ manifest={
+ "WaitForConsistencyRequest",
+ },
+)
+
+
+# The WaitForConsistencyRequest object is not a real proto. It is a wrapper
+# class intended for the handwritten method wait_for_consistency. It is
+# constructed by extending a Proto Plus message class to get a developer
+# experience closest to that of an autogenerated GAPIC method, and to allow
+# developers to manipulate the wrapper class like they would a request proto
+# for an autogenerated call.
+class WaitForConsistencyRequest(proto.Message):
+ """Wrapper class for encapsulating parameters for the `wait_for_consistency` method in both
+ :class:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.client.BigtableTableAdminClient`
+    and :class:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.async_client.BigtableTableAdminAsyncClient`.
+
+
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ name (str):
+ Required. The unique name of the Table for which to check
+ replication consistency. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}``.
+ standard_read_remote_writes (google.cloud.bigtable_admin_v2.types.StandardReadRemoteWrites):
+ Checks that reads using an app profile with
+ ``StandardIsolation`` can see all writes committed before
+ the token was created, even if the read and write target
+ different clusters.
+
+ This field is a member of `oneof`_ ``mode``.
+ data_boost_read_local_writes (google.cloud.bigtable_admin_v2.types.DataBoostReadLocalWrites):
+ Checks that reads using an app profile with
+ ``DataBoostIsolationReadOnly`` can see all writes committed
+ before the token was created, but only if the read and write
+ target the same cluster.
+
+ This field is a member of `oneof`_ ``mode``.
+ """
+
+ name: str = proto.Field(proto.STRING, number=1)
+ standard_read_remote_writes: bigtable_table_admin.StandardReadRemoteWrites = (
+ proto.Field(
+ proto.MESSAGE,
+ number=2,
+ oneof="mode",
+ message=bigtable_table_admin.StandardReadRemoteWrites,
+ )
+ )
+ data_boost_read_local_writes: bigtable_table_admin.DataBoostReadLocalWrites = (
+ proto.Field(
+ proto.MESSAGE,
+ number=3,
+ oneof="mode",
+ message=bigtable_table_admin.DataBoostReadLocalWrites,
+ )
+ )
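
Because the two mode fields above form a proto-plus ``oneof``, setting one member clears the other, which is why the overlay client checks ``WhichOneof("mode")`` before copying the field. The sketch below (an illustrative assumption, not library documentation) demonstrates that behaviour.

```python
# Illustration (assumption) of the "mode" oneof behaviour referenced in the
# overlay client: setting one member clears the other, and WhichOneof reports
# which member, if any, is currently set.
from google.cloud.bigtable_admin_v2.overlay.types import wait_for_consistency_request
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin

request = wait_for_consistency_request.WaitForConsistencyRequest(
    name="projects/p/instances/i/tables/t",
)
print(request._pb.WhichOneof("mode"))  # None: no mode chosen yet

request.standard_read_remote_writes = bigtable_table_admin.StandardReadRemoteWrites()
print(request._pb.WhichOneof("mode"))  # "standard_read_remote_writes"

# Assigning the other member clears standard_read_remote_writes automatically.
request.data_boost_read_local_writes = bigtable_table_admin.DataBoostReadLocalWrites()
print(request._pb.WhichOneof("mode"))  # "data_boost_read_local_writes"
```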
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/py.typed b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/py.typed
new file mode 100644
index 000000000000..bc26f20697c2
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561.
+# The google-cloud-bigtable-admin package uses inline types.
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py
new file mode 100644
index 000000000000..cbf94b283c70
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py
new file mode 100644
index 000000000000..20ac9e4fc5f6
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from .client import BigtableInstanceAdminClient
+from .async_client import BigtableInstanceAdminAsyncClient
+
+__all__ = (
+ "BigtableInstanceAdminClient",
+ "BigtableInstanceAdminAsyncClient",
+)
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py
new file mode 100644
index 000000000000..632496543912
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py
@@ -0,0 +1,4340 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import logging as std_logging
+from collections import OrderedDict
+import re
+from typing import (
+ Dict,
+ Callable,
+ Mapping,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+)
+
+from google.cloud.bigtable_admin_v2 import gapic_version as package_version
+
+from google.api_core.client_options import ClientOptions
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry_async as retries
+from google.auth import credentials as ga_credentials # type: ignore
+from google.oauth2 import service_account # type: ignore
+import google.protobuf
+
+
+try:
+ OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore
+
+from google.api_core import operation # type: ignore
+from google.api_core import operation_async # type: ignore
+from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers
+from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin
+from google.cloud.bigtable_admin_v2.types import common
+from google.cloud.bigtable_admin_v2.types import instance
+from google.cloud.bigtable_admin_v2.types import instance as gba_instance
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
+from .transports.base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport
+from .client import BigtableInstanceAdminClient
+
+try:
+ from google.api_core import client_logging # type: ignore
+
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = std_logging.getLogger(__name__)
+
+
+class BigtableInstanceAdminAsyncClient:
+ """Service for creating, configuring, and deleting Cloud
+ Bigtable Instances and Clusters. Provides access to the Instance
+ and Cluster schemas only, not the tables' metadata or data
+ stored in those tables.
+ """
+
+ _client: BigtableInstanceAdminClient
+
+ # Copy defaults from the synchronous client for use here.
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
+ DEFAULT_ENDPOINT = BigtableInstanceAdminClient.DEFAULT_ENDPOINT
+ DEFAULT_MTLS_ENDPOINT = BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT
+ _DEFAULT_ENDPOINT_TEMPLATE = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE
+ _DEFAULT_UNIVERSE = BigtableInstanceAdminClient._DEFAULT_UNIVERSE
+
+ app_profile_path = staticmethod(BigtableInstanceAdminClient.app_profile_path)
+ parse_app_profile_path = staticmethod(
+ BigtableInstanceAdminClient.parse_app_profile_path
+ )
+ cluster_path = staticmethod(BigtableInstanceAdminClient.cluster_path)
+ parse_cluster_path = staticmethod(BigtableInstanceAdminClient.parse_cluster_path)
+ crypto_key_path = staticmethod(BigtableInstanceAdminClient.crypto_key_path)
+ parse_crypto_key_path = staticmethod(
+ BigtableInstanceAdminClient.parse_crypto_key_path
+ )
+ hot_tablet_path = staticmethod(BigtableInstanceAdminClient.hot_tablet_path)
+ parse_hot_tablet_path = staticmethod(
+ BigtableInstanceAdminClient.parse_hot_tablet_path
+ )
+ instance_path = staticmethod(BigtableInstanceAdminClient.instance_path)
+ parse_instance_path = staticmethod(BigtableInstanceAdminClient.parse_instance_path)
+ logical_view_path = staticmethod(BigtableInstanceAdminClient.logical_view_path)
+ parse_logical_view_path = staticmethod(
+ BigtableInstanceAdminClient.parse_logical_view_path
+ )
+ materialized_view_path = staticmethod(
+ BigtableInstanceAdminClient.materialized_view_path
+ )
+ parse_materialized_view_path = staticmethod(
+ BigtableInstanceAdminClient.parse_materialized_view_path
+ )
+ table_path = staticmethod(BigtableInstanceAdminClient.table_path)
+ parse_table_path = staticmethod(BigtableInstanceAdminClient.parse_table_path)
+ common_billing_account_path = staticmethod(
+ BigtableInstanceAdminClient.common_billing_account_path
+ )
+ parse_common_billing_account_path = staticmethod(
+ BigtableInstanceAdminClient.parse_common_billing_account_path
+ )
+ common_folder_path = staticmethod(BigtableInstanceAdminClient.common_folder_path)
+ parse_common_folder_path = staticmethod(
+ BigtableInstanceAdminClient.parse_common_folder_path
+ )
+ common_organization_path = staticmethod(
+ BigtableInstanceAdminClient.common_organization_path
+ )
+ parse_common_organization_path = staticmethod(
+ BigtableInstanceAdminClient.parse_common_organization_path
+ )
+ common_project_path = staticmethod(BigtableInstanceAdminClient.common_project_path)
+ parse_common_project_path = staticmethod(
+ BigtableInstanceAdminClient.parse_common_project_path
+ )
+ common_location_path = staticmethod(
+ BigtableInstanceAdminClient.common_location_path
+ )
+ parse_common_location_path = staticmethod(
+ BigtableInstanceAdminClient.parse_common_location_path
+ )
+
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ BigtableInstanceAdminAsyncClient: The constructed client.
+ """
+ return BigtableInstanceAdminClient.from_service_account_info.__func__(BigtableInstanceAdminAsyncClient, info, *args, **kwargs) # type: ignore
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ BigtableInstanceAdminAsyncClient: The constructed client.
+ """
+ return BigtableInstanceAdminClient.from_service_account_file.__func__(BigtableInstanceAdminAsyncClient, filename, *args, **kwargs) # type: ignore
+
+ from_service_account_json = from_service_account_file
+
+ @classmethod
+ def get_mtls_endpoint_and_cert_source(
+ cls, client_options: Optional[ClientOptions] = None
+ ):
+ """Return the API endpoint and client cert source for mutual TLS.
+
+ The client cert source is determined in the following order:
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+ client cert source is None.
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+ default client cert source exists, use the default one; otherwise the client cert
+ source is None.
+
+ The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise, if a client cert source exists, use the default mTLS endpoint;
+        otherwise use the default API endpoint.
+
+ More details can be found at https://google.aip.dev/auth/4114.
+
+ Args:
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
+ in this method.
+
+ Returns:
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+ client cert source to use.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+ """
+ return BigtableInstanceAdminClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
+
+ @property
+ def transport(self) -> BigtableInstanceAdminTransport:
+ """Returns the transport used by the client instance.
+
+ Returns:
+ BigtableInstanceAdminTransport: The transport used by the client instance.
+ """
+ return self._client.transport
+
+ @property
+ def api_endpoint(self):
+ """Return the API endpoint used by the client instance.
+
+ Returns:
+ str: The API endpoint used by the client instance.
+ """
+ return self._client._api_endpoint
+
+ @property
+ def universe_domain(self) -> str:
+ """Return the universe domain used by the client instance.
+
+ Returns:
+ str: The universe domain used
+ by the client instance.
+ """
+ return self._client._universe_domain
+
+ get_transport_class = BigtableInstanceAdminClient.get_transport_class
+
+ def __init__(
+ self,
+ *,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ transport: Optional[
+ Union[
+ str,
+ BigtableInstanceAdminTransport,
+ Callable[..., BigtableInstanceAdminTransport],
+ ]
+ ] = "grpc_asyncio",
+ client_options: Optional[ClientOptions] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiates the bigtable instance admin async client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Optional[Union[str,BigtableInstanceAdminTransport,Callable[..., BigtableInstanceAdminTransport]]]):
+ The transport to use, or a Callable that constructs and returns a new transport to use.
+ If a Callable is given, it will be called with the same set of initialization
+ arguments as used in the BigtableInstanceAdminTransport constructor.
+ If set to None, a transport is chosen automatically.
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
+ Custom options for the client.
+
+ 1. The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client when ``transport`` is
+ not explicitly provided. Only if this property is not set and
+ ``transport`` was not explicitly provided, the endpoint is
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
+ variable, which have one of the following values:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto-switch to the
+ default mTLS endpoint if client certificate is present; this is
+ the default value).
+
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide a client certificate for mTLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+
+ 3. The ``universe_domain`` property can be used to override the
+ default "googleapis.com" universe. Note that ``api_endpoint``
+ property still takes precedence; and ``universe_domain`` is
+ currently not supported for mTLS.
+
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+ self._client = BigtableInstanceAdminClient(
+ credentials=credentials,
+ transport=transport,
+ client_options=client_options,
+ client_info=client_info,
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ std_logging.DEBUG
+ ): # pragma: NO COVER
+ _LOGGER.debug(
+ "Created client `google.bigtable.admin_v2.BigtableInstanceAdminAsyncClient`.",
+ extra={
+ "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "universeDomain": getattr(
+ self._client._transport._credentials, "universe_domain", ""
+ ),
+ "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}",
+ "credentialsInfo": getattr(
+ self.transport._credentials, "get_cred_info", lambda: None
+ )(),
+ }
+ if hasattr(self._client._transport, "_credentials")
+ else {
+ "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "credentialsType": None,
+ },
+ )
+
+ async def create_instance(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.CreateInstanceRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ instance_id: Optional[str] = None,
+ instance: Optional[gba_instance.Instance] = None,
+ clusters: Optional[MutableMapping[str, gba_instance.Cluster]] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Create an instance within a project.
+
+ Note that exactly one of Cluster.serve_nodes and
+ Cluster.cluster_config.cluster_autoscaling_config can be set. If
+ serve_nodes is set to non-zero, then the cluster is manually
+ scaled. If cluster_config.cluster_autoscaling_config is
+ non-empty, then autoscaling is enabled.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_create_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ instance = bigtable_admin_v2.Instance()
+ instance.display_name = "display_name_value"
+
+ request = bigtable_admin_v2.CreateInstanceRequest(
+ parent="parent_value",
+ instance_id="instance_id_value",
+ instance=instance,
+ )
+
+ # Make the request
+ operation = client.create_instance(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.CreateInstance.
+ parent (:class:`str`):
+ Required. The unique name of the project in which to
+ create the new instance. Values are of the form
+ ``projects/{project}``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ instance_id (:class:`str`):
+ Required. The ID to be used when referring to the new
+ instance within its project, e.g., just ``myinstance``
+ rather than ``projects/myproject/instances/myinstance``.
+
+ This corresponds to the ``instance_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ instance (:class:`google.cloud.bigtable_admin_v2.types.Instance`):
+ Required. The instance to create. Fields marked
+ ``OutputOnly`` must be left blank.
+
+ This corresponds to the ``instance`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ clusters (:class:`MutableMapping[str, google.cloud.bigtable_admin_v2.types.Cluster]`):
+ Required. The clusters to be created within the
+ instance, mapped by desired cluster ID, e.g., just
+ ``mycluster`` rather than
+ ``projects/myproject/instances/myinstance/clusters/mycluster``.
+ Fields marked ``OutputOnly`` must be left blank.
+
+ This corresponds to the ``clusters`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance` A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and
+ the resources that serve them. All tables in an
+ instance are served from all
+ [Clusters][google.bigtable.admin.v2.Cluster] in the
+ instance.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, instance_id, instance, clusters]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_instance_admin.CreateInstanceRequest):
+ request = bigtable_instance_admin.CreateInstanceRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if instance_id is not None:
+ request.instance_id = instance_id
+ if instance is not None:
+ request.instance = instance
+
+ if clusters:
+ request.clusters.update(clusters)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.create_instance
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ gba_instance.Instance,
+ metadata_type=bigtable_instance_admin.CreateInstanceMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_instance(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.GetInstanceRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> instance.Instance:
+ r"""Gets information about an instance.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_get_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetInstanceRequest(
+ name="name_value",
+ )
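+ # The instance name takes the form "projects/{project}/instances/{instance}",
+ # for example "projects/my-project/instances/my-instance" (illustrative values).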
+
+ # Make the request
+ response = await client.get_instance(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetInstanceRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.GetInstance.
+ name (:class:`str`):
+ Required. The unique name of the requested instance.
+ Values are of the form
+ ``projects/{project}/instances/{instance}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.types.Instance:
+ A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and
+ the resources that serve them. All tables in an
+ instance are served from all
+ [Clusters][google.bigtable.admin.v2.Cluster] in the
+ instance.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_instance_admin.GetInstanceRequest):
+ request = bigtable_instance_admin.GetInstanceRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_instance
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def list_instances(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.ListInstancesRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> bigtable_instance_admin.ListInstancesResponse:
+ r"""Lists information about instances in a project.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_list_instances():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListInstancesRequest(
+ parent="parent_value",
+ )
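+ # The parent takes the form "projects/{project}",
+ # for example "projects/my-project" (illustrative value).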
+
+ # Make the request
+ response = await client.list_instances(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListInstancesRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.ListInstances.
+ parent (:class:`str`):
+ Required. The unique name of the project for which a
+ list of instances is requested. Values are of the form
+ ``projects/{project}``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.types.ListInstancesResponse:
+ Response message for
+ BigtableInstanceAdmin.ListInstances.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_instance_admin.ListInstancesRequest):
+ request = bigtable_instance_admin.ListInstancesRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_instances
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def update_instance(
+ self,
+ request: Optional[Union[instance.Instance, dict]] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> instance.Instance:
+ r"""Updates an instance within a project. This method
+ updates only the display name and type for an Instance.
+ To update other Instance properties, such as labels, use
+ PartialUpdateInstance.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_update_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.Instance(
+ display_name="display_name_value",
+ )
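+ # Only the display name and type are applied by update_instance (see the
+ # note above); the target is identified by its name, for example (illustrative):
+ # request.name = "projects/my-project/instances/my-instance"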
+
+ # Make the request
+ response = await client.update_instance(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.Instance, dict]]):
+ The request object. A collection of Bigtable
+ [Tables][google.bigtable.admin.v2.Table] and the
+ resources that serve them. All tables in an instance are
+ served from all
+ [Clusters][google.bigtable.admin.v2.Cluster] in the
+ instance.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.types.Instance:
+ A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and
+ the resources that serve them. All tables in an
+ instance are served from all
+ [Clusters][google.bigtable.admin.v2.Cluster] in the
+ instance.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, instance.Instance):
+ request = instance.Instance(request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.update_instance
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def partial_update_instance(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.PartialUpdateInstanceRequest, dict]
+ ] = None,
+ *,
+ instance: Optional[gba_instance.Instance] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Partially updates an instance within a project. This
+ method can modify all fields of an Instance and is the
+ preferred way to update an Instance.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_partial_update_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ instance = bigtable_admin_v2.Instance()
+ instance.display_name = "display_name_value"
+
+ request = bigtable_admin_v2.PartialUpdateInstanceRequest(
+ instance=instance,
+ )
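+ # An update_mask listing the Instance fields to replace must also be set.
+ # A minimal sketch, assuming only display_name should change (uses
+ # google.protobuf.field_mask_pb2; values illustrative):
+ # request.update_mask = field_mask_pb2.FieldMask(paths=["display_name"])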
+
+ # Make the request
+ operation = client.partial_update_instance(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = await (await operation).result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.PartialUpdateInstance.
+ instance (:class:`google.cloud.bigtable_admin_v2.types.Instance`):
+ Required. The Instance which will
+ (partially) replace the current value.
+
+ This corresponds to the ``instance`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+ Required. The subset of Instance
+ fields which should be replaced. Must be
+ explicitly set.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance`, a collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and
+ the resources that serve them. All tables in an
+ instance are served from all
+ [Clusters][google.bigtable.admin.v2.Cluster] in the
+ instance.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [instance, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, bigtable_instance_admin.PartialUpdateInstanceRequest
+ ):
+ request = bigtable_instance_admin.PartialUpdateInstanceRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if instance is not None:
+ request.instance = instance
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.partial_update_instance
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("instance.name", request.instance.name),)
+ ),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ gba_instance.Instance,
+ metadata_type=bigtable_instance_admin.UpdateInstanceMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def delete_instance(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.DeleteInstanceRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Delete an instance from a project.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_delete_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteInstanceRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_instance(request=request)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.DeleteInstance.
+ name (:class:`str`):
+ Required. The unique name of the instance to be deleted.
+ Values are of the form
+ ``projects/{project}/instances/{instance}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_instance_admin.DeleteInstanceRequest):
+ request = bigtable_instance_admin.DeleteInstanceRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.delete_instance
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ async def create_cluster(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.CreateClusterRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ cluster_id: Optional[str] = None,
+ cluster: Optional[instance.Cluster] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Creates a cluster within an instance.
+
+ Note that exactly one of Cluster.serve_nodes and
+ Cluster.cluster_config.cluster_autoscaling_config can be set. If
+ serve_nodes is set to non-zero, then the cluster is manually
+ scaled. If cluster_config.cluster_autoscaling_config is
+ non-empty, then autoscaling is enabled.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_create_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CreateClusterRequest(
+ parent="parent_value",
+ cluster_id="cluster_id_value",
+ )
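+ # Exactly one of serve_nodes and cluster_config.cluster_autoscaling_config
+ # may be set on the new cluster (see the note above); a manual-scaling
+ # sketch with an illustrative node count:
+ # request.cluster.serve_nodes = 3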
+
+ # Make the request
+ operation = client.create_cluster(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = await (await operation).result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.CreateCluster.
+ parent (:class:`str`):
+ Required. The unique name of the instance in which to
+ create the new cluster. Values are of the form
+ ``projects/{project}/instances/{instance}``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ cluster_id (:class:`str`):
+ Required. The ID to be used when referring to the new
+ cluster within its instance, e.g., just ``mycluster``
+ rather than
+ ``projects/myproject/instances/myinstance/clusters/mycluster``.
+
+ This corresponds to the ``cluster_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ cluster (:class:`google.cloud.bigtable_admin_v2.types.Cluster`):
+ Required. The cluster to be created. Fields marked
+ ``OutputOnly`` must be left blank.
+
+ This corresponds to the ``cluster`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster`, a resizable group of nodes in a particular cloud location, capable
+ of serving all
+ [Tables][google.bigtable.admin.v2.Table] in the
+ parent [Instance][google.bigtable.admin.v2.Instance].
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, cluster_id, cluster]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_instance_admin.CreateClusterRequest):
+ request = bigtable_instance_admin.CreateClusterRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if cluster_id is not None:
+ request.cluster_id = cluster_id
+ if cluster is not None:
+ request.cluster = cluster
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.create_cluster
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ instance.Cluster,
+ metadata_type=bigtable_instance_admin.CreateClusterMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_cluster(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.GetClusterRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> instance.Cluster:
+ r"""Gets information about a cluster.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_get_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetClusterRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_cluster(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetClusterRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.GetCluster.
+ name (:class:`str`):
+ Required. The unique name of the requested cluster.
+ Values are of the form
+ ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.types.Cluster:
+ A resizable group of nodes in a particular cloud location, capable
+ of serving all
+ [Tables][google.bigtable.admin.v2.Table] in the
+ parent [Instance][google.bigtable.admin.v2.Instance].
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_instance_admin.GetClusterRequest):
+ request = bigtable_instance_admin.GetClusterRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_cluster
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def list_clusters(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.ListClustersRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> bigtable_instance_admin.ListClustersResponse:
+ r"""Lists information about clusters in an instance.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_list_clusters():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListClustersRequest(
+ parent="parent_value",
+ )
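+ # Use "-" as the instance ID to list clusters across all instances in a
+ # project, for example parent="projects/my-project/instances/-" (illustrative).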
+
+ # Make the request
+ response = await client.list_clusters(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListClustersRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.ListClusters.
+ parent (:class:`str`):
+ Required. The unique name of the instance for which a
+ list of clusters is requested. Values are of the form
+ ``projects/{project}/instances/{instance}``. Use
+ ``{instance} = '-'`` to list Clusters for all Instances
+ in a project, e.g., ``projects/myproject/instances/-``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.types.ListClustersResponse:
+ Response message for
+ BigtableInstanceAdmin.ListClusters.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_instance_admin.ListClustersRequest):
+ request = bigtable_instance_admin.ListClustersRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_clusters
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def update_cluster(
+ self,
+ request: Optional[Union[instance.Cluster, dict]] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Updates a cluster within an instance.
+
+ Note that UpdateCluster does not support updating
+ cluster_config.cluster_autoscaling_config. In order to update
+ it, you must use PartialUpdateCluster.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_update_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.Cluster(
+ )
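+ # update_cluster cannot change cluster_config.cluster_autoscaling_config
+ # (see the note above); the cluster is identified by its name, for example
+ # (illustrative):
+ # request.name = "projects/my-project/instances/my-instance/clusters/my-cluster"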
+
+ # Make the request
+ operation = client.update_cluster(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = await (await operation).result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]]):
+ The request object. A resizable group of nodes in a particular cloud
+ location, capable of serving all
+ [Tables][google.bigtable.admin.v2.Table] in the parent
+ [Instance][google.bigtable.admin.v2.Instance].
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster`, a resizable group of nodes in a particular cloud location, capable
+ of serving all
+ [Tables][google.bigtable.admin.v2.Table] in the
+ parent [Instance][google.bigtable.admin.v2.Instance].
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, instance.Cluster):
+ request = instance.Cluster(request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.update_cluster
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ instance.Cluster,
+ metadata_type=bigtable_instance_admin.UpdateClusterMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def partial_update_cluster(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.PartialUpdateClusterRequest, dict]
+ ] = None,
+ *,
+ cluster: Optional[instance.Cluster] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Partially updates a cluster within a project. This method is the
+ preferred way to update a Cluster.
+
+ To enable and update autoscaling, set
+ cluster_config.cluster_autoscaling_config. When autoscaling is
+ enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning
+ that updates to it are ignored. Note that an update cannot
+ simultaneously set serve_nodes to non-zero and
+ cluster_config.cluster_autoscaling_config to non-empty, and also
+ specify both in the update_mask.
+
+ To disable autoscaling, clear
+ cluster_config.cluster_autoscaling_config, and explicitly set a
+ serve_node count via the update_mask.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_partial_update_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.PartialUpdateClusterRequest(
+ )
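+ # A typical request carries the cluster changes plus an update_mask naming
+ # the fields to replace; a sketch using the autoscaling field from the note
+ # above (google.protobuf.field_mask_pb2; values illustrative):
+ # request.cluster.name = "projects/my-project/instances/my-instance/clusters/my-cluster"
+ # request.update_mask = field_mask_pb2.FieldMask(
+ # paths=["cluster_config.cluster_autoscaling_config"],
+ # )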
+
+ # Make the request
+ operation = client.partial_update_cluster(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = await (await operation).result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.PartialUpdateCluster.
+ cluster (:class:`google.cloud.bigtable_admin_v2.types.Cluster`):
+ Required. The Cluster which contains the partial updates
+ to be applied, subject to the update_mask.
+
+ This corresponds to the ``cluster`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+ Required. The subset of Cluster
+ fields which should be replaced.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster`, a resizable group of nodes in a particular cloud location, capable
+ of serving all
+ [Tables][google.bigtable.admin.v2.Table] in the
+ parent [Instance][google.bigtable.admin.v2.Instance].
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [cluster, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_instance_admin.PartialUpdateClusterRequest):
+ request = bigtable_instance_admin.PartialUpdateClusterRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if cluster is not None:
+ request.cluster = cluster
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.partial_update_cluster
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("cluster.name", request.cluster.name),)
+ ),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ instance.Cluster,
+ metadata_type=bigtable_instance_admin.PartialUpdateClusterMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def delete_cluster(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.DeleteClusterRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Deletes a cluster from an instance.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_delete_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteClusterRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_cluster(request=request)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteClusterRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.DeleteCluster.
+ name (:class:`str`):
+ Required. The unique name of the cluster to be deleted.
+ Values are of the form
+ ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_instance_admin.DeleteClusterRequest):
+ request = bigtable_instance_admin.DeleteClusterRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.delete_cluster
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ async def create_app_profile(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.CreateAppProfileRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ app_profile_id: Optional[str] = None,
+ app_profile: Optional[instance.AppProfile] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> instance.AppProfile:
+ r"""Creates an app profile within an instance.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_create_app_profile():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ app_profile = bigtable_admin_v2.AppProfile()
+ app_profile.priority = "PRIORITY_HIGH"
+
+ request = bigtable_admin_v2.CreateAppProfileRequest(
+ parent="parent_value",
+ app_profile_id="app_profile_id_value",
+ app_profile=app_profile,
+ )
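+ # app_profile_id is the short ID only, e.g. "myprofile" rather than
+ # "projects/myproject/instances/myinstance/appProfiles/myprofile".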
+
+ # Make the request
+ response = await client.create_app_profile(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.CreateAppProfile.
+ parent (:class:`str`):
+ Required. The unique name of the instance in which to
+ create the new app profile. Values are of the form
+ ``projects/{project}/instances/{instance}``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ app_profile_id (:class:`str`):
+ Required. The ID to be used when referring to the new
+ app profile within its instance, e.g., just
+ ``myprofile`` rather than
+ ``projects/myproject/instances/myinstance/appProfiles/myprofile``.
+
+ This corresponds to the ``app_profile_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ app_profile (:class:`google.cloud.bigtable_admin_v2.types.AppProfile`):
+ Required. The app profile to be created. Fields marked
+ ``OutputOnly`` will be ignored.
+
+ This corresponds to the ``app_profile`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.types.AppProfile:
+ A configuration object describing how
+ Cloud Bigtable should treat traffic from
+ a particular end user application.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, app_profile_id, app_profile]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_instance_admin.CreateAppProfileRequest):
+ request = bigtable_instance_admin.CreateAppProfileRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if app_profile_id is not None:
+ request.app_profile_id = app_profile_id
+ if app_profile is not None:
+ request.app_profile = app_profile
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.create_app_profile
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_app_profile(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.GetAppProfileRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> instance.AppProfile:
+ r"""Gets information about an app profile.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_get_app_profile():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetAppProfileRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_app_profile(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetAppProfileRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.GetAppProfile.
+ name (:class:`str`):
+ Required. The unique name of the requested app profile.
+ Values are of the form
+ ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.types.AppProfile:
+ A configuration object describing how
+ Cloud Bigtable should treat traffic from
+ a particular end user application.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_instance_admin.GetAppProfileRequest):
+ request = bigtable_instance_admin.GetAppProfileRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_app_profile
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def list_app_profiles(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.ListAppProfilesRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> pagers.ListAppProfilesAsyncPager:
+ r"""Lists information about app profiles in an instance.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_list_app_profiles():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListAppProfilesRequest(
+ parent="parent_value",
+ )
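+ # As with clusters, "-" may be used as the instance ID to request app
+ # profiles across every instance in the project (illustrative):
+ # request.parent = "projects/my-project/instances/-"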
+
+ # Make the request
+ page_result = await client.list_app_profiles(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.ListAppProfiles.
+ parent (:class:`str`):
+ Required. The unique name of the instance for which a
+ list of app profiles is requested. Values are of the
+ form ``projects/{project}/instances/{instance}``. Use
+ ``{instance} = '-'`` to list AppProfiles for all
+ Instances in a project, e.g.,
+ ``projects/myproject/instances/-``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesAsyncPager:
+ Response message for
+ BigtableInstanceAdmin.ListAppProfiles.
+ Iterating over this object will yield
+ results and resolve additional pages
+ automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_instance_admin.ListAppProfilesRequest):
+ request = bigtable_instance_admin.ListAppProfilesRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_app_profiles
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListAppProfilesAsyncPager(
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
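Because the return value is wrapped in ``ListAppProfilesAsyncPager``, callers can iterate items directly (additional pages are fetched lazily) or walk the raw responses via the pager's ``pages`` property. A sketch, assuming placeholder resource names and the standard generated pager surface:

```python
import asyncio

from google.cloud import bigtable_admin_v2


async def main() -> None:
    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
    # '-' as the instance ID lists app profiles across all instances in the project.
    parent = "projects/my-project/instances/-"

    # Item-level iteration: the pager fetches additional pages transparently.
    pager = await client.list_app_profiles(parent=parent)
    async for profile in pager:
        print(profile.name)

    # Page-level iteration, when the raw ListAppProfilesResponse objects are needed.
    pager = await client.list_app_profiles(parent=parent)
    async for page in pager.pages:
        print(len(page.app_profiles), "profiles on this page")


asyncio.run(main())
```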
+ async def update_app_profile(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.UpdateAppProfileRequest, dict]
+ ] = None,
+ *,
+ app_profile: Optional[instance.AppProfile] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Updates an app profile within an instance.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_update_app_profile():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ app_profile = bigtable_admin_v2.AppProfile()
+ app_profile.priority = "PRIORITY_HIGH"
+
+ request = bigtable_admin_v2.UpdateAppProfileRequest(
+ app_profile=app_profile,
+ )
+
+ # Make the request
+            operation = await client.update_app_profile(request=request)
+
+            print("Waiting for operation to complete...")
+
+            response = await operation.result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.UpdateAppProfile.
+ app_profile (:class:`google.cloud.bigtable_admin_v2.types.AppProfile`):
+ Required. The app profile which will
+ (partially) replace the current value.
+
+ This corresponds to the ``app_profile`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+ Required. The subset of app profile
+ fields which should be replaced. If
+ unset, all fields will be replaced.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AppProfile` A configuration object describing how Cloud Bigtable should treat traffic
+ from a particular end user application.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [app_profile, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_instance_admin.UpdateAppProfileRequest):
+ request = bigtable_instance_admin.UpdateAppProfileRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if app_profile is not None:
+ request.app_profile = app_profile
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.update_app_profile
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("app_profile.name", request.app_profile.name),)
+ ),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ instance.AppProfile,
+ metadata_type=bigtable_instance_admin.UpdateAppProfileMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
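A hedged usage sketch for the partial-update path above: only the paths named in ``update_mask`` are replaced, so updating just the profile description looks like this (resource names and description text are placeholders):

```python
import asyncio

from google.cloud import bigtable_admin_v2
from google.protobuf import field_mask_pb2


async def main() -> None:
    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()

    app_profile = bigtable_admin_v2.AppProfile(
        name="projects/my-project/instances/my-instance/appProfiles/my-profile",
        description="updated by a maintenance script",
    )
    # Only the fields named in update_mask are replaced on the server.
    update_mask = field_mask_pb2.FieldMask(paths=["description"])

    operation = await client.update_app_profile(
        app_profile=app_profile,
        update_mask=update_mask,
    )
    updated = await operation.result()
    print(updated.description)


asyncio.run(main())
```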
+ async def delete_app_profile(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.DeleteAppProfileRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ ignore_warnings: Optional[bool] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Deletes an app profile from an instance.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_delete_app_profile():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteAppProfileRequest(
+ name="name_value",
+ ignore_warnings=True,
+ )
+
+ # Make the request
+ await client.delete_app_profile(request=request)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.DeleteAppProfile.
+ name (:class:`str`):
+ Required. The unique name of the app profile to be
+ deleted. Values are of the form
+ ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ ignore_warnings (:class:`bool`):
+ Required. If true, ignore safety
+ checks when deleting the app profile.
+
+ This corresponds to the ``ignore_warnings`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name, ignore_warnings]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_instance_admin.DeleteAppProfileRequest):
+ request = bigtable_instance_admin.DeleteAppProfileRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+ if ignore_warnings is not None:
+ request.ignore_warnings = ignore_warnings
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.delete_app_profile
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
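A minimal caller sketch for the deletion path above (the app profile name is a placeholder); ``ignore_warnings`` is a required field, and ``True`` bypasses the service's safety checks:

```python
import asyncio

from google.cloud import bigtable_admin_v2


async def main() -> None:
    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()

    # Setting ignore_warnings=True skips the safety checks described in the
    # docstring above; the RPC returns nothing on success.
    await client.delete_app_profile(
        name="projects/my-project/instances/my-instance/appProfiles/my-profile",
        ignore_warnings=True,
    )


asyncio.run(main())
```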
+ async def get_iam_policy(
+ self,
+ request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None,
+ *,
+ resource: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> policy_pb2.Policy:
+ r"""Gets the access control policy for an instance
+ resource. Returns an empty policy if an instance exists
+ but does not have a policy set.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ async def sample_get_iam_policy():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.GetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = await client.get_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]):
+ The request object. Request message for ``GetIamPolicy`` method.
+ resource (:class:`str`):
+ REQUIRED: The resource for which the
+ policy is being requested. See the
+ operation documentation for the
+ appropriate value for this field.
+
+ This corresponds to the ``resource`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.iam.v1.policy_pb2.Policy:
+ An Identity and Access Management (IAM) policy, which specifies access
+ controls for Google Cloud resources.
+
+ A Policy is a collection of bindings. A binding binds
+ one or more members, or principals, to a single role.
+ Principals can be user accounts, service accounts,
+ Google groups, and domains (such as G Suite). A role
+ is a named list of permissions; each role can be an
+ IAM predefined role or a user-created custom role.
+
+ For some types of Google Cloud resources, a binding
+ can also specify a condition, which is a logical
+ expression that allows access to a resource only if
+ the expression evaluates to true. A condition can add
+ constraints based on attributes of the request, the
+ resource, or both. To learn which resources support
+ conditions in their IAM policies, see the [IAM
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+
+ **JSON example:**
+
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
+
+ **YAML example:**
+
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
+
+ For a description of IAM and its features, see the
+ [IAM
+ documentation](https://cloud.google.com/iam/docs/).
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = iam_policy_pb2.GetIamPolicyRequest(**request)
+ elif not request:
+ request = iam_policy_pb2.GetIamPolicyRequest(resource=resource)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_iam_policy
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
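A small sketch of reading and inspecting the returned ``Policy`` (the instance name is a placeholder):

```python
import asyncio

from google.cloud import bigtable_admin_v2


async def main() -> None:
    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()

    policy = await client.get_iam_policy(
        resource="projects/my-project/instances/my-instance"
    )

    # An empty policy (no bindings) comes back when the instance exists but
    # has no policy set, as noted in the docstring above.
    for binding in policy.bindings:
        print(binding.role, list(binding.members))


asyncio.run(main())
```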
+ async def set_iam_policy(
+ self,
+ request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None,
+ *,
+ resource: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> policy_pb2.Policy:
+ r"""Sets the access control policy on an instance
+ resource. Replaces any existing policy.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ async def sample_set_iam_policy():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.SetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = await client.set_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]):
+ The request object. Request message for ``SetIamPolicy`` method.
+ resource (:class:`str`):
+ REQUIRED: The resource for which the
+ policy is being specified. See the
+ operation documentation for the
+ appropriate value for this field.
+
+ This corresponds to the ``resource`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.iam.v1.policy_pb2.Policy:
+ An Identity and Access Management (IAM) policy, which specifies access
+ controls for Google Cloud resources.
+
+ A Policy is a collection of bindings. A binding binds
+ one or more members, or principals, to a single role.
+ Principals can be user accounts, service accounts,
+ Google groups, and domains (such as G Suite). A role
+ is a named list of permissions; each role can be an
+ IAM predefined role or a user-created custom role.
+
+ For some types of Google Cloud resources, a binding
+ can also specify a condition, which is a logical
+ expression that allows access to a resource only if
+ the expression evaluates to true. A condition can add
+ constraints based on attributes of the request, the
+ resource, or both. To learn which resources support
+ conditions in their IAM policies, see the [IAM
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+
+ **JSON example:**
+
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
+
+ **YAML example:**
+
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
+
+ For a description of IAM and its features, see the
+ [IAM
+ documentation](https://cloud.google.com/iam/docs/).
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = iam_policy_pb2.SetIamPolicyRequest(**request)
+ elif not request:
+ request = iam_policy_pb2.SetIamPolicyRequest(resource=resource)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.set_iam_policy
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
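Since ``SetIamPolicy`` replaces the existing policy wholesale, the usual pattern is read-modify-write, sending back the policy (and its etag) obtained from ``get_iam_policy``. A sketch under that assumption; the role and member shown are illustrative:

```python
import asyncio

from google.cloud import bigtable_admin_v2
from google.iam.v1 import iam_policy_pb2, policy_pb2


async def main() -> None:
    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
    resource = "projects/my-project/instances/my-instance"

    # Read the current policy, including its etag.
    policy = await client.get_iam_policy(resource=resource)

    # Modify it locally; here we grant the Bigtable reader role to one user.
    policy.bindings.append(
        policy_pb2.Binding(
            role="roles/bigtable.reader",
            members=["user:someone@example.com"],
        )
    )

    # Write it back. Because the etag from the read is carried in the policy,
    # the call fails if someone else changed the policy in the meantime.
    request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy)
    updated = await client.set_iam_policy(request=request)
    print(updated.etag)


asyncio.run(main())
```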
+ async def test_iam_permissions(
+ self,
+ request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None,
+ *,
+ resource: Optional[str] = None,
+ permissions: Optional[MutableSequence[str]] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> iam_policy_pb2.TestIamPermissionsResponse:
+ r"""Returns permissions that the caller has on the
+ specified instance resource.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ async def sample_test_iam_permissions():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.TestIamPermissionsRequest(
+ resource="resource_value",
+ permissions=['permissions_value1', 'permissions_value2'],
+ )
+
+ # Make the request
+ response = await client.test_iam_permissions(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]):
+ The request object. Request message for ``TestIamPermissions`` method.
+ resource (:class:`str`):
+ REQUIRED: The resource for which the
+ policy detail is being requested. See
+ the operation documentation for the
+ appropriate value for this field.
+
+ This corresponds to the ``resource`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ permissions (:class:`MutableSequence[str]`):
+ The set of permissions to check for the ``resource``.
+ Permissions with wildcards (such as '*' or 'storage.*')
+ are not allowed. For more information see `IAM
+                Overview <https://cloud.google.com/iam/docs/overview#permissions>`__.
+
+ This corresponds to the ``permissions`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
+ Response message for TestIamPermissions method.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource, permissions]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = iam_policy_pb2.TestIamPermissionsRequest(**request)
+ elif not request:
+ request = iam_policy_pb2.TestIamPermissionsRequest(
+ resource=resource, permissions=permissions
+ )
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.test_iam_permissions
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
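A sketch of checking which of a set of permissions the caller actually holds; the permission strings are illustrative Bigtable permissions and may need adjusting:

```python
import asyncio

from google.cloud import bigtable_admin_v2


async def main() -> None:
    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()

    wanted = ["bigtable.instances.get", "bigtable.appProfiles.update"]
    response = await client.test_iam_permissions(
        resource="projects/my-project/instances/my-instance",
        permissions=wanted,
    )

    # The response echoes back only the permissions the caller actually holds.
    missing = set(wanted) - set(response.permissions)
    if missing:
        print("missing permissions:", sorted(missing))


asyncio.run(main())
```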
+ async def list_hot_tablets(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.ListHotTabletsRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> pagers.ListHotTabletsAsyncPager:
+ r"""Lists hot tablets in a cluster, within the time range
+ provided. Hot tablets are ordered based on CPU usage.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_list_hot_tablets():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListHotTabletsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+            page_result = await client.list_hot_tablets(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.ListHotTablets.
+ parent (:class:`str`):
+ Required. The cluster name to list hot tablets. Value is
+ in the following form:
+ ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsAsyncPager:
+ Response message for
+ BigtableInstanceAdmin.ListHotTablets.
+ Iterating over this object will yield
+ results and resolve additional pages
+ automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_instance_admin.ListHotTabletsRequest):
+ request = bigtable_instance_admin.ListHotTabletsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_hot_tablets
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListHotTabletsAsyncPager(
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
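A hedged sketch that restricts the listing to the last hour via the request's ``start_time``/``end_time`` fields and prints two assumed ``HotTablet`` fields (``table_name``, ``node_cpu_usage_percent``); the cluster name is a placeholder:

```python
import asyncio
import datetime

from google.cloud import bigtable_admin_v2


async def main() -> None:
    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()

    end = datetime.datetime.now(tz=datetime.timezone.utc)
    start = end - datetime.timedelta(hours=1)

    # proto-plus converts datetime values to Timestamp fields on the request.
    request = bigtable_admin_v2.ListHotTabletsRequest(
        parent="projects/my-project/instances/my-instance/clusters/my-cluster",
        start_time=start,
        end_time=end,
    )

    pager = await client.list_hot_tablets(request=request)
    async for tablet in pager:
        # Hot tablets come back ordered by CPU usage, highest first.
        print(tablet.table_name, tablet.node_cpu_usage_percent)


asyncio.run(main())
```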
+ async def create_logical_view(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.CreateLogicalViewRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ logical_view: Optional[instance.LogicalView] = None,
+ logical_view_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Creates a logical view within an instance.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_create_logical_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ logical_view = bigtable_admin_v2.LogicalView()
+ logical_view.query = "query_value"
+
+ request = bigtable_admin_v2.CreateLogicalViewRequest(
+ parent="parent_value",
+ logical_view_id="logical_view_id_value",
+ logical_view=logical_view,
+ )
+
+ # Make the request
+            operation = await client.create_logical_view(request=request)
+
+            print("Waiting for operation to complete...")
+
+            response = await operation.result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.CreateLogicalView.
+ parent (:class:`str`):
+ Required. The parent instance where this logical view
+ will be created. Format:
+ ``projects/{project}/instances/{instance}``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ logical_view (:class:`google.cloud.bigtable_admin_v2.types.LogicalView`):
+ Required. The logical view to create.
+ This corresponds to the ``logical_view`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ logical_view_id (:class:`str`):
+ Required. The ID to use for the
+ logical view, which will become the
+ final component of the logical view's
+ resource name.
+
+ This corresponds to the ``logical_view_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.bigtable_admin_v2.types.LogicalView`
+ A SQL logical view object that can be referenced in SQL
+ queries.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, logical_view, logical_view_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_instance_admin.CreateLogicalViewRequest):
+ request = bigtable_instance_admin.CreateLogicalViewRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if logical_view is not None:
+ request.logical_view = logical_view
+ if logical_view_id is not None:
+ request.logical_view_id = logical_view_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.create_logical_view
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ instance.LogicalView,
+ metadata_type=bigtable_instance_admin.CreateLogicalViewMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
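Because this is a long-running operation on an async client, both the method call and ``operation.result()`` are coroutines and must be awaited. A sketch with placeholder names and an illustrative SQL query:

```python
import asyncio

from google.cloud import bigtable_admin_v2


async def main() -> None:
    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()

    logical_view = bigtable_admin_v2.LogicalView(
        query="SELECT _key, cf['col'] AS col FROM `my-table`",
    )

    # Await the call to get the AsyncOperation, then await its result()
    # coroutine to get the created LogicalView.
    operation = await client.create_logical_view(
        parent="projects/my-project/instances/my-instance",
        logical_view=logical_view,
        logical_view_id="my-logical-view",
    )
    created = await operation.result()
    print(created.name)


asyncio.run(main())
```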
+ async def get_logical_view(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.GetLogicalViewRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> instance.LogicalView:
+ r"""Gets information about a logical view.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_get_logical_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetLogicalViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_logical_view(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetLogicalViewRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.GetLogicalView.
+ name (:class:`str`):
+ Required. The unique name of the requested logical view.
+ Values are of the form
+ ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.types.LogicalView:
+ A SQL logical view object that can be
+ referenced in SQL queries.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_instance_admin.GetLogicalViewRequest):
+ request = bigtable_instance_admin.GetLogicalViewRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_logical_view
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
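Every method above accepts per-call ``retry`` and ``timeout`` overrides. A sketch of supplying a custom ``AsyncRetry`` (parameter names follow recent ``google-api-core`` releases; tune the values to taste):

```python
import asyncio

from google.api_core import exceptions, retry_async
from google.cloud import bigtable_admin_v2


async def main() -> None:
    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()

    # Override the default retry policy for a single call: retry only on
    # UNAVAILABLE, with exponential backoff, and give up after 60 seconds.
    retry = retry_async.AsyncRetry(
        predicate=retry_async.if_exception_type(exceptions.ServiceUnavailable),
        initial=0.1,
        maximum=5.0,
        multiplier=2.0,
        timeout=60.0,
    )

    view = await client.get_logical_view(
        name="projects/my-project/instances/my-instance/logicalViews/my-logical-view",
        retry=retry,
        timeout=60.0,
    )
    print(view.query)


asyncio.run(main())
```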
+ async def list_logical_views(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.ListLogicalViewsRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> pagers.ListLogicalViewsAsyncPager:
+ r"""Lists information about logical views in an instance.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_list_logical_views():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListLogicalViewsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+            page_result = await client.list_logical_views(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.ListLogicalViews.
+ parent (:class:`str`):
+ Required. The unique name of the instance for which the
+ list of logical views is requested. Values are of the
+ form ``projects/{project}/instances/{instance}``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListLogicalViewsAsyncPager:
+ Response message for
+ BigtableInstanceAdmin.ListLogicalViews.
+ Iterating over this object will yield
+ results and resolve additional pages
+ automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_instance_admin.ListLogicalViewsRequest):
+ request = bigtable_instance_admin.ListLogicalViewsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_logical_views
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListLogicalViewsAsyncPager(
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def update_logical_view(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.UpdateLogicalViewRequest, dict]
+ ] = None,
+ *,
+ logical_view: Optional[instance.LogicalView] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Updates a logical view within an instance.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_update_logical_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ logical_view = bigtable_admin_v2.LogicalView()
+ logical_view.query = "query_value"
+
+ request = bigtable_admin_v2.UpdateLogicalViewRequest(
+ logical_view=logical_view,
+ )
+
+ # Make the request
+            operation = await client.update_logical_view(request=request)
+
+            print("Waiting for operation to complete...")
+
+            response = await operation.result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.UpdateLogicalView.
+ logical_view (:class:`google.cloud.bigtable_admin_v2.types.LogicalView`):
+ Required. The logical view to update.
+
+ The logical view's ``name`` field is used to identify
+ the view to update. Format:
+ ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``.
+
+ This corresponds to the ``logical_view`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+ Optional. The list of fields to
+ update.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.bigtable_admin_v2.types.LogicalView`
+ A SQL logical view object that can be referenced in SQL
+ queries.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [logical_view, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_instance_admin.UpdateLogicalViewRequest):
+ request = bigtable_instance_admin.UpdateLogicalViewRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if logical_view is not None:
+ request.logical_view = logical_view
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.update_logical_view
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("logical_view.name", request.logical_view.name),)
+ ),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ instance.LogicalView,
+ metadata_type=bigtable_instance_admin.UpdateLogicalViewMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
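A sketch of a masked update that rewrites only the view's ``query`` (the resource name and SQL text are placeholders):

```python
import asyncio

from google.cloud import bigtable_admin_v2
from google.protobuf import field_mask_pb2


async def main() -> None:
    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()

    logical_view = bigtable_admin_v2.LogicalView(
        name="projects/my-project/instances/my-instance/logicalViews/my-logical-view",
        query="SELECT _key FROM `my-table`",
    )
    # Restrict the update to the query field; other fields keep their values.
    update_mask = field_mask_pb2.FieldMask(paths=["query"])

    operation = await client.update_logical_view(
        logical_view=logical_view,
        update_mask=update_mask,
    )
    updated = await operation.result()
    print(updated.query)


asyncio.run(main())
```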
+ async def delete_logical_view(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.DeleteLogicalViewRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Deletes a logical view from an instance.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_delete_logical_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteLogicalViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_logical_view(request=request)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteLogicalViewRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.DeleteLogicalView.
+ name (:class:`str`):
+ Required. The unique name of the logical view to be
+ deleted. Format:
+ ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_instance_admin.DeleteLogicalViewRequest):
+ request = bigtable_instance_admin.DeleteLogicalViewRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.delete_logical_view
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ async def create_materialized_view(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.CreateMaterializedViewRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ materialized_view: Optional[instance.MaterializedView] = None,
+ materialized_view_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Creates a materialized view within an instance.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_create_materialized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ materialized_view = bigtable_admin_v2.MaterializedView()
+ materialized_view.query = "query_value"
+
+ request = bigtable_admin_v2.CreateMaterializedViewRequest(
+ parent="parent_value",
+ materialized_view_id="materialized_view_id_value",
+ materialized_view=materialized_view,
+ )
+
+ # Make the request
+            operation = await client.create_materialized_view(request=request)
+
+            print("Waiting for operation to complete...")
+
+            response = await operation.result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.CreateMaterializedView.
+ parent (:class:`str`):
+ Required. The parent instance where this materialized
+ view will be created. Format:
+ ``projects/{project}/instances/{instance}``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ materialized_view (:class:`google.cloud.bigtable_admin_v2.types.MaterializedView`):
+ Required. The materialized view to
+ create.
+
+ This corresponds to the ``materialized_view`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ materialized_view_id (:class:`str`):
+ Required. The ID to use for the
+ materialized view, which will become the
+ final component of the materialized
+ view's resource name.
+
+ This corresponds to the ``materialized_view_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.bigtable_admin_v2.types.MaterializedView`
+ A materialized view object that can be referenced in SQL
+ queries.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, materialized_view, materialized_view_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, bigtable_instance_admin.CreateMaterializedViewRequest
+ ):
+ request = bigtable_instance_admin.CreateMaterializedViewRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if materialized_view is not None:
+ request.materialized_view = materialized_view
+ if materialized_view_id is not None:
+ request.materialized_view_id = materialized_view_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.create_materialized_view
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ instance.MaterializedView,
+ metadata_type=bigtable_instance_admin.CreateMaterializedViewMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_materialized_view(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.GetMaterializedViewRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> instance.MaterializedView:
+ r"""Gets information about a materialized view.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_get_materialized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetMaterializedViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_materialized_view(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetMaterializedViewRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.GetMaterializedView.
+ name (:class:`str`):
+ Required. The unique name of the requested materialized
+ view. Values are of the form
+ ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.types.MaterializedView:
+ A materialized view object that can
+ be referenced in SQL queries.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_instance_admin.GetMaterializedViewRequest):
+ request = bigtable_instance_admin.GetMaterializedViewRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_materialized_view
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def list_materialized_views(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.ListMaterializedViewsRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> pagers.ListMaterializedViewsAsyncPager:
+ r"""Lists information about materialized views in an
+ instance.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_list_materialized_views():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListMaterializedViewsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = await client.list_materialized_views(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.ListMaterializedViews.
+ parent (:class:`str`):
+ Required. The unique name of the instance for which the
+ list of materialized views is requested. Values are of
+ the form ``projects/{project}/instances/{instance}``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListMaterializedViewsAsyncPager:
+ Response message for
+ BigtableInstanceAdmin.ListMaterializedViews.
+ Iterating over this object will yield
+ results and resolve additional pages
+ automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, bigtable_instance_admin.ListMaterializedViewsRequest
+ ):
+ request = bigtable_instance_admin.ListMaterializedViewsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_materialized_views
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListMaterializedViewsAsyncPager(
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def update_materialized_view(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.UpdateMaterializedViewRequest, dict]
+ ] = None,
+ *,
+ materialized_view: Optional[instance.MaterializedView] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Updates a materialized view within an instance.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_update_materialized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ materialized_view = bigtable_admin_v2.MaterializedView()
+ materialized_view.query = "query_value"
+
+ request = bigtable_admin_v2.UpdateMaterializedViewRequest(
+ materialized_view=materialized_view,
+ )
+
+ # Make the request
+ operation = client.update_materialized_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.UpdateMaterializedView.
+ materialized_view (:class:`google.cloud.bigtable_admin_v2.types.MaterializedView`):
+ Required. The materialized view to update.
+
+ The materialized view's ``name`` field is used to
+ identify the view to update. Format:
+ ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``.
+
+ This corresponds to the ``materialized_view`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+ Optional. The list of fields to
+ update.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.bigtable_admin_v2.types.MaterializedView`
+ A materialized view object that can be referenced in SQL
+ queries.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [materialized_view, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, bigtable_instance_admin.UpdateMaterializedViewRequest
+ ):
+ request = bigtable_instance_admin.UpdateMaterializedViewRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if materialized_view is not None:
+ request.materialized_view = materialized_view
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.update_materialized_view
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("materialized_view.name", request.materialized_view.name),)
+ ),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ instance.MaterializedView,
+ metadata_type=bigtable_instance_admin.UpdateMaterializedViewMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def delete_materialized_view(
+ self,
+ request: Optional[
+ Union[bigtable_instance_admin.DeleteMaterializedViewRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Deletes a materialized view from an instance.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_delete_materialized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteMaterializedViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_materialized_view(request=request)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteMaterializedViewRequest, dict]]):
+ The request object. Request message for
+ BigtableInstanceAdmin.DeleteMaterializedView.
+ name (:class:`str`):
+ Required. The unique name of the materialized view to be
+ deleted. Format:
+ ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, bigtable_instance_admin.DeleteMaterializedViewRequest
+ ):
+ request = bigtable_instance_admin.DeleteMaterializedViewRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.delete_materialized_view
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ async def __aenter__(self) -> "BigtableInstanceAdminAsyncClient":
+ return self
+
+ async def __aexit__(self, exc_type, exc, tb):
+ await self.transport.close()
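+
+ # Illustrative sketch (not part of the generated code): the context-manager
+ # hooks above allow usage like the following, which closes the underlying
+ # transport automatically (the view name is a placeholder):
+ #
+ #   async with BigtableInstanceAdminAsyncClient() as client:
+ #       await client.delete_materialized_view(
+ #           name="projects/p/instances/i/materializedViews/mv"
+ #       )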
+
+
+DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=package_version.__version__
+)
+
+if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER
+ DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__
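+# (The ``hasattr`` guard above keeps the module importable with older
+# google-api-core releases whose ``ClientInfo`` predates the
+# ``protobuf_runtime_version`` field.)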
+
+
+__all__ = ("BigtableInstanceAdminAsyncClient",)
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py
new file mode 100644
index 000000000000..7c72be99730b
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py
@@ -0,0 +1,4818 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from collections import OrderedDict
+from http import HTTPStatus
+import json
+import logging as std_logging
+import os
+import re
+from typing import (
+ Dict,
+ Callable,
+ Mapping,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+ cast,
+)
+import warnings
+
+from google.cloud.bigtable_admin_v2 import gapic_version as package_version
+
+from google.api_core import client_options as client_options_lib
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials # type: ignore
+from google.auth.transport import mtls # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.auth.exceptions import MutualTLSChannelError # type: ignore
+from google.oauth2 import service_account # type: ignore
+import google.protobuf
+
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
+
+try:
+ from google.api_core import client_logging # type: ignore
+
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = std_logging.getLogger(__name__)
+
+from google.api_core import operation # type: ignore
+from google.api_core import operation_async # type: ignore
+from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers
+from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin
+from google.cloud.bigtable_admin_v2.types import common
+from google.cloud.bigtable_admin_v2.types import instance
+from google.cloud.bigtable_admin_v2.types import instance as gba_instance
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
+from .transports.base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import BigtableInstanceAdminGrpcTransport
+from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport
+from .transports.rest import BigtableInstanceAdminRestTransport
+
+
+class BigtableInstanceAdminClientMeta(type):
+ """Metaclass for the BigtableInstanceAdmin client.
+
+ This provides class-level methods for building and retrieving
+ support objects (e.g. transport) without polluting the client instance
+ objects.
+ """
+
+ _transport_registry = (
+ OrderedDict()
+ ) # type: Dict[str, Type[BigtableInstanceAdminTransport]]
+ _transport_registry["grpc"] = BigtableInstanceAdminGrpcTransport
+ _transport_registry["grpc_asyncio"] = BigtableInstanceAdminGrpcAsyncIOTransport
+ _transport_registry["rest"] = BigtableInstanceAdminRestTransport
+
+ def get_transport_class(
+ cls,
+ label: Optional[str] = None,
+ ) -> Type[BigtableInstanceAdminTransport]:
+ """Returns an appropriate transport class.
+
+ Args:
+ label: The name of the desired transport. If none is
+ provided, then the first transport in the registry is used.
+
+ Returns:
+ The transport class to use.
+ """
+ # If a specific transport is requested, return that one.
+ if label:
+ return cls._transport_registry[label]
+
+ # No transport is requested; return the default (that is, the first one
+ # in the dictionary).
+ return next(iter(cls._transport_registry.values()))
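+
+ # Illustrative sketch (not part of the generated surface): the registry
+ # above lets callers pick a transport by label, for example:
+ #
+ #   BigtableInstanceAdminClient.get_transport_class("grpc_asyncio")
+ #   # -> BigtableInstanceAdminGrpcAsyncIOTransport
+ #   BigtableInstanceAdminClient.get_transport_class()
+ #   # -> BigtableInstanceAdminGrpcTransport (first registered entry)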
+
+
+class BigtableInstanceAdminClient(metaclass=BigtableInstanceAdminClientMeta):
+ """Service for creating, configuring, and deleting Cloud
+ Bigtable Instances and Clusters. Provides access to the Instance
+ and Cluster schemas only, not the tables' metadata or data
+ stored in those tables.
+ """
+
+ @staticmethod
+ def _get_default_mtls_endpoint(api_endpoint):
+ """Converts api endpoint to mTLS endpoint.
+
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+ Args:
+ api_endpoint (Optional[str]): the api endpoint to convert.
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+ r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?"
+ )
+
+ m = mtls_endpoint_re.match(api_endpoint)
+ name, mtls, sandbox, googledomain = m.groups()
+ if mtls or not googledomain:
+ return api_endpoint
+
+ if sandbox:
+ return api_endpoint.replace(
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+ )
+
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
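+
+ # Illustrative sketch (not part of the generated code): given the regex
+ # above, the conversion behaves roughly as follows (endpoints are examples):
+ #
+ #   _get_default_mtls_endpoint("bigtableadmin.googleapis.com")
+ #   # -> "bigtableadmin.mtls.googleapis.com"
+ #   _get_default_mtls_endpoint("bigtableadmin.sandbox.googleapis.com")
+ #   # -> "bigtableadmin.mtls.sandbox.googleapis.com"
+ #   _get_default_mtls_endpoint("example.com")
+ #   # -> "example.com" (non-googleapis domains are returned unchanged)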
+
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
+ DEFAULT_ENDPOINT = "bigtableadmin.googleapis.com"
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
+ DEFAULT_ENDPOINT
+ )
+
+ _DEFAULT_ENDPOINT_TEMPLATE = "bigtableadmin.{UNIVERSE_DOMAIN}"
+ _DEFAULT_UNIVERSE = "googleapis.com"
+
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ BigtableInstanceAdminClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_info(info)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ BigtableInstanceAdminClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_file(filename)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ from_service_account_json = from_service_account_file
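+
+ # Illustrative sketch (not part of the generated code): a client built from a
+ # service-account key file; the path below is a placeholder.
+ #
+ #   client = BigtableInstanceAdminClient.from_service_account_file(
+ #       "path/to/service-account.json"
+ #   )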
+
+ @property
+ def transport(self) -> BigtableInstanceAdminTransport:
+ """Returns the transport used by the client instance.
+
+ Returns:
+ BigtableInstanceAdminTransport: The transport used by the client
+ instance.
+ """
+ return self._transport
+
+ @staticmethod
+ def app_profile_path(
+ project: str,
+ instance: str,
+ app_profile: str,
+ ) -> str:
+ """Returns a fully-qualified app_profile string."""
+ return (
+ "projects/{project}/instances/{instance}/appProfiles/{app_profile}".format(
+ project=project,
+ instance=instance,
+ app_profile=app_profile,
+ )
+ )
+
+ @staticmethod
+ def parse_app_profile_path(path: str) -> Dict[str, str]:
+ """Parses a app_profile path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/instances/(?P.+?)/appProfiles/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
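+
+ # Illustrative sketch (not part of the generated code): each path builder and
+ # its parser are inverses of one another; the values below are placeholders.
+ #
+ #   path = BigtableInstanceAdminClient.app_profile_path(
+ #       "my-project", "my-instance", "my-profile"
+ #   )
+ #   # -> "projects/my-project/instances/my-instance/appProfiles/my-profile"
+ #   BigtableInstanceAdminClient.parse_app_profile_path(path)
+ #   # -> {"project": "my-project", "instance": "my-instance",
+ #   #     "app_profile": "my-profile"}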
+
+ @staticmethod
+ def cluster_path(
+ project: str,
+ instance: str,
+ cluster: str,
+ ) -> str:
+ """Returns a fully-qualified cluster string."""
+ return "projects/{project}/instances/{instance}/clusters/{cluster}".format(
+ project=project,
+ instance=instance,
+ cluster=cluster,
+ )
+
+ @staticmethod
+ def parse_cluster_path(path: str) -> Dict[str, str]:
+ """Parses a cluster path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/instances/(?P.+?)/clusters/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def crypto_key_path(
+ project: str,
+ location: str,
+ key_ring: str,
+ crypto_key: str,
+ ) -> str:
+ """Returns a fully-qualified crypto_key string."""
+ return "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format(
+ project=project,
+ location=location,
+ key_ring=key_ring,
+ crypto_key=crypto_key,
+ )
+
+ @staticmethod
+ def parse_crypto_key_path(path: str) -> Dict[str, str]:
+ """Parses a crypto_key path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/keyRings/(?P.+?)/cryptoKeys/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def hot_tablet_path(
+ project: str,
+ instance: str,
+ cluster: str,
+ hot_tablet: str,
+ ) -> str:
+ """Returns a fully-qualified hot_tablet string."""
+ return "projects/{project}/instances/{instance}/clusters/{cluster}/hotTablets/{hot_tablet}".format(
+ project=project,
+ instance=instance,
+ cluster=cluster,
+ hot_tablet=hot_tablet,
+ )
+
+ @staticmethod
+ def parse_hot_tablet_path(path: str) -> Dict[str, str]:
+ """Parses a hot_tablet path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/instances/(?P