From 15fa51442af0791aae70965435152caa4cbbf57a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5kon=20V=2E=20Treider?= Date: Mon, 29 Sep 2025 10:57:50 +0200 Subject: [PATCH 01/12] add script to convert async apis into sync --- scripts/generate_code_snippets.py | 3 +- scripts/sync_client_codegen/main.py | 571 ++++++++++++++++++++++++++++ 2 files changed, 572 insertions(+), 2 deletions(-) create mode 100644 scripts/sync_client_codegen/main.py diff --git a/scripts/generate_code_snippets.py b/scripts/generate_code_snippets.py index c7aeee3990..1dcc9e3f53 100644 --- a/scripts/generate_code_snippets.py +++ b/scripts/generate_code_snippets.py @@ -4,9 +4,8 @@ from collections import defaultdict from doctest import DocTestParser, Example -from cognite.client import ClientConfig +from cognite.client import ClientConfig, CogniteClient from cognite.client._api_client import APIClient -from cognite.client.beta import CogniteClient from cognite.client.credentials import Token diff --git a/scripts/sync_client_codegen/main.py b/scripts/sync_client_codegen/main.py new file mode 100644 index 0000000000..e67e5fdd39 --- /dev/null +++ b/scripts/sync_client_codegen/main.py @@ -0,0 +1,571 @@ +from __future__ import annotations + +import ast +import hashlib +import inspect +import re +import shlex +import subprocess +import textwrap +from collections.abc import Iterator +from functools import cache +from pathlib import Path + +SYNC_CLIENT_PATH = Path("cognite/client/_sync_cognite_client.py") + +try: + # Future devs will most likely delete a file in order to get it regenerated. This will always + # end up with a ModuleNotFoundError, so we catch that and proceed with a dummy CogniteClient + # (class which we always regenerate anyway). + from cognite.client import AsyncCogniteClient +except ImportError as e: + if "cognite.client._sync_api" in str(e): + SYNC_CLIENT_PATH.write_text("class CogniteClient:\n ...\n") + else: + raise + +from cognite.client import AsyncCogniteClient # noqa: E402 +from cognite.client._api_client import APIClient # noqa: E402 +from cognite.client.config import ClientConfig, global_config # noqa: E402 +from cognite.client.credentials import Token # noqa: E402 + +EIGHT_SPACES = " " * 8 +SKIP_API_NAMES = { + "PrincipalsAPI", +} +KNOWN_FILES_SKIP_LIST = { + Path("cognite/client/_api/datapoint_tasks.py"), + Path("cognite/client/_api/functions/utils.py"), + Path("cognite/client/_api/org_apis/principals.py"), # TODO? +} +MAYBE_IMPORTS = ( + "SortSpec: TypeAlias", + "_FILTERS_SUPPORTED: frozenset[type[Filter]]", + "AggregateAssetProperty: TypeAlias", + "Source: TypeAlias", + "RunStatus: TypeAlias", + "WorkflowIdentifier: TypeAlias", + "WorkflowVersionIdentifier: TypeAlias", + "ComparableCapability: TypeAlias", +) +ASYNC_API_DIR = Path("cognite/client/_api") +SYNC_API_DIR = Path("cognite/client/_sync_api") + +# Template for the generated sync client code: +# - we rely on other tools to clean up imports +SYNC_API_TEMPLATE = '''\ +""" +=============================================================================== +{file_hash} +This file is auto-generated from the Async API modules, - do not edit manually! 
+=============================================================================== +""" + +{existing_imports} +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils._concurrency import ConcurrencySettings +from typing import Any, Iterator, TypeVar, TYPE_CHECKING, overload +from collections.abc import Coroutine + +if TYPE_CHECKING: + import pandas as pd + {type_checking_imports} + +_T = TypeVar("_T") + + +class Sync{class_name}(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + {nested_apis_init} + +''' + + +def get_api_class_by_attribute(cls_: object, parent_name: tuple[str, ...] = ()) -> dict[str, type[APIClient]]: + available_apis: dict[str, type[APIClient]] = {} + for attr, obj in cls_.__dict__.items(): + if attr.startswith("_") or not isinstance(obj, APIClient): + continue + obj_attr = (*parent_name, attr) + available_apis[".".join(obj_attr)] = obj.__class__ + available_apis.update(get_api_class_by_attribute(obj, parent_name=obj_attr)) + return available_apis + + +def find_api_class_name(source_code: str, file: Path, raise_on_missing: bool = True) -> str | None: + match re.findall(r"class (\w+API)\(APIClient\):", source_code): + case []: + return None + case [cls_name]: + return cls_name + case [*multiple]: + # Dev. note: so you've hit this error and wonder where to -> please split your APIs into separate files + # (sorry if this feels like Java all of a sudden). It makes the codebase much easier to auto-convert + # from async to sync. + raise RuntimeError(f"Found multiple API classes in file='{file}': {multiple}") + + +def hash_file(path: Path) -> str: + return hashlib.new("md5", path.read_bytes()).hexdigest() + + +def is_pyfile(file: Path) -> bool: + return file.suffix == ".py" + + +def list_apis() -> Iterator[Path]: + return Path("cognite/client/_api").rglob("*") + + +def list_sync_apis() -> Iterator[Path]: + return Path("cognite/client/_sync_api").rglob("*") + + +def path_as_importable(path: Path) -> str: + return ".".join(path.with_suffix("").parts) + + +def is_md5_hash(s: str) -> bool: + return bool(re.match(r"^[a-f0-9]{32}$", s)) + + +def read_hash_from_file(path: Path) -> tuple[bool, str | None]: + with path.open("r") as f: + f.readline() + f.readline() + maybe_hash = f.readline().strip() + + return is_md5_hash(maybe_hash), maybe_hash + + +def file_has_changed(write_file: Path, read_file_hash: str) -> bool: + # Skip a file if it exists and the stored hash matches: + if write_file.exists(): + is_valid, existing_hash = read_hash_from_file(write_file) + if is_valid and existing_hash == read_file_hash: + return False + return True + + +def get_module_level_imports(tree: ast.Module): + import_nodes = [node for node in tree.body if isinstance(node, (ast.Import, ast.ImportFrom))] + return "\n".join(ast.unparse(node) for node in import_nodes) + + +def get_module_level_type_checking_imports(tree: ast.Module) -> str: + imports: list[str] = [] + for node in tree.body: + if not isinstance(node, ast.If): + continue + + match node.test: + # check for "if TYPE_CHECKING:" + case ast.Name(id="TYPE_CHECKING"): + pass + # or "if typing.TYPE_CHECKING:" + case ast.Attribute(value=ast.Name(id="typing"), attr="TYPE_CHECKING"): + pass + case _: + continue + + for sub in node.body: + if isinstance(sub, (ast.Import, ast.ImportFrom)): + 
imports.append(ast.unparse(sub)) + return "\n".join(imports) + + +def get_all_imports(tree: ast.Module, source_code: str, source_path: Path) -> tuple[str, str]: + all_imports = get_module_level_imports(tree) + type_checking_imports = get_module_level_type_checking_imports(tree) + extras = [] + parent = source_path.parent + parent_api_in_init = parent != Path("cognite/client/_api") and (parent / "__init__.py").exists() + parent_source = get_source_code(parent / "__init__.py") if parent_api_in_init else "" + + for maybe in MAYBE_IMPORTS: + # Typically there are type aliases defined in the async API module that we need to import: + if maybe in source_code: + to_import = maybe.split(": ")[0] + import_path = path_as_importable(source_path).replace(".__init__", "") + extras.append(f"from {import_path} import {to_import}") + # For a lot of 'nested' APIs, these are in parent / init: + elif maybe in parent_source: + to_import = maybe.split(": ")[0] + extras.append(f"from {path_as_importable(parent)} import {to_import}") + + if extras: + all_imports += "\n" + return all_imports + "\n".join(extras), type_checking_imports + + +def find_class_node(tree: ast.Module, class_name: str) -> ast.ClassDef: + for node in tree.body: + if isinstance(node, ast.ClassDef) and node.name == class_name: + return node + raise ValueError(f"Could not find class '{class_name}' in AST") + + +def find_self_assignments(class_node: ast.ClassDef) -> tuple[list[str], list[str]]: + init_node = None + for node in class_node.body: + if isinstance(node, ast.FunctionDef) and node.name == "__init__": + init_node = node + break + + if not init_node: + return [], [] + + names, nested_apis = [], [] + for stmt in init_node.body: + if not isinstance(stmt, ast.Assign): + continue + + for t in stmt.targets: + if ( + isinstance(t, ast.Attribute) + and isinstance(t.value, ast.Name) + and t.value.id == "self" + and isinstance(stmt.value, ast.Call) + and stmt.value.func.id.endswith("API") + and stmt.value.func.id not in SKIP_API_NAMES + ): + names.append(cls_name := foolish_cls_name_rewrite(stmt.value.func.id)) + nested_apis.append( + # As opposed to the regular APIs, we only need to pass the async client here: + f"self.{t.attr} = Sync{cls_name}(async_client)" + ) + return names, nested_apis + + +def foolish_cls_name_rewrite(class_name: str) -> str: + # Yes, YESSSS + return class_name.replace("ThreeD", "3D") + + +def inverse_foolish_cls_name_rewrite(class_name: str) -> str: + return class_name.replace("3D", "ThreeD") # Needed when searching + + +def method_should_be_converted(node: ast.AST) -> bool: + # There's more to it than just an isinstance check: 'async def __call__' does not return + # a coroutine, but an async generator. This in turn means that mypy forces the overloads + # to NOT be 'async def' but just 'def'. Wait what?! I for sure had to Google it. So we need + # to treat __call__ as a special case in order to not lose all that typing goodies... + return isinstance(node, ast.AsyncFunctionDef) or getattr(node, "name", None) == "__call__" + + +def generate_sync_client_code( + class_name: str, + source_code: str, + source_path: Path, + dot_path_lookup: dict[str, str], + file_hash: str, +) -> str | None: + if class_name is None: + return None + try: + dotted_path = dot_path_lookup[class_name] + except KeyError: + raise RuntimeError( + f"Could not find the dotted path for class='{class_name}', e.g.:" + "EventsAPI -> 'events', DatapointsAPI -> 'timer_series.data'." 
+ ) + + tree = ast.parse(source_code) + class_def = find_class_node(tree, class_name) + + # Group methods by name to handle overloads correctly. + generated_methods = [] + methods_by_name = {} + all_method_nodes = [m for m in class_def.body if method_should_be_converted(m)] + for method_node in all_method_nodes: + methods_by_name.setdefault(method_node.name, []).append(method_node) + + for name, method_nodes in methods_by_name.items(): + if name.startswith("_") and name != "__call__": + continue + + # The last definition is the implementation, the rest are overloads + overloads = method_nodes[:-1] + implementation = method_nodes[-1] + + for overload_node in overloads: + sync_def = "@overload\n def {name}({args}) -> {return_type}: ...".format( + name=name, + args=ast.unparse(overload_node.args), + return_type=ast.unparse(overload_node.returns).replace("AsyncIterator", "Iterator"), + ) + generated_methods.append(sync_def) + + docstring = ast.get_docstring(implementation) + + # Create the list of arguments to pass to the async call + call_parts = [] + # 1. Handle positional-only arguments (e.g., func(a, /)) + call_parts.extend([arg.arg for arg in implementation.args.posonlyargs]) + # 2. Handle regular arguments (can be pos or keyword) + # We will pass these by keyword for safety. + regular_args = [f"{arg.arg}={arg.arg}" for arg in implementation.args.args if arg.arg != "self"] + call_parts.extend(regular_args) + # 3. Handle variadic positional arguments (*args) + if implementation.args.vararg: + call_parts.append(f"*{implementation.args.vararg.arg}") + # 4. Handle keyword-only arguments (e.g., func(*, a)) + kw_only_args = [f"{arg.arg}={arg.arg}" for arg in implementation.args.kwonlyargs] + call_parts.extend(kw_only_args) + # 5. Handle variadic keyword arguments (**kwargs) + if implementation.args.kwarg: + call_parts.append(f"**{implementation.args.kwarg.arg}") + + # Check return type for AsyncIterator + return_type_str = ast.unparse(implementation.returns) + is_iterator = "AsyncIterator" in return_type_str + sync_return_type = return_type_str.replace("AsyncIterator", "Iterator") + + method_body = "" + if is_iterator: + # Skip name here (__call__): + method_body = f"yield from SyncIterator(self.__async_client.{dotted_path}({', '.join(call_parts)}))" + else: + method_body = f"return run_sync(self.__async_client.{dotted_path}.{name}({', '.join(call_parts)}))" + + indented_docstring = "" + if docstring: + indented_docstring = f'{EIGHT_SPACES}"""\n{textwrap.indent(docstring, EIGHT_SPACES)}\n{EIGHT_SPACES}"""\n' + impl_def = ( + f"def {name}({ast.unparse(implementation.args)}) -> {sync_return_type}:\n" + f"{indented_docstring}{EIGHT_SPACES}{method_body}" + ) + generated_methods.append(impl_def) + + all_imports, type_checking_imports = get_all_imports(tree, source_code, source_path) + # In init, we find nested APIs - we also may need to modify existing imports: + api_names, nested_apis = find_self_assignments(class_def) + all_imports = fix_imports_for_sync_apis(all_imports, api_names) + + # Combine everything 🤞 + return ( + textwrap.dedent( + SYNC_API_TEMPLATE.format( + file_hash=file_hash, + class_name=foolish_cls_name_rewrite(class_name), + existing_imports=all_imports, + type_checking_imports=type_checking_imports, + nested_apis_init="\n ".join(nested_apis), + ) + ) + + " " + + "\n\n ".join(generated_methods) + + "\n" + ) + + +def fix_imports_for_sync_apis(all_imports: str, lst_of_api_names: list[str]) -> str: + """ + This function performs two main changes for each API name provided: + 1. 
It changes the import path from `cognite.client._api...` to `cognite.client._sync_api...`.
+    2. It prepends "Sync" to the imported class name (e.g., "DatapointsAPI" -> "SyncDatapointsAPI").
+    """
+    if not lst_of_api_names:
+        return all_imports
+
+    api_name_options = "|".join(map(re.escape, lst_of_api_names))  # escape is prob overkill
+    pattern = re.compile(rf"^from cognite\.client\._api(\..*? import\s+)(.*?)({api_name_options})(.*)$", re.MULTILINE)
+
+    def replacer(match: re.Match) -> str:
+        """This function is called for each match found by re.sub."""
+        module_and_import, leading_imports, matched_api_name, trailing_imports = match.groups()
+        if leading_imports or trailing_imports:
+            # To the poor dev who needs to fix this in the future, here is a long and nice error message.
+            # Having one API class per source file seems like good practice anyhow, and if you don't import
+            # directly from the defining module, the fix is easy.
+            raise ValueError(
+                f"Cannot handle multiple imports on the same line for API '{matched_api_name}'. "
+                "Example: This function changes:\n"
+                "from cognite.client._api.time_series import DatapointsAPI\n"
+                "to:\n"
+                "from cognite.client._sync_api.time_series import SyncDatapointsAPI\n"
+            )
+        return f"from cognite.client._sync_api{module_and_import}Sync{matched_api_name}"
+
+    # Perform the substitution in a single pass:
+    return pattern.sub(replacer, all_imports)
+
+
+def run_ruff(file_paths: list[Path]) -> None:
+    if not file_paths:
+        return
+    # Ruff exits nonzero when it fixes anything, so we run with check=False to avoid raising:
+    base = f"poetry run pre-commit run ruff-{{}} --files {shlex.join(map(str, file_paths))}"
+    command = shlex.split(base.format("check"))
+    print(shlex.join(command))
+    subprocess.run(command, check=False, capture_output=True)
+    command = shlex.split(base.format("format"))
+    print(shlex.join(command))
+    subprocess.run(command, check=False, capture_output=True)
+
+
+def get_dot_path_lookup(async_client: AsyncCogniteClient) -> tuple[dict[str, str], dict[str, str]]:
+    api_cls_lookup = get_api_class_by_attribute(async_client)
+    dot_path_lookup = {v.__name__: k for k, v in api_cls_lookup.items()}
+    if len(dot_path_lookup) != len(api_cls_lookup):
+        raise ValueError("API class names not unique, cannot continue")
+    file_path_lookup = {v.__name__: inspect.getfile(v) for v in api_cls_lookup.values()}
+    return dot_path_lookup, file_path_lookup
+
+
+def ensure_parent_dir(file: Path) -> None:
+    if not file.parent.is_dir():
+        file.parent.mkdir(parents=True, exist_ok=True)
+
+
+@cache
+def get_source_code(file: Path) -> str:
+    return file.read_text()
+
+
+def main(read_file: Path, dot_path_lookup: dict[str, str]) -> tuple[Path | None, bool]:
+    """Generate sync client code for a given async API file and return the path to the generated file."""
+    if read_file in KNOWN_FILES_SKIP_LIST:
+        # print(f"- Skipping codegen for '{read_file}': on skip list ⏭️")
+        return None, False
+
+    source_code = get_source_code(read_file)
+    class_name = find_api_class_name(source_code, read_file)
+    write_file = SYNC_API_DIR / read_file.relative_to(ASYNC_API_DIR)
+    if read_file.name == "__init__.py" and class_name is None:
+        ensure_parent_dir(write_file)
+        write_file.touch(exist_ok=True)
+        # print(f"- Skipping codegen for '{read_file}': empty __init__.py file ⏭️")
+        return write_file, False
+
+    if class_name is None:
+        raise RuntimeError(f"Could not find API class name in file='{read_file}'")
+    read_file_hash = hash_file(read_file)
+    if not file_has_changed(write_file,
read_file_hash): + # print(f"- Skipping codegen for '{read_file}': no changes detected ⏭️") + return write_file, False + + generated_code = generate_sync_client_code(class_name, source_code, read_file, dot_path_lookup, read_file_hash) + if generated_code is None: + # print(f"- Skipping codegen for '{read_file}': on skip list ⏭️") + return None, False + + ensure_parent_dir(write_file) + write_file.write_text(generated_code) + print(f"- Generated sync client code for: '{read_file}' ✅") + return write_file, True + + +COGNITE_CLIENT_TEMPLATE = '''\ +""" +=================================================== +This file is auto-generated - do not edit manually! +=================================================== +""" +from __future__ import annotations + +from typing import TYPE_CHECKING +from cognite.client import AsyncCogniteClient +{all_api_imports} + +if TYPE_CHECKING: + from cognite.client import ClientConfig + + +class CogniteClient: + """Main entrypoint into the Cognite Python SDK. + + All Cognite Data Fusion APIs are accessible through this synchronous client. + For the asynchronous client, see :class:`~cognite.client._cognite_client.AsyncCogniteClient`. + + Args: + config (ClientConfig | None): The configuration for this client. + """ + + def __init__(self, config: ClientConfig | None = None) -> None: + self.__async_client = async_client = AsyncCogniteClient(config) + + # Initialize all sync. APIs: + {nested_apis_init} +''' + + +def filter_base_apis_and_sort_alphabetically(dct: dict[str, str]) -> list[tuple[str, str]]: + return sorted((k, v) for k, v in dct.items() if "." not in v) + + +def create_sync_cognite_client( + dot_path_lookup: dict[str, str], + file_path_lookup: dict[str, str], +) -> None: + all_apis = [] + all_imports = [] + for api, attr in filter_base_apis_and_sort_alphabetically(dot_path_lookup): + override_api_name = foolish_cls_name_rewrite(api) + all_apis.append(f"self.{attr} = Sync{override_api_name}(async_client)\n") + + import_path = path_as_importable( + SYNC_API_DIR / Path(file_path_lookup[api]).relative_to(ASYNC_API_DIR.resolve()) + ).replace(".__init__", "") + all_imports.append(f"from {import_path} import Sync{override_api_name}") + + content = COGNITE_CLIENT_TEMPLATE.format( + file_hash="TODO", all_api_imports="\n".join(all_imports), nested_apis_init=" ".join(all_apis) + ) + SYNC_CLIENT_PATH.write_text(content) + print(f"- Generated sync CogniteClient in: '{SYNC_CLIENT_PATH}' ✅") + + +def clean_up_files(all_expected_files: list[Path]) -> None: + clean_up = set(filter(is_pyfile, list_sync_apis())).difference(all_expected_files) + if not clean_up: + # print("No files to clean up!") + return + print(f"Cleaning up {len(clean_up)} files no longer needed:") + for f in clean_up: + print(f"- Deleting: '{f}'") + f.unlink() + + +def setup_async_mock_client() -> AsyncCogniteClient: + return AsyncCogniteClient(ClientConfig(client_name="name", project="proj", credentials=Token("not-a-token"))) + + +if __name__ == "__main__": + global_config.disable_pypi_version_check = True + + # We need a client to inspect, it does not need to be functional: + async_client = setup_async_mock_client() + + # Let's say I have the SimulatorRoutineRevisionsAPI, and want to know the `simulators.routines.revisions` + # (aka dotted) path to it so that I can magically do `self.__async_client..`: + dot_path_lookup, file_path_lookup = get_dot_path_lookup(async_client) + + # Run convert on all AsyncSomethingAPIs: + all_expected_files = [] + files_needing_lint = [] + for read_file in filter(is_pyfile, 
list_apis()): + try: + write_file, was_modified = main(read_file, dot_path_lookup) + if write_file is not None: + all_expected_files.append(write_file) + if was_modified: + files_needing_lint.append(write_file) + except Exception as e: + print(f"- Failed to generate sync client code for: '{read_file}' ❌ {e}") + continue + + # Invoke run via pre-commit (subprocess) as it doesn't have a python API interface: + run_ruff(files_needing_lint) + + # Clean up files that are no longer needed: + clean_up_files(all_expected_files) + + # Finally, gather all sync APIs into the CogniteClient class itself: + create_sync_cognite_client(dot_path_lookup, file_path_lookup) From d21c739f58acee92adb858d6b0bbf63b281dba8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5kon=20V=2E=20Treider?= Date: Sat, 4 Oct 2025 21:27:07 +0200 Subject: [PATCH 02/12] add Mock for async client: AsyncCogniteClientMock --- cognite/client/testing.py | 212 ++++++++++++++++++++++++++++++++++++-- docs/source/testing.rst | 10 +- 2 files changed, 214 insertions(+), 8 deletions(-) diff --git a/cognite/client/testing.py b/cognite/client/testing.py index 9ec7ac8ea4..638e5bf051 100644 --- a/cognite/client/testing.py +++ b/cognite/client/testing.py @@ -5,7 +5,7 @@ from typing import Any from unittest.mock import MagicMock -from cognite.client import AsyncCogniteClient +from cognite.client import AsyncCogniteClient, CogniteClient from cognite.client._api.agents import AgentsAPI from cognite.client._api.ai import AIAPI from cognite.client._api.ai.tools import AIToolsAPI @@ -88,18 +88,97 @@ from cognite.client._api.workflows.tasks import WorkflowTaskAPI from cognite.client._api.workflows.triggers import WorkflowTriggerAPI from cognite.client._api.workflows.versions import WorkflowVersionAPI - - +from cognite.client._sync_api.agents.agents import SyncAgentsAPI +from cognite.client._sync_api.ai import SyncAIAPI +from cognite.client._sync_api.ai.tools import SyncAIToolsAPI +from cognite.client._sync_api.ai.tools.documents import SyncAIDocumentsAPI +from cognite.client._sync_api.annotations import SyncAnnotationsAPI +from cognite.client._sync_api.assets import SyncAssetsAPI +from cognite.client._sync_api.data_modeling import SyncDataModelingAPI +from cognite.client._sync_api.data_modeling.containers import SyncContainersAPI +from cognite.client._sync_api.data_modeling.data_models import SyncDataModelsAPI +from cognite.client._sync_api.data_modeling.graphql import SyncDataModelingGraphQLAPI +from cognite.client._sync_api.data_modeling.instances import SyncInstancesAPI +from cognite.client._sync_api.data_modeling.space_statistics import SyncSpaceStatisticsAPI +from cognite.client._sync_api.data_modeling.spaces import SyncSpacesAPI +from cognite.client._sync_api.data_modeling.statistics import SyncStatisticsAPI +from cognite.client._sync_api.data_modeling.views import SyncViewsAPI +from cognite.client._sync_api.data_sets import SyncDataSetsAPI +from cognite.client._sync_api.datapoints import SyncDatapointsAPI +from cognite.client._sync_api.datapoints_subscriptions import SyncDatapointsSubscriptionAPI +from cognite.client._sync_api.diagrams import SyncDiagramsAPI +from cognite.client._sync_api.document_preview import SyncDocumentPreviewAPI +from cognite.client._sync_api.documents import SyncDocumentsAPI +from cognite.client._sync_api.entity_matching import SyncEntityMatchingAPI +from cognite.client._sync_api.events import SyncEventsAPI +from cognite.client._sync_api.extractionpipelines import SyncExtractionPipelinesAPI +from 
cognite.client._sync_api.extractionpipelines.configs import SyncExtractionPipelineConfigsAPI +from cognite.client._sync_api.extractionpipelines.runs import SyncExtractionPipelineRunsAPI +from cognite.client._sync_api.files import SyncFilesAPI +from cognite.client._sync_api.functions import SyncFunctionsAPI +from cognite.client._sync_api.functions.calls import SyncFunctionCallsAPI +from cognite.client._sync_api.functions.schedules import SyncFunctionSchedulesAPI +from cognite.client._sync_api.geospatial import SyncGeospatialAPI +from cognite.client._sync_api.hosted_extractors import SyncHostedExtractorsAPI +from cognite.client._sync_api.hosted_extractors.destinations import SyncDestinationsAPI +from cognite.client._sync_api.hosted_extractors.jobs import SyncJobsAPI +from cognite.client._sync_api.hosted_extractors.mappings import SyncMappingsAPI +from cognite.client._sync_api.hosted_extractors.sources import SyncSourcesAPI +from cognite.client._sync_api.iam import SyncIAMAPI +from cognite.client._sync_api.iam.groups import SyncGroupsAPI +from cognite.client._sync_api.iam.security_categories import SyncSecurityCategoriesAPI +from cognite.client._sync_api.iam.sessions import SyncSessionsAPI +from cognite.client._sync_api.iam.token import SyncTokenAPI +from cognite.client._sync_api.labels import SyncLabelsAPI +from cognite.client._sync_api.postgres_gateway import SyncPostgresGatewaysAPI +from cognite.client._sync_api.postgres_gateway.tables import SyncTablesAPI as SyncPostgresTablesAPI +from cognite.client._sync_api.postgres_gateway.users import SyncUsersAPI as SyncPostgresUsersAPI +from cognite.client._sync_api.raw import SyncRawAPI +from cognite.client._sync_api.raw.databases import SyncRawDatabasesAPI +from cognite.client._sync_api.raw.rows import SyncRawRowsAPI +from cognite.client._sync_api.raw.tables import SyncRawTablesAPI +from cognite.client._sync_api.relationships import SyncRelationshipsAPI +from cognite.client._sync_api.sequence_data import SyncSequencesDataAPI +from cognite.client._sync_api.sequences import SyncSequencesAPI +from cognite.client._sync_api.simulators import SyncSimulatorsAPI +from cognite.client._sync_api.simulators.integrations import SyncSimulatorIntegrationsAPI +from cognite.client._sync_api.simulators.logs import SyncSimulatorLogsAPI +from cognite.client._sync_api.simulators.models import SyncSimulatorModelsAPI +from cognite.client._sync_api.simulators.models_revisions import SyncSimulatorModelRevisionsAPI +from cognite.client._sync_api.simulators.routine_revisions import SyncSimulatorRoutineRevisionsAPI +from cognite.client._sync_api.simulators.routines import SyncSimulatorRoutinesAPI +from cognite.client._sync_api.simulators.runs import SyncSimulatorRunsAPI +from cognite.client._sync_api.synthetic_time_series import SyncSyntheticDatapointsAPI +from cognite.client._sync_api.three_d import Sync3DAPI +from cognite.client._sync_api.three_d.asset_mapping import Sync3DAssetMappingAPI +from cognite.client._sync_api.three_d.files import Sync3DFilesAPI +from cognite.client._sync_api.three_d.models import Sync3DModelsAPI +from cognite.client._sync_api.three_d.revisions import Sync3DRevisionsAPI +from cognite.client._sync_api.time_series import SyncTimeSeriesAPI +from cognite.client._sync_api.transformations import SyncTransformationsAPI +from cognite.client._sync_api.transformations.jobs import SyncTransformationJobsAPI +from cognite.client._sync_api.transformations.notifications import SyncTransformationNotificationsAPI +from 
cognite.client._sync_api.transformations.schedules import SyncTransformationSchedulesAPI +from cognite.client._sync_api.transformations.schema import SyncTransformationSchemaAPI +from cognite.client._sync_api.unit_system import SyncUnitSystemAPI +from cognite.client._sync_api.units import SyncUnitAPI +from cognite.client._sync_api.user_profiles import SyncUserProfilesAPI +from cognite.client._sync_api.vision import SyncVisionAPI +from cognite.client._sync_api.workflows import SyncWorkflowAPI +from cognite.client._sync_api.workflows.executions import SyncWorkflowExecutionAPI +from cognite.client._sync_api.workflows.tasks import SyncWorkflowTaskAPI +from cognite.client._sync_api.workflows.triggers import SyncWorkflowTriggerAPI +from cognite.client._sync_api.workflows.versions import SyncWorkflowVersionAPI + + +# TODO: Async methods should be AsyncMocks, so this needs some improvement: class AsyncCogniteClientMock(MagicMock): """Mock for AsyncCogniteClient object - All APIs are replaced with specced MagicMock objects. + All APIs are replaced with specced MagicMock objects and all async methods with AsyncMocks. """ def __init__(self, *args: Any, **kwargs: Any) -> None: - if "parent" in kwargs: - super().__init__(*args, **kwargs) - return None super().__init__(spec=AsyncCogniteClient, *args, **kwargs) # Developer note: # - Please add your mocked APIs in chronological order @@ -213,6 +292,125 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.units.systems = MagicMock(spec_set=UnitSystemAPI) +class CogniteClientMock(MagicMock): + """Mock for CogniteClient object + + All APIs are replaced with specced MagicMock objects. + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(spec=CogniteClient, *args, **kwargs) + # Developer note: + # - Please add your mocked APIs in chronological order + # - For nested APIs: + # - Add spacing above and below + # - Use `spec=MyAPI` only for "top level" + # - Use `spec_set=MyNestedAPI` for all nested APIs + self.ai = MagicMock(spec=SyncAIAPI) + self.ai.tools = MagicMock(spec=SyncAIToolsAPI) + self.ai.tools.documents = MagicMock(spec_set=SyncAIDocumentsAPI) + + self.agents = MagicMock(spec_set=SyncAgentsAPI) + self.annotations = MagicMock(spec_set=SyncAnnotationsAPI) + self.assets = MagicMock(spec_set=SyncAssetsAPI) + + self.data_modeling = MagicMock(spec=SyncDataModelingAPI) + self.data_modeling.containers = MagicMock(spec_set=SyncContainersAPI) + self.data_modeling.data_models = MagicMock(spec_set=SyncDataModelsAPI) + self.data_modeling.spaces = MagicMock(spec_set=SyncSpacesAPI) + self.data_modeling.views = MagicMock(spec_set=SyncViewsAPI) + self.data_modeling.instances = MagicMock(spec_set=SyncInstancesAPI) + self.data_modeling.graphql = MagicMock(spec_set=SyncDataModelingGraphQLAPI) + self.data_modeling.statistics = MagicMock(spec=SyncStatisticsAPI) + self.data_modeling.statistics.spaces = MagicMock(spec_set=SyncSpaceStatisticsAPI) + + self.data_sets = MagicMock(spec_set=SyncDataSetsAPI) + + self.diagrams = MagicMock(spec_set=SyncDiagramsAPI) + self.documents = MagicMock(spec=SyncDocumentsAPI) + self.documents.previews = MagicMock(spec_set=SyncDocumentPreviewAPI) + self.entity_matching = MagicMock(spec_set=SyncEntityMatchingAPI) + self.events = MagicMock(spec_set=SyncEventsAPI) + + self.extraction_pipelines = MagicMock(spec=SyncExtractionPipelinesAPI) + self.extraction_pipelines.config = MagicMock(spec_set=SyncExtractionPipelineConfigsAPI) + self.extraction_pipelines.runs = 
MagicMock(spec_set=SyncExtractionPipelineRunsAPI) + + self.files = MagicMock(spec_set=SyncFilesAPI) + + self.functions = MagicMock(spec=SyncFunctionsAPI) + self.functions.calls = MagicMock(spec_set=SyncFunctionCallsAPI) + self.functions.schedules = MagicMock(spec_set=SyncFunctionSchedulesAPI) + + self.geospatial = MagicMock(spec_set=SyncGeospatialAPI) + + self.iam = MagicMock(spec=SyncIAMAPI) + self.iam.groups = MagicMock(spec_set=SyncGroupsAPI) + self.iam.security_categories = MagicMock(spec_set=SyncSecurityCategoriesAPI) + self.iam.sessions = MagicMock(spec_set=SyncSessionsAPI) + self.iam.user_profiles = MagicMock(spec_set=SyncUserProfilesAPI) + self.iam.token = MagicMock(spec_set=SyncTokenAPI) + + self.labels = MagicMock(spec_set=SyncLabelsAPI) + + self.raw = MagicMock(spec=SyncRawAPI) + self.raw.databases = MagicMock(spec_set=SyncRawDatabasesAPI) + self.raw.rows = MagicMock(spec_set=SyncRawRowsAPI) + self.raw.tables = MagicMock(spec_set=SyncRawTablesAPI) + + self.relationships = MagicMock(spec_set=SyncRelationshipsAPI) + + self.simulators = MagicMock(spec=SyncSimulatorsAPI) + self.simulators.integrations = MagicMock(spec_set=SyncSimulatorIntegrationsAPI) + self.simulators.models = MagicMock(spec=SyncSimulatorModelsAPI) + self.simulators.models.revisions = MagicMock(spec_set=SyncSimulatorModelRevisionsAPI) + self.simulators.runs = MagicMock(spec_set=SyncSimulatorRunsAPI) + self.simulators.routines = MagicMock(spec=SyncSimulatorRoutinesAPI) + self.simulators.routines.revisions = MagicMock(spec_set=SyncSimulatorRoutineRevisionsAPI) + self.simulators.logs = MagicMock(spec_set=SyncSimulatorLogsAPI) + + self.sequences = MagicMock(spec=SyncSequencesAPI) + self.sequences.data = MagicMock(spec_set=SyncSequencesDataAPI) + + self.hosted_extractors = MagicMock(spec=SyncHostedExtractorsAPI) + self.hosted_extractors.sources = MagicMock(spec_set=SyncSourcesAPI) + self.hosted_extractors.destinations = MagicMock(spec_set=SyncDestinationsAPI) + self.hosted_extractors.jobs = MagicMock(spec_set=SyncJobsAPI) + self.hosted_extractors.mappings = MagicMock(spec_set=SyncMappingsAPI) + + self.postgres_gateway = MagicMock(spec=SyncPostgresGatewaysAPI) + self.postgres_gateway.users = MagicMock(spec_set=SyncPostgresUsersAPI) + self.postgres_gateway.tables = MagicMock(spec_set=SyncPostgresTablesAPI) + + self.three_d = MagicMock(spec=Sync3DAPI) + self.three_d.asset_mappings = MagicMock(spec_set=Sync3DAssetMappingAPI) + self.three_d.files = MagicMock(spec_set=Sync3DFilesAPI) + self.three_d.models = MagicMock(spec_set=Sync3DModelsAPI) + self.three_d.revisions = MagicMock(spec_set=Sync3DRevisionsAPI) + + self.time_series = MagicMock(spec=SyncTimeSeriesAPI) + self.time_series.data = MagicMock(spec=SyncDatapointsAPI) + self.time_series.data.synthetic = MagicMock(spec_set=SyncSyntheticDatapointsAPI) + self.time_series.subscriptions = MagicMock(spec_set=SyncDatapointsSubscriptionAPI) + + self.transformations = MagicMock(spec=SyncTransformationsAPI) + self.transformations.jobs = MagicMock(spec_set=SyncTransformationJobsAPI) + self.transformations.notifications = MagicMock(spec_set=SyncTransformationNotificationsAPI) + self.transformations.schedules = MagicMock(spec_set=SyncTransformationSchedulesAPI) + self.transformations.schema = MagicMock(spec_set=SyncTransformationSchemaAPI) + + self.vision = MagicMock(spec_set=SyncVisionAPI) + + self.workflows = MagicMock(spec=SyncWorkflowAPI) + self.workflows.versions = MagicMock(spec_set=SyncWorkflowVersionAPI) + self.workflows.executions = 
MagicMock(spec_set=SyncWorkflowExecutionAPI) + self.workflows.tasks = MagicMock(spec_set=SyncWorkflowTaskAPI) + self.workflows.triggers = MagicMock(spec_set=SyncWorkflowTriggerAPI) + + self.units = MagicMock(spec=SyncUnitAPI) + self.units.systems = MagicMock(spec_set=SyncUnitSystemAPI) + + @contextmanager def monkeypatch_cognite_client() -> Iterator[AsyncCogniteClientMock]: """Context manager for monkeypatching the AsyncCogniteClient. diff --git a/docs/source/testing.rst b/docs/source/testing.rst index e6fecb46c1..cd0c69d979 100644 --- a/docs/source/testing.rst +++ b/docs/source/testing.rst @@ -4,6 +4,14 @@ Object to use as a mock for CogniteClient ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: cognite.client.testing.CogniteClientMock +Object to use as a mock for AsyncCogniteClient +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: cognite.client.testing.AsyncCogniteClientMock + Use a context manager to monkeypatch CogniteClient ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autofunction:: cognite.client.testing.monkeypatch_cognite_client \ No newline at end of file +.. autofunction:: cognite.client.testing.monkeypatch_cognite_client + +Use a context manager to monkeypatch AsyncCogniteClient +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. autofunction:: cognite.client.testing.monkeypatch_async_cognite_client From 353832977c4d1f033670f6b17a85f39f201f021b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5kon=20V=2E=20Treider?= Date: Tue, 14 Oct 2025 10:32:51 +0200 Subject: [PATCH 03/12] properly fix missing AsyncMocks in AsyncCogniteClientMock --- cognite/client/testing.py | 315 ++++++++++++++++++++++++-------------- 1 file changed, 204 insertions(+), 111 deletions(-) diff --git a/cognite/client/testing.py b/cognite/client/testing.py index 638e5bf051..347affdedf 100644 --- a/cognite/client/testing.py +++ b/cognite/client/testing.py @@ -3,7 +3,7 @@ from collections.abc import Iterator from contextlib import contextmanager from typing import Any -from unittest.mock import MagicMock +from unittest.mock import MagicMock, create_autospec from cognite.client import AsyncCogniteClient, CogniteClient from cognite.client._api.agents import AgentsAPI @@ -171,8 +171,23 @@ from cognite.client._sync_api.workflows.versions import SyncWorkflowVersionAPI -# TODO: Async methods should be AsyncMocks, so this needs some improvement: -class AsyncCogniteClientMock(MagicMock): +def flip_spec_set_on(*mocked_apis: MagicMock) -> None: + for m in mocked_apis: + m._spec_set = True + + +class _SpecSetEnforcer(type): + """Metaclass that enforces spec_set=True on the top-level object (our client)""" + + # This is called when users do AsyncCogniteClientMock(): + def __call__(cls, *args: Any, **kwargs: Any) -> Any: + instance = super().__call__(*args, **kwargs) + # Now that the instance is fully constructed, we can freeze attribute assignments: + flip_spec_set_on(instance) + return instance + + +class AsyncCogniteClientMock(MagicMock, metaclass=_SpecSetEnforcer): """Mock for AsyncCogniteClient object All APIs are replaced with specced MagicMock objects and all async methods with AsyncMocks. 
@@ -182,114 +197,192 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(spec=AsyncCogniteClient, *args, **kwargs) # Developer note: # - Please add your mocked APIs in chronological order - # - For nested APIs: - # - Add spacing above and below - # - Use `spec=MyAPI` only for "top level" - # - Use `spec_set=MyNestedAPI` for all nested APIs - self.ai = MagicMock(spec=AIAPI) - self.ai.tools = MagicMock(spec=AIToolsAPI) - self.ai.tools.documents = MagicMock(spec_set=AIDocumentsAPI) - - self.agents = MagicMock(spec_set=AgentsAPI) - self.annotations = MagicMock(spec_set=AnnotationsAPI) - self.assets = MagicMock(spec_set=AssetsAPI) - - self.data_modeling = MagicMock(spec=DataModelingAPI) - self.data_modeling.containers = MagicMock(spec_set=ContainersAPI) - self.data_modeling.data_models = MagicMock(spec_set=DataModelsAPI) - self.data_modeling.spaces = MagicMock(spec_set=SpacesAPI) - self.data_modeling.views = MagicMock(spec_set=ViewsAPI) - self.data_modeling.instances = MagicMock(spec_set=InstancesAPI) - self.data_modeling.graphql = MagicMock(spec_set=DataModelingGraphQLAPI) - self.data_modeling.statistics = MagicMock(spec=StatisticsAPI) - self.data_modeling.statistics.spaces = MagicMock(spec_set=SpaceStatisticsAPI) - - self.data_sets = MagicMock(spec_set=DataSetsAPI) - - self.diagrams = MagicMock(spec_set=DiagramsAPI) - self.documents = MagicMock(spec=DocumentsAPI) - self.documents.previews = MagicMock(spec_set=DocumentPreviewAPI) - self.entity_matching = MagicMock(spec_set=EntityMatchingAPI) - self.events = MagicMock(spec_set=EventsAPI) - - self.extraction_pipelines = MagicMock(spec=ExtractionPipelinesAPI) - self.extraction_pipelines.config = MagicMock(spec_set=ExtractionPipelineConfigsAPI) - self.extraction_pipelines.runs = MagicMock(spec_set=ExtractionPipelineRunsAPI) - - self.files = MagicMock(spec_set=FilesAPI) - - self.functions = MagicMock(spec=FunctionsAPI) - self.functions.calls = MagicMock(spec_set=FunctionCallsAPI) - self.functions.schedules = MagicMock(spec_set=FunctionSchedulesAPI) - - self.geospatial = MagicMock(spec_set=GeospatialAPI) - - self.iam = MagicMock(spec=IAMAPI) - self.iam.groups = MagicMock(spec_set=GroupsAPI) - self.iam.security_categories = MagicMock(spec_set=SecurityCategoriesAPI) - self.iam.sessions = MagicMock(spec_set=SessionsAPI) - self.iam.principals = MagicMock(spec_set=PrincipalsAPI) - self.iam.user_profiles = MagicMock(spec_set=UserProfilesAPI) - self.iam.token = MagicMock(spec_set=TokenAPI) - - self.labels = MagicMock(spec_set=LabelsAPI) - - self.raw = MagicMock(spec=RawAPI) - self.raw.databases = MagicMock(spec_set=RawDatabasesAPI) - self.raw.rows = MagicMock(spec_set=RawRowsAPI) - self.raw.tables = MagicMock(spec_set=RawTablesAPI) - - self.relationships = MagicMock(spec_set=RelationshipsAPI) - - self.simulators = MagicMock(spec=SimulatorsAPI) - self.simulators.integrations = MagicMock(spec_set=SimulatorIntegrationsAPI) - self.simulators.models = MagicMock(spec=SimulatorModelsAPI) - self.simulators.models.revisions = MagicMock(spec_set=SimulatorModelRevisionsAPI) - self.simulators.runs = MagicMock(spec_set=SimulatorRunsAPI) - self.simulators.routines = MagicMock(spec=SimulatorRoutinesAPI) - self.simulators.routines.revisions = MagicMock(spec_set=SimulatorRoutineRevisionsAPI) - self.simulators.logs = MagicMock(spec_set=SimulatorLogsAPI) - - self.sequences = MagicMock(spec=SequencesAPI) - self.sequences.data = MagicMock(spec_set=SequencesDataAPI) - - self.hosted_extractors = MagicMock(spec=HostedExtractorsAPI) - 
self.hosted_extractors.sources = MagicMock(spec_set=SourcesAPI) - self.hosted_extractors.destinations = MagicMock(spec_set=DestinationsAPI) - self.hosted_extractors.jobs = MagicMock(spec_set=JobsAPI) - self.hosted_extractors.mappings = MagicMock(spec_set=MappingsAPI) - - self.postgres_gateway = MagicMock(spec=PostgresGatewaysAPI) - self.postgres_gateway.users = MagicMock(spec_set=PostgresUsersAPI) - self.postgres_gateway.tables = MagicMock(spec_set=PostgresTablesAPI) - - self.three_d = MagicMock(spec=ThreeDAPI) - self.three_d.asset_mappings = MagicMock(spec_set=ThreeDAssetMappingAPI) - self.three_d.files = MagicMock(spec_set=ThreeDFilesAPI) - self.three_d.models = MagicMock(spec_set=ThreeDModelsAPI) - self.three_d.revisions = MagicMock(spec_set=ThreeDRevisionsAPI) - - self.time_series = MagicMock(spec=TimeSeriesAPI) - self.time_series.data = MagicMock(spec=DatapointsAPI) - self.time_series.data.synthetic = MagicMock(spec_set=SyntheticDatapointsAPI) - self.time_series.subscriptions = MagicMock(spec_set=DatapointsSubscriptionAPI) - - self.transformations = MagicMock(spec=TransformationsAPI) - self.transformations.jobs = MagicMock(spec_set=TransformationJobsAPI) - self.transformations.notifications = MagicMock(spec_set=TransformationNotificationsAPI) - self.transformations.schedules = MagicMock(spec_set=TransformationSchedulesAPI) - self.transformations.schema = MagicMock(spec_set=TransformationSchemaAPI) - - self.vision = MagicMock(spec_set=VisionAPI) - - self.workflows = MagicMock(spec=WorkflowAPI) - self.workflows.versions = MagicMock(spec_set=WorkflowVersionAPI) - self.workflows.executions = MagicMock(spec_set=WorkflowExecutionAPI) - self.workflows.tasks = MagicMock(spec_set=WorkflowTaskAPI) - self.workflows.triggers = MagicMock(spec_set=WorkflowTriggerAPI) - - self.units = MagicMock(spec=UnitAPI) - self.units.systems = MagicMock(spec_set=UnitSystemAPI) + # - Use create_autospec with instance=True for better type safety and accurate mocking. + # For simple APIs, also pass spec_set=True to block arbitrary assignments. + # - Build composite APIs bottom-up (you can compose by passing kwargs to create_autospec + # as long as you don't pass spec_set=True). 
+ # - Use flip_spec_set_on afterwards for proper spec enforcement on composite APIs + # (- Now repeat for CogniteClientMock) + + ai_tools_documents = create_autospec(AIDocumentsAPI, instance=True, spec_set=True) + ai_tools = create_autospec(AIToolsAPI, instance=True, documents=ai_tools_documents) + self.ai = create_autospec(AIAPI, instance=True, tools=ai_tools) + flip_spec_set_on(self.ai, ai_tools) + + self.agents = create_autospec(AgentsAPI, instance=True, spec_set=True) + self.annotations = create_autospec(AnnotationsAPI, instance=True, spec_set=True) + self.assets = create_autospec(AssetsAPI, instance=True, spec_set=True) + + dm_space_statistics = create_autospec(SpaceStatisticsAPI, instance=True, spec_set=True) + dm_statistics = create_autospec(StatisticsAPI, instance=True, spaces=dm_space_statistics) + dm_containers = create_autospec(ContainersAPI, instance=True, spec_set=True) + dm_data_models = create_autospec(DataModelsAPI, instance=True, spec_set=True) + dm_spaces = create_autospec(SpacesAPI, instance=True, spec_set=True) + dm_views = create_autospec(ViewsAPI, instance=True, spec_set=True) + dm_instances = create_autospec(InstancesAPI, instance=True, spec_set=True) + dm_graphql = create_autospec(DataModelingGraphQLAPI, instance=True, spec_set=True) + self.data_modeling = create_autospec( + DataModelingAPI, + instance=True, + containers=dm_containers, + data_models=dm_data_models, + spaces=dm_spaces, + views=dm_views, + instances=dm_instances, + graphql=dm_graphql, + statistics=dm_statistics, + ) + flip_spec_set_on(self.data_modeling, dm_statistics) + + self.data_sets = create_autospec(DataSetsAPI, instance=True, spec_set=True) + + self.diagrams = create_autospec(DiagramsAPI, instance=True, spec_set=True) + documents_previews = create_autospec(DocumentPreviewAPI, instance=True, spec_set=True) + self.documents = create_autospec(DocumentsAPI, instance=True, previews=documents_previews) + self.entity_matching = create_autospec(EntityMatchingAPI, instance=True, spec_set=True) + self.events = create_autospec(EventsAPI, instance=True, spec_set=True) + flip_spec_set_on(self.documents) + + extpipes_config = create_autospec(ExtractionPipelineConfigsAPI, instance=True, spec_set=True) + extpipes_runs = create_autospec(ExtractionPipelineRunsAPI, instance=True, spec_set=True) + self.extraction_pipelines = create_autospec( + ExtractionPipelinesAPI, instance=True, config=extpipes_config, runs=extpipes_runs + ) + flip_spec_set_on(self.extraction_pipelines) + + self.files = create_autospec(FilesAPI, instance=True, spec_set=True) + + fns_calls = create_autospec(FunctionCallsAPI, instance=True, spec_set=True) + fns_schedules = create_autospec(FunctionSchedulesAPI, instance=True, spec_set=True) + self.functions = create_autospec(FunctionsAPI, instance=True, calls=fns_calls, schedules=fns_schedules) + flip_spec_set_on(self.functions) + + self.geospatial = create_autospec(GeospatialAPI, instance=True, spec_set=True) + + iam_groups = create_autospec(GroupsAPI, instance=True, spec_set=True) + iam_security_categories = create_autospec(SecurityCategoriesAPI, instance=True, spec_set=True) + iam_sessions = create_autospec(SessionsAPI, instance=True, spec_set=True) + iam_principals = create_autospec(PrincipalsAPI, instance=True, spec_set=True) + iam_user_profiles = create_autospec(UserProfilesAPI, instance=True, spec_set=True) + iam_token = create_autospec(TokenAPI, instance=True, spec_set=True) + self.iam = create_autospec( + IAMAPI, + instance=True, + groups=iam_groups, + 
security_categories=iam_security_categories, + sessions=iam_sessions, + principals=iam_principals, + user_profiles=iam_user_profiles, + token=iam_token, + ) + flip_spec_set_on(self.iam) + + self.labels = create_autospec(LabelsAPI, instance=True, spec_set=True) + + raw_databases = create_autospec(RawDatabasesAPI, instance=True, spec_set=True) + raw_rows = create_autospec(RawRowsAPI, instance=True, spec_set=True) + raw_tables = create_autospec(RawTablesAPI, instance=True, spec_set=True) + self.raw = create_autospec(RawAPI, instance=True, databases=raw_databases, rows=raw_rows, tables=raw_tables) + flip_spec_set_on(self.raw) + + self.relationships = create_autospec(RelationshipsAPI, instance=True, spec_set=True) + + sim_integrations = create_autospec(SimulatorIntegrationsAPI, instance=True, spec_set=True) + sim_models_revisions = create_autospec(SimulatorModelRevisionsAPI, instance=True, spec_set=True) + sim_models = create_autospec(SimulatorModelsAPI, instance=True, revisions=sim_models_revisions) + sim_runs = create_autospec(SimulatorRunsAPI, instance=True, spec_set=True) + sim_routines_revisions = create_autospec(SimulatorRoutineRevisionsAPI, instance=True, spec_set=True) + sim_routines = create_autospec(SimulatorRoutinesAPI, instance=True, revisions=sim_routines_revisions) + sim_logs = create_autospec(SimulatorLogsAPI, instance=True, spec_set=True) + self.simulators = create_autospec( + SimulatorsAPI, + instance=True, + integrations=sim_integrations, + models=sim_models, + runs=sim_runs, + routines=sim_routines, + logs=sim_logs, + ) + flip_spec_set_on(self.simulators, sim_models, sim_routines) + + sequences_data = create_autospec(SequencesDataAPI, instance=True, spec_set=True) + self.sequences = create_autospec(SequencesAPI, instance=True, data=sequences_data) + flip_spec_set_on(self.sequences) + + ho_ex_sources = create_autospec(SourcesAPI, instance=True, spec_set=True) + ho_ex_destinations = create_autospec(DestinationsAPI, instance=True, spec_set=True) + ho_ex_jobs = create_autospec(JobsAPI, instance=True, spec_set=True) + ho_ex_mappings = create_autospec(MappingsAPI, instance=True, spec_set=True) + self.hosted_extractors = create_autospec( + HostedExtractorsAPI, + instance=True, + sources=ho_ex_sources, + destinations=ho_ex_destinations, + jobs=ho_ex_jobs, + mappings=ho_ex_mappings, + ) + flip_spec_set_on(self.hosted_extractors) + + pg_gw_users = create_autospec(PostgresUsersAPI, instance=True, spec_set=True) + pg_gw_tables = create_autospec(PostgresTablesAPI, instance=True, spec_set=True) + self.postgres_gateway = create_autospec( + PostgresGatewaysAPI, instance=True, users=pg_gw_users, tables=pg_gw_tables + ) + flip_spec_set_on(self.postgres_gateway) + + three_d_asset_mappings = create_autospec(ThreeDAssetMappingAPI, instance=True, spec_set=True) + three_d_files = create_autospec(ThreeDFilesAPI, instance=True, spec_set=True) + three_d_models = create_autospec(ThreeDModelsAPI, instance=True, spec_set=True) + three_d_revisions = create_autospec(ThreeDRevisionsAPI, instance=True, spec_set=True) + self.three_d = create_autospec( + ThreeDAPI, + instance=True, + asset_mappings=three_d_asset_mappings, + files=three_d_files, + models=three_d_models, + revisions=three_d_revisions, + ) + flip_spec_set_on(self.three_d) + + ts_synthetic = create_autospec(SyntheticDatapointsAPI, instance=True, spec_set=True) + ts_data = create_autospec(DatapointsAPI, instance=True, synthetic=ts_synthetic) + ts_subscriptions = create_autospec(DatapointsSubscriptionAPI, instance=True, spec_set=True) + 
self.time_series = create_autospec(TimeSeriesAPI, instance=True, data=ts_data, subscriptions=ts_subscriptions) + flip_spec_set_on(self.time_series, ts_data) + + tr_jobs = create_autospec(TransformationJobsAPI, instance=True, spec_set=True) + tr_notifications = create_autospec(TransformationNotificationsAPI, instance=True, spec_set=True) + tr_schedules = create_autospec(TransformationSchedulesAPI, instance=True, spec_set=True) + tr_schema = create_autospec(TransformationSchemaAPI, instance=True, spec_set=True) + self.transformations = create_autospec( + TransformationsAPI, + instance=True, + jobs=tr_jobs, + notifications=tr_notifications, + schedules=tr_schedules, + schema=tr_schema, + ) + flip_spec_set_on(self.transformations) + + self.vision = create_autospec(VisionAPI, instance=True, spec_set=True) + + wf_versions = create_autospec(WorkflowVersionAPI, instance=True, spec_set=True) + wf_executions = create_autospec(WorkflowExecutionAPI, instance=True, spec_set=True) + wf_tasks = create_autospec(WorkflowTaskAPI, instance=True, spec_set=True) + wf_triggers = create_autospec(WorkflowTriggerAPI, instance=True, spec_set=True) + self.workflows = create_autospec( + WorkflowAPI, + instance=True, + versions=wf_versions, + executions=wf_executions, + tasks=wf_tasks, + triggers=wf_triggers, + ) + flip_spec_set_on(self.workflows) + + units_systems = create_autospec(UnitSystemAPI, instance=True, spec_set=True) + self.units = create_autospec(UnitAPI, instance=True, systems=units_systems) + flip_spec_set_on(self.units) class CogniteClientMock(MagicMock): From 6f40d0db02e55631e8478b6a6b4a9e04fbc48071 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5kon=20V=2E=20Treider?= Date: Tue, 14 Oct 2025 10:36:14 +0200 Subject: [PATCH 04/12] properly fix spec_set=True in CogniteClientMock --- cognite/client/testing.py | 298 ++++++++++++++++++++++++-------------- 1 file changed, 190 insertions(+), 108 deletions(-) diff --git a/cognite/client/testing.py b/cognite/client/testing.py index 347affdedf..fdf1f164db 100644 --- a/cognite/client/testing.py +++ b/cognite/client/testing.py @@ -130,6 +130,7 @@ from cognite.client._sync_api.iam.sessions import SyncSessionsAPI from cognite.client._sync_api.iam.token import SyncTokenAPI from cognite.client._sync_api.labels import SyncLabelsAPI +from cognite.client._sync_api.org_apis.principals import SyncPrincipalsAPI from cognite.client._sync_api.postgres_gateway import SyncPostgresGatewaysAPI from cognite.client._sync_api.postgres_gateway.tables import SyncTablesAPI as SyncPostgresTablesAPI from cognite.client._sync_api.postgres_gateway.users import SyncUsersAPI as SyncPostgresUsersAPI @@ -385,7 +386,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: flip_spec_set_on(self.units) -class CogniteClientMock(MagicMock): +class CogniteClientMock(MagicMock, metaclass=_SpecSetEnforcer): """Mock for CogniteClient object All APIs are replaced with specced MagicMock objects. 
@@ -395,113 +396,194 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(spec=CogniteClient, *args, **kwargs) # Developer note: # - Please add your mocked APIs in chronological order - # - For nested APIs: - # - Add spacing above and below - # - Use `spec=MyAPI` only for "top level" - # - Use `spec_set=MyNestedAPI` for all nested APIs - self.ai = MagicMock(spec=SyncAIAPI) - self.ai.tools = MagicMock(spec=SyncAIToolsAPI) - self.ai.tools.documents = MagicMock(spec_set=SyncAIDocumentsAPI) - - self.agents = MagicMock(spec_set=SyncAgentsAPI) - self.annotations = MagicMock(spec_set=SyncAnnotationsAPI) - self.assets = MagicMock(spec_set=SyncAssetsAPI) - - self.data_modeling = MagicMock(spec=SyncDataModelingAPI) - self.data_modeling.containers = MagicMock(spec_set=SyncContainersAPI) - self.data_modeling.data_models = MagicMock(spec_set=SyncDataModelsAPI) - self.data_modeling.spaces = MagicMock(spec_set=SyncSpacesAPI) - self.data_modeling.views = MagicMock(spec_set=SyncViewsAPI) - self.data_modeling.instances = MagicMock(spec_set=SyncInstancesAPI) - self.data_modeling.graphql = MagicMock(spec_set=SyncDataModelingGraphQLAPI) - self.data_modeling.statistics = MagicMock(spec=SyncStatisticsAPI) - self.data_modeling.statistics.spaces = MagicMock(spec_set=SyncSpaceStatisticsAPI) - - self.data_sets = MagicMock(spec_set=SyncDataSetsAPI) - - self.diagrams = MagicMock(spec_set=SyncDiagramsAPI) - self.documents = MagicMock(spec=SyncDocumentsAPI) - self.documents.previews = MagicMock(spec_set=SyncDocumentPreviewAPI) - self.entity_matching = MagicMock(spec_set=SyncEntityMatchingAPI) - self.events = MagicMock(spec_set=SyncEventsAPI) - - self.extraction_pipelines = MagicMock(spec=SyncExtractionPipelinesAPI) - self.extraction_pipelines.config = MagicMock(spec_set=SyncExtractionPipelineConfigsAPI) - self.extraction_pipelines.runs = MagicMock(spec_set=SyncExtractionPipelineRunsAPI) - - self.files = MagicMock(spec_set=SyncFilesAPI) - - self.functions = MagicMock(spec=SyncFunctionsAPI) - self.functions.calls = MagicMock(spec_set=SyncFunctionCallsAPI) - self.functions.schedules = MagicMock(spec_set=SyncFunctionSchedulesAPI) - - self.geospatial = MagicMock(spec_set=SyncGeospatialAPI) - - self.iam = MagicMock(spec=SyncIAMAPI) - self.iam.groups = MagicMock(spec_set=SyncGroupsAPI) - self.iam.security_categories = MagicMock(spec_set=SyncSecurityCategoriesAPI) - self.iam.sessions = MagicMock(spec_set=SyncSessionsAPI) - self.iam.user_profiles = MagicMock(spec_set=SyncUserProfilesAPI) - self.iam.token = MagicMock(spec_set=SyncTokenAPI) - - self.labels = MagicMock(spec_set=SyncLabelsAPI) - - self.raw = MagicMock(spec=SyncRawAPI) - self.raw.databases = MagicMock(spec_set=SyncRawDatabasesAPI) - self.raw.rows = MagicMock(spec_set=SyncRawRowsAPI) - self.raw.tables = MagicMock(spec_set=SyncRawTablesAPI) - - self.relationships = MagicMock(spec_set=SyncRelationshipsAPI) - - self.simulators = MagicMock(spec=SyncSimulatorsAPI) - self.simulators.integrations = MagicMock(spec_set=SyncSimulatorIntegrationsAPI) - self.simulators.models = MagicMock(spec=SyncSimulatorModelsAPI) - self.simulators.models.revisions = MagicMock(spec_set=SyncSimulatorModelRevisionsAPI) - self.simulators.runs = MagicMock(spec_set=SyncSimulatorRunsAPI) - self.simulators.routines = MagicMock(spec=SyncSimulatorRoutinesAPI) - self.simulators.routines.revisions = MagicMock(spec_set=SyncSimulatorRoutineRevisionsAPI) - self.simulators.logs = MagicMock(spec_set=SyncSimulatorLogsAPI) - - self.sequences = MagicMock(spec=SyncSequencesAPI) - 
self.sequences.data = MagicMock(spec_set=SyncSequencesDataAPI) - - self.hosted_extractors = MagicMock(spec=SyncHostedExtractorsAPI) - self.hosted_extractors.sources = MagicMock(spec_set=SyncSourcesAPI) - self.hosted_extractors.destinations = MagicMock(spec_set=SyncDestinationsAPI) - self.hosted_extractors.jobs = MagicMock(spec_set=SyncJobsAPI) - self.hosted_extractors.mappings = MagicMock(spec_set=SyncMappingsAPI) - - self.postgres_gateway = MagicMock(spec=SyncPostgresGatewaysAPI) - self.postgres_gateway.users = MagicMock(spec_set=SyncPostgresUsersAPI) - self.postgres_gateway.tables = MagicMock(spec_set=SyncPostgresTablesAPI) - - self.three_d = MagicMock(spec=Sync3DAPI) - self.three_d.asset_mappings = MagicMock(spec_set=Sync3DAssetMappingAPI) - self.three_d.files = MagicMock(spec_set=Sync3DFilesAPI) - self.three_d.models = MagicMock(spec_set=Sync3DModelsAPI) - self.three_d.revisions = MagicMock(spec_set=Sync3DRevisionsAPI) - - self.time_series = MagicMock(spec=SyncTimeSeriesAPI) - self.time_series.data = MagicMock(spec=SyncDatapointsAPI) - self.time_series.data.synthetic = MagicMock(spec_set=SyncSyntheticDatapointsAPI) - self.time_series.subscriptions = MagicMock(spec_set=SyncDatapointsSubscriptionAPI) - - self.transformations = MagicMock(spec=SyncTransformationsAPI) - self.transformations.jobs = MagicMock(spec_set=SyncTransformationJobsAPI) - self.transformations.notifications = MagicMock(spec_set=SyncTransformationNotificationsAPI) - self.transformations.schedules = MagicMock(spec_set=SyncTransformationSchedulesAPI) - self.transformations.schema = MagicMock(spec_set=SyncTransformationSchemaAPI) - - self.vision = MagicMock(spec_set=SyncVisionAPI) - - self.workflows = MagicMock(spec=SyncWorkflowAPI) - self.workflows.versions = MagicMock(spec_set=SyncWorkflowVersionAPI) - self.workflows.executions = MagicMock(spec_set=SyncWorkflowExecutionAPI) - self.workflows.tasks = MagicMock(spec_set=SyncWorkflowTaskAPI) - self.workflows.triggers = MagicMock(spec_set=SyncWorkflowTriggerAPI) - - self.units = MagicMock(spec=SyncUnitAPI) - self.units.systems = MagicMock(spec_set=SyncUnitSystemAPI) + # - Use create_autospec with instance=True for better type safety and accurate mocking. + # For simple APIs, also pass spec_set=True to block arbitrary assignments. + # - Build composite APIs bottom-up (you can compose by passing kwargs to create_autospec + # as long as you don't pass spec_set=True). 
+ # - Use flip_spec_set_on afterwards for proper spec enforcement on composite APIs + # (- Now repeat for AsyncCogniteClientMock) + + ai_tools_documents = create_autospec(SyncAIDocumentsAPI, instance=True, spec_set=True) + ai_tools = create_autospec(SyncAIToolsAPI, instance=True, documents=ai_tools_documents) + self.ai = create_autospec(SyncAIAPI, instance=True, tools=ai_tools) + flip_spec_set_on(self.ai, ai_tools) + + self.agents = create_autospec(SyncAgentsAPI, instance=True, spec_set=True) + self.annotations = create_autospec(SyncAnnotationsAPI, instance=True, spec_set=True) + self.assets = create_autospec(SyncAssetsAPI, instance=True, spec_set=True) + + dm_space_statistics = create_autospec(SyncSpaceStatisticsAPI, instance=True, spec_set=True) + dm_statistics = create_autospec(SyncStatisticsAPI, instance=True, spaces=dm_space_statistics) + dm_containers = create_autospec(SyncContainersAPI, instance=True, spec_set=True) + dm_data_models = create_autospec(SyncDataModelsAPI, instance=True, spec_set=True) + dm_spaces = create_autospec(SyncSpacesAPI, instance=True, spec_set=True) + dm_views = create_autospec(SyncViewsAPI, instance=True, spec_set=True) + dm_instances = create_autospec(SyncInstancesAPI, instance=True, spec_set=True) + dm_graphql = create_autospec(SyncDataModelingGraphQLAPI, instance=True, spec_set=True) + self.data_modeling = create_autospec( + SyncDataModelingAPI, + instance=True, + containers=dm_containers, + data_models=dm_data_models, + spaces=dm_spaces, + views=dm_views, + instances=dm_instances, + graphql=dm_graphql, + statistics=dm_statistics, + ) + flip_spec_set_on(self.data_modeling, dm_statistics) + + self.data_sets = create_autospec(SyncDataSetsAPI, instance=True, spec_set=True) + + self.diagrams = create_autospec(SyncDiagramsAPI, instance=True, spec_set=True) + documents_previews = create_autospec(SyncDocumentPreviewAPI, instance=True, spec_set=True) + self.documents = create_autospec(SyncDocumentsAPI, instance=True, previews=documents_previews) + self.entity_matching = create_autospec(SyncEntityMatchingAPI, instance=True, spec_set=True) + self.events = create_autospec(SyncEventsAPI, instance=True, spec_set=True) + flip_spec_set_on(self.documents) + + extpipes_config = create_autospec(SyncExtractionPipelineConfigsAPI, instance=True, spec_set=True) + extpipes_runs = create_autospec(SyncExtractionPipelineRunsAPI, instance=True, spec_set=True) + self.extraction_pipelines = create_autospec( + SyncExtractionPipelinesAPI, instance=True, config=extpipes_config, runs=extpipes_runs + ) + flip_spec_set_on(self.extraction_pipelines) + + self.files = create_autospec(SyncFilesAPI, instance=True, spec_set=True) + + fns_calls = create_autospec(SyncFunctionCallsAPI, instance=True, spec_set=True) + fns_schedules = create_autospec(SyncFunctionSchedulesAPI, instance=True, spec_set=True) + self.functions = create_autospec(SyncFunctionsAPI, instance=True, calls=fns_calls, schedules=fns_schedules) + flip_spec_set_on(self.functions) + + self.geospatial = create_autospec(SyncGeospatialAPI, instance=True, spec_set=True) + + iam_groups = create_autospec(SyncGroupsAPI, instance=True, spec_set=True) + iam_security_categories = create_autospec(SyncSecurityCategoriesAPI, instance=True, spec_set=True) + iam_sessions = create_autospec(SyncSessionsAPI, instance=True, spec_set=True) + iam_principals = create_autospec(SyncPrincipalsAPI, instance=True, spec_set=True) + iam_user_profiles = create_autospec(SyncUserProfilesAPI, instance=True, spec_set=True) + iam_token = create_autospec(SyncTokenAPI, 
instance=True, spec_set=True) + self.iam = create_autospec( + SyncIAMAPI, + instance=True, + groups=iam_groups, + security_categories=iam_security_categories, + sessions=iam_sessions, + principals=iam_principals, + user_profiles=iam_user_profiles, + token=iam_token, + ) + flip_spec_set_on(self.iam) + + self.labels = create_autospec(SyncLabelsAPI, instance=True, spec_set=True) + + raw_databases = create_autospec(SyncRawDatabasesAPI, instance=True, spec_set=True) + raw_rows = create_autospec(SyncRawRowsAPI, instance=True, spec_set=True) + raw_tables = create_autospec(SyncRawTablesAPI, instance=True, spec_set=True) + self.raw = create_autospec(SyncRawAPI, instance=True, databases=raw_databases, rows=raw_rows, tables=raw_tables) + flip_spec_set_on(self.raw) + + self.relationships = create_autospec(SyncRelationshipsAPI, instance=True, spec_set=True) + + sim_integrations = create_autospec(SyncSimulatorIntegrationsAPI, instance=True, spec_set=True) + sim_models_revisions = create_autospec(SyncSimulatorModelRevisionsAPI, instance=True, spec_set=True) + sim_models = create_autospec(SyncSimulatorModelsAPI, instance=True, revisions=sim_models_revisions) + sim_runs = create_autospec(SyncSimulatorRunsAPI, instance=True, spec_set=True) + sim_routines_revisions = create_autospec(SyncSimulatorRoutineRevisionsAPI, instance=True, spec_set=True) + sim_routines = create_autospec(SyncSimulatorRoutinesAPI, instance=True, revisions=sim_routines_revisions) + sim_logs = create_autospec(SyncSimulatorLogsAPI, instance=True, spec_set=True) + self.simulators = create_autospec( + SyncSimulatorsAPI, + instance=True, + integrations=sim_integrations, + models=sim_models, + runs=sim_runs, + routines=sim_routines, + logs=sim_logs, + ) + flip_spec_set_on(self.simulators, sim_models) + + sequences_data = create_autospec(SyncSequencesDataAPI, instance=True, spec_set=True) + self.sequences = create_autospec(SyncSequencesAPI, instance=True, data=sequences_data) + flip_spec_set_on(self.sequences) + + ho_ex_sources = create_autospec(SyncSourcesAPI, instance=True, spec_set=True) + ho_ex_destinations = create_autospec(SyncDestinationsAPI, instance=True, spec_set=True) + ho_ex_jobs = create_autospec(SyncJobsAPI, instance=True, spec_set=True) + ho_ex_mappings = create_autospec(SyncMappingsAPI, instance=True, spec_set=True) + self.hosted_extractors = create_autospec( + SyncHostedExtractorsAPI, + instance=True, + sources=ho_ex_sources, + destinations=ho_ex_destinations, + jobs=ho_ex_jobs, + mappings=ho_ex_mappings, + ) + flip_spec_set_on(self.hosted_extractors) + + pg_gw_users = create_autospec(SyncPostgresUsersAPI, instance=True, spec_set=True) + pg_gw_tables = create_autospec(SyncPostgresTablesAPI, instance=True, spec_set=True) + self.postgres_gateway = create_autospec( + SyncPostgresGatewaysAPI, instance=True, users=pg_gw_users, tables=pg_gw_tables + ) + flip_spec_set_on(self.postgres_gateway) + + three_d_asset_mappings = create_autospec(Sync3DAssetMappingAPI, instance=True, spec_set=True) + three_d_files = create_autospec(Sync3DFilesAPI, instance=True, spec_set=True) + three_d_models = create_autospec(Sync3DModelsAPI, instance=True, spec_set=True) + three_d_revisions = create_autospec(Sync3DRevisionsAPI, instance=True, spec_set=True) + self.three_d = create_autospec( + Sync3DAPI, + instance=True, + asset_mappings=three_d_asset_mappings, + files=three_d_files, + models=three_d_models, + revisions=three_d_revisions, + ) + flip_spec_set_on(self.three_d) + + ts_synthetic = create_autospec(SyncSyntheticDatapointsAPI, instance=True, 
spec_set=True) + ts_data = create_autospec(SyncDatapointsAPI, instance=True, synthetic=ts_synthetic) + ts_subscriptions = create_autospec(SyncDatapointsSubscriptionAPI, instance=True, spec_set=True) + self.time_series = create_autospec( + SyncTimeSeriesAPI, instance=True, data=ts_data, subscriptions=ts_subscriptions + ) + flip_spec_set_on(self.time_series, ts_data) + + tr_jobs = create_autospec(SyncTransformationJobsAPI, instance=True, spec_set=True) + tr_notifications = create_autospec(SyncTransformationNotificationsAPI, instance=True, spec_set=True) + tr_schedules = create_autospec(SyncTransformationSchedulesAPI, instance=True, spec_set=True) + tr_schema = create_autospec(SyncTransformationSchemaAPI, instance=True, spec_set=True) + self.transformations = create_autospec( + SyncTransformationsAPI, + instance=True, + jobs=tr_jobs, + notifications=tr_notifications, + schedules=tr_schedules, + schema=tr_schema, + ) + flip_spec_set_on(self.transformations) + + self.vision = create_autospec(SyncVisionAPI, instance=True, spec_set=True) + + wf_versions = create_autospec(SyncWorkflowVersionAPI, instance=True, spec_set=True) + wf_executions = create_autospec(SyncWorkflowExecutionAPI, instance=True, spec_set=True) + wf_tasks = create_autospec(SyncWorkflowTaskAPI, instance=True, spec_set=True) + wf_triggers = create_autospec(SyncWorkflowTriggerAPI, instance=True, spec_set=True) + self.workflows = create_autospec( + SyncWorkflowAPI, + instance=True, + versions=wf_versions, + executions=wf_executions, + tasks=wf_tasks, + triggers=wf_triggers, + ) + flip_spec_set_on(self.workflows) + + units_systems = create_autospec(SyncUnitSystemAPI, instance=True, spec_set=True) + self.units = create_autospec(SyncUnitAPI, instance=True, systems=units_systems) + flip_spec_set_on(self.units) @contextmanager From d64d14732a322814e462b9bde32ace5757826e7a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5kon=20V=2E=20Treider?= Date: Tue, 14 Oct 2025 10:37:46 +0200 Subject: [PATCH 05/12] change monkeypatch client fns to use patch instead of hacking __new__ --- cognite/client/testing.py | 82 +++++++++++++++++++++++++++++++-------- 1 file changed, 66 insertions(+), 16 deletions(-) diff --git a/cognite/client/testing.py b/cognite/client/testing.py index fdf1f164db..7d32eb7c32 100644 --- a/cognite/client/testing.py +++ b/cognite/client/testing.py @@ -3,7 +3,7 @@ from collections.abc import Iterator from contextlib import contextmanager from typing import Any -from unittest.mock import MagicMock, create_autospec +from unittest.mock import MagicMock, create_autospec, patch from cognite.client import AsyncCogniteClient, CogniteClient from cognite.client._api.agents import AgentsAPI @@ -587,29 +587,28 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: @contextmanager -def monkeypatch_cognite_client() -> Iterator[AsyncCogniteClientMock]: - """Context manager for monkeypatching the AsyncCogniteClient. +def monkeypatch_cognite_client() -> Iterator[CogniteClientMock]: + """Context manager for monkeypatching the CogniteClient. Will patch all clients and replace them with specced MagicMock objects. 
Yields: - AsyncCogniteClientMock: The mock with which the AsyncCogniteClient has been replaced + CogniteClientMock: The mock with which the CogniteClient has been replaced Examples: - In this example we can run the following code without actually executing the underlying API calls:: + In this example we can run the following code without actually executing the underlying API calls: >>> from cognite.client import CogniteClient >>> from cognite.client.data_classes import TimeSeriesWrite >>> from cognite.client.testing import monkeypatch_cognite_client >>> >>> with monkeypatch_cognite_client(): - >>> client = AsyncCogniteClient() + >>> client = CogniteClient() >>> client.time_series.create(TimeSeriesWrite(external_id="blabla")) - This example shows how to set the return value of a given method:: + This example shows how to set the return value of a given method: - >>> from cognite.client import CogniteClient >>> from cognite.client.data_classes.iam import TokenInspection >>> from cognite.client.testing import monkeypatch_cognite_client >>> @@ -617,26 +616,77 @@ def monkeypatch_cognite_client() -> Iterator[AsyncCogniteClientMock]: >>> c_mock.iam.token.inspect.return_value = TokenInspection( >>> subject="subject", projects=[], capabilities=[] >>> ) - >>> client = AsyncCogniteClient() + >>> # Init. a new client yields the same mocked client: + >>> client = CogniteClient() >>> res = client.iam.token.inspect() >>> assert "subject" == res.subject - Here you can see how to have a given method raise an exception:: + Here you can see how to have a given method raise an exception: - >>> from cognite.client import CogniteClient >>> from cognite.client.exceptions import CogniteAPIError >>> from cognite.client.testing import monkeypatch_cognite_client >>> >>> with monkeypatch_cognite_client() as c_mock: >>> c_mock.iam.token.inspect.side_effect = CogniteAPIError(message="Something went wrong", code=400) + >>> try: + >>> res = c_mock.iam.token.inspect() + >>> except CogniteAPIError as e: + >>> assert 400 == e.code + >>> assert "Something went wrong" == e.message + """ + mock = CogniteClientMock() + with patch("cognite.client.CogniteClient", return_value=mock): + yield mock + + +@contextmanager +def monkeypatch_async_cognite_client() -> Iterator[AsyncCogniteClientMock]: + """Context manager for monkeypatching the AsyncCogniteClient. + + Will patch all clients and replace them with specced MagicMock objects. + + Yields: + AsyncCogniteClientMock: The mock with which the AsyncCogniteClient has been replaced + + Examples: + + In this example we can run the following code without actually executing the underlying API calls: + + >>> from cognite.client import AsyncCogniteClient + >>> from cognite.client.data_classes import TimeSeriesWrite + >>> from cognite.client.testing import monkeypatch_async_cognite_client + >>> + >>> with monkeypatch_async_cognite_client(): + >>> client = AsyncCogniteClient() + >>> await client.time_series.create(TimeSeriesWrite(external_id="blabla")) + + This example shows how to set the return value of a given method: + + >>> from cognite.client.data_classes.iam import TokenInspection + >>> from cognite.client.testing import monkeypatch_async_cognite_client + >>> + >>> with monkeypatch_async_cognite_client() as c_mock: + >>> c_mock.iam.token.inspect.return_value = TokenInspection( + >>> subject="subject", projects=[], capabilities=[] + >>> ) + >>> # Init. 
a new client yields the same mocked client: >>> client = AsyncCogniteClient() + >>> res = await client.iam.token.inspect() + >>> assert "subject" == res.subject + + Here you can see how to have a given method raise an exception: + + >>> from cognite.client.exceptions import CogniteAPIError + >>> from cognite.client.testing import monkeypatch_async_cognite_client + >>> + >>> with monkeypatch_async_cognite_client() as c_mock: + >>> c_mock.iam.token.inspect.side_effect = CogniteAPIError(message="Something went wrong", code=400) >>> try: - >>> res = client.iam.token.inspect() + >>> res = await c_mock.iam.token.inspect() >>> except CogniteAPIError as e: >>> assert 400 == e.code >>> assert "Something went wrong" == e.message """ - cognite_client_mock = AsyncCogniteClientMock() - AsyncCogniteClient.__new__ = lambda *args, **kwargs: cognite_client_mock # type: ignore[method-assign] - yield cognite_client_mock - AsyncCogniteClient.__new__ = lambda cls, *args, **kwargs: object.__new__(cls) # type: ignore[method-assign] + mock = AsyncCogniteClientMock() + with patch("cognite.client.AsyncCogniteClient", return_value=mock): + yield mock From a58f625f393f49178eaf29f222a8d323e0796538 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5kon=20V=2E=20Treider?= Date: Tue, 14 Oct 2025 10:41:54 +0200 Subject: [PATCH 06/12] update mock tests after refactoring --- tests/tests_unit/test_testing.py | 94 +++++++++++++++++++++----------- tests/utils.py | 25 +++++++-- 2 files changed, 82 insertions(+), 37 deletions(-) diff --git a/tests/tests_unit/test_testing.py b/tests/tests_unit/test_testing.py index 005f91e10b..f218af494a 100644 --- a/tests/tests_unit/test_testing.py +++ b/tests/tests_unit/test_testing.py @@ -2,30 +2,53 @@ import pytest -from cognite.client import ClientConfig, CogniteClient +from cognite.client import AsyncCogniteClient, ClientConfig, CogniteClient from cognite.client._api_client import APIClient -from cognite.client._org_client import OrgAPIClient from cognite.client.credentials import Token -from cognite.client.testing import CogniteClientMock, monkeypatch_cognite_client -from tests.utils import all_mock_children, all_subclasses, get_api_class_by_attribute +from cognite.client.testing import AsyncCogniteClientMock, CogniteClientMock, monkeypatch_cognite_client +from cognite.client.utils._auxiliary import all_concrete_subclasses +from tests.utils import all_mock_children, get_api_class_by_attribute -def test_ensure_all_apis_are_available_on_cognite_mock() -> None: - mocked_apis = all_mock_children(CogniteClientMock()) - available = {v.__class__ for v in mocked_apis.values()} - # OrgAPIClient is a base API and should not mocked directly. - # TODO: what about all_concrete_subclasses? 
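For reference, the patch()-based rewrite of the monkeypatch helpers above relies only on standard unittest.mock behaviour: patching a name with return_value makes every "construction" through that name return the same prepared object. A minimal, self-contained illustration (ToyClient is hypothetical, not an SDK class):

    from unittest.mock import MagicMock, patch

    class ToyClient:
        def ping(self) -> str:
            return "pong"

    mock = MagicMock(spec=ToyClient)
    mock.ping.return_value = "mocked"

    # While the patch is active, looking up ToyClient through this module and
    # calling it simply returns the prepared mock:
    with patch(f"{__name__}.ToyClient", return_value=mock):
        client = ToyClient()
        assert client is mock
        assert client.ping() == "mocked"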
- expected = set(all_subclasses(APIClient, exclude={OrgAPIClient})) +@pytest.fixture(scope="module") +def all_sync_client_mock_children() -> dict[str, tuple[type[APIClient], MagicMock]]: + # This is a slooow call, so we reuse the results in this module + return all_mock_children(CogniteClientMock()) + + +@pytest.fixture(scope="module") +def all_async_client_mock_children() -> dict[str, tuple[type[APIClient], MagicMock]]: + # This is a slooow call, so we reuse the results in this module + return all_mock_children(AsyncCogniteClientMock()) + + +@pytest.fixture(scope="module") +def mock_spec_cls_lookup( + all_async_client_mock_children: dict[str, tuple[type[APIClient], MagicMock]], +) -> dict[str, type[APIClient]]: + return {k: v for k, (v, _) in all_async_client_mock_children.items()} + + +@pytest.fixture(scope="module") +def mocked_api_lookup( + all_async_client_mock_children: dict[str, tuple[type[APIClient], MagicMock]], +) -> dict[str, MagicMock]: + return {k: v for k, (_, v) in all_async_client_mock_children.items()} + + +def test_ensure_all_apis_are_available_on_cognite_mock(mock_spec_cls_lookup: dict[str, type[APIClient]]) -> None: + available = set(mock_spec_cls_lookup.values()) + expected = set(all_concrete_subclasses(APIClient)) # Any new APIs that have not been added to CogniteClientMock? assert not expected.difference(available), f"Missing APIs: {expected.difference(available)}" # Any removed APIs that are still available on CogniteClientMock? assert not available.difference(expected), f"Removed APIs: {available.difference(expected)}" -def test_ensure_all_apis_use_equal_attr_paths_on_cognite_mock() -> None: - client = CogniteClient(ClientConfig(client_name="a", project="b", credentials="c")) # type: ignore[arg-type] - available_apis = {(attr, api_cls) for attr, api_cls in get_api_class_by_attribute(client).items()} - mocked_apis = {(attr, api.__class__) for attr, api in all_mock_children(CogniteClientMock()).items()} +def test_ensure_all_apis_use_equal_attr_paths_on_cognite_mock(mock_spec_cls_lookup: dict[str, type[APIClient]]) -> None: + client = AsyncCogniteClient(ClientConfig(client_name="a", project="b", credentials="c")) # type: ignore[arg-type] + available_apis = set(get_api_class_by_attribute(client).items()) + mocked_apis = set(mock_spec_cls_lookup.items()) missing_apis = available_apis.difference(mocked_apis) assert not missing_apis, f"Missing APIs: {missing_apis}" @@ -34,28 +57,37 @@ def test_ensure_all_apis_use_equal_attr_paths_on_cognite_mock() -> None: assert not extra_apis, f"Extra APIs: {extra_apis}" -@pytest.mark.parametrize("api", list(all_mock_children(CogniteClientMock()).values())) -def test_ensure_all_apis_are_specced_on_cognite_mock(api: MagicMock) -> None: - # All APIs raise when trying to access a non-existing attribute: - with pytest.raises(AttributeError): - api.does_not_exist +def test_ensure_both_cognite_client_mocks_are_in_sync( # pun intended + mock_spec_cls_lookup: dict[str, type[APIClient]], + all_sync_client_mock_children: dict[str, tuple[type[APIClient], MagicMock]], +) -> None: + sync_client_apis = { + # A bit magical this, but how we translate async API class names to sync API class names is by prefixing with "Sync" and + # replacing "3D" with "ThreeD". 
lol, worth it: + k: v.__name__.replace("Sync", "").replace("3D", "ThreeD") + for k, (v, _) in all_sync_client_mock_children.items() + } + async_client_apis = {k: v.__name__ for k, v in mock_spec_cls_lookup.items()} + + assert sync_client_apis == async_client_apis, "Sync and Async mock spec class lookups are not equal" - # ...but only APIs that do not contain other APIs have spec_set=True. - if api._spec_set is True: - assert not api._mock_children + +def test_ensure_all_apis_are_specced_on_cognite_mock(mocked_api_lookup: dict[str, MagicMock]) -> None: + for dotted_path, mock_api in mocked_api_lookup.items(): + # All APIs should raise when trying to access a non-existing attribute: + with pytest.raises(AttributeError): + mock_api.does_not_exist + # ...or set a non-existing attribute: with pytest.raises(AttributeError): - api.does_not_exist = 42 - else: - assert api._mock_children - api.does_not_exist = 42 + mock_api.does_not_exist = 123 + + # This will never trigger (above lines will), but this is actually what we ensure: + assert mock_api._spec_set is True, f"API {dotted_path} does not have _spec_set=True" def test_cognite_client_accepts_arguments_during_and_after_mock() -> None: + # This test was here to ensure the old style ".__new__" override didn't fail after + # reverting as that would break object.__new__ by passing more than the first arg. with monkeypatch_cognite_client(): CogniteClient(ClientConfig(client_name="bla", project="bla", credentials=Token("bla"))) CogniteClient(ClientConfig(client_name="bla", project="bla", credentials=Token("bla"))) - - -def test_client_mock_can_access_attributes_not_explicitly_defined_on_children() -> None: - c_mock = CogniteClientMock() - assert c_mock.config.max_workers diff --git a/tests/utils.py b/tests/utils.py index a6ab3b39e0..60875c2539 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -25,6 +25,7 @@ from cognite.client import CogniteClient from cognite.client._api_client import APIClient from cognite.client._constants import MAX_VALID_INTERNAL_ID +from cognite.client._sync_api_client import SyncAPIClient from cognite.client.data_classes import ( DataPointSubscriptionWrite, EndTimeFilter, @@ -111,12 +112,24 @@ def all_concrete_subclasses(base: T_Type, exclude: set[type] | None = None) -> l ] -def all_mock_children(mock: MagicMock, parent_name: tuple[str, ...] = ()) -> dict[str, MagicMock]: - """Returns a dictionary with correct dotted names mapping to mocked classes.""" - dct = {".".join((*parent_name, k)): v for k, v in mock._mock_children.items()} - for name, child in dct.copy().items(): - dct.update(all_mock_children(child, parent_name=(name,))) - return dct +def all_mock_children(mock_obj: MagicMock, parent_path: str = "") -> dict[str, tuple[type[APIClient], MagicMock]]: + """Returns a dictionary with correct dotted names mapping to mocked APIClient classes.""" + api_mocks = {} + for attr in dir(mock_obj): + if attr.startswith("_"): + continue + + api_mock = getattr(mock_obj, attr) + spec_class = getattr(api_mock, "_spec_class", None) + current_path = f"{parent_path}.{attr}" if parent_path else attr + + if spec_class is not None and issubclass(spec_class, APIClient | SyncAPIClient): + api_mocks[current_path] = (spec_class, api_mock) + # Recursively check nested APIs: + nested_apis = all_mock_children(api_mock, current_path) + api_mocks.update(nested_apis) + + return api_mocks def get_api_class_by_attribute(cls_: object, parent_name: tuple[str, ...] 
= ()) -> dict[str, type[APIClient]]: From 6767b3c55226a473feb702fee503c1b14c5c09db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5kon=20V=2E=20Treider?= Date: Mon, 13 Oct 2025 15:10:00 +0200 Subject: [PATCH 07/12] refactor BasicAsyncAPIClient._request to log but re-raise --- cognite/client/_basic_api_client.py | 34 +++++++++++++++++++++++------ 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/cognite/client/_basic_api_client.py b/cognite/client/_basic_api_client.py index 81f2ec0faa..ea919f3e43 100644 --- a/cognite/client/_basic_api_client.py +++ b/cognite/client/_basic_api_client.py @@ -213,13 +213,31 @@ async def _request( method: Literal["GET", "PUT", "HEAD"], /, full_url: str, - content: str | bytes | Iterable[bytes] | None = None, + content: bytes | AsyncIterator[bytes] | None = None, headers: dict[str, Any] | None = None, timeout: float | None = None, - api_subversion: str | None = None, include_cdf_headers: bool = False, + api_subversion: str | None = None, ) -> httpx.Response: - """Make a request to something that is outside Cognite Data Fusion""" + """ + Make a request to something that is outside Cognite Data Fusion, with retry enabled. + Requires the caller to handle errors coming from non-2xx response status codes. + + Args: + method (Literal['GET', 'PUT', 'HEAD']): HTTP method. + full_url (str): Full URL to make the request to. + content (bytes | AsyncIterator[bytes] | None): Optional body content to send along with the request. + headers (dict[str, Any] | None): Optional headers to include in the request. + timeout (float | None): Override the default timeout for this request. + include_cdf_headers (bool): Whether to include Cognite Data Fusion headers in the request. Defaults to False. + api_subversion (str | None): When include_cdf_headers=True, override the API subversion to use for the request. Has no effect otherwise. + + Returns: + httpx.Response: The response from the server. + + Raises: + httpx.HTTPStatusError: If the response status code is 4xx or 5xx. 
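The commit subject above captures the behavioural change: _request now logs the failure but re-raises, so the caller owns error handling for non-2xx responses. A sketch of that caller-side contract, with illustrative names (fetch_external is not part of the SDK):

    import httpx

    async def fetch_external(api_client, url: str) -> bytes:
        try:
            res = await api_client._request("GET", full_url=url)
        except httpx.HTTPStatusError as err:
            # _request has already logged the failed request; the caller decides
            # which status codes are fatal.
            if err.response.status_code == 404:
                return b""
            raise
        return res.content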
+ """ client = self._select_async_http_client(method in {"GET", "PUT", "HEAD"}) if include_cdf_headers: headers = self._configure_headers(additional_headers=headers, api_subversion=api_subversion) @@ -227,11 +245,13 @@ async def _request( res = await client( method, full_url, content=content, headers=headers, timeout=timeout or self._config.timeout ) - except httpx.HTTPStatusError as err: - await self._handle_status_error(err) + self._log_successful_request(res) + return res - self._log_successful_request(res) - return res + except httpx.HTTPStatusError as err: + handler = await FailedRequestHandler.from_status_error(err, stream=False) + handler.log_failed_request() + raise @asynccontextmanager async def _stream( From 2b44c697503a42d0627fe61175d94a884cbd1403 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5kon=20V=2E=20Treider?= Date: Mon, 13 Oct 2025 15:11:10 +0200 Subject: [PATCH 08/12] fix: missing payload from on-error-logging/raising --- cognite/client/_basic_api_client.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cognite/client/_basic_api_client.py b/cognite/client/_basic_api_client.py index ea919f3e43..102013045a 100644 --- a/cognite/client/_basic_api_client.py +++ b/cognite/client/_basic_api_client.py @@ -261,14 +261,14 @@ async def _stream( *, url_path: str | None = None, full_url: str | None = None, - json: Any = None, + json: dict[str, Any] | None = None, headers: dict[str, Any] | None = None, full_headers: dict[str, Any] | None = None, timeout: float | None = None, api_subversion: str | None = None, ) -> AsyncIterator[httpx.Response]: assert url_path or full_url, "Either url_path or full_url must be provided" - full_url = full_url or resolve_url(self, "GET", cast(str, url_path))[1] + full_url = full_url or resolve_url(self, method, cast(str, url_path))[1] if full_headers is None: full_headers = self._configure_headers(headers, api_subversion) @@ -282,7 +282,7 @@ async def _stream( yield resp except httpx.HTTPStatusError as err: - await self._handle_status_error(err, stream=True) + await self._handle_status_error(err, payload=json, stream=True) async def _get( self, @@ -339,7 +339,7 @@ async def _post( semaphore=semaphore, ) except httpx.HTTPStatusError as err: - await self._handle_status_error(err) + await self._handle_status_error(err, payload=json) self._log_successful_request(res, payload=json) return res @@ -347,7 +347,7 @@ async def _post( async def _put( self, url_path: str, - content: str | bytes | Iterable[bytes] | None = None, + content: str | bytes | AsyncIterator[bytes] | None = None, json: dict[str, Any] | None = None, params: dict[str, Any] | None = None, headers: dict[str, Any] | None = None, @@ -357,10 +357,10 @@ async def _put( semaphore: asyncio.BoundedSemaphore | None = None, ) -> httpx.Response: _, full_url = resolve_url(self, "PUT", url_path) + full_headers = self._configure_headers(additional_headers=headers, api_subversion=api_subversion) if content is None: content = self._handle_json_dump(json, full_headers) - try: res = await self._http_client_with_retry( "PUT", @@ -373,7 +373,7 @@ async def _put( semaphore=semaphore, ) except httpx.HTTPStatusError as err: - await self._handle_status_error(err) + await self._handle_status_error(err, payload=json) self._log_successful_request(res, payload=json) return res @@ -401,7 +401,7 @@ def _refresh_auth_header(self, headers: MutableMapping[str, Any]) -> None: headers[auth_header_name] = auth_header_value async def _handle_status_error( - self, error: httpx.HTTPStatusError, 
payload: dict | None = None, stream: bool = False + self, error: httpx.HTTPStatusError, payload: dict[str, Any] | None = None, stream: bool = False ) -> NoReturn: """The response had an HTTP status code of 4xx or 5xx""" handler = await FailedRequestHandler.from_status_error(error, stream=stream) From 13b8a1b1168bfb30eda50985c5e311bf93687b89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5kon=20V=2E=20Treider?= Date: Fri, 3 Oct 2025 00:38:05 +0200 Subject: [PATCH 09/12] run script to create all sync APIs + sync CogniteClient --- cognite/client/_cognite_client.py | 14 - cognite/client/_sync_api/__init__.py | 0 cognite/client/_sync_api/agents/__init__.py | 0 cognite/client/_sync_api/agents/agents.py | 327 ++++ cognite/client/_sync_api/ai/__init__.py | 25 + cognite/client/_sync_api/ai/tools/__init__.py | 25 + .../client/_sync_api/ai/tools/documents.py | 141 ++ cognite/client/_sync_api/annotations.py | 185 ++ cognite/client/_sync_api/assets.py | 909 ++++++++++ .../_sync_api/data_modeling/__init__.py | 37 + .../_sync_api/data_modeling/containers.py | 328 ++++ .../_sync_api/data_modeling/data_models.py | 226 +++ .../client/_sync_api/data_modeling/graphql.py | 90 + .../_sync_api/data_modeling/instances.py | 1250 +++++++++++++ .../data_modeling/space_statistics.py | 80 + .../client/_sync_api/data_modeling/spaces.py | 158 ++ .../_sync_api/data_modeling/statistics.py | 50 + .../client/_sync_api/data_modeling/views.py | 263 +++ cognite/client/_sync_api/data_sets.py | 289 +++ cognite/client/_sync_api/datapoints.py | 1559 +++++++++++++++++ .../_sync_api/datapoints_subscriptions.py | 314 ++++ cognite/client/_sync_api/diagrams.py | 217 +++ cognite/client/_sync_api/document_preview.py | 148 ++ cognite/client/_sync_api/documents.py | 466 +++++ cognite/client/_sync_api/entity_matching.py | 318 ++++ cognite/client/_sync_api/events.py | 688 ++++++++ .../_sync_api/extractionpipelines/__init__.py | 237 +++ .../_sync_api/extractionpipelines/configs.py | 118 ++ .../_sync_api/extractionpipelines/runs.py | 126 ++ cognite/client/_sync_api/files.py | 964 ++++++++++ .../client/_sync_api/functions/__init__.py | 448 +++++ cognite/client/_sync_api/functions/calls.py | 174 ++ .../client/_sync_api/functions/schedules.py | 286 +++ cognite/client/_sync_api/geospatial.py | 1014 +++++++++++ .../_sync_api/hosted_extractors/__init__.py | 31 + .../hosted_extractors/destinations.py | 219 +++ .../_sync_api/hosted_extractors/jobs.py | 282 +++ .../_sync_api/hosted_extractors/mappings.py | 201 +++ .../_sync_api/hosted_extractors/sources.py | 210 +++ cognite/client/_sync_api/iam/__init__.py | 85 + cognite/client/_sync_api/iam/groups.py | 140 ++ .../_sync_api/iam/security_categories.py | 100 ++ cognite/client/_sync_api/iam/sessions.py | 109 ++ cognite/client/_sync_api/iam/token.py | 40 + cognite/client/_sync_api/labels.py | 205 +++ .../client/_sync_api/org_apis/principals.py | 116 ++ .../_sync_api/postgres_gateway/__init__.py | 27 + .../_sync_api/postgres_gateway/tables.py | 193 ++ .../_sync_api/postgres_gateway/users.py | 219 +++ cognite/client/_sync_api/raw/__init__.py | 29 + cognite/client/_sync_api/raw/databases.py | 123 ++ cognite/client/_sync_api/raw/rows.py | 343 ++++ cognite/client/_sync_api/raw/tables.py | 128 ++ cognite/client/_sync_api/relationships.py | 416 +++++ cognite/client/_sync_api/sequence_data.py | 326 ++++ cognite/client/_sync_api/sequences.py | 725 ++++++++ .../client/_sync_api/simulators/__init__.py | 81 + .../_sync_api/simulators/integrations.py | 122 ++ cognite/client/_sync_api/simulators/logs.py | 70 + 
cognite/client/_sync_api/simulators/models.py | 251 +++ .../_sync_api/simulators/models_revisions.py | 231 +++ .../_sync_api/simulators/routine_revisions.py | 309 ++++ .../client/_sync_api/simulators/routines.py | 278 +++ cognite/client/_sync_api/simulators/runs.py | 258 +++ .../client/_sync_api/synthetic_time_series.py | 144 ++ cognite/client/_sync_api/three_d/__init__.py | 31 + .../client/_sync_api/three_d/asset_mapping.py | 155 ++ cognite/client/_sync_api/three_d/files.py | 40 + cognite/client/_sync_api/three_d/models.py | 224 +++ cognite/client/_sync_api/three_d/revisions.py | 343 ++++ cognite/client/_sync_api/time_series.py | 726 ++++++++ .../_sync_api/transformations/__init__.py | 535 ++++++ .../client/_sync_api/transformations/jobs.py | 132 ++ .../transformations/notifications.py | 165 ++ .../_sync_api/transformations/schedules.py | 259 +++ .../_sync_api/transformations/schema.py | 49 + cognite/client/_sync_api/unit_system.py | 38 + cognite/client/_sync_api/units.py | 157 ++ cognite/client/_sync_api/user_profiles.py | 141 ++ cognite/client/_sync_api/vision.py | 82 + .../client/_sync_api/workflows/__init__.py | 171 ++ .../client/_sync_api/workflows/executions.py | 225 +++ cognite/client/_sync_api/workflows/tasks.py | 62 + .../client/_sync_api/workflows/triggers.py | 159 ++ .../client/_sync_api/workflows/versions.py | 234 +++ cognite/client/_sync_api_client.py | 7 + cognite/client/_sync_cognite_client.py | 86 + scripts/sync_client_codegen/main.py | 43 +- 88 files changed, 21511 insertions(+), 38 deletions(-) create mode 100644 cognite/client/_sync_api/__init__.py create mode 100644 cognite/client/_sync_api/agents/__init__.py create mode 100644 cognite/client/_sync_api/agents/agents.py create mode 100644 cognite/client/_sync_api/ai/__init__.py create mode 100644 cognite/client/_sync_api/ai/tools/__init__.py create mode 100644 cognite/client/_sync_api/ai/tools/documents.py create mode 100644 cognite/client/_sync_api/annotations.py create mode 100644 cognite/client/_sync_api/assets.py create mode 100644 cognite/client/_sync_api/data_modeling/__init__.py create mode 100644 cognite/client/_sync_api/data_modeling/containers.py create mode 100644 cognite/client/_sync_api/data_modeling/data_models.py create mode 100644 cognite/client/_sync_api/data_modeling/graphql.py create mode 100644 cognite/client/_sync_api/data_modeling/instances.py create mode 100644 cognite/client/_sync_api/data_modeling/space_statistics.py create mode 100644 cognite/client/_sync_api/data_modeling/spaces.py create mode 100644 cognite/client/_sync_api/data_modeling/statistics.py create mode 100644 cognite/client/_sync_api/data_modeling/views.py create mode 100644 cognite/client/_sync_api/data_sets.py create mode 100644 cognite/client/_sync_api/datapoints.py create mode 100644 cognite/client/_sync_api/datapoints_subscriptions.py create mode 100644 cognite/client/_sync_api/diagrams.py create mode 100644 cognite/client/_sync_api/document_preview.py create mode 100644 cognite/client/_sync_api/documents.py create mode 100644 cognite/client/_sync_api/entity_matching.py create mode 100644 cognite/client/_sync_api/events.py create mode 100644 cognite/client/_sync_api/extractionpipelines/__init__.py create mode 100644 cognite/client/_sync_api/extractionpipelines/configs.py create mode 100644 cognite/client/_sync_api/extractionpipelines/runs.py create mode 100644 cognite/client/_sync_api/files.py create mode 100644 cognite/client/_sync_api/functions/__init__.py create mode 100644 cognite/client/_sync_api/functions/calls.py create 
mode 100644 cognite/client/_sync_api/functions/schedules.py create mode 100644 cognite/client/_sync_api/geospatial.py create mode 100644 cognite/client/_sync_api/hosted_extractors/__init__.py create mode 100644 cognite/client/_sync_api/hosted_extractors/destinations.py create mode 100644 cognite/client/_sync_api/hosted_extractors/jobs.py create mode 100644 cognite/client/_sync_api/hosted_extractors/mappings.py create mode 100644 cognite/client/_sync_api/hosted_extractors/sources.py create mode 100644 cognite/client/_sync_api/iam/__init__.py create mode 100644 cognite/client/_sync_api/iam/groups.py create mode 100644 cognite/client/_sync_api/iam/security_categories.py create mode 100644 cognite/client/_sync_api/iam/sessions.py create mode 100644 cognite/client/_sync_api/iam/token.py create mode 100644 cognite/client/_sync_api/labels.py create mode 100644 cognite/client/_sync_api/org_apis/principals.py create mode 100644 cognite/client/_sync_api/postgres_gateway/__init__.py create mode 100644 cognite/client/_sync_api/postgres_gateway/tables.py create mode 100644 cognite/client/_sync_api/postgres_gateway/users.py create mode 100644 cognite/client/_sync_api/raw/__init__.py create mode 100644 cognite/client/_sync_api/raw/databases.py create mode 100644 cognite/client/_sync_api/raw/rows.py create mode 100644 cognite/client/_sync_api/raw/tables.py create mode 100644 cognite/client/_sync_api/relationships.py create mode 100644 cognite/client/_sync_api/sequence_data.py create mode 100644 cognite/client/_sync_api/sequences.py create mode 100644 cognite/client/_sync_api/simulators/__init__.py create mode 100644 cognite/client/_sync_api/simulators/integrations.py create mode 100644 cognite/client/_sync_api/simulators/logs.py create mode 100644 cognite/client/_sync_api/simulators/models.py create mode 100644 cognite/client/_sync_api/simulators/models_revisions.py create mode 100644 cognite/client/_sync_api/simulators/routine_revisions.py create mode 100644 cognite/client/_sync_api/simulators/routines.py create mode 100644 cognite/client/_sync_api/simulators/runs.py create mode 100644 cognite/client/_sync_api/synthetic_time_series.py create mode 100644 cognite/client/_sync_api/three_d/__init__.py create mode 100644 cognite/client/_sync_api/three_d/asset_mapping.py create mode 100644 cognite/client/_sync_api/three_d/files.py create mode 100644 cognite/client/_sync_api/three_d/models.py create mode 100644 cognite/client/_sync_api/three_d/revisions.py create mode 100644 cognite/client/_sync_api/time_series.py create mode 100644 cognite/client/_sync_api/transformations/__init__.py create mode 100644 cognite/client/_sync_api/transformations/jobs.py create mode 100644 cognite/client/_sync_api/transformations/notifications.py create mode 100644 cognite/client/_sync_api/transformations/schedules.py create mode 100644 cognite/client/_sync_api/transformations/schema.py create mode 100644 cognite/client/_sync_api/unit_system.py create mode 100644 cognite/client/_sync_api/units.py create mode 100644 cognite/client/_sync_api/user_profiles.py create mode 100644 cognite/client/_sync_api/vision.py create mode 100644 cognite/client/_sync_api/workflows/__init__.py create mode 100644 cognite/client/_sync_api/workflows/executions.py create mode 100644 cognite/client/_sync_api/workflows/tasks.py create mode 100644 cognite/client/_sync_api/workflows/triggers.py create mode 100644 cognite/client/_sync_api/workflows/versions.py create mode 100644 cognite/client/_sync_api_client.py create mode 100644 
cognite/client/_sync_cognite_client.py diff --git a/cognite/client/_cognite_client.py b/cognite/client/_cognite_client.py index c93ec938c2..b31f4c2c68 100644 --- a/cognite/client/_cognite_client.py +++ b/cognite/client/_cognite_client.py @@ -261,17 +261,3 @@ def load(cls, config: dict[str, Any] | str) -> AsyncCogniteClient: """ loaded = load_resource_to_dict(config) return cls(config=ClientConfig.load(loaded)) - - -class CogniteClient: - """Main entrypoint into the Cognite Python SDK. - - All Cognite Data Fusion APIs are accessible through this synchronous client. - For the asynchronous client, see :class:`~cognite.client._cognite_client.AsyncCogniteClient`. - - Args: - config (ClientConfig | None): The configuration for this client. - """ - - def __init__(self, config: ClientConfig | None = None) -> None: - raise NotImplementedError diff --git a/cognite/client/_sync_api/__init__.py b/cognite/client/_sync_api/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cognite/client/_sync_api/agents/__init__.py b/cognite/client/_sync_api/agents/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cognite/client/_sync_api/agents/agents.py b/cognite/client/_sync_api/agents/agents.py new file mode 100644 index 0000000000..3b1772d852 --- /dev/null +++ b/cognite/client/_sync_api/agents/agents.py @@ -0,0 +1,327 @@ +""" +=============================================================================== +0c2ad8063135aae7d3d1190f8c6c8939 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import TYPE_CHECKING, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.agents import Agent, AgentList, AgentUpsert +from cognite.client.data_classes.agents.chat import Action, ActionResult, AgentChatResponse, Message +from cognite.client.utils._async_helpers import run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncAgentsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def upsert(self, agents: AgentUpsert) -> Agent: ... + + @overload + def upsert(self, agents: Sequence[AgentUpsert]) -> AgentList: ... + + def upsert(self, agents: AgentUpsert | Sequence[AgentUpsert]) -> Agent | AgentList: + """ + `Create or update (upsert) one or more agents. `_ + + Args: + agents (AgentUpsert | Sequence[AgentUpsert]): Agent or list of agents to create or update. + + Returns: + Agent | AgentList: The created or updated agent(s). + + Examples: + + Create a new agent with a query knowledge graph tool to find assets: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.agents import ( + ... AgentUpsert, + ... QueryKnowledgeGraphAgentToolUpsert, + ... QueryKnowledgeGraphAgentToolConfiguration, + ... DataModelInfo + ... ) + >>> client = CogniteClient() + ... + >>> find_assets_tool = QueryKnowledgeGraphAgentToolUpsert( + ... name="find assets", + ... description="Use this tool to find assets", + ... configuration=QueryKnowledgeGraphAgentToolConfiguration( + ... data_models=[ + ... DataModelInfo( + ... space="cdf_idm", + ... 
external_id="CogniteProcessIndustries", + ... version="v1", + ... view_external_ids=["CogniteAsset"], + ... ) + ... ] + ... ) + ... ) + >>> agent = AgentUpsert( + ... external_id="my_agent", + ... name="My Agent", + ... labels=["published"], + ... tools=[find_assets_tool] + ... ) + >>> client.agents.upsert(agents=[agent]) + + Create an agent with multiple different tools: + + >>> from cognite.client.data_classes.agents import ( + ... AgentUpsert, + ... QueryKnowledgeGraphAgentToolUpsert, + ... QueryKnowledgeGraphAgentToolConfiguration, + ... DataModelInfo, + ... SummarizeDocumentAgentToolUpsert, + ... AskDocumentAgentToolUpsert, + ... QueryTimeSeriesDatapointsAgentToolUpsert + ... ) + ... + >>> find_assets_tool = QueryKnowledgeGraphAgentToolUpsert( + ... name="find assets", + ... description="Use this tool to query the knowledge graph for assets", + ... configuration=QueryKnowledgeGraphAgentToolConfiguration( + ... data_models=[ + ... DataModelInfo( + ... space="cdf_idm", + ... external_id="CogniteProcessIndustries", + ... version="v1", + ... view_external_ids=["CogniteAsset"], + ... ) + ... ] + ... ) + ... ) + >>> find_files_tool = QueryKnowledgeGraphAgentToolUpsert( + ... name="find files", + ... description="Use this tool to query the knowledge graph for files", + ... configuration=QueryKnowledgeGraphAgentToolConfiguration( + ... data_models=[ + ... DataModelInfo( + ... space="cdf_idm", + ... external_id="CogniteProcessIndustries", + ... version="v1", + ... view_external_ids=["CogniteFile"], + ... ) + ... ] + ... ) + ... ) + >>> find_time_series_tool = QueryKnowledgeGraphAgentToolUpsert( + ... name="find time series", + ... description="Use this tool to query the knowledge graph for time series", + ... configuration=QueryKnowledgeGraphAgentToolConfiguration( + ... data_models=[ + ... DataModelInfo( + ... space="cdf_idm", + ... external_id="CogniteProcessIndustries", + ... version="v1", + ... view_external_ids=["CogniteTimeSeries"], + ... ) + ... ] + ... ) + ... ) + >>> summarize_tool = SummarizeDocumentAgentToolUpsert( + ... name="summarize document", + ... description="Use this tool to get a summary of a document" + ... ) + >>> ask_doc_tool = AskDocumentAgentToolUpsert( + ... name="ask document", + ... description="Use this tool to ask questions about specific documents" + ... ) + >>> ts_tool = QueryTimeSeriesDatapointsAgentToolUpsert( + ... name="query time series", + ... description="Use this tool to query time series data points" + ... ) + >>> agent = AgentUpsert( + ... external_id="my_agent", + ... name="My agent", + ... description="An agent with many tools", + ... instructions="You are a helpful assistant that can query knowledge graphs, summarize documents, answer questions about documents, and query time series data points.", + ... labels=["published"], + ... tools=[find_assets_tool, find_files_tool, find_time_series_tool, summarize_tool, ask_doc_tool, ts_tool] + ... ) + >>> client.agents.upsert(agents=[agent]) + """ + return run_sync(self.__async_client.agents.upsert(agents=agents)) + + @overload + def retrieve(self, external_ids: str, ignore_unknown_ids: bool = False) -> Agent | None: ... + + @overload + def retrieve(self, external_ids: SequenceNotStr[str], ignore_unknown_ids: bool = False) -> AgentList: ... + + def retrieve( + self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bool = False + ) -> Agent | AgentList | None: + """ + `Retrieve one or more agents by external ID. 
`_ + + Args: + external_ids (str | SequenceNotStr[str]): The external id of the agent(s) to retrieve. + ignore_unknown_ids (bool): Whether to ignore unknown IDs. Defaults to False. + + Returns: + Agent | AgentList | None: The requested agent or agent list. `None` is returned if `ignore_unknown_ids` is `True` and the external ID is not found. + + Examples: + + Retrieve an agent by external id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.agents.retrieve(external_ids="my_agent") + + Retrieve multiple agents: + + >>> res = client.agents.retrieve(external_ids=["my_agent_1", "my_agent_2"]) + """ + return run_sync( + self.__async_client.agents.retrieve(external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids) + ) + + def delete(self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bool = False) -> None: + """ + `Delete one or more agents. `_ + + Args: + external_ids (str | SequenceNotStr[str]): External ID of the agent or a list of external ids. + ignore_unknown_ids (bool): If `True`, the call will ignore unknown external IDs. Defaults to False. + + Examples: + + Delete an agent by external id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.agents.delete(external_ids="my_agent") + """ + return run_sync( + self.__async_client.agents.delete(external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids) + ) + + def list(self) -> AgentList: + """ + `List agents. `_ + + Returns: + AgentList: The list of agents. + + Examples: + + List all agents: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> agent_list = client.agents.list() + """ + return run_sync(self.__async_client.agents.list()) + + def chat( + self, + agent_external_id: str, + messages: Message | ActionResult | Sequence[Message | ActionResult], + cursor: str | None = None, + actions: Sequence[Action] | None = None, + ) -> AgentChatResponse: + """ + `Chat with an agent. `_ + + Given a user query, the Atlas AI agent responds by reasoning and using the tools associated with it. + Users can ensure conversation continuity by including the cursor from the previous response in subsequent requests. + + Args: + agent_external_id (str): External ID that uniquely identifies the agent. + messages (Message | ActionResult | Sequence[Message | ActionResult]): A list of one or many input messages to the agent. Can include regular messages and action results. + cursor (str | None): The cursor to use for continuation of a conversation. Use this to + create multi-turn conversations, as the cursor will keep track of the conversation state. + actions (Sequence[Action] | None): A list of client-side actions that can be called by the agent. + + Returns: + AgentChatResponse: The response from the agent. + + Examples: + + Start a simple chat with an agent: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.agents import Message + >>> client = CogniteClient() + >>> response = client.agents.chat( + ... agent_external_id="my_agent", + ... messages=Message("What can you help me with?") + ... ) + >>> print(response.text) + + Continue a conversation using the cursor: + + >>> follow_up = client.agents.chat( + ... 
agent_external_id="my_agent", + ... messages=Message("Tell me more about that"), + ... cursor=response.cursor + ... ) + + Send multiple messages at once: + + >>> response = client.agents.chat( + ... agent_external_id="my_agent", + ... messages=[ + ... Message("Help me find the 1st stage compressor."), + ... Message("Once you have found it, find related time series.") + ... ] + ... ) + + Chat with client-side actions: + + >>> from cognite.client.data_classes.agents import ClientToolAction, ClientToolResult + >>> add_numbers_action = ClientToolAction( + ... name="add", + ... description="Add two numbers together", + ... parameters={ + ... "type": "object", + ... "properties": { + ... "a": {"type": "number", "description": "First number"}, + ... "b": {"type": "number", "description": "Second number"}, + ... }, + ... "required": ["a", "b"] + ... } + ... ) + >>> response = client.agents.chat( + ... agent_external_id="my_agent", + ... messages=Message("What is 42 plus 58?"), + ... actions=[add_numbers_action] + ... ) + >>> if response.action_calls: + ... for call in response.action_calls: + ... # Execute the action + ... result = call.arguments["a"] + call.arguments["b"] + ... # Send result back + ... response = client.agents.chat( + ... agent_external_id="my_agent", + ... messages=ClientToolResult( + ... action_id=call.action_id, + ... content=f"The result is {result}" + ... ), + ... cursor=response.cursor, + ... actions=[add_numbers_action] + ... ) + """ + return run_sync( + self.__async_client.agents.chat( + agent_external_id=agent_external_id, messages=messages, cursor=cursor, actions=actions + ) + ) diff --git a/cognite/client/_sync_api/ai/__init__.py b/cognite/client/_sync_api/ai/__init__.py new file mode 100644 index 0000000000..b6cd24f1f4 --- /dev/null +++ b/cognite/client/_sync_api/ai/__init__.py @@ -0,0 +1,25 @@ +""" +=============================================================================== +9f3dd7689d375651560a7f9b91cb94e7 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api.ai.tools import SyncAIToolsAPI +from cognite.client._sync_api_client import SyncAPIClient + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncAIAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + self.tools = SyncAIToolsAPI(async_client) diff --git a/cognite/client/_sync_api/ai/tools/__init__.py b/cognite/client/_sync_api/ai/tools/__init__.py new file mode 100644 index 0000000000..0d5061765f --- /dev/null +++ b/cognite/client/_sync_api/ai/tools/__init__.py @@ -0,0 +1,25 @@ +""" +=============================================================================== +c07271fd270ca0c1a19ea1dd1cff15af +This file is auto-generated from the Async API modules, - do not edit manually! 
+=============================================================================== +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api.ai.tools.documents import SyncAIDocumentsAPI +from cognite.client._sync_api_client import SyncAPIClient + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncAIToolsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + self.documents = SyncAIDocumentsAPI(async_client) diff --git a/cognite/client/_sync_api/ai/tools/documents.py b/cognite/client/_sync_api/ai/tools/documents.py new file mode 100644 index 0000000000..874a290cc2 --- /dev/null +++ b/cognite/client/_sync_api/ai/tools/documents.py @@ -0,0 +1,141 @@ +""" +=============================================================================== +e53e36c6b07aeb024b15078c9e4ca307 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import Literal + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.ai import Answer, AnswerLanguage, Summary +from cognite.client.data_classes.data_modeling import NodeId +from cognite.client.utils._async_helpers import run_sync + + +class SyncAIDocumentsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def summarize( + self, id: int | None = None, external_id: str | None = None, instance_id: NodeId | None = None + ) -> Summary: + """ + `Summarize a document using a Large Language Model. `_ + + Note: + Currently only supports summarizing a single document at a time, but + this may be extended in the future. + + Args: + id (int | None): The ID of the document + external_id (str | None): The external ID of the document + instance_id (NodeId | None): The instance ID of the document + + Returns: + Summary: A summary of the document. + + Examples: + + Summarize a single document using ID: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.ai.tools.documents.summarize(id=123) + + You can also use external ID or instance ID: + + >>> from cognite.client.data_classes.data_modeling import NodeId + >>> client.ai.tools.documents.summarize( + ... instance_id=NodeId("my-space", "my-xid") + ... ) + """ + return run_sync( + self.__async_client.ai.tools.documents.summarize(id=id, external_id=external_id, instance_id=instance_id) + ) + + def ask_question( + self, + question: str, + *, + id: int | Sequence[int] | None = None, + external_id: str | Sequence[str] | None = None, + instance_id: NodeId | Sequence[NodeId] | None = None, + language: AnswerLanguage + | Literal[ + "Chinese", + "Dutch", + "English", + "French", + "German", + "Italian", + "Japanese", + "Korean", + "Latvian", + "Norwegian", + "Portuguese", + "Spanish", + "Swedish", + ] = AnswerLanguage.English, + additional_context: str | None = None, + ignore_unknown_ids: bool = False, + ) -> Answer: + """ + `Ask a question about one or more documents using a Large Language Model. 
`_ + + Supports up to 100 documents at a time. + + Args: + question (str): The question. + id (int | Sequence[int] | None): The ID(s) of the document(s) + external_id (str | Sequence[str] | None): The external ID(s) of the document(s) + instance_id (NodeId | Sequence[NodeId] | None): The instance ID(s) of the document(s) + language (AnswerLanguage | Literal['Chinese', 'Dutch', 'English', 'French', 'German', 'Italian', 'Japanese', 'Korean', 'Latvian', 'Norwegian', 'Portuguese', 'Spanish', 'Swedish']): The desired language of the answer, defaults to English. + additional_context (str | None): Additional context that you want the LLM to take into account. + ignore_unknown_ids (bool): Whether to skip documents that do not exist or that are not fully processed, instead of throwing an error. If no valid documents are found, an error will always be raised. + + Returns: + Answer: The answer to the question in the form of a list of multiple content objects, each consisting of a chunk of text along with a set of references. + + Examples: + + Ask a question about a single document with id=123 and get the answer in English (default): + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.ai.tools.documents.ask_question( + ... question="What model pump was used?", + ... id=123, + ... ) + + Ask a question about multiple documents referenced using external IDs, and instance ID + and get the answer in German: + + >>> from cognite.client.data_classes.data_modeling import NodeId + >>> from cognite.client.data_classes.ai import AnswerLanguage + >>> client.ai.tools.documents.ask_question( + ... question="What other pumps are available?", + ... external_id=["foo", "bar"], + ... instance_id=NodeId("my-space", "my-xid"), + ... language=AnswerLanguage.German, + ... ) + """ + return run_sync( + self.__async_client.ai.tools.documents.ask_question( + question=question, + id=id, + external_id=external_id, + instance_id=instance_id, + language=language, + additional_context=additional_context, + ignore_unknown_ids=ignore_unknown_ids, + ) + ) diff --git a/cognite/client/_sync_api/annotations.py b/cognite/client/_sync_api/annotations.py new file mode 100644 index 0000000000..73b3a8dec6 --- /dev/null +++ b/cognite/client/_sync_api/annotations.py @@ -0,0 +1,185 @@ +""" +=============================================================================== +c9ad6444c7e9b577c7eadb5b29053fc1 +This file is auto-generated from the Async API modules, - do not edit manually! 
+=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import TYPE_CHECKING, Literal, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import Annotation, AnnotationFilter, AnnotationList, AnnotationUpdate +from cognite.client.data_classes.annotations import AnnotationReverseLookupFilter, AnnotationWrite +from cognite.client.data_classes.contextualization import ResourceReferenceList +from cognite.client.utils._async_helpers import run_sync + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncAnnotationsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def create(self, annotations: Annotation | AnnotationWrite) -> Annotation: ... + + @overload + def create(self, annotations: Sequence[Annotation | AnnotationWrite]) -> AnnotationList: ... + + def create( + self, annotations: Annotation | AnnotationWrite | Sequence[Annotation | AnnotationWrite] + ) -> Annotation | AnnotationList: + """ + `Create annotations `_ + + Args: + annotations (Annotation | AnnotationWrite | Sequence[Annotation | AnnotationWrite]): Annotation(s) to create + + Returns: + Annotation | AnnotationList: Created annotation(s) + """ + return run_sync(self.__async_client.annotations.create(annotations=annotations)) + + @overload + def suggest(self, annotations: Annotation | AnnotationWrite) -> Annotation: ... + + @overload + def suggest(self, annotations: Sequence[Annotation] | Sequence[AnnotationWrite]) -> AnnotationList: ... + + def suggest( + self, annotations: Annotation | AnnotationWrite | Sequence[Annotation] | Sequence[AnnotationWrite] + ) -> Annotation | AnnotationList: + """ + `Suggest annotations `_ + + Args: + annotations (Annotation | AnnotationWrite | Sequence[Annotation] | Sequence[AnnotationWrite]): annotation(s) to suggest. They must have status set to "suggested". + + Returns: + Annotation | AnnotationList: suggested annotation(s) + """ + return run_sync(self.__async_client.annotations.suggest(annotations=annotations)) + + @overload + def update( + self, + item: Annotation | AnnotationWrite | AnnotationUpdate, + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> Annotation: ... + + @overload + def update( + self, + item: Sequence[Annotation | AnnotationWrite | AnnotationUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> AnnotationList: ... + + def update( + self, + item: Annotation + | AnnotationWrite + | AnnotationUpdate + | Sequence[Annotation | AnnotationWrite | AnnotationUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> Annotation | AnnotationList: + """ + `Update annotations `_ + + Args: + item (Annotation | AnnotationWrite | AnnotationUpdate | Sequence[Annotation | AnnotationWrite | AnnotationUpdate]): Annotation or list of annotations to update (or patch or list of patches to apply) + mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Annotation or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). 
Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + + Returns: + Annotation | AnnotationList: No description. + """ + return run_sync(self.__async_client.annotations.update(item=item, mode=mode)) + + def delete(self, id: int | Sequence[int]) -> None: + """ + `Delete annotations `_ + + Args: + id (int | Sequence[int]): ID or list of IDs to be deleted + """ + return run_sync(self.__async_client.annotations.delete(id=id)) + + def retrieve_multiple(self, ids: Sequence[int]) -> AnnotationList: + """ + `Retrieve annotations by IDs `_ + + Args: + ids (Sequence[int]): list of IDs to be retrieved + + Returns: + AnnotationList: list of annotations + """ + return run_sync(self.__async_client.annotations.retrieve_multiple(ids=ids)) + + def retrieve(self, id: int) -> Annotation | None: + """ + `Retrieve an annotation by id `_ + + Args: + id (int): id of the annotation to be retrieved + + Returns: + Annotation | None: annotation requested + """ + return run_sync(self.__async_client.annotations.retrieve(id=id)) + + def reverse_lookup(self, filter: AnnotationReverseLookupFilter, limit: int | None = None) -> ResourceReferenceList: + """ + Reverse lookup annotated resources based on having annotations matching the filter. + + Args: + filter (AnnotationReverseLookupFilter): Filter to apply + limit (int | None): Maximum number of results to return. Defaults to None (all). + + Returns: + ResourceReferenceList: List of resource references + + Examples: + + Retrieve the first 100 ids of annotated resources matching the 'file' resource type: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import AnnotationReverseLookupFilter + >>> client = CogniteClient() + >>> flt = AnnotationReverseLookupFilter(annotated_resource_type="file") + >>> res = client.annotations.reverse_lookup(flt, limit=100) + """ + return run_sync(self.__async_client.annotations.reverse_lookup(filter=filter, limit=limit)) + + def list(self, filter: AnnotationFilter | dict, limit: int | None = DEFAULT_LIMIT_READ) -> AnnotationList: + """ + `List annotations. `_ + + Note: + Passing a filter with both 'annotated_resource_type' and 'annotated_resource_ids' is always required. + + Args: + filter (AnnotationFilter | dict): Return annotations with parameter values that match what is specified. + limit (int | None): Maximum number of annotations to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ + Returns: + AnnotationList: list of annotations + + Example: + + List all annotations for the file with id=123: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import AnnotationFilter + >>> client = CogniteClient() + >>> flt = AnnotationFilter(annotated_resource_type="file", annotated_resource_ids=[{"id": 123}]) + >>> res = client.annotations.list(flt, limit=None) + """ + return run_sync(self.__async_client.annotations.list(filter=filter, limit=limit)) diff --git a/cognite/client/_sync_api/assets.py b/cognite/client/_sync_api/assets.py new file mode 100644 index 0000000000..9f1f902eef --- /dev/null +++ b/cognite/client/_sync_api/assets.py @@ -0,0 +1,909 @@ +""" +=============================================================================== +0d85e1a0727792cc34a2b348d9a25113 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import Any, Literal, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._api.assets import AggregateAssetProperty, SortSpec +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import ( + Asset, + AssetFilter, + AssetHierarchy, + AssetList, + AssetUpdate, + GeoLocationFilter, + LabelFilter, + TimestampRange, +) +from cognite.client.data_classes.aggregations import AggregationFilter, UniqueResultList +from cognite.client.data_classes.assets import ( + AssetPropertyLike, + AssetWrite, +) +from cognite.client.data_classes.filters import Filter +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + + +class SyncAssetsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[Asset]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[AssetList]: ... + + def __call__( + self, + chunk_size: int | None = None, + name: str | None = None, + parent_ids: Sequence[int] | None = None, + parent_external_ids: SequenceNotStr[str] | None = None, + asset_subtree_ids: int | Sequence[int] | None = None, + asset_subtree_external_ids: str | SequenceNotStr[str] | None = None, + metadata: dict[str, str] | None = None, + data_set_ids: int | Sequence[int] | None = None, + data_set_external_ids: str | SequenceNotStr[str] | None = None, + labels: LabelFilter | None = None, + geo_location: GeoLocationFilter | None = None, + source: str | None = None, + created_time: TimestampRange | dict[str, Any] | None = None, + last_updated_time: TimestampRange | dict[str, Any] | None = None, + root: bool | None = None, + external_id_prefix: str | None = None, + aggregated_properties: Sequence[AggregateAssetProperty] | None = None, + limit: int | None = None, + advanced_filter: Filter | dict[str, Any] | None = None, + sort: SortSpec | list[SortSpec] | None = None, + ) -> Iterator[Asset | AssetList]: + """ + Iterate over assets + + Fetches assets as they are iterated over, so you keep a limited number of assets in memory. + + Args: + chunk_size (int | None): Number of assets to return in each chunk. Defaults to yielding one asset a time. 
+ name (str | None): Name of asset. Often referred to as tag. + parent_ids (Sequence[int] | None): Return only the direct descendants of the specified assets. + parent_external_ids (SequenceNotStr[str] | None): Return only the direct descendants of the specified assets. + asset_subtree_ids (int | Sequence[int] | None): Only include assets in subtrees rooted at any of the specified assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include assets in subtrees rooted at any of the specified assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value + data_set_ids (int | Sequence[int] | None): Return only assets in the specified data set(s) with this id / these ids. + data_set_external_ids (str | SequenceNotStr[str] | None): Return only assets in the specified data set(s) with this external id / these external ids. + labels (LabelFilter | None): Return only the assets matching the specified label. + geo_location (GeoLocationFilter | None): Only include files matching the specified geographic relation. + source (str | None): The source of this asset + created_time (TimestampRange | dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time (TimestampRange | dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + root (bool | None): filtered assets are root assets or not + external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. + aggregated_properties (Sequence[AggregateAssetProperty] | None): Set of aggregated properties to include. Options are childCount, path, depth. + limit (int | None): Maximum number of assets to return. Defaults to return all items. + advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. + sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + + Yields: + Asset | AssetList: yields Asset one by one if chunk_size is not specified, else AssetList objects. + """ + yield from SyncIterator( + self.__async_client.assets( + chunk_size=chunk_size, + name=name, + parent_ids=parent_ids, + parent_external_ids=parent_external_ids, + asset_subtree_ids=asset_subtree_ids, + asset_subtree_external_ids=asset_subtree_external_ids, + metadata=metadata, + data_set_ids=data_set_ids, + data_set_external_ids=data_set_external_ids, + labels=labels, + geo_location=geo_location, + source=source, + created_time=created_time, + last_updated_time=last_updated_time, + root=root, + external_id_prefix=external_id_prefix, + aggregated_properties=aggregated_properties, + limit=limit, + advanced_filter=advanced_filter, + sort=sort, + ) + ) + + def retrieve(self, id: int | None = None, external_id: str | None = None) -> Asset | None: + """ + `Retrieve a single asset by id. 
`_ + + Args: + id (int | None): ID + external_id (str | None): External ID + + Returns: + Asset | None: Requested asset or None if it does not exist. + + Examples: + + Get asset by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.assets.retrieve(id=1) + + Get asset by external id: + + >>> res = client.assets.retrieve(external_id="1") + """ + return run_sync(self.__async_client.assets.retrieve(id=id, external_id=external_id)) + + def retrieve_multiple( + self, + ids: Sequence[int] | None = None, + external_ids: SequenceNotStr[str] | None = None, + ignore_unknown_ids: bool = False, + ) -> AssetList: + """ + `Retrieve multiple assets by id. `_ + + Args: + ids (Sequence[int] | None): IDs + external_ids (SequenceNotStr[str] | None): External IDs + ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + + Returns: + AssetList: The requested assets. + + Examples: + + Get assets by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.assets.retrieve_multiple(ids=[1, 2, 3]) + + Get assets by external id: + + >>> res = client.assets.retrieve_multiple(external_ids=["abc", "def"], ignore_unknown_ids=True) + """ + return run_sync( + self.__async_client.assets.retrieve_multiple( + ids=ids, external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + def aggregate_count( + self, + property: AssetPropertyLike | None = None, + advanced_filter: Filter | dict[str, Any] | None = None, + filter: AssetFilter | dict[str, Any] | None = None, + ) -> int: + """ + `Count of assets matching the specified filters. `_ + + Args: + property (AssetPropertyLike | None): If specified, get an approximate number of asset with a specific property (property is not null) and matching the filters. + advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down the assets to count. + filter (AssetFilter | dict[str, Any] | None): The filter to narrow down the assets to count (strict matching). + + Returns: + int: The number of assets matching the specified filters. + + Examples: + + Count the number of assets in your CDF project: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> count = client.assets.aggregate_count() + + Count the number of assets with the metadata key "timezone" in your CDF project: + + >>> from cognite.client.data_classes.filters import ContainsAny + >>> from cognite.client.data_classes.assets import AssetProperty + >>> has_timezone = ContainsAny(AssetProperty.metadata, "timezone") + >>> asset_count = client.assets.aggregate_count(advanced_filter=has_timezone) + """ + return run_sync( + self.__async_client.assets.aggregate_count( + property=property, advanced_filter=advanced_filter, filter=filter + ) + ) + + def aggregate_cardinality_values( + self, + property: AssetPropertyLike, + advanced_filter: Filter | dict[str, Any] | None = None, + aggregate_filter: AggregationFilter | dict[str, Any] | None = None, + filter: AssetFilter | dict[str, Any] | None = None, + ) -> int: + """ + `Find approximate property count for assets. `_ + + Args: + property (AssetPropertyLike): The property to count the cardinality of. 
+ advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down assets. + aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. + filter (AssetFilter | dict[str, Any] | None): The filter to narrow down assets (strict matching). + Returns: + int: The number of properties matching the specified filters and search. + + Examples: + + Count the number of labels used by assets in your CDF project: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.assets import AssetProperty + >>> client = CogniteClient() + >>> label_count = client.assets.aggregate_cardinality_values(AssetProperty.labels) + + Count the number of timezones (metadata key) for assets with the word "critical" in the description in your CDF project: + + >>> from cognite.client.data_classes.filters import Search + >>> from cognite.client.data_classes.assets import AssetProperty + >>> is_critical = Search(AssetProperty.description, "critical") + >>> critical_assets = client.assets.aggregate_cardinality_values( + ... AssetProperty.metadata_key("timezone"), + ... advanced_filter=is_critical) + """ + return run_sync( + self.__async_client.assets.aggregate_cardinality_values( + property=property, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter + ) + ) + + def aggregate_cardinality_properties( + self, + path: AssetPropertyLike, + advanced_filter: Filter | dict[str, Any] | None = None, + aggregate_filter: AggregationFilter | dict[str, Any] | None = None, + filter: AssetFilter | dict[str, Any] | None = None, + ) -> int: + """ + `Find approximate paths count for assets. `_ + + Args: + path (AssetPropertyLike): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. + It means to aggregate only metadata properties (aka keys). + advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down assets. + aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. + filter (AssetFilter | dict[str, Any] | None): The filter to narrow down assets (strict matching). + Returns: + int: The number of properties matching the specified filters. + + Examples: + + Count the number of unique metadata keys used by assets in your CDF project: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.assets import AssetProperty + >>> client = CogniteClient() + >>> key_count = client.assets.aggregate_cardinality_properties(AssetProperty.metadata) + """ + return run_sync( + self.__async_client.assets.aggregate_cardinality_properties( + path=path, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter + ) + ) + + def aggregate_unique_values( + self, + property: AssetPropertyLike, + advanced_filter: Filter | dict[str, Any] | None = None, + aggregate_filter: AggregationFilter | dict[str, Any] | None = None, + filter: AssetFilter | dict[str, Any] | None = None, + ) -> UniqueResultList: + """ + `Get unique properties with counts for assets. `_ + + Note: + In the case of text fields, the values are aggregated in a case-insensitive manner. + + Args: + property (AssetPropertyLike): The property to group by. + advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down assets. + aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. 
+ filter (AssetFilter | dict[str, Any] | None): The filter to narrow down assets (strict matching). + + Returns: + UniqueResultList: List of unique values of assets matching the specified filters and search. + + Examples: + + Get the timezones (metadata key) with count for your assets in your CDF project: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.assets import AssetProperty + >>> client = CogniteClient() + >>> result = client.assets.aggregate_unique_values(AssetProperty.metadata_key("timezone")) + >>> print(result.unique) + + Get the different labels with count used for assets created after 2020-01-01 in your CDF project: + + >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes.assets import AssetProperty + >>> from cognite.client.utils import timestamp_to_ms + >>> from datetime import datetime + >>> created_after_2020 = filters.Range(AssetProperty.created_time, gte=timestamp_to_ms(datetime(2020, 1, 1))) + >>> result = client.assets.aggregate_unique_values(AssetProperty.labels, advanced_filter=created_after_2020) + >>> print(result.unique) + + Get the different labels with count for assets updated after 2020-01-01 in your CDF project, but exclude all labels that + starts with "test": + + >>> from cognite.client.data_classes.assets import AssetProperty + >>> from cognite.client.data_classes import aggregations + >>> from cognite.client.data_classes import filters + >>> not_test = aggregations.Not(aggregations.Prefix("test")) + >>> created_after_2020 = filters.Range(AssetProperty.last_updated_time, gte=timestamp_to_ms(datetime(2020, 1, 1))) + >>> result = client.assets.aggregate_unique_values(AssetProperty.labels, advanced_filter=created_after_2020, aggregate_filter=not_test) + >>> print(result.unique) + """ + return run_sync( + self.__async_client.assets.aggregate_unique_values( + property=property, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter + ) + ) + + def aggregate_unique_properties( + self, + path: AssetPropertyLike, + advanced_filter: Filter | dict[str, Any] | None = None, + aggregate_filter: AggregationFilter | dict[str, Any] | None = None, + filter: AssetFilter | dict[str, Any] | None = None, + ) -> UniqueResultList: + """ + `Get unique paths with counts for assets. `_ + + Note: + In the case of text fields, the values are aggregated in a case-insensitive manner. + + Args: + path (AssetPropertyLike): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. + It means to aggregate only metadata properties (aka keys). + advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down assets. + aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. + filter (AssetFilter | dict[str, Any] | None): The filter to narrow down assets (strict matching). + + Returns: + UniqueResultList: List of unique values of assets matching the specified filters and search. 
+ + Examples: + + Get the metadata keys with counts for your assets in your CDF project: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.assets import AssetProperty + >>> client = CogniteClient() + >>> result = client.assets.aggregate_unique_properties(AssetProperty.metadata) + """ + return run_sync( + self.__async_client.assets.aggregate_unique_properties( + path=path, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter + ) + ) + + @overload + def create(self, asset: Sequence[Asset] | Sequence[AssetWrite]) -> AssetList: ... + + @overload + def create(self, asset: Asset | AssetWrite) -> Asset: ... + + def create(self, asset: Asset | AssetWrite | Sequence[Asset] | Sequence[AssetWrite]) -> Asset | AssetList: + """ + `Create one or more assets. `_ + + You can create an arbitrary number of assets, and the SDK will split the request into multiple requests. + When specifying a parent-child relation between assets using `parentExternalId`, the link will be resolved into an internal ID and stored as `parentId`. + + Args: + asset (Asset | AssetWrite | Sequence[Asset] | Sequence[AssetWrite]): Asset or list of assets to create. + + Returns: + Asset | AssetList: Created asset(s) + + Examples: + + Create new assets: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import AssetWrite + >>> client = CogniteClient() + >>> assets = [AssetWrite(name="asset1"), AssetWrite(name="asset2")] + >>> res = client.assets.create(assets) + + Create asset with label: + + >>> from cognite.client.data_classes import AssetWrite, Label + >>> asset = AssetWrite(name="my_pump", labels=[Label(external_id="PUMP")]) + >>> res = client.assets.create(asset) + """ + return run_sync(self.__async_client.assets.create(asset=asset)) + + def create_hierarchy( + self, + assets: Sequence[AssetWrite] | AssetHierarchy, + *, + upsert: bool = False, + upsert_mode: Literal["patch", "replace"] = "patch", + ) -> AssetList: + """ + Create an asset hierarchy with validation. + + This helper function makes it easy to insert large asset hierarchies. It solves the problem of topological + insertion order, i.e. a parent asset must exist before it can be referenced by any 'children' assets. + You may pass any number of partial- or full hierarchies: there are no requirements on the number of root + assets, so you may pass zero, one or many (same goes for the non-root assets). + + Args: + assets (Sequence[AssetWrite] | AssetHierarchy): List of assets to create or an instance of AssetHierarchy. + upsert (bool): If used, already existing assets will be updated instead of an exception being raised. You may control how updates are applied with the 'upsert_mode' argument. + upsert_mode (Literal['patch', 'replace']): Only applicable with upsert. Pass 'patch' to only update fields with non-null values (default), or 'replace' to do full updates (unset fields become null or empty). + + Returns: + AssetList: Created (and possibly updated) asset hierarchy + + Prior to insertion, this function will run validation on the given assets and raise an error if any of + the following issues are found: + + 1. Any assets are invalid (category: ``invalid``): + + - Missing external ID. + - Missing a valid name. + - Has an ID set (note: you may not pass Asset, use AssetWrite) + 2. Any asset duplicates exist (category: ``duplicates``) + 3. Any assets have an ambiguous parent link (category: ``unsure_parents``) + 4. Any group of assets forms a cycle, e.g.
A->B->A (category: ``cycles``) + + As part of validation there is a fifth category that is ignored when using this method (for backwards compatibility) and that + is orphan assets. These are assets linking a parent by an identifier that is not present among the given assets, and as such, + might contain links we are unable to vet ahead of insertion. These are thus assumed to be valid, but may fail. + + Tip: + The different categories specified above correspond to the name of the attribute you might access on the raised error to + get the collection of 'bad' assets falling in that group, e.g. ``error.duplicates``. + + Note: + Updating ``external_id`` via upsert is not supported (and will not be supported). Use ``AssetsAPI.update`` instead. + + Warning: + The API does not natively support upsert, so the SDK has to simulate the behaviour at the cost of some insertion speed. + + Be careful when moving assets to new parents via upsert: Please do so only by specifying ``parent_external_id`` + (instead of ``parent_id``) to avoid race conditions in insertion order (temporary cycles might form since we + can only make changes to 1000 assets at a time). + + Examples: + + Create an asset hierarchy: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import AssetWrite + >>> client = CogniteClient() + >>> assets = [ + ... AssetWrite(external_id="root", name="root"), + ... AssetWrite(external_id="child1", parent_external_id="root", name="child1"), + ... AssetWrite(external_id="child2", parent_external_id="root", name="child2")] + >>> res = client.assets.create_hierarchy(assets) + + Create an asset hierarchy, but run update for existing assets: + + >>> res = client.assets.create_hierarchy(assets, upsert=True, upsert_mode="patch") + + Patch will only update the parameters you have defined on your assets. Note that specifically setting + something to ``None`` is the same as not setting it. For ``metadata``, this will extend your existing + data, only overwriting when keys overlap. For ``labels`` the behaviour is mostly the same, existing ones are + left untouched, and your new ones are simply added. + + You may also pass ``upsert_mode="replace"`` to make sure the updated assets look identical to the ones + you passed to the method. For both ``metadata`` and ``labels`` this will clear out all existing, + before (potentially) adding the new ones. + + If the hierarchy validation for some reason fails, you may inspect all the issues that were found by + catching :class:`~cognite.client.exceptions.CogniteAssetHierarchyError`: + + >>> from cognite.client.exceptions import CogniteAssetHierarchyError + >>> try: + ... res = client.assets.create_hierarchy(assets) + ... except CogniteAssetHierarchyError as err: + ... if err.invalid: + ... ... # do something + + In addition to ``invalid``, you may inspect ``duplicates``, ``unsure_parents``, ``orphans`` and ``cycles``. + Note that cycles are not available if any of the other basic issues exist, as the search for cyclical + references requires a clean asset hierarchy to begin with. + + You may also wrap the ``create_hierarchy()`` call in a try-except to get information if any of the assets + fails to be created (assuming a valid hierarchy): + + >>> from cognite.client.exceptions import CogniteAPIError + >>> try: + ... client.assets.create_hierarchy(assets) + ... except CogniteAPIError as err: + ... created = err.successful + ... maybe_created = err.unknown + ...
not_created = err.failed + + Here's a slightly longer explanation of the different groups: + + - ``err.successful``: Which assets were created (request yielded a 201) + - ``err.unknown``: Which assets *may* have been created (request yielded 5xx) + - ``err.failed``: Which assets were *not* created (request yielded 4xx, or was a descendant of an asset with unknown status) + + The preferred way to create an asset hierarchy, is to run validation *prior to insertion*. You may do this by + using the :class:`~cognite.client.data_classes.assets.AssetHierarchy` class. It will by default consider orphan + assets to be problematic (but accepts the boolean parameter ``ignore_orphans``), contrary to how ``create_hierarchy`` + works (which accepts them in order to be backwards-compatible). It also provides helpful methods to create reports + of any issues found, check out ``validate_and_report``: + + >>> from cognite.client.data_classes import AssetHierarchy + >>> from pathlib import Path + >>> hierarchy = AssetHierarchy(assets) + >>> if hierarchy.is_valid(): + ... res = client.assets.create_hierarchy(hierarchy) + ... else: + ... hierarchy.validate_and_report(output_file=Path("report.txt")) + """ + return run_sync( + self.__async_client.assets.create_hierarchy(assets=assets, upsert=upsert, upsert_mode=upsert_mode) + ) + + def delete( + self, + id: int | Sequence[int] | None = None, + external_id: str | SequenceNotStr[str] | None = None, + recursive: bool = False, + ignore_unknown_ids: bool = False, + ) -> None: + """ + `Delete one or more assets `_ + + Args: + id (int | Sequence[int] | None): Id or list of ids + external_id (str | SequenceNotStr[str] | None): External ID or list of external ids + recursive (bool): Recursively delete whole asset subtrees under given ids. Defaults to False. + ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + + Examples: + + Delete assets by id or external id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.assets.delete(id=[1,2,3], external_id="3") + """ + return run_sync( + self.__async_client.assets.delete( + id=id, external_id=external_id, recursive=recursive, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + @overload + def update( + self, + item: Sequence[Asset | AssetWrite | AssetUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> AssetList: ... + + @overload + def update( + self, + item: Asset | AssetWrite | AssetUpdate, + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> Asset: ... + + def update( + self, + item: Asset | AssetWrite | AssetUpdate | Sequence[Asset | AssetWrite | AssetUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> Asset | AssetList: + """ + `Update one or more assets `_ + Labels can be added, removed or replaced (set). Note that set operation deletes all the existing labels and adds the new specified labels. + + Args: + item (Asset | AssetWrite | AssetUpdate | Sequence[Asset | AssetWrite | AssetUpdate]): Asset(s) to update + mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Asset or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). 
Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + Returns: + Asset | AssetList: Updated asset(s) + + Examples: + Perform a partial update on an asset, updating the description and adding a new field to metadata: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import AssetUpdate + >>> client = CogniteClient() + >>> my_update = AssetUpdate(id=1).description.set("New description").metadata.add({"key": "value"}) + >>> res1 = client.assets.update(my_update) + >>> # Remove an already set field like so + >>> another_update = AssetUpdate(id=1).description.set(None) + >>> res2 = client.assets.update(another_update) + + Remove the metadata on an asset: + + >>> from cognite.client.data_classes import AssetUpdate + >>> my_update = AssetUpdate(id=1).metadata.add({"key": "value"}) + >>> res1 = client.assets.update(my_update) + >>> another_update = AssetUpdate(id=1).metadata.set(None) + >>> # The same result can be achieved with: + >>> another_update2 = AssetUpdate(id=1).metadata.set({}) + >>> res2 = client.assets.update(another_update) + + Attach labels to an asset: + + >>> from cognite.client.data_classes import AssetUpdate + >>> my_update = AssetUpdate(id=1).labels.add(["PUMP", "VERIFIED"]) + >>> res = client.assets.update(my_update) + + Detach a single label from an asset: + + >>> from cognite.client.data_classes import AssetUpdate + >>> my_update = AssetUpdate(id=1).labels.remove("PUMP") + >>> res = client.assets.update(my_update) + + Replace all labels for an asset: + + >>> from cognite.client.data_classes import AssetUpdate + >>> my_update = AssetUpdate(id=1).labels.set("PUMP") + >>> res = client.assets.update(my_update) + """ + return run_sync(self.__async_client.assets.update(item=item, mode=mode)) + + @overload + def upsert(self, item: Sequence[Asset | AssetWrite], mode: Literal["patch", "replace"] = "patch") -> AssetList: ... + + @overload + def upsert(self, item: Asset | AssetWrite, mode: Literal["patch", "replace"] = "patch") -> Asset: ... + + def upsert( + self, item: Asset | AssetWrite | Sequence[Asset | AssetWrite], mode: Literal["patch", "replace"] = "patch" + ) -> Asset | AssetList: + """ + Upsert assets, i.e., update if it exists, and create if it does not exist. + Note this is a convenience method that handles the upserting for you by first calling update on all items, + and if any of them fail because they do not exist, it will create them instead. + + For more details, see :ref:`appendix-upsert`. + + Args: + item (Asset | AssetWrite | Sequence[Asset | AssetWrite]): Asset or list of assets to upsert. + mode (Literal['patch', 'replace']): Whether to patch or replace in the case the assets are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. + + Returns: + Asset | AssetList: The upserted asset(s). 
+ + Examples: + + Upsert for assets: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import AssetWrite + >>> client = CogniteClient() + >>> existing_asset = client.assets.retrieve(id=1) + >>> existing_asset.description = "New description" + >>> new_asset = AssetWrite(external_id="new_asset", name="my asset", description="New asset") + >>> res = client.assets.upsert([existing_asset, new_asset], mode="replace") + """ + return run_sync(self.__async_client.assets.upsert(item=item, mode=mode)) + + def search( + self, + name: str | None = None, + description: str | None = None, + query: str | None = None, + filter: AssetFilter | dict[str, Any] | None = None, + limit: int = DEFAULT_LIMIT_READ, + ) -> AssetList: + """ + `Search for assets `_ + Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required. + + Args: + name (str | None): Fuzzy match on name. + description (str | None): Fuzzy match on description. + query (str | None): Whitespace-separated terms to search for in assets. Does a best-effort fuzzy search in relevant fields (currently name and description) for variations of any of the search terms, and orders results by relevance. + filter (AssetFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields. + limit (int): Maximum number of results to return. + + Returns: + AssetList: List of requested assets + + Examples: + + Search for assets by fuzzy search on name: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.assets.search(name="some name") + + Search for assets by exact search on name: + + >>> res = client.assets.search(filter={"name": "some name"}) + + Search for assets by improved multi-field fuzzy search: + + >>> res = client.assets.search(query="TAG 30 XV") + + Search for assets using multiple filters, finding all assets with name similar to `xyz` with parent asset `123` or `456` with source `some source`: + + >>> res = client.assets.search(name="xyz",filter={"parent_ids": [123,456],"source": "some source"}) + + Search for an asset with an attached label: + + >>> my_label_filter = LabelFilter(contains_all=["PUMP"]) + >>> res = client.assets.search(name="xyz",filter=AssetFilter(labels=my_label_filter)) + """ + return run_sync( + self.__async_client.assets.search( + name=name, description=description, query=query, filter=filter, limit=limit + ) + ) + + def retrieve_subtree( + self, id: int | None = None, external_id: str | None = None, depth: int | None = None + ) -> AssetList: + """ + Retrieve the subtree for this asset up to a specified depth. + + Args: + id (int | None): Id of the root asset in the subtree. + external_id (str | None): External id of the root asset in the subtree. + depth (int | None): Retrieve assets up to this depth below the root asset in the subtree. Omit to get the entire subtree. + + Returns: + AssetList: The requested assets or empty AssetList if asset does not exist. 
+ """ + return run_sync(self.__async_client.assets.retrieve_subtree(id=id, external_id=external_id, depth=depth)) + + def list( + self, + name: str | None = None, + parent_ids: Sequence[int] | None = None, + parent_external_ids: SequenceNotStr[str] | None = None, + asset_subtree_ids: int | Sequence[int] | None = None, + asset_subtree_external_ids: str | SequenceNotStr[str] | None = None, + data_set_ids: int | Sequence[int] | None = None, + data_set_external_ids: str | SequenceNotStr[str] | None = None, + labels: LabelFilter | None = None, + geo_location: GeoLocationFilter | None = None, + metadata: dict[str, str] | None = None, + source: str | None = None, + created_time: dict[str, Any] | TimestampRange | None = None, + last_updated_time: dict[str, Any] | TimestampRange | None = None, + root: bool | None = None, + external_id_prefix: str | None = None, + aggregated_properties: Sequence[AggregateAssetProperty] | None = None, + partitions: int | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + advanced_filter: Filter | dict[str, Any] | None = None, + sort: SortSpec | list[SortSpec] | None = None, + ) -> AssetList: + """ + `List assets `_ + + Args: + name (str | None): Name of asset. Often referred to as tag. + parent_ids (Sequence[int] | None): Return only the direct descendants of the specified assets. + parent_external_ids (SequenceNotStr[str] | None): Return only the direct descendants of the specified assets. + asset_subtree_ids (int | Sequence[int] | None): Only include assets in subtrees rooted at any of the specified assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include assets in subtrees rooted at any of the specified assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids (int | Sequence[int] | None): Return only assets in the specified data set(s) with this id / these ids. + data_set_external_ids (str | SequenceNotStr[str] | None): Return only assets in the specified data set(s) with this external id / these external ids. + labels (LabelFilter | None): Return only the assets matching the specified label filter. + geo_location (GeoLocationFilter | None): Only include files matching the specified geographic relation. + metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value. + source (str | None): The source of this asset. + created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + root (bool | None): filtered assets are root assets or not. + external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. + aggregated_properties (Sequence[AggregateAssetProperty] | None): Set of aggregated properties to include. Options are childCount, path, depth. + partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). + limit (int | None): Maximum number of assets to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). 
It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage. + sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + + Returns: + AssetList: List of requested assets + + .. note:: + When using `partitions`, there are few considerations to keep in mind: + * `limit` has to be set to `None` (or `-1`). + * API may reject requests if you specify more than 10 partitions. When Cognite enforces this behavior, the requests result in a 400 Bad Request status. + * Partitions are done independently of sorting: there's no guarantee of the sort order between elements from different partitions. For this reason providing a `sort` parameter when using `partitions` is not allowed. + + Examples: + + List assets: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> asset_list = client.assets.list(limit=5) + + Iterate over assets, one-by-one: + + >>> for asset in client.assets(): + ... asset # do something with the asset + + Iterate over chunks of assets to reduce memory load: + + >>> for asset_list in client.assets(chunk_size=2500): + ... asset_list # do something with the assets + + Filter assets based on labels: + + >>> from cognite.client.data_classes import LabelFilter + >>> my_label_filter = LabelFilter(contains_all=["PUMP", "VERIFIED"]) + >>> asset_list = client.assets.list(labels=my_label_filter) + + Using advanced filter, find all assets that have a metadata key 'timezone' starting with 'Europe', + and sort by external id ascending: + + >>> from cognite.client.data_classes import filters + >>> in_timezone = filters.Prefix(["metadata", "timezone"], "Europe") + >>> res = client.assets.list(advanced_filter=in_timezone, sort=("external_id", "asc")) + + Note that you can check the API documentation above to see which properties you can filter on + with which filters. + + To make it easier to avoid spelling mistakes and easier to look up available properties + for filtering and sorting, you can also use the `AssetProperty` and `SortableAssetProperty` Enums. + + >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes.assets import AssetProperty, SortableAssetProperty + >>> in_timezone = filters.Prefix(AssetProperty.metadata_key("timezone"), "Europe") + >>> res = client.assets.list( + ... advanced_filter=in_timezone, + ... sort=(SortableAssetProperty.external_id, "asc")) + + Combine filter and advanced filter: + + >>> from cognite.client.data_classes import filters + >>> not_instrument_lvl5 = filters.And( + ... filters.ContainsAny("labels", ["Level5"]), + ... filters.Not(filters.ContainsAny("labels", ["Instrument"])) + ... 
) + >>> res = client.assets.list(asset_subtree_ids=[123456], advanced_filter=not_instrument_lvl5) + """ + return run_sync( + self.__async_client.assets.list( + name=name, + parent_ids=parent_ids, + parent_external_ids=parent_external_ids, + asset_subtree_ids=asset_subtree_ids, + asset_subtree_external_ids=asset_subtree_external_ids, + data_set_ids=data_set_ids, + data_set_external_ids=data_set_external_ids, + labels=labels, + geo_location=geo_location, + metadata=metadata, + source=source, + created_time=created_time, + last_updated_time=last_updated_time, + root=root, + external_id_prefix=external_id_prefix, + aggregated_properties=aggregated_properties, + partitions=partitions, + limit=limit, + advanced_filter=advanced_filter, + sort=sort, + ) + ) diff --git a/cognite/client/_sync_api/data_modeling/__init__.py b/cognite/client/_sync_api/data_modeling/__init__.py new file mode 100644 index 0000000000..41eb939a72 --- /dev/null +++ b/cognite/client/_sync_api/data_modeling/__init__.py @@ -0,0 +1,37 @@ +""" +=============================================================================== +1fe95c1878f11bc0bee617a86e1dc4a4 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api.data_modeling.containers import SyncContainersAPI +from cognite.client._sync_api.data_modeling.data_models import SyncDataModelsAPI +from cognite.client._sync_api.data_modeling.graphql import SyncDataModelingGraphQLAPI +from cognite.client._sync_api.data_modeling.instances import SyncInstancesAPI +from cognite.client._sync_api.data_modeling.spaces import SyncSpacesAPI +from cognite.client._sync_api.data_modeling.statistics import SyncStatisticsAPI +from cognite.client._sync_api.data_modeling.views import SyncViewsAPI +from cognite.client._sync_api_client import SyncAPIClient + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncDataModelingAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + self.containers = SyncContainersAPI(async_client) + self.data_models = SyncDataModelsAPI(async_client) + self.spaces = SyncSpacesAPI(async_client) + self.views = SyncViewsAPI(async_client) + self.instances = SyncInstancesAPI(async_client) + self.graphql = SyncDataModelingGraphQLAPI(async_client) + self.statistics = SyncStatisticsAPI(async_client) diff --git a/cognite/client/_sync_api/data_modeling/containers.py b/cognite/client/_sync_api/data_modeling/containers.py new file mode 100644 index 0000000000..7ca9098574 --- /dev/null +++ b/cognite/client/_sync_api/data_modeling/containers.py @@ -0,0 +1,328 @@ +""" +=============================================================================== +8a40bb13bf895e182628f40657186557 +This file is auto-generated from the Async API modules, - do not edit manually! 
+=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DATA_MODELING_DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.data_modeling.containers import ( + Container, + ContainerApply, + ContainerList, +) +from cognite.client.data_classes.data_modeling.ids import ( + ConstraintIdentifier, + ContainerId, + ContainerIdentifier, + IndexIdentifier, +) +from cognite.client.utils._async_helpers import SyncIterator, run_sync + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncContainersAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[Container]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[ContainerList]: ... + + def __call__( + self, + chunk_size: int | None = None, + space: str | None = None, + include_global: bool = False, + limit: int | None = None, + ) -> Iterator[Container | ContainerList]: + """ + Iterate over containers + + Fetches containers as they are iterated over, so you keep a limited number of containers in memory. + + Args: + chunk_size (int | None): Number of containers to return in each chunk. Defaults to yielding one container a time. + space (str | None): The space to query. + include_global (bool): Whether the global containers should be returned. + limit (int | None): Maximum number of containers to return. Defaults to returning all items. + + Yields: + Container | ContainerList: yields Container one by one if chunk_size is not specified, else ContainerList objects. + """ + yield from SyncIterator( + self.__async_client.data_modeling.containers( + chunk_size=chunk_size, space=space, include_global=include_global, limit=limit + ) + ) + + @overload + def retrieve(self, ids: ContainerIdentifier) -> Container | None: ... + + @overload + def retrieve(self, ids: Sequence[ContainerIdentifier]) -> ContainerList: ... + + def retrieve(self, ids: ContainerIdentifier | Sequence[ContainerIdentifier]) -> Container | ContainerList | None: + """ + `Retrieve one or more container by id(s). `_ + + Args: + ids (ContainerIdentifier | Sequence[ContainerIdentifier]): Identifier for container(s). + + Returns: + Container | ContainerList | None: Requested container or None if it does not exist. + + Examples: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.data_modeling.containers.retrieve(('mySpace', 'myContainer')) + + Fetch using the ContainerId: + + >>> from cognite.client.data_classes.data_modeling import ContainerId + >>> res = client.data_modeling.containers.retrieve( + ... ContainerId(space='mySpace', external_id='myContainer')) + """ + return run_sync(self.__async_client.data_modeling.containers.retrieve(ids=ids)) + + def delete(self, ids: ContainerIdentifier | Sequence[ContainerIdentifier]) -> list[ContainerId]: + """ + `Delete one or more containers `_ + + Args: + ids (ContainerIdentifier | Sequence[ContainerIdentifier]): The container identifier(s). 
+ Returns: + list[ContainerId]: The container(s) which has been deleted. Empty list if nothing was deleted. + Examples: + + Delete containers by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.data_modeling.containers.delete(("mySpace", "myContainer")) + """ + return run_sync(self.__async_client.data_modeling.containers.delete(ids=ids)) + + def delete_constraints(self, ids: Sequence[ConstraintIdentifier]) -> list[ConstraintIdentifier]: + """ + `Delete one or more constraints `_ + + Args: + ids (Sequence[ConstraintIdentifier]): The constraint identifier(s). + Returns: + list[ConstraintIdentifier]: The constraints(s) which have been deleted. + Examples: + + Delete constraints by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.data_modeling.containers.delete_constraints( + ... [(ContainerId("mySpace", "myContainer"), "myConstraint")] + ... ) + """ + return run_sync(self.__async_client.data_modeling.containers.delete_constraints(ids=ids)) + + def delete_indexes(self, ids: Sequence[IndexIdentifier]) -> list[IndexIdentifier]: + """ + `Delete one or more indexes `_ + + Args: + ids (Sequence[IndexIdentifier]): The index identifier(s). + Returns: + list[IndexIdentifier]: The indexes(s) which has been deleted. + Examples: + + Delete indexes by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.data_modeling.containers.delete_indexes( + ... [(ContainerId("mySpace", "myContainer"), "myIndex")] + ... ) + """ + return run_sync(self.__async_client.data_modeling.containers.delete_indexes(ids=ids)) + + def list( + self, + space: str | None = None, + limit: int | None = DATA_MODELING_DEFAULT_LIMIT_READ, + include_global: bool = False, + ) -> ContainerList: + """ + `List containers `_ + + Args: + space (str | None): The space to query + limit (int | None): Maximum number of containers to return. Defaults to 10. Set to -1, float("inf") or None to return all items. + include_global (bool): Whether the global containers should be returned. + + Returns: + ContainerList: List of requested containers + + Examples: + + List containers and limit to 5: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> container_list = client.data_modeling.containers.list(limit=5) + + Iterate over containers, one-by-one: + + >>> for container in client.data_modeling.containers(): + ... container # do something with the container + + Iterate over chunks of containers to reduce memory load: + + >>> for container_list in client.data_modeling.containers(chunk_size=10): + ... container_list # do something with the containers + """ + return run_sync( + self.__async_client.data_modeling.containers.list(space=space, limit=limit, include_global=include_global) + ) + + @overload + def apply(self, container: Sequence[ContainerApply]) -> ContainerList: ... + + @overload + def apply(self, container: ContainerApply) -> Container: ... + + def apply(self, container: ContainerApply | Sequence[ContainerApply]) -> Container | ContainerList: + """ + `Add or update (upsert) containers. 
`_ + + Args: + container (ContainerApply | Sequence[ContainerApply]): Container(s) to create or update. + + Returns: + Container | ContainerList: Created container(s) + + Examples: + + Create a new container: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.data_modeling import ( + ... ContainerApply, ContainerProperty, Text, Float64) + >>> client = CogniteClient() + >>> container = ContainerApply( + ... space="mySpace", + ... external_id="myContainer", + ... properties={ + ... "name": ContainerProperty(type=Text, name="name"), + ... "numbers": ContainerProperty( + ... type=Float64(is_list=True, max_list_size=200), + ... description="very important numbers", + ... ), + ... }, + ... ), + >>> res = client.data_modeling.containers.apply(container) + + Create new container with unit-aware properties: + + >>> from cognite.client.data_classes.data_modeling import Float64 + >>> from cognite.client.data_classes.data_modeling.data_types import UnitReference + >>> container = ContainerApply( + ... space="mySpace", + ... external_id="myContainer", + ... properties={ + ... "maxPressure": ContainerProperty( + ... nullable=True, + ... description="Maximum Pump Pressure", + ... name="maxPressure", + ... type=Float64( + ... unit=UnitReference( + ... external_id="pressure:bar", + ... source_unit="BAR" + ... ) + ... ) + ... ), + ... "rotationConfigurations": ContainerProperty( + ... nullable=True, + ... description="Rotation Configurations", + ... name="rotationConfigurations", + ... type=Float64( + ... is_list=True, + ... unit=UnitReference( + ... external_id="angular_velocity:rev-per-min" + ... ) + ... ) + ... ) + ... } + ... ) + >>> res = client.data_modeling.containers.apply(container) + + Example container with all available properties (for illustration purposes). Note that + ``ContainerProperty`` has several options not shown here, like ``name``, ``description``, + ``nullable``, ``auto_increment``, ``default_value`` and ``immutable`` that may be specified, + depending on the choice of property type (e.g. ``auto_increment`` only works with integer types). + + >>> from cognite.client.data_classes.data_modeling.data_types import UnitReference, EnumValue + >>> from cognite.client.data_classes.data_modeling.data_types import ( + ... Boolean, Date, DirectRelation, Enum, FileReference, Float32, Float64, + ... Int32, Int64, Json, SequenceReference, Text, TimeSeriesReference, Timestamp + ... ) + >>> container_properties = { + ... "prop01": ContainerProperty(Boolean), + ... "prop02": ContainerProperty(Boolean(is_list=True)), + ... "prop03": ContainerProperty(Date), + ... "prop04": ContainerProperty(Date(is_list=True)), + ... "prop05": ContainerProperty(Timestamp), + ... "prop06": ContainerProperty(Timestamp(is_list=True)), + ... "prop07": ContainerProperty(Text), + ... "prop08": ContainerProperty(Text(is_list=True)), + ... # Note: DirectRelation(list) support `container`: The (optional) required type for the node + ... # the direct relation points to. + ... "prop09": ContainerProperty(DirectRelation), + ... "prop10": ContainerProperty(DirectRelation(is_list=True)), + ... # Note: Enum also support `unknown_value`: The value to use when the enum value is unknown. + ... "prop11": ContainerProperty( + ... Enum({"Closed": EnumValue("Valve is closed"), + ... "Opened": EnumValue("Valve is opened")})), + ... # Note: Floats support unit references, e.g. `unit=UnitReference("pressure:bar")`: + ... "prop12": ContainerProperty(Float32), + ... 
"prop13": ContainerProperty(Float32(is_list=True)), + ... "prop14": ContainerProperty(Float64), + ... "prop15": ContainerProperty(Float64(is_list=True)), + ... "prop16": ContainerProperty(Int32), + ... "prop17": ContainerProperty(Int32(is_list=True)), + ... "prop18": ContainerProperty(Int64), + ... "prop19": ContainerProperty(Int64(is_list=True)), + ... "prop20": ContainerProperty(Json), + ... "prop21": ContainerProperty(Json(is_list=True)), + ... "prop22": ContainerProperty(SequenceReference), + ... "prop23": ContainerProperty(SequenceReference(is_list=True)), + ... # Note: It is adviced to represent files and time series directly as nodes + ... # instead of referencing existing: + ... "prop24": ContainerProperty(FileReference), + ... "prop25": ContainerProperty(FileReference(is_list=True)), + ... "prop26": ContainerProperty(TimeSeriesReference), + ... "prop27": ContainerProperty(TimeSeriesReference(is_list=True)), + ... } + >>> container = ContainerApply( + ... space="my-space", + ... external_id="my-everything-container", + ... properties=container_properties, + ... ) + """ + return run_sync(self.__async_client.data_modeling.containers.apply(container=container)) diff --git a/cognite/client/_sync_api/data_modeling/data_models.py b/cognite/client/_sync_api/data_modeling/data_models.py new file mode 100644 index 0000000000..c0c649f5c3 --- /dev/null +++ b/cognite/client/_sync_api/data_modeling/data_models.py @@ -0,0 +1,226 @@ +""" +=============================================================================== +d6b0080645719a3270447e9f773c8ff2 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, Literal, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DATA_MODELING_DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.data_modeling.data_models import ( + DataModel, + DataModelApply, + DataModelList, +) +from cognite.client.data_classes.data_modeling.ids import DataModelId, DataModelIdentifier, ViewId +from cognite.client.data_classes.data_modeling.views import View +from cognite.client.utils._async_helpers import SyncIterator, run_sync + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncDataModelsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[DataModel]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[DataModelList]: ... + + def __call__( + self, + chunk_size: int | None = None, + limit: int | None = None, + space: str | None = None, + inline_views: bool = False, + all_versions: bool = False, + include_global: bool = False, + ) -> Iterator[DataModel | DataModelList]: + """ + Iterate over data model + + Fetches data model as they are iterated over, so you keep a limited number of data model in memory. + + Args: + chunk_size (int | None): Number of data model to return in each chunk. Defaults to yielding one data_model a time. + limit (int | None): Maximum number of data model to return. Defaults to returning all items. + space (str | None): The space to query. 
+ inline_views (bool): Whether to expand the referenced views inline in the returned result. + all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. + include_global (bool): Whether to include global views. + + Yields: + DataModel | DataModelList: yields DataModel one by one if chunk_size is not specified, else DataModelList objects. + """ + yield from SyncIterator( + self.__async_client.data_modeling.data_models( + chunk_size=chunk_size, + limit=limit, + space=space, + inline_views=inline_views, + all_versions=all_versions, + include_global=include_global, + ) + ) + + @overload + def retrieve( + self, ids: DataModelIdentifier | Sequence[DataModelIdentifier], inline_views: Literal[True] + ) -> DataModelList[View]: ... + + @overload + def retrieve( + self, ids: DataModelIdentifier | Sequence[DataModelIdentifier], inline_views: Literal[False] = False + ) -> DataModelList[ViewId]: ... + + def retrieve( + self, ids: DataModelIdentifier | Sequence[DataModelIdentifier], inline_views: bool = False + ) -> DataModelList[ViewId] | DataModelList[View]: + """ + `Retrieve data_model(s) by id(s). `_ + + Args: + ids (DataModelIdentifier | Sequence[DataModelIdentifier]): Data Model identifier(s). + inline_views (bool): Whether to expand the referenced views inline in the returned result. + + Returns: + DataModelList[ViewId] | DataModelList[View]: Requested data model(s) or empty if none exist. + + Examples: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.data_modeling.data_models.retrieve(("mySpace", "myDataModel", "v1")) + """ + return run_sync(self.__async_client.data_modeling.data_models.retrieve(ids=ids, inline_views=inline_views)) + + def delete(self, ids: DataModelIdentifier | Sequence[DataModelIdentifier]) -> list[DataModelId]: + """ + `Delete one or more data model `_ + + Args: + ids (DataModelIdentifier | Sequence[DataModelIdentifier]): Data Model identifier(s). + Returns: + list[DataModelId]: The data_model(s) which has been deleted. None if nothing was deleted. + Examples: + + Delete data model by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.data_modeling.data_models.delete(("mySpace", "myDataModel", "v1")) + """ + return run_sync(self.__async_client.data_modeling.data_models.delete(ids=ids)) + + @overload + def list( + self, + inline_views: Literal[True], + limit: int | None = DATA_MODELING_DEFAULT_LIMIT_READ, + space: str | None = None, + all_versions: bool = False, + include_global: bool = False, + ) -> DataModelList[View]: ... + + @overload + def list( + self, + inline_views: Literal[False] = False, + limit: int | None = DATA_MODELING_DEFAULT_LIMIT_READ, + space: str | None = None, + all_versions: bool = False, + include_global: bool = False, + ) -> DataModelList[ViewId]: ... + + def list( + self, + inline_views: bool = False, + limit: int | None = DATA_MODELING_DEFAULT_LIMIT_READ, + space: str | None = None, + all_versions: bool = False, + include_global: bool = False, + ) -> DataModelList[View] | DataModelList[ViewId]: + """ + `List data models `_ + + Args: + inline_views (bool): Whether to expand the referenced views inline in the returned result. + limit (int | None): Maximum number of data model to return. Defaults to 10. 
Set to -1, float("inf") or None to return all items. + space (str | None): The space to query. + all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. + include_global (bool): Whether to include global data models. + + Returns: + DataModelList[View] | DataModelList[ViewId]: List of requested data models + + Examples: + + List 5 data model: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> data_model_list = client.data_modeling.data_models.list(limit=5) + + Iterate over data model, one-by-one: + + >>> for data_model in client.data_modeling.data_models(): + ... data_model # do something with the data model + + Iterate over chunks of data model to reduce memory load: + + >>> for data_model_list in client.data_modeling.data_models(chunk_size=10): + ... data_model_list # do something with the data model + """ + return run_sync( + self.__async_client.data_modeling.data_models.list( + inline_views=inline_views, + limit=limit, + space=space, + all_versions=all_versions, + include_global=include_global, + ) + ) + + @overload + def apply(self, data_model: Sequence[DataModelApply]) -> DataModelList: ... + + @overload + def apply(self, data_model: DataModelApply) -> DataModel: ... + + def apply(self, data_model: DataModelApply | Sequence[DataModelApply]) -> DataModel | DataModelList: + """ + `Create or update one or more data model. `_ + + Args: + data_model (DataModelApply | Sequence[DataModelApply]): Data model(s) to create or update (upsert). + + Returns: + DataModel | DataModelList: Created data model(s) + + Examples: + + Create new data model: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.data_modeling import DataModelApply, ViewId + >>> client = CogniteClient() + >>> data_models = [ + ... DataModelApply(space="mySpace",external_id="myDataModel",version="v1",views=[ViewId("mySpace","myView","v1")]), + ... DataModelApply(space="mySpace",external_id="myOtherDataModel",version="v1",views=[ViewId("mySpace","myView","v1")])] + >>> res = client.data_modeling.data_models.apply(data_models) + """ + return run_sync(self.__async_client.data_modeling.data_models.apply(data_model=data_model)) diff --git a/cognite/client/_sync_api/data_modeling/graphql.py b/cognite/client/_sync_api/data_modeling/graphql.py new file mode 100644 index 0000000000..69bd7a84e6 --- /dev/null +++ b/cognite/client/_sync_api/data_modeling/graphql.py @@ -0,0 +1,90 @@ +""" +=============================================================================== +f825b382430ead59cfa24b671298f05b +This file is auto-generated from the Async API modules, - do not edit manually! 
+=============================================================================== +""" + +from __future__ import annotations + +from typing import Any + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.data_modeling import DataModelIdentifier +from cognite.client.data_classes.data_modeling.graphql import DMLApplyResult +from cognite.client.utils._async_helpers import run_sync + + +class SyncDataModelingGraphQLAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def apply_dml( + self, + id: DataModelIdentifier, + dml: str, + name: str | None = None, + description: str | None = None, + previous_version: str | None = None, + ) -> DMLApplyResult: + """ + Apply the DML for a given data model. + + Args: + id (DataModelIdentifier): The data model to apply DML to. + dml (str): The DML to apply. + name (str | None): The name of the data model. + description (str | None): The description of the data model. + previous_version (str | None): The previous version of the data model. Specify to reuse view versions from previous data model version. + + Returns: + DMLApplyResult: The id of the updated data model. + + Examples: + + Apply DML: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.data_modeling.graphql.apply_dml( + ... id=("mySpaceExternalId", "myModelExternalId", "1"), + ... dml="type MyType { id: String! }", + ... name="My model name", + ... description="My model description" + ... ) + """ + return run_sync( + self.__async_client.data_modeling.graphql.apply_dml( + id=id, dml=dml, name=name, description=description, previous_version=previous_version + ) + ) + + def query(self, id: DataModelIdentifier, query: str, variables: dict[str, Any] | None = None) -> dict[str, Any]: + """ + Execute a GraphQl query against a given data model. + + Args: + id (DataModelIdentifier): The data model to query. + query (str): The query to issue. + variables (dict[str, Any] | None): An optional dict of variables to pass to the query. + + Returns: + dict[str, Any]: The query result + + Examples: + + Execute a graphql query against a given data model: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.data_modeling.graphql.query( + ... id=("mySpace", "myDataModel", "v1"), + ... query="listThings { items { thingProperty } }", + ... ) + """ + return run_sync(self.__async_client.data_modeling.graphql.query(id=id, query=query, variables=variables)) diff --git a/cognite/client/_sync_api/data_modeling/instances.py b/cognite/client/_sync_api/data_modeling/instances.py new file mode 100644 index 0000000000..674e73b999 --- /dev/null +++ b/cognite/client/_sync_api/data_modeling/instances.py @@ -0,0 +1,1250 @@ +""" +=============================================================================== +60db155d3f4c97368b986049d3a1b66f +This file is auto-generated from the Async API modules, - do not edit manually! 
+=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Awaitable, Callable, Iterator, Sequence +from typing import TYPE_CHECKING, Any, Literal, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._api.data_modeling.instances import Source +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.aggregations import ( + AggregatedNumberedValue, + Histogram, + HistogramValue, + MetricAggregation, +) +from cognite.client.data_classes.data_modeling.ids import EdgeId, NodeId, ViewId +from cognite.client.data_classes.data_modeling.instances import ( + Edge, + EdgeApply, + EdgeList, + InstanceAggregationResultList, + InstanceInspectResults, + InstancesApplyResult, + InstancesDeleteResult, + InstanceSort, + InstancesResult, + InvolvedContainers, + InvolvedViews, + Node, + NodeApply, + NodeList, + T_Edge, + T_Node, + TargetUnit, +) +from cognite.client.data_classes.data_modeling.query import ( + Query, + QueryResult, +) +from cognite.client.data_classes.data_modeling.sync import SubscriptionContext +from cognite.client.data_classes.filters import Filter +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient +from cognite.client.data_classes.data_modeling.debug import DebugParameters + + +class SyncInstancesAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, chunk_size: None = None, instance_type: Literal["node"] = "node") -> Iterator[Node]: ... + + @overload + def __call__(self, chunk_size: None, instance_type: Literal["edge"]) -> Iterator[Edge]: ... + + @overload + def __call__(self, chunk_size: int, instance_type: Literal["node"] = "node") -> Iterator[NodeList]: ... + + @overload + def __call__(self, chunk_size: int, instance_type: Literal["edge"]) -> Iterator[EdgeList]: ... + + def __call__( + self, + chunk_size: int | None = None, + instance_type: Literal["node", "edge"] = "node", + limit: int | None = None, + include_typing: bool = False, + sources: Source | Sequence[Source] | None = None, + space: str | SequenceNotStr[str] | None = None, + sort: list[InstanceSort | dict] | InstanceSort | dict | None = None, + filter: Filter | dict[str, Any] | None = None, + debug: DebugParameters | None = None, + ) -> Iterator[Edge | EdgeList | Node | NodeList]: + """ + Iterate over nodes or edges. + Fetches instances as they are iterated over, so you keep a limited number of instances in memory. + + Args: + chunk_size (int | None): Number of data_models to return in each chunk. Defaults to yielding one instance at a time. + instance_type (Literal['node', 'edge']): Whether to query for nodes or edges. + limit (int | None): Maximum number of instances to return. Defaults to returning all items. + include_typing (bool): Whether to return property type information as part of the result. + sources (Source | Sequence[Source] | None): Views to retrieve properties from. + space (str | SequenceNotStr[str] | None): Only return instances in the given space (or list of spaces). + sort (list[InstanceSort | dict] | InstanceSort | dict | None): Sort(s) to apply to the returned instances. 
For nontrivial amounts of data, you need to have a backing, cursorable index. + filter (Filter | dict[str, Any] | None): Advanced filtering of instances. + debug (DebugParameters | None): Debug settings for profiling and troubleshooting. + + Yields: + Edge | EdgeList | Node | NodeList: yields Instance one by one if chunk_size is not specified, else NodeList/EdgeList objects. + """ + yield from SyncIterator( + self.__async_client.data_modeling.instances( + chunk_size=chunk_size, + instance_type=instance_type, + limit=limit, + include_typing=include_typing, + sources=sources, + space=space, + sort=sort, + filter=filter, + debug=debug, + ) + ) + + @overload + def retrieve_edges(self, edges: EdgeId | tuple[str, str], *, edge_cls: type[T_Edge]) -> T_Edge | None: ... + + @overload + def retrieve_edges( + self, + edges: EdgeId | tuple[str, str], + *, + sources: Source | Sequence[Source] | None = None, + include_typing: bool = False, + ) -> Edge | None: ... + + @overload + def retrieve_edges( + self, edges: Sequence[EdgeId] | Sequence[tuple[str, str]], *, edge_cls: type[T_Edge] + ) -> EdgeList[T_Edge]: ... + + @overload + def retrieve_edges( + self, + edges: Sequence[EdgeId] | Sequence[tuple[str, str]], + *, + sources: Source | Sequence[Source] | None = None, + include_typing: bool = False, + ) -> EdgeList[Edge]: ... + + def retrieve_edges( + self, + edges: EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]], + edge_cls: type[T_Edge] = Edge, + sources: Source | Sequence[Source] | None = None, + include_typing: bool = False, + ) -> EdgeList[T_Edge] | T_Edge | Edge | None: + """ + `Retrieve one or more edges by id(s). `_ + + Note: + This method should be used for retrieving edges with a custom edge class. You can use it + without providing a custom edge class, but in that case, the retrieved edges will be of the + built-in Edge class. + + + Args: + edges (EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]]): Edge id(s) to retrieve. + edge_cls (type[T_Edge]): The custom edge class to use, the retrieved edges will automatically be serialized into this class. + sources (Source | Sequence[Source] | None): Retrieve properties from the listed - by reference - views. This only applies if you do not provide a custom edge class. + include_typing (bool): Whether to include typing information + + Returns: + EdgeList[T_Edge] | T_Edge | Edge | None: The requested edges. + + Examples: + + Retrieve edges using a custom typed class "Flow". Any property that you want to look up by a different attribute name, + e.g. you want `my_edge.flow_rate` to return the data for property `flowRate`, must use the PropertyOptions as shown below. + We strongly suggest you use snake_cased attribute names, as is done here: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.data_modeling import EdgeId, TypedEdge, PropertyOptions, DirectRelationReference, ViewId + >>> class Flow(TypedEdge): + ... flow_rate = PropertyOptions(identifier="flowRate") + ... + ... def __init__( + ... self, + ... space: str, + ... external_id: str, + ... version: int, + ... type: DirectRelationReference, + ... last_updated_time: int, + ... created_time: int, + ... flow_rate: float, + ... start_node: DirectRelationReference, + ... end_node: DirectRelationReference, + ... deleted_time: int | None = None, + ... ) -> None: + ... super().__init__( + ... space, external_id, version, type, last_updated_time, created_time, start_node, end_node, deleted_time + ... ) + ... 
self.flow_rate = flow_rate + ... + ... @classmethod + ... def get_source(cls) -> ViewId: + ... return ViewId("sp_model_space", "flow", "1") + ... + >>> client = CogniteClient() + >>> res = client.data_modeling.instances.retrieve_edges( + ... EdgeId("mySpace", "theFlow"), edge_cls=Flow + ... ) + >>> isinstance(res, Flow) + """ + return run_sync( + self.__async_client.data_modeling.instances.retrieve_edges( + edges=edges, edge_cls=edge_cls, sources=sources, include_typing=include_typing + ) + ) + + @overload + def retrieve_nodes(self, nodes: NodeId | tuple[str, str], *, node_cls: type[T_Node]) -> T_Node | None: ... + + @overload + def retrieve_nodes( + self, + nodes: NodeId | tuple[str, str], + *, + sources: Source | Sequence[Source] | None = None, + include_typing: bool = False, + ) -> Node | None: ... + + @overload + def retrieve_nodes( + self, nodes: Sequence[NodeId] | Sequence[tuple[str, str]], *, node_cls: type[T_Node] + ) -> NodeList[T_Node]: ... + + @overload + def retrieve_nodes( + self, + nodes: Sequence[NodeId] | Sequence[tuple[str, str]], + *, + sources: Source | Sequence[Source] | None = None, + include_typing: bool = False, + ) -> NodeList[Node]: ... + + def retrieve_nodes( + self, + nodes: NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]], + node_cls: type[T_Node] = Node, + sources: Source | Sequence[Source] | None = None, + include_typing: bool = False, + ) -> NodeList[T_Node] | T_Node | Node | None: + """ + `Retrieve one or more nodes by id(s). `_ + + Note: + This method should be used for retrieving nodes with a custom node class. You can use it + without providing a custom node class, but in that case, the retrieved nodes will be of the + built-in Node class. + + Args: + nodes (NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]]): Node id(s) to retrieve. + node_cls (type[T_Node]): The custom node class to use, the retrieved nodes will automatically be serialized to this class. + sources (Source | Sequence[Source] | None): Retrieve properties from the listed - by reference - views. This only applies if you do not provide a custom node class. + include_typing (bool): Whether to include typing information + + Returns: + NodeList[T_Node] | T_Node | Node | None: The requested edges. + + Examples: + + Retrieve nodes using a custom typed node class "Person". Any property that you want to look up by a different attribute name, + e.g. you want `my_node.birth_year` to return the data for property `birthYear`, must use the PropertyOptions as shown below. + We strongly suggest you use snake_cased attribute names, as is done here: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.data_modeling import NodeId, TypedNode, PropertyOptions, DirectRelationReference, ViewId + >>> class Person(TypedNode): + ... birth_year = PropertyOptions(identifier="birthYear") + ... + ... def __init__( + ... self, + ... space: str, + ... external_id: str, + ... version: int, + ... last_updated_time: int, + ... created_time: int, + ... name: str, + ... birth_year: int | None = None, + ... type: DirectRelationReference | None = None, + ... deleted_time: int | None = None, + ... ): + ... super().__init__( + ... space=space, + ... external_id=external_id, + ... version=version, + ... last_updated_time=last_updated_time, + ... created_time=created_time, + ... type=type, + ... deleted_time=deleted_time + ... ) + ... self.name = name + ... self.birth_year = birth_year + ... + ... @classmethod + ... def get_source(cls) -> ViewId: + ... 
return ViewId("myModelSpace", "Person", "1") + ... + >>> client = CogniteClient() + >>> res = client.data_modeling.instances.retrieve_nodes( + ... NodeId("myDataSpace", "myPerson"), node_cls=Person + ... ) + >>> isinstance(res, Person) + """ + return run_sync( + self.__async_client.data_modeling.instances.retrieve_nodes( + nodes=nodes, node_cls=node_cls, sources=sources, include_typing=include_typing + ) + ) + + def retrieve( + self, + nodes: NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]] | None = None, + edges: EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]] | None = None, + sources: Source | Sequence[Source] | None = None, + include_typing: bool = False, + ) -> InstancesResult[Node, Edge]: + """ + `Retrieve one or more instance by id(s). `_ + + Args: + nodes (NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Node ids + edges (EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Edge ids + sources (Source | Sequence[Source] | None): Retrieve properties from the listed - by reference - views. + include_typing (bool): Whether to return property type information as part of the result. + + Returns: + InstancesResult[Node, Edge]: Requested instances. + + Examples: + + Retrieve instances by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.data_modeling.instances.retrieve( + ... nodes=("mySpace", "myNodeExternalId"), + ... edges=("mySpace", "myEdgeExternalId"), + ... sources=("mySpace", "myViewExternalId", "myViewVersion")) + + Retrieve nodes an edges using the built in data class + + >>> from cognite.client.data_classes.data_modeling import NodeId, EdgeId, ViewId + >>> res = client.data_modeling.instances.retrieve( + ... NodeId("mySpace", "myNode"), + ... EdgeId("mySpace", "myEdge"), + ... ViewId("mySpace", "myViewExternalId", "myViewVersion")) + + Retrieve nodes an edges using the the view object as source + + >>> from cognite.client.data_classes.data_modeling import NodeId, EdgeId + >>> res = client.data_modeling.instances.retrieve( + ... NodeId("mySpace", "myNode"), + ... EdgeId("mySpace", "myEdge"), + ... sources=("myspace", "myView")) + """ + return run_sync( + self.__async_client.data_modeling.instances.retrieve( + nodes=nodes, edges=edges, sources=sources, include_typing=include_typing + ) + ) + + def delete( + self, + nodes: NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]] | None = None, + edges: EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]] | None = None, + ) -> InstancesDeleteResult: + """ + `Delete one or more instances `_ + + Args: + nodes (NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Node ids + edges (EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Edge ids + + Returns: + InstancesDeleteResult: The instance ID(s) that was deleted. Empty list if nothing was deleted. 
+ + Examples: + + Delete instances by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.data_modeling.instances.delete(nodes=("mySpace", "myNode")) + + Delete nodes and edges using the built in data class + + >>> from cognite.client.data_classes.data_modeling import NodeId, EdgeId + >>> client.data_modeling.instances.delete(NodeId('mySpace', 'myNode'), EdgeId('mySpace', 'myEdge')) + + Delete all nodes from a NodeList + + >>> from cognite.client.data_classes.data_modeling import NodeId, EdgeId + >>> my_view = client.data_modeling.views.retrieve(('mySpace', 'myView')) + >>> my_nodes = client.data_modeling.instances.list(instance_type='node', sources=my_view, limit=None) + >>> client.data_modeling.instances.delete(nodes=my_nodes.as_ids()) + """ + return run_sync(self.__async_client.data_modeling.instances.delete(nodes=nodes, edges=edges)) + + def inspect( + self, + nodes: NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]] | None = None, + edges: EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]] | None = None, + *, + involved_views: InvolvedViews | None = None, + involved_containers: InvolvedContainers | None = None, + ) -> InstanceInspectResults: + """ + `Reverse lookup for instances. `_ + + This method will return the involved views and containers for the given nodes and edges. + + Args: + nodes (NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Node IDs. + edges (EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Edge IDs. + involved_views (InvolvedViews | None): Whether to include involved views. Must pass at least one of involved_views or involved_containers. + involved_containers (InvolvedContainers | None): Whether to include involved containers. Must pass at least one of involved_views or involved_containers. + + Returns: + InstanceInspectResults: List of instance inspection results. + + Examples: + + Look up the involved views for a given node and edge: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.data_modeling import NodeId, EdgeId, InvolvedViews + >>> client = CogniteClient() + >>> res = client.data_modeling.instances.inspect( + ... nodes=NodeId("my-space", "foo1"), + ... edges=EdgeId("my-space", "bar2"), + ... involved_views=InvolvedViews(all_versions=False), + ... ) + + Look up the involved containers: + + >>> from cognite.client.data_classes.data_modeling import InvolvedContainers + >>> res = client.data_modeling.instances.inspect( + ... nodes=[("my-space", "foo1"), ("my-space", "foo2")], + ... involved_containers=InvolvedContainers(), + ... ) + """ + return run_sync( + self.__async_client.data_modeling.instances.inspect( + nodes=nodes, edges=edges, involved_views=involved_views, involved_containers=involved_containers + ) + ) + + def subscribe( + self, + query: Query, + callback: Callable[[QueryResult], None | Awaitable[None]], + poll_delay_seconds: float = 30, + throttle_seconds: float = 1, + ) -> SubscriptionContext: + """ + Subscribe to a query and get updates when the result set changes. This runs the sync() method in a background task. + We do not support chaining result sets when subscribing to a query. + + Tip: + For a practical guide on using this method to create a live local replica of your data, + see :ref:`this example of syncing instances to a local SQLite database `. 
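+
+        Note:
+            The callback may be a plain function or an async function. A minimal sketch of the
+            async variant (``store_result`` is an illustrative placeholder, not an SDK helper):
+
+                >>> async def store_result(result):
+                ...     ...  # e.g. persist the result to storage of your choice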
+
+        Args:
+            query (Query): The query to subscribe to.
+            callback (Callable[[QueryResult], None | Awaitable[None]]): The callback function to call when the result set changes. Can be a regular or async function.
+            poll_delay_seconds (float): The time to wait between polls when no data is present. Defaults to 30 seconds.
+            throttle_seconds (float): The time to wait between polls despite data being present.
+
+        Returns:
+            SubscriptionContext: An object that can be used to inspect and cancel the subscription.
+
+        Examples:
+
+            Subscribe to a given query and process the results in your own callback function
+            (here we just print the result for illustration):
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> from cognite.client.data_classes.data_modeling.query import (
+                ...     Query, QueryResult, NodeResultSetExpression, Select, SourceSelector)
+                >>> from cognite.client.data_classes.data_modeling import ViewId
+                >>> from cognite.client.data_classes.filters import Equals
+                >>>
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> def just_print_the_result(result: QueryResult) -> None:
+                ...     print(result)
+                >>>
+                >>> view_id = ViewId("someSpace", "someView", "v1")
+                >>> filter = Equals(view_id.as_property_ref("myAsset"), "Il-Tempo-Gigante")
+                >>> query = Query(
+                ...     with_={"work_orders": NodeResultSetExpression(filter=filter)},
+                ...     select={"work_orders": Select([SourceSelector(view_id, ["*"])])}
+                ... )
+                >>> subscription_context = client.data_modeling.instances.subscribe(
+                ...     query, callback=just_print_the_result
+                ... )
+                >>> # Use the returned subscription_context to manage the subscription, e.g. to cancel it:
+                >>> subscription_context.cancel()
+        """
+        return run_sync(
+            self.__async_client.data_modeling.instances.subscribe(
+                query=query, callback=callback, poll_delay_seconds=poll_delay_seconds, throttle_seconds=throttle_seconds
+            )
+        )
+
+    def apply(
+        self,
+        nodes: NodeApply | Sequence[NodeApply] | None = None,
+        edges: EdgeApply | Sequence[EdgeApply] | None = None,
+        auto_create_start_nodes: bool = False,
+        auto_create_end_nodes: bool = False,
+        auto_create_direct_relations: bool = True,
+        skip_on_version_conflict: bool = False,
+        replace: bool = False,
+    ) -> InstancesApplyResult:
+        """
+        `Add or update (upsert) instances. `_
+
+        Args:
+            nodes (NodeApply | Sequence[NodeApply] | None): Nodes to apply
+            edges (EdgeApply | Sequence[EdgeApply] | None): Edges to apply
+            auto_create_start_nodes (bool): Whether to create missing start nodes for edges when ingesting. By default, the start node of an edge must exist before it can be ingested.
+            auto_create_end_nodes (bool): Whether to create missing end nodes for edges when ingesting. By default, the end node of an edge must exist before it can be ingested.
+            auto_create_direct_relations (bool): Whether to create missing direct relation targets when ingesting.
+            skip_on_version_conflict (bool): If existingVersion is specified on any of the nodes/edges in the input, the default behaviour is that the entire ingestion will fail when version conflicts occur. If skipOnVersionConflict is set to true, items with version conflicts will be skipped instead. If no version is specified for nodes/edges, it will do the writing directly.
+            replace (bool): How do we behave when a property value exists? Do we replace all matching and existing values with the supplied values (true)? Or should we merge in new values for properties together with the existing values (false)? Note: This setting applies for all nodes or edges specified in the ingestion call.
+ Returns: + InstancesApplyResult: Created instance(s) + + Examples: + + Create new node without data: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.data_modeling import EdgeApply, NodeOrEdgeData, NodeApply + >>> client = CogniteClient() + >>> node = NodeApply("mySpace", "myNodeId") + >>> res = client.data_modeling.instances.apply(node) + + Create two nodes with data with a one-to-many edge + + >>> from cognite.client.data_classes.data_modeling import ContainerId, EdgeApply, NodeOrEdgeData, NodeApply, ViewId + >>> work_order = NodeApply( + ... space="industrial", + ... external_id="work_order:123", + ... sources=[ + ... # Insert data through a view + ... NodeOrEdgeData( + ... ViewId("mySpace", "WorkOrderView", "v1"), + ... {"title": "Repair pump", "createdYear": 2023} + ... ) + ... ] + ... ) + >>> pump = NodeApply( + ... space="industrial", + ... external_id="pump:456", + ... sources=[ + ... # Insert data directly to the container + ... NodeOrEdgeData( + ... ContainerId("mySpace", "PumpContainer"), + ... {"name": "Pump 456", "location": "Subsea"} + ... ) + ... ] + ... ) + ... # This is one-to-many edge, in this case from a work order to a pump + >>> work_order_to_pump = EdgeApply( + ... space="industrial", + ... external_id="relation:work_order:123:pump:456", + ... type=("industrial", "relates-to"), + ... start_node=("industrial", "work_order:123"), + ... end_node=("industrial", "pump:456"), + ... ) + >>> res = client.data_modeling.instances.apply([work_order, pump], [work_order_to_pump]) + + Create new edge and automatically create end nodes. + + >>> from cognite.client.data_classes.data_modeling import EdgeApply + >>> work_order_to_pump = EdgeApply( + ... space="industrial", + ... external_id="relation:work_order:123:pump:456", + ... type=("industrial", "relates-to"), + ... start_node=("industrial", "work_order:123"), + ... end_node=("industrial", "pump:456"), + ... ) + >>> res = client.data_modeling.instances.apply( + ... edges=work_order_to_pump, + ... auto_create_start_nodes=True, + ... auto_create_end_nodes=True + ... ) + + Using helper function to create valid graphql timestamp for a datetime object: + + >>> from cognite.client.utils import datetime_to_ms_iso_timestamp + >>> from datetime import datetime, timezone + >>> my_date = datetime(2020, 3, 14, 15, 9, 26, 535000, tzinfo=timezone.utc) + >>> data_model_timestamp = datetime_to_ms_iso_timestamp(my_date) # "2020-03-14T15:09:26.535+00:00" + + Create a typed node apply. Any property that you want to look up by a different attribute name, e.g. you want + `my_node.birth_year` to return the data for property `birthYear`, must use the PropertyOptions as shown below. + We strongly suggest you use snake_cased attribute names, as is done here: + + >>> from cognite.client.data_classes.data_modeling import TypedNodeApply, PropertyOptions + >>> class PersonApply(TypedNodeApply): + ... birth_year = PropertyOptions(identifier="birthYear") + ... + ... def __init__(self, space: str, external_id, name: str, birth_year: int): + ... super().__init__(space, external_id, type=("sp_model_space", "Person")) + ... self.name = name + ... self.birth_year = birth_year + ... def get_source(self): + ... return ViewId("sp_model_space", "Person", "v1") + ... 
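+
+            (``PersonApply`` above is a user-defined example class, not something shipped with the
+            SDK; its ``get_source()`` is used as the view (source) the typed properties are written
+            through when the node is applied.)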
+ >>> person = PersonApply("sp_date_space", "my_person", "John Doe", 1980) + >>> res = client.data_modeling.instances.apply(nodes=person) + """ + return run_sync( + self.__async_client.data_modeling.instances.apply( + nodes=nodes, + edges=edges, + auto_create_start_nodes=auto_create_start_nodes, + auto_create_end_nodes=auto_create_end_nodes, + auto_create_direct_relations=auto_create_direct_relations, + skip_on_version_conflict=skip_on_version_conflict, + replace=replace, + ) + ) + + @overload + def search( + self, + view: ViewId, + query: str | None = None, + *, + instance_type: Literal["node"] = "node", + properties: list[str] | None = None, + target_units: list[TargetUnit] | None = None, + space: str | SequenceNotStr[str] | None = None, + filter: Filter | dict[str, Any] | None = None, + include_typing: bool = False, + limit: int | None = DEFAULT_LIMIT_READ, + sort: Sequence[InstanceSort | dict] | InstanceSort | dict | None = None, + operator: Literal["AND", "OR"] = "OR", + ) -> NodeList[Node]: ... + + @overload + def search( + self, + view: ViewId, + query: str | None = None, + *, + instance_type: Literal["edge"], + properties: list[str] | None = None, + target_units: list[TargetUnit] | None = None, + space: str | SequenceNotStr[str] | None = None, + filter: Filter | dict[str, Any] | None = None, + include_typing: bool = False, + limit: int | None = DEFAULT_LIMIT_READ, + sort: Sequence[InstanceSort | dict] | InstanceSort | dict | None = None, + operator: Literal["AND", "OR"] = "OR", + ) -> EdgeList[Edge]: ... + + @overload + def search( + self, + view: ViewId, + query: str | None = None, + *, + instance_type: type[T_Node], + properties: list[str] | None = None, + target_units: list[TargetUnit] | None = None, + space: str | SequenceNotStr[str] | None = None, + filter: Filter | dict[str, Any] | None = None, + include_typing: bool = False, + limit: int | None = DEFAULT_LIMIT_READ, + sort: Sequence[InstanceSort | dict] | InstanceSort | dict | None = None, + operator: Literal["AND", "OR"] = "OR", + ) -> NodeList[T_Node]: ... + + @overload + def search( + self, + view: ViewId, + query: str | None = None, + *, + instance_type: type[T_Edge], + properties: list[str] | None = None, + target_units: list[TargetUnit] | None = None, + space: str | SequenceNotStr[str] | None = None, + filter: Filter | dict[str, Any] | None = None, + include_typing: bool = False, + limit: int | None = DEFAULT_LIMIT_READ, + sort: Sequence[InstanceSort | dict] | InstanceSort | dict | None = None, + operator: Literal["AND", "OR"] = "OR", + ) -> EdgeList[T_Edge]: ... + + def search( + self, + view: ViewId, + query: str | None = None, + instance_type: Literal["node", "edge"] | type[T_Node] | type[T_Edge] = "node", + properties: list[str] | None = None, + target_units: list[TargetUnit] | None = None, + space: str | SequenceNotStr[str] | None = None, + filter: Filter | dict[str, Any] | None = None, + include_typing: bool = False, + limit: int | None = DEFAULT_LIMIT_READ, + sort: Sequence[InstanceSort | dict] | InstanceSort | dict | None = None, + operator: Literal["AND", "OR"] = "OR", + ) -> NodeList[T_Node] | EdgeList[T_Edge]: + """ + `Search instances `_ + + Args: + view (ViewId): View to search in. + query (str | None): Query string that will be parsed and used for search. + instance_type (Literal['node', 'edge'] | type[T_Node] | type[T_Edge]): Whether to search for nodes or edges. You can also pass a custom typed node (or edge class) inheriting from TypedNode (or TypedEdge). 
See apply, retrieve_nodes or retrieve_edges for an example. + properties (list[str] | None): Optional array of properties you want to search through. If you do not specify one or more properties, the service will search all text fields within the view. + target_units (list[TargetUnit] | None): Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried. + space (str | SequenceNotStr[str] | None): Restrict instance search to the given space (or list of spaces). + filter (Filter | dict[str, Any] | None): Advanced filtering of instances. + include_typing (bool): Whether to include typing information. + limit (int | None): Maximum number of instances to return. Defaults to 25. Will return the maximum number + of results (1000) if set to None, -1, or math.inf. + sort (Sequence[InstanceSort | dict] | InstanceSort | dict | None): How you want the listed instances information ordered. + operator (Literal['AND', 'OR']): Controls how multiple search terms are combined when matching documents. OR (default): A document matches if it contains any of the query terms in the searchable fields. This typically returns more results but with lower precision. AND: A document matches only if it contains all of the query terms across the searchable fields. This typically returns fewer results but with higher relevance. + + Returns: + NodeList[T_Node] | EdgeList[T_Edge]: Search result with matching nodes or edges. + + Examples: + + Search for Arnold in the person view in the name property: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.data_modeling import ViewId + >>> client = CogniteClient() + >>> res = client.data_modeling.instances.search( + ... ViewId("mySpace", "PersonView", "v1"), + ... query="Arnold", + ... properties=["name"], + ... ) + + Search for Tarantino, Ritchie or Scorsese in the person view in the name property, but only born after 1942: + + >>> from cognite.client.data_classes.data_modeling import ViewId + >>> from cognite.client.data_classes import filters + >>> born_after_1942 = filters.Range(["mySpace", "PersonView/v1", "birthYear"], gt=1942) + >>> res = client.data_modeling.instances.search( + ... ViewId("mySpace", "PersonView", "v1"), + ... query="Tarantino Ritchie Scorsese", + ... properties=["name"], + ... filter=born_after_1942, + ... operator="OR" + ... ) + """ + return run_sync( + self.__async_client.data_modeling.instances.search( + view=view, + query=query, + instance_type=instance_type, + properties=properties, + target_units=target_units, + space=space, + filter=filter, + include_typing=include_typing, + limit=limit, + sort=sort, + operator=operator, + ) + ) + + @overload + def aggregate( + self, + view: ViewId, + aggregates: MetricAggregation | dict, + group_by: None = None, + instance_type: Literal["node", "edge"] = "node", + query: str | None = None, + properties: str | SequenceNotStr[str] | None = None, + target_units: list[TargetUnit] | None = None, + space: str | SequenceNotStr[str] | None = None, + filter: Filter | dict[str, Any] | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + ) -> AggregatedNumberedValue: ... 
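+    # Note (editorial, judging from the overload signatures below): a single aggregate without
+    # group_by returns one AggregatedNumberedValue, a sequence of aggregates returns a list of
+    # them, and passing group_by returns an InstanceAggregationResultList.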
+ + @overload + def aggregate( + self, + view: ViewId, + aggregates: Sequence[MetricAggregation | dict], + group_by: None = None, + instance_type: Literal["node", "edge"] = "node", + query: str | None = None, + properties: str | SequenceNotStr[str] | None = None, + target_units: list[TargetUnit] | None = None, + space: str | SequenceNotStr[str] | None = None, + filter: Filter | dict[str, Any] | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + ) -> list[AggregatedNumberedValue]: ... + + @overload + def aggregate( + self, + view: ViewId, + aggregates: MetricAggregation | dict | Sequence[MetricAggregation | dict], + group_by: str | SequenceNotStr[str], + instance_type: Literal["node", "edge"] = "node", + query: str | None = None, + properties: str | SequenceNotStr[str] | None = None, + target_units: list[TargetUnit] | None = None, + space: str | SequenceNotStr[str] | None = None, + filter: Filter | dict[str, Any] | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + ) -> InstanceAggregationResultList: ... + + def aggregate( + self, + view: ViewId, + aggregates: MetricAggregation | dict | Sequence[MetricAggregation | dict], + group_by: str | SequenceNotStr[str] | None = None, + instance_type: Literal["node", "edge"] = "node", + query: str | None = None, + properties: str | SequenceNotStr[str] | None = None, + target_units: list[TargetUnit] | None = None, + space: str | SequenceNotStr[str] | None = None, + filter: Filter | dict[str, Any] | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + ) -> AggregatedNumberedValue | list[AggregatedNumberedValue] | InstanceAggregationResultList: + """ + `Aggregate data across nodes/edges `_ + + Args: + view (ViewId): View to aggregate over. + aggregates (MetricAggregation | dict | Sequence[MetricAggregation | dict]): The properties to aggregate over. + group_by (str | SequenceNotStr[str] | None): The selection of fields to group the results by when doing aggregations. You can specify up to 5 items to group by. + instance_type (Literal['node', 'edge']): The type of instance. + query (str | None): Optional query string. The API will parse the query string, and use it to match the text properties on elements to use for the aggregate(s). + properties (str | SequenceNotStr[str] | None): Optional list of properties you want to apply the query to. If you do not list any properties, you search through text fields by default. + target_units (list[TargetUnit] | None): Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried. + space (str | SequenceNotStr[str] | None): Restrict instance aggregate query to the given space (or list of spaces). + filter (Filter | dict[str, Any] | None): Advanced filtering of instances. + limit (int | None): Maximum number of instances to return. Defaults to 25. Will return the maximum number + of results (1000) if set to None, -1, or math.inf. + + Returns: + AggregatedNumberedValue | list[AggregatedNumberedValue] | InstanceAggregationResultList: Node or edge aggregation results. 
+ + Examples: + + Get the average run time in minutes for pumps grouped by release year: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.data_modeling import ViewId, aggregations as aggs + >>> client = CogniteClient() + >>> avg_run_time = aggs.Avg("runTimeMinutes") + >>> view_id = ViewId("mySpace", "PumpView", "v1") + >>> res = client.data_modeling.instances.aggregate(view_id, avg_run_time, group_by="releaseYear") + """ + return run_sync( + self.__async_client.data_modeling.instances.aggregate( + view=view, + aggregates=aggregates, + group_by=group_by, + instance_type=instance_type, + query=query, + properties=properties, + target_units=target_units, + space=space, + filter=filter, + limit=limit, + ) + ) + + @overload + def histogram( + self, + view: ViewId, + histograms: Histogram, + instance_type: Literal["node", "edge"] = "node", + query: str | None = None, + properties: SequenceNotStr[str] | None = None, + target_units: list[TargetUnit] | None = None, + space: str | SequenceNotStr[str] | None = None, + filter: Filter | dict[str, Any] | None = None, + limit: int = DEFAULT_LIMIT_READ, + ) -> HistogramValue: ... + + @overload + def histogram( + self, + view: ViewId, + histograms: Sequence[Histogram], + instance_type: Literal["node", "edge"] = "node", + query: str | None = None, + properties: SequenceNotStr[str] | None = None, + target_units: list[TargetUnit] | None = None, + space: str | SequenceNotStr[str] | None = None, + filter: Filter | dict[str, Any] | None = None, + limit: int = DEFAULT_LIMIT_READ, + ) -> list[HistogramValue]: ... + + def histogram( + self, + view: ViewId, + histograms: Histogram | Sequence[Histogram], + instance_type: Literal["node", "edge"] = "node", + query: str | None = None, + properties: SequenceNotStr[str] | None = None, + target_units: list[TargetUnit] | None = None, + space: str | SequenceNotStr[str] | None = None, + filter: Filter | dict[str, Any] | None = None, + limit: int = DEFAULT_LIMIT_READ, + ) -> HistogramValue | list[HistogramValue]: + """ + `Produces histograms for nodes/edges `_ + + Args: + view (ViewId): View to to aggregate over. + histograms (Histogram | Sequence[Histogram]): The properties to aggregate over. + instance_type (Literal['node', 'edge']): Whether to search for nodes or edges. + query (str | None): Query string that will be parsed and used for search. + properties (SequenceNotStr[str] | None): Optional array of properties you want to search through. If you do not specify one or more properties, the service will search all text fields within the view. + target_units (list[TargetUnit] | None): Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried. + space (str | SequenceNotStr[str] | None): Restrict histogram query to instances in the given space (or list of spaces). + filter (Filter | dict[str, Any] | None): Advanced filtering of instances. + limit (int): Maximum number of instances to return. Defaults to 25. + + Returns: + HistogramValue | list[HistogramValue]: Node or edge aggregation results. 
+ + Examples: + + Find the number of people born per decade: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.data_modeling import aggregations as aggs, ViewId + >>> client = CogniteClient() + >>> birth_by_decade = aggs.Histogram("birthYear", interval=10.0) + >>> view_id = ViewId("mySpace", "PersonView", "v1") + >>> res = client.data_modeling.instances.histogram(view_id, birth_by_decade) + """ + return run_sync( + self.__async_client.data_modeling.instances.histogram( + view=view, + histograms=histograms, + instance_type=instance_type, + query=query, + properties=properties, + target_units=target_units, + space=space, + filter=filter, + limit=limit, + ) + ) + + def query(self, query: Query, include_typing: bool = False, debug: DebugParameters | None = None) -> QueryResult: + """ + `Advanced query interface for nodes/edges. `_ + + The Data Modelling API exposes an advanced query interface. The query interface supports parameterization, + recursive edge traversal, chaining of result sets, and granular property selection. + + Args: + query (Query): Query. + include_typing (bool): Should we return property type information as part of the result? + debug (DebugParameters | None): Debug settings for profiling and troubleshooting. + + Returns: + QueryResult: The resulting nodes and/or edges from the query. + + Examples: + + Find work orders created before 2023 sorted by title: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.data_modeling.query import Query, Select, NodeResultSetExpression, EdgeResultSetExpression, SourceSelector + >>> from cognite.client.data_classes.filters import Range, Equals + >>> from cognite.client.data_classes.data_modeling.ids import ViewId + >>> client = CogniteClient() + >>> work_order_id = ViewId("mySpace", "WorkOrderView", "v1") + >>> pump_id = ViewId("mySpace", "PumpView", "v1") + >>> query = Query( + ... with_ = { + ... "work_orders": NodeResultSetExpression(filter=Range(work_order_id.as_property_ref("createdYear"), lt=2023)), + ... "work_orders_to_pumps": EdgeResultSetExpression(from_="work_orders", filter=Equals(["edge", "type"], {"space": work_order_id.space, "externalId": "WorkOrder.asset"})), + ... "pumps": NodeResultSetExpression(from_="work_orders_to_pumps"), + ... }, + ... select = { + ... "pumps": Select( + ... [SourceSelector(pump_id, ["name"])], sort=[InstanceSort(pump_id.as_property_ref("name"))]), + ... }, + ... ) + >>> res = client.data_modeling.instances.query(query) + + To convert units, specify what your target units are in the SourceSelector. You can either use + a UnitReference or a UnitSystemReference. Note that in order for a property to be converted, they + need to have a unit defined in the underlying container. + + >>> from cognite.client.data_classes.data_modeling.data_types import UnitReference, UnitSystemReference + >>> selected_source = SourceSelector( + ... source=ViewId("my-space", "my-xid", "v1"), + ... properties=["f32_prop1", "f32_prop2", "f64_prop1", "f64_prop2"], + ... target_units=[ + ... TargetUnit("f32_prop1", UnitReference("pressure:kilopa")), + ... TargetUnit("f32_prop2", UnitReference("pressure:barg")), + ... TargetUnit("f64_prop1", UnitSystemReference("SI")), + ... TargetUnit("f64_prop2", UnitSystemReference("Imperial")), + ... ], + ... 
) + + To select all properties, use '[*]' in your SourceSelector: + + >>> SourceSelector(source=ViewId("my-space", "my-xid", "v1"), properties=["*"]) + + To debug and/or profile your query, you can use the debug parameter: + + >>> from cognite.client.data_classes.data_modeling.debug import DebugParameters + >>> debug_params = DebugParameters( + ... emit_results=False, + ... include_plan=True, # Include the postgres execution plan + ... include_translated_query=True, # Include the internal representation of the query. + ... profile=True, + ... ) + >>> res = client.data_modeling.instances.query(query, debug=debug_params) + >>> print(res.debug) + """ + return run_sync( + self.__async_client.data_modeling.instances.query(query=query, include_typing=include_typing, debug=debug) + ) + + def sync(self, query: Query, include_typing: bool = False, debug: DebugParameters | None = None) -> QueryResult: + """ + `Subscription to changes for nodes/edges. `_ + + Subscribe to changes for nodes and edges in a project, matching a supplied filter. + + Args: + query (Query): Query. + include_typing (bool): Should we return property type information as part of the result? + debug (DebugParameters | None): Debug settings for profiling and troubleshooting. + + Returns: + QueryResult: The resulting nodes and/or edges from the query. + + Examples: + + Find work orders created before 2023 sorted by title: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.data_modeling.instances import InstanceSort + >>> from cognite.client.data_classes.data_modeling.query import Query, Select, NodeResultSetExpression, EdgeResultSetExpression, SourceSelector + >>> from cognite.client.data_classes.filters import Range, Equals + >>> from cognite.client.data_classes.data_modeling.ids import ViewId + >>> client = CogniteClient() + >>> work_order_id = ViewId("mySpace", "WorkOrderView", "v1") + >>> pump_id = ViewId("mySpace", "PumpView", "v1") + >>> query = Query( + ... with_ = { + ... "work_orders": NodeResultSetExpression(filter=Range(work_order_id.as_property_ref("createdYear"), lt=2023)), + ... "work_orders_to_pumps": EdgeResultSetExpression(from_="work_orders", filter=Equals(["edge", "type"], {"space": work_order_id.space, "externalId": "WorkOrder.asset"})), + ... "pumps": NodeResultSetExpression(from_="work_orders_to_pumps"), + ... }, + ... select = { + ... "pumps": Select( + ... [SourceSelector(pump_id, ["name"])], sort=[InstanceSort(pump_id.as_property_ref("name"))]), + ... }, + ... ) + >>> res = client.data_modeling.instances.sync(query) + >>> # Added a new work order with pumps created before 2023 + >>> query.cursors = res.cursors + >>> res_new = client.data_modeling.instances.sync(query) + + In the last example, the res_new will only contain the pumps that have been added with the new work order. + + To debug and/or profile your query, you can use the debug parameter: + + >>> from cognite.client.data_classes.data_modeling.debug import DebugParameters + >>> debug_params = DebugParameters( + ... emit_results=False, + ... include_plan=True, # Include the postgres execution plan + ... include_translated_query=True, # Include the internal representation of the query. + ... profile=True, + ... 
) + >>> res = client.data_modeling.instances.sync(query, debug=debug_params) + >>> print(res.debug) + """ + return run_sync( + self.__async_client.data_modeling.instances.sync(query=query, include_typing=include_typing, debug=debug) + ) + + @overload + def list( + self, + instance_type: Literal["node"] = "node", + include_typing: bool = False, + sources: Source | Sequence[Source] | None = None, + space: str | SequenceNotStr[str] | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + sort: Sequence[InstanceSort | dict] | InstanceSort | dict | None = None, + filter: Filter | dict[str, Any] | None = None, + debug: DebugParameters | None = None, + ) -> NodeList[Node]: ... + + @overload + def list( + self, + instance_type: Literal["edge"], + include_typing: bool = False, + sources: Source | Sequence[Source] | None = None, + space: str | SequenceNotStr[str] | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + sort: Sequence[InstanceSort | dict] | InstanceSort | dict | None = None, + filter: Filter | dict[str, Any] | None = None, + debug: DebugParameters | None = None, + ) -> EdgeList[Edge]: ... + + @overload + def list( + self, + instance_type: type[T_Node], + *, + space: str | SequenceNotStr[str] | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + sort: Sequence[InstanceSort | dict] | InstanceSort | dict | None = None, + filter: Filter | dict[str, Any] | None = None, + debug: DebugParameters | None = None, + ) -> NodeList[T_Node]: ... + + @overload + def list( + self, + instance_type: type[T_Edge], + *, + space: str | SequenceNotStr[str] | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + sort: Sequence[InstanceSort | dict] | InstanceSort | dict | None = None, + filter: Filter | dict[str, Any] | None = None, + debug: DebugParameters | None = None, + ) -> EdgeList[T_Edge]: ... + + def list( + self, + instance_type: Literal["node", "edge"] | type[T_Node] | type[T_Edge] = "node", + include_typing: bool = False, + sources: Source | Sequence[Source] | None = None, + space: str | SequenceNotStr[str] | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + sort: Sequence[InstanceSort | dict] | InstanceSort | dict | None = None, + filter: Filter | dict[str, Any] | None = None, + debug: DebugParameters | None = None, + ) -> NodeList[T_Node] | EdgeList[T_Edge]: + """ + `List instances `_ + + Args: + instance_type (Literal['node', 'edge'] | type[T_Node] | type[T_Edge]): Whether to query for nodes or edges. You can also pass a custom typed node (or edge class) inheriting from TypedNode (or TypedEdge). See apply, retrieve_nodes or retrieve_edges for an example. + include_typing (bool): Whether to return property type information as part of the result. + sources (Source | Sequence[Source] | None): Views to retrieve properties from. + space (str | SequenceNotStr[str] | None): Only return instances in the given space (or list of spaces). + limit (int | None): Maximum number of instances to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + sort (Sequence[InstanceSort | dict] | InstanceSort | dict | None): How you want the listed instances information ordered. + filter (Filter | dict[str, Any] | None): Advanced filtering of instances. + debug (DebugParameters | None): Debug settings for profiling and troubleshooting. 
+ + Returns: + NodeList[T_Node] | EdgeList[T_Edge]: List of requested instances + + Examples: + + List instances and limit to 5: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> instance_list = client.data_modeling.instances.list(limit=5) + + List some instances in the space 'my-space': + + >>> instance_list = client.data_modeling.instances.list(space="my-space") + + List instances and sort by some property: + + >>> from cognite.client.data_classes.data_modeling import InstanceSort + >>> property_sort = InstanceSort( + ... property=('space', 'view_xid/view_version', 'some_property'), + ... direction="descending", + ... nulls_first=True) + >>> instance_list = client.data_modeling.instances.list(sort=property_sort) + + Iterate over instances (nodes by default), one-by-one: + + >>> for node in client.data_modeling.instances(): + ... node + >>> for edge in client.data_modeling.instances(instance_type="edge"): + ... edge + + Iterate over chunks of instances to reduce memory load: + + >>> for instance_list in client.data_modeling.instances(chunk_size=100): + ... instance_list # do something with the instances + + List instances with a view as source: + + >>> from cognite.client.data_classes.data_modeling import ViewId + >>> my_view = ViewId("mySpace", "myView", "v1") + >>> instance_list = client.data_modeling.instances.list(sources=my_view) + + Convert instances to pandas DataFrame with expanded properties (``expand_properties=True``). + This will add the properties directly as dataframe columns. Specifying ``camel_case=True`` + will convert the basic columns to camel case (e.g. externalId), but leave the property names as-is. + + >>> df = instance_list.to_pandas( + ... expand_properties=True, + ... camel_case=True, + ... ) + + To debug and/or profile your query, you can use the debug parameter: + + >>> from cognite.client.data_classes.data_modeling.debug import DebugParameters + >>> debug_params = DebugParameters( + ... emit_results=False, + ... include_plan=True, # Include the postgres execution plan + ... include_translated_query=True, # Include the internal representation of the query. + ... profile=True, + ... ) + >>> res = client.data_modeling.instances.list( + ... debug=debug_params, sources=my_view + ... ) + >>> print(res.debug) + """ + return run_sync( + self.__async_client.data_modeling.instances.list( + instance_type=instance_type, + include_typing=include_typing, + sources=sources, + space=space, + limit=limit, + sort=sort, + filter=filter, + debug=debug, + ) + ) diff --git a/cognite/client/_sync_api/data_modeling/space_statistics.py b/cognite/client/_sync_api/data_modeling/space_statistics.py new file mode 100644 index 0000000000..569ad9a4b6 --- /dev/null +++ b/cognite/client/_sync_api/data_modeling/space_statistics.py @@ -0,0 +1,80 @@ +""" +=============================================================================== +773bd110da46d6c4ebcf2a0513906391 +This file is auto-generated from the Async API modules, - do not edit manually! 
+=============================================================================== +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.data_modeling.statistics import SpaceStatistics, SpaceStatisticsList +from cognite.client.utils._async_helpers import run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client._cognite_client import AsyncCogniteClient + + +class SyncSpaceStatisticsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def retrieve(self, space: str) -> SpaceStatistics | None: ... + + @overload + def retrieve(self, space: SequenceNotStr[str]) -> SpaceStatisticsList: ... + + def retrieve(self, space: str | SequenceNotStr[str]) -> SpaceStatistics | SpaceStatisticsList | None: + """ + `Retrieve usage data and limits per space `_ + + Args: + space (str | SequenceNotStr[str]): The space or spaces to retrieve statistics for. + + Returns: + SpaceStatistics | SpaceStatisticsList | None: The requested statistics and limits for the specified space(s). + + Examples: + + Fetch statistics for a single space: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> result = client.data_modeling.statistics.spaces.retrieve("my-space") + + Fetch statistics for multiple spaces: + >>> res = client.data_modeling.statistics.spaces.retrieve( + ... ["my-space1", "my-space2"] + ... ) + """ + return run_sync(self.__async_client.data_modeling.statistics.spaces.retrieve(space=space)) + + def list(self) -> SpaceStatisticsList: + """ + `Retrieve usage for all spaces `_ + + Returns statistics for data modeling resources grouped by each space in the project. + + Returns: + SpaceStatisticsList: The requested statistics and limits for all spaces in the project. + + Examples: + + Fetch statistics for all spaces in the project: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> stats = client.data_modeling.statistics.spaces.list() + >>> for space_stats in stats: + ... print(f"Space: {space_stats.space}, Nodes: {space_stats.nodes}") + """ + return run_sync(self.__async_client.data_modeling.statistics.spaces.list()) diff --git a/cognite/client/_sync_api/data_modeling/spaces.py b/cognite/client/_sync_api/data_modeling/spaces.py new file mode 100644 index 0000000000..9f6c03e13b --- /dev/null +++ b/cognite/client/_sync_api/data_modeling/spaces.py @@ -0,0 +1,158 @@ +""" +=============================================================================== +ad14ab3f96c33bdeba7358c9567da303 +This file is auto-generated from the Async API modules, - do not edit manually! 
+=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.data_modeling.spaces import Space, SpaceApply, SpaceList +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncSpacesAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[Space]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[SpaceList]: ... + + def __call__(self, chunk_size: int | None = None, limit: int | None = None) -> Iterator[Space | SpaceList]: + """ + Iterate over spaces + + Fetches spaces as they are iterated over, so you keep a limited number of spaces in memory. + + Args: + chunk_size (int | None): Number of spaces to return in each chunk. Defaults to yielding one space a time. + limit (int | None): Maximum number of spaces to return. Defaults to returning all items. + + Yields: + Space | SpaceList: yields Space one by one if chunk_size is not specified, else SpaceList objects. + """ + yield from SyncIterator(self.__async_client.data_modeling.spaces(chunk_size=chunk_size, limit=limit)) + + @overload + def retrieve(self, spaces: str) -> Space | None: ... + + @overload + def retrieve(self, spaces: SequenceNotStr[str]) -> SpaceList: ... + + def retrieve(self, spaces: str | SequenceNotStr[str]) -> Space | SpaceList | None: + """ + `Retrieve one or more spaces. `_ + + Args: + spaces (str | SequenceNotStr[str]): Space ID + + Returns: + Space | SpaceList | None: Requested space or None if it does not exist. + + Examples: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.data_modeling.spaces.retrieve(spaces='mySpace') + + Get multiple spaces by id: + + >>> res = client.data_modeling.spaces.retrieve(spaces=["MySpace", "MyAwesomeSpace", "MyOtherSpace"]) + """ + return run_sync(self.__async_client.data_modeling.spaces.retrieve(spaces=spaces)) + + def delete(self, spaces: str | SequenceNotStr[str]) -> list[str]: + """ + `Delete one or more spaces `_ + + Args: + spaces (str | SequenceNotStr[str]): ID or ID list ids of spaces. + Returns: + list[str]: The space(s) which has been deleted. + Examples: + + Delete spaces by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.data_modeling.spaces.delete(spaces=["mySpace", "myOtherSpace"]) + """ + return run_sync(self.__async_client.data_modeling.spaces.delete(spaces=spaces)) + + def list(self, limit: int | None = DEFAULT_LIMIT_READ, include_global: bool = False) -> SpaceList: + """ + `List spaces `_ + + Args: + limit (int | None): Maximum number of spaces to return. Defaults to 10. Set to -1, float("inf") or None to return all items. + include_global (bool): Whether to include global spaces. Defaults to False. 
+
+        Returns:
+            SpaceList: List of requested spaces
+
+        Examples:
+
+            List up to 5 spaces:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> space_list = client.data_modeling.spaces.list(limit=5)
+
+            Iterate over spaces, one-by-one:
+
+                >>> for space in client.data_modeling.spaces():
+                ...     space # do something with the space
+
+            Iterate over chunks of spaces to reduce memory load:
+
+                >>> for space_list in client.data_modeling.spaces(chunk_size=2500):
+                ...     space_list # do something with the spaces
+        """
+        return run_sync(self.__async_client.data_modeling.spaces.list(limit=limit, include_global=include_global))
+
+    @overload
+    def apply(self, spaces: Sequence[SpaceApply]) -> SpaceList: ...
+
+    @overload
+    def apply(self, spaces: SpaceApply) -> Space: ...
+
+    def apply(self, spaces: SpaceApply | Sequence[SpaceApply]) -> Space | SpaceList:
+        """
+        `Create or patch one or more spaces. `_
+
+        Args:
+            spaces (SpaceApply | Sequence[SpaceApply]): Space or spaces to create or update.
+
+        Returns:
+            Space | SpaceList: Created space(s)
+
+        Examples:
+
+            Create new spaces:
+
+                >>> from cognite.client import CogniteClient
+                >>> from cognite.client.data_classes.data_modeling import SpaceApply
+                >>> client = CogniteClient()
+                >>> spaces = [SpaceApply(space="mySpace", description="My first space", name="My Space"),
+                ...     SpaceApply(space="myOtherSpace", description="My second space", name="My Other Space")]
+                >>> res = client.data_modeling.spaces.apply(spaces)
+        """
+        return run_sync(self.__async_client.data_modeling.spaces.apply(spaces=spaces))
diff --git a/cognite/client/_sync_api/data_modeling/statistics.py b/cognite/client/_sync_api/data_modeling/statistics.py
new file mode 100644
index 0000000000..e385548578
--- /dev/null
+++ b/cognite/client/_sync_api/data_modeling/statistics.py
@@ -0,0 +1,50 @@
+"""
+===============================================================================
+a4c510989006c674b4ae203ce2c6918d
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._sync_api.data_modeling.space_statistics import SyncSpaceStatisticsAPI
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.data_modeling.statistics import ProjectStatistics
+from cognite.client.utils._async_helpers import run_sync
+
+if TYPE_CHECKING:
+    from cognite.client._cognite_client import AsyncCogniteClient
+
+
+class SyncStatisticsAPI(SyncAPIClient):
+    """Auto-generated, do not modify manually."""
+
+    def __init__(self, async_client: AsyncCogniteClient):
+        self.__async_client = async_client
+        self.spaces = SyncSpaceStatisticsAPI(async_client)
+
+    def project(self) -> ProjectStatistics:
+        """
+        `Retrieve project-wide usage data and limits `_
+
+        Returns the usage data and limits for a project's data modelling usage, including data model schemas and graph instances
+
+        Returns:
+            ProjectStatistics: The requested statistics and limits
+
+        Examples:
+
+            Fetch project statistics (and limits) and check the current number of data models vs.
+ and how many more can be created: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> stats = client.data_modeling.statistics.project() + >>> data_model_count = stats.data_models.count + >>> available_count = stats.data_models.limit - data_model_count + """ + return run_sync(self.__async_client.data_modeling.statistics.project()) diff --git a/cognite/client/_sync_api/data_modeling/views.py b/cognite/client/_sync_api/data_modeling/views.py new file mode 100644 index 0000000000..a70550d800 --- /dev/null +++ b/cognite/client/_sync_api/data_modeling/views.py @@ -0,0 +1,263 @@ +""" +=============================================================================== +286155d9ffa73428bab0192dd2914c82 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DATA_MODELING_DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.data_modeling.ids import ViewId, ViewIdentifier +from cognite.client.data_classes.data_modeling.views import View, ViewApply, ViewList +from cognite.client.utils._async_helpers import SyncIterator, run_sync + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncViewsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[View]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[ViewList]: ... + + def __call__( + self, + chunk_size: int | None = None, + limit: int | None = None, + space: str | None = None, + include_inherited_properties: bool = True, + all_versions: bool = False, + include_global: bool = False, + ) -> Iterator[View | ViewList]: + """ + Iterate over views + + Fetches views as they are iterated over, so you keep a limited number of views in memory. + + Args: + chunk_size (int | None): Number of views to return in each chunk. Defaults to yielding one view at a time. + limit (int | None): Maximum number of views to return. Defaults to returning all items. + space (str | None): (str | None): The space to query. + include_inherited_properties (bool): Whether to include properties inherited from views this view implements. + all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. + include_global (bool): Whether to include global views. + + Yields: + View | ViewList: yields View one by one if chunk_size is not specified, else ViewList objects. + """ + yield from SyncIterator( + self.__async_client.data_modeling.views( + chunk_size=chunk_size, + limit=limit, + space=space, + include_inherited_properties=include_inherited_properties, + all_versions=all_versions, + include_global=include_global, + ) + ) + + def retrieve( + self, + ids: ViewIdentifier | Sequence[ViewIdentifier], + include_inherited_properties: bool = True, + all_versions: bool = True, + ) -> ViewList: + """ + `Retrieve a single view by id. 
`_ + + Args: + ids (ViewIdentifier | Sequence[ViewIdentifier]): The view identifier(s). This can be given as a tuple of + strings or a ViewId object. For example, ("my_space", "my_view"), ("my_space", "my_view", "my_version"), + or ViewId("my_space", "my_view", "my_version"). Note that version is optional, if not provided, all versions + will be returned. + include_inherited_properties (bool): Whether to include properties inherited from views this view implements. + all_versions (bool): Whether to return all versions. If false, only the newest version is returned, + + Returns: + ViewList: Requested view or None if it does not exist. + + Examples: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.data_modeling.views.retrieve(('mySpace', 'myView', 'v1')) + """ + return run_sync( + self.__async_client.data_modeling.views.retrieve( + ids=ids, include_inherited_properties=include_inherited_properties, all_versions=all_versions + ) + ) + + def delete(self, ids: ViewIdentifier | Sequence[ViewIdentifier]) -> list[ViewId]: + """ + `Delete one or more views `_ + + Args: + ids (ViewIdentifier | Sequence[ViewIdentifier]): View identifier(s) + Returns: + list[ViewId]: The identifier for the view(s) which has been deleted. Empty list if nothing was deleted. + Examples: + + Delete views by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.data_modeling.views.delete(('mySpace', 'myView', 'v1')) + """ + return run_sync(self.__async_client.data_modeling.views.delete(ids=ids)) + + def list( + self, + limit: int | None = DATA_MODELING_DEFAULT_LIMIT_READ, + space: str | None = None, + include_inherited_properties: bool = True, + all_versions: bool = False, + include_global: bool = False, + ) -> ViewList: + """ + `List views `_ + + Args: + limit (int | None): Maximum number of views to return. Defaults to 10. Set to -1, float("inf") or None to return all items. + space (str | None): (str | None): The space to query. + include_inherited_properties (bool): Whether to include properties inherited from views this view implements. + all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. + include_global (bool): Whether to include global views. + + Returns: + ViewList: List of requested views + + Examples: + + List 5 views: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> view_list = client.data_modeling.views.list(limit=5) + + Iterate over views, one-by-one: + + >>> for view in client.data_modeling.views(): + ... view # do something with the view + + Iterate over chunks of views to reduce memory load: + + >>> for view_list in client.data_modeling.views(chunk_size=10): + ... view_list # do something with the views + """ + return run_sync( + self.__async_client.data_modeling.views.list( + limit=limit, + space=space, + include_inherited_properties=include_inherited_properties, + all_versions=all_versions, + include_global=include_global, + ) + ) + + @overload + def apply(self, view: Sequence[ViewApply]) -> ViewList: ... + + @overload + def apply(self, view: ViewApply) -> View: ... 
+ + def apply(self, view: ViewApply | Sequence[ViewApply]) -> View | ViewList: + """ + `Create or update (upsert) one or more views. `_ + + Args: + view (ViewApply | Sequence[ViewApply]): View(s) to create or update. + + Returns: + View | ViewList: Created view(s) + + Examples: + + Create new views: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.data_modeling import ViewApply, MappedPropertyApply, ContainerId + >>> client = CogniteClient() + >>> views = [ + ... ViewApply( + ... space="mySpace", + ... external_id="myView", + ... version="v1", + ... properties={ + ... "someAlias": MappedPropertyApply( + ... container=ContainerId("mySpace", "myContainer"), + ... container_property_identifier="someProperty", + ... ), + ... } + ... ) + ... ] + >>> res = client.data_modeling.views.apply(views) + + + Create views with edge relations: + + >>> from cognite.client.data_classes.data_modeling import ( + ... ContainerId, + ... DirectRelationReference, + ... MappedPropertyApply, + ... MultiEdgeConnectionApply, + ... ViewApply, + ... ViewId + ... ) + >>> work_order_for_asset = DirectRelationReference(space="mySpace", external_id="work_order_for_asset") + >>> work_order_view = ViewApply( + ... space="mySpace", + ... external_id="WorkOrder", + ... version="v1", + ... name="WorkOrder", + ... properties={ + ... "title": MappedPropertyApply( + ... container=ContainerId(space="mySpace", external_id="WorkOrder"), + ... container_property_identifier="title", + ... ), + ... "asset": MultiEdgeConnectionApply( + ... type=work_order_for_asset, + ... direction="outwards", + ... source=ViewId("mySpace", "Asset", "v1"), + ... name="asset", + ... ), + ... } + ... ) + >>> asset_view = ViewApply( + ... space="mySpace", + ... external_id="Asset", + ... version="v1", + ... name="Asset", + ... properties={ + ... "name": MappedPropertyApply( + ... container=ContainerId("mySpace", "Asset"), + ... name="name", + ... container_property_identifier="name", + ... ), + ... "work_orders": MultiEdgeConnectionApply( + ... type=work_order_for_asset, + ... direction="inwards", + ... source=ViewId("mySpace", "WorkOrder", "v1"), + ... name="work_orders", + ... ), + ... } + ... ) + >>> res = client.data_modeling.views.apply([work_order_view, asset_view]) + """ + return run_sync(self.__async_client.data_modeling.views.apply(view=view)) diff --git a/cognite/client/_sync_api/data_sets.py b/cognite/client/_sync_api/data_sets.py new file mode 100644 index 0000000000..202e866130 --- /dev/null +++ b/cognite/client/_sync_api/data_sets.py @@ -0,0 +1,289 @@ +""" +=============================================================================== +98e4f0c8b9c49ed283cc7f11792b2999 +This file is auto-generated from the Async API modules, - do not edit manually! 
+
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import TYPE_CHECKING, Any, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import DataSet, DataSetFilter, DataSetList, DataSetUpdate, DataSetWrite, TimestampRange
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+    from cognite.client import AsyncCogniteClient
+
+
+class SyncDataSetsAPI(SyncAPIClient):
+    """Auto-generated, do not modify manually."""
+
+    def __init__(self, async_client: AsyncCogniteClient):
+        self.__async_client = async_client
+
+    @overload
+    def __call__(self, chunk_size: None = None) -> Iterator[DataSet]: ...
+
+    @overload
+    def __call__(self, chunk_size: int) -> Iterator[DataSetList]: ...
+
+    def __call__(
+        self,
+        chunk_size: int | None = None,
+        metadata: dict[str, str] | None = None,
+        created_time: dict[str, Any] | TimestampRange | None = None,
+        last_updated_time: dict[str, Any] | TimestampRange | None = None,
+        external_id_prefix: str | None = None,
+        write_protected: bool | None = None,
+        limit: int | None = None,
+    ) -> Iterator[DataSet | DataSetList]:
+        """
+        Iterate over data sets
+
+        Fetches data sets as they are iterated over, so you keep a limited number of data sets in memory.
+
+        Args:
+            chunk_size (int | None): Number of data sets to return in each chunk. Defaults to yielding one data set at a time.
+            metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value.
+            created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps.
+            last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps.
+            external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID.
+            write_protected (bool | None): Specify whether the filtered data sets are write-protected, or not. Set to True to only list write-protected data sets.
+            limit (int | None): Maximum number of data sets to return. Defaults to returning all items.
+
+        Yields:
+            DataSet | DataSetList: yields DataSet one by one if chunk_size is not specified, else DataSetList objects.
+        """
+        yield from SyncIterator(
+            self.__async_client.data_sets(
+                chunk_size=chunk_size,
+                metadata=metadata,
+                created_time=created_time,
+                last_updated_time=last_updated_time,
+                external_id_prefix=external_id_prefix,
+                write_protected=write_protected,
+                limit=limit,
+            )
+        )
+
+    @overload
+    def create(self, data_set: Sequence[DataSet] | Sequence[DataSetWrite]) -> DataSetList: ...
+
+    @overload
+    def create(self, data_set: DataSet | DataSetWrite) -> DataSet: ...
+
+    def create(
+        self, data_set: DataSet | DataSetWrite | Sequence[DataSet] | Sequence[DataSetWrite]
+    ) -> DataSet | DataSetList:
+        """
+        `Create one or more data sets. `_
+
+        Args:
+            data_set (DataSet | DataSetWrite | Sequence[DataSet] | Sequence[DataSetWrite]): Data set or list of data sets to create.
+ + Returns: + DataSet | DataSetList: Created data set(s) + + Examples: + + Create new data sets: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import DataSetWrite + >>> client = CogniteClient() + >>> data_sets = [DataSetWrite(name="1st level"), DataSetWrite(name="2nd level")] + >>> res = client.data_sets.create(data_sets) + """ + return run_sync(self.__async_client.data_sets.create(data_set=data_set)) + + def retrieve(self, id: int | None = None, external_id: str | None = None) -> DataSet | None: + """ + `Retrieve a single data set by id. `_ + + Args: + id (int | None): ID + external_id (str | None): External ID + + Returns: + DataSet | None: Requested data set or None if it does not exist. + + Examples: + + Get data set by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.data_sets.retrieve(id=1) + + Get data set by external id: + + >>> res = client.data_sets.retrieve(external_id="1") + """ + return run_sync(self.__async_client.data_sets.retrieve(id=id, external_id=external_id)) + + def retrieve_multiple( + self, + ids: Sequence[int] | None = None, + external_ids: SequenceNotStr[str] | None = None, + ignore_unknown_ids: bool = False, + ) -> DataSetList: + """ + `Retrieve multiple data sets by id. `_ + + Args: + ids (Sequence[int] | None): IDs + external_ids (SequenceNotStr[str] | None): External IDs + ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + + Returns: + DataSetList: The requested data sets. + + Examples: + + Get data sets by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.data_sets.retrieve_multiple(ids=[1, 2, 3]) + + Get data sets by external id: + + >>> res = client.data_sets.retrieve_multiple(external_ids=["abc", "def"], ignore_unknown_ids=True) + """ + return run_sync( + self.__async_client.data_sets.retrieve_multiple( + ids=ids, external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + def aggregate_count(self, filter: DataSetFilter | dict[str, Any] | None = None) -> int: + """ + `Aggregate data sets `_ + + Args: + filter (DataSetFilter | dict[str, Any] | None): Filter on data set filter with exact match + + Returns: + int: Count of data sets matching the filter. + + Examples: + + Get the number of write-protected data sets: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> aggregate_protected = client.data_sets.aggregate_count( + ... filter={"write_protected": True} + ... ) + """ + return run_sync(self.__async_client.data_sets.aggregate_count(filter=filter)) + + @overload + def update( + self, + item: DataSet | DataSetWrite | DataSetUpdate, + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> DataSet: ... + + @overload + def update( + self, + item: Sequence[DataSet | DataSetWrite | DataSetUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> DataSetList: ... 
+ + def update( + self, + item: DataSet | DataSetWrite | DataSetUpdate | Sequence[DataSet | DataSetWrite | DataSetUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> DataSet | DataSetList: + """ + `Update one or more data sets `_ + + Args: + item (DataSet | DataSetWrite | DataSetUpdate | Sequence[DataSet | DataSetWrite | DataSetUpdate]): Data set(s) to update + mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (DataSet or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + + Returns: + DataSet | DataSetList: Updated data set(s) + + Examples: + + Update a data set that you have fetched. This will perform a full update of the data set: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> data_set = client.data_sets.retrieve(id=1) + >>> data_set.description = "New description" + >>> res = client.data_sets.update(data_set) + + Perform a partial update on a data set, updating the description and removing a field from metadata: + + >>> from cognite.client.data_classes import DataSetUpdate + >>> my_update = DataSetUpdate(id=1).description.set("New description").metadata.remove(["key"]) + >>> res = client.data_sets.update(my_update) + """ + return run_sync(self.__async_client.data_sets.update(item=item, mode=mode)) + + def list( + self, + metadata: dict[str, str] | None = None, + created_time: dict[str, Any] | TimestampRange | None = None, + last_updated_time: dict[str, Any] | TimestampRange | None = None, + external_id_prefix: str | None = None, + write_protected: bool | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + ) -> DataSetList: + """ + `List data sets `_ + + Args: + metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. + created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. + last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. + external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. + write_protected (bool | None): Specify whether the filtered data sets are write-protected, or not. Set to True to only list write-protected data sets. + limit (int | None): Maximum number of data sets to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + + Returns: + DataSetList: List of requested data sets + + Examples: + + List data sets and filter on write_protected: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> data_sets_list = client.data_sets.list(limit=5, write_protected=False) + + Iterate over data sets, one-by-one: + + >>> for data_set in client.data_sets(): + ... data_set # do something with the data set + + Iterate over chunks of data sets to reduce memory load: + + >>> for data_set_list in client.data_sets(chunk_size=2500): + ... 
data_set_list # do something with the list + """ + return run_sync( + self.__async_client.data_sets.list( + metadata=metadata, + created_time=created_time, + last_updated_time=last_updated_time, + external_id_prefix=external_id_prefix, + write_protected=write_protected, + limit=limit, + ) + ) diff --git a/cognite/client/_sync_api/datapoints.py b/cognite/client/_sync_api/datapoints.py new file mode 100644 index 0000000000..512d29ebcf --- /dev/null +++ b/cognite/client/_sync_api/datapoints.py @@ -0,0 +1,1559 @@ +""" +=============================================================================== +bb21beeeb7ec0d88e0a4320fead5b347 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +import datetime +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, Any, Literal, overload +from zoneinfo import ZoneInfo + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_DATAPOINTS_CHUNK_SIZE +from cognite.client._sync_api.synthetic_time_series import SyncSyntheticDatapointsAPI +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import ( + Datapoints, + DatapointsArray, + DatapointsArrayList, + DatapointsList, + DatapointsQuery, + LatestDatapointQuery, +) +from cognite.client.data_classes.data_modeling.ids import NodeId +from cognite.client.data_classes.datapoint_aggregates import Aggregate +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + import pandas as pd + + +class SyncDatapointsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + self.synthetic = SyncSyntheticDatapointsAPI(async_client) + + @overload + def __call__( + self, + queries: DatapointsQuery, + *, + return_arrays: Literal[True] = True, + chunk_size_datapoints: int = DEFAULT_DATAPOINTS_CHUNK_SIZE, + chunk_size_time_series: int | None = None, + ) -> Iterator[DatapointsArray]: ... + + @overload + def __call__( + self, + queries: Sequence[DatapointsQuery], + *, + return_arrays: Literal[True] = True, + chunk_size_datapoints: int = DEFAULT_DATAPOINTS_CHUNK_SIZE, + chunk_size_time_series: int | None = None, + ) -> Iterator[DatapointsArrayList]: ... + + @overload + def __call__( + self, + queries: DatapointsQuery, + *, + return_arrays: Literal[False], + chunk_size_datapoints: int = DEFAULT_DATAPOINTS_CHUNK_SIZE, + chunk_size_time_series: int | None = None, + ) -> Iterator[Datapoints]: ... + + @overload + def __call__( + self, + queries: Sequence[DatapointsQuery], + *, + return_arrays: Literal[False], + chunk_size_datapoints: int = DEFAULT_DATAPOINTS_CHUNK_SIZE, + chunk_size_time_series: int | None = None, + ) -> Iterator[DatapointsList]: ... + + def __call__( + self, + queries: DatapointsQuery | Sequence[DatapointsQuery], + *, + chunk_size_datapoints: int = DEFAULT_DATAPOINTS_CHUNK_SIZE, + chunk_size_time_series: int | None = None, + return_arrays: bool = True, + ) -> Iterator[DatapointsArray | DatapointsArrayList | Datapoints | DatapointsList]: + """ + `Iterate through datapoints in chunks, for one or more time series. 
`_

+
+        Note:
+            Control memory usage by specifying ``chunk_size_time_series``, how many time series to iterate simultaneously and ``chunk_size_datapoints``,
+            how many datapoints to yield per iteration (per individual time series). See full example in examples. Note that in order to make efficient
+            use of the API request limits, this method will never hold less than 100k datapoints in memory at a time, per time series.
+
+            If you run with memory constraints, use ``return_arrays=True`` (the default).
+
+            No empty chunk is ever returned.
+
+        Args:
+            queries (DatapointsQuery | Sequence[DatapointsQuery]): Query, or queries, using id, external_id or instance_id for the time series to fetch data for, with individual settings specified. The options 'limit' and 'include_outside_points' are not supported when iterating.
+            chunk_size_datapoints (int): The number of datapoints per time series to yield per iteration. Must evenly divide 100k OR be an integer multiple of 100k. Default: 100_000.
+            chunk_size_time_series (int | None): The max number of time series to yield per iteration (varies as time series get exhausted, but is never empty). Default: None (all given queries are iterated at the same time).
+            return_arrays (bool): Whether to return the datapoints as numpy arrays. Default: True.
+
+        Yields:
+            DatapointsArray | DatapointsArrayList | Datapoints | DatapointsList: If return_arrays=True, a ``DatapointsArray`` object containing the datapoints chunk, or a ``DatapointsArrayList`` if multiple time series were asked for. When False, a ``Datapoints`` object containing the datapoints chunk, or a ``DatapointsList`` if multiple time series were asked for.
+
+        Examples:
+
+            Iterate through the datapoints of a single time series with external_id="foo", in chunks of 25k:
+
+                >>> from cognite.client import CogniteClient
+                >>> from cognite.client.data_classes import DatapointsQuery
+                >>> client = CogniteClient()
+                >>> query = DatapointsQuery(external_id="foo", start="2w-ago")
+                >>> for chunk in client.time_series.data(query, chunk_size_datapoints=25_000):
+                ...     pass # do something with the datapoints chunk
+
+            Iterate through datapoints from multiple time series, and do not return them as memory-efficient numpy arrays.
+            As one or more time series get exhausted (no more data), they are no longer part of the returned "chunk list".
+            Note that the order is still preserved (for the remaining).
+
+            If you run with ``chunk_size_time_series=None``, an easy way to check when a time series is exhausted is to
+            use the ``.get`` method, as illustrated below:
+
+                >>> from cognite.client.data_classes.data_modeling import NodeId
+                >>> queries = [
+                ...     DatapointsQuery(id=123),
+                ...     DatapointsQuery(external_id="foo"),
+                ...     DatapointsQuery(instance_id=NodeId("my-space", "my-ts-xid"))
+                ... ]
+                >>> for chunk_lst in client.time_series.data(queries, return_arrays=False):
+                ...     if chunk_lst.get(id=123) is None:
+                ...         print("Time series with id=123 has no more datapoints!")
+
+            A likely use case for iterating datapoints is to clone data from one project to another, while keeping a low memory
+            footprint and without having to write very custom logic involving count aggregates (which won't work for string data)
+            or do time-domain splitting yourself.
+
+            Here's an example of how to do so efficiently, while including bad- and uncertain data (``ignore_bad_datapoints=False``) and
+            copying status codes (``include_status=True``).
This is automatically taken care of when the Datapoints(-Array) objects are passed + directly to an insert method. The only assumption below is that the time series have already been created in the target project. + + >>> from cognite.client.utils import MIN_TIMESTAMP_MS, MAX_TIMESTAMP_MS + >>> target_client = AsyncCogniteClient() + >>> ts_to_copy = client.time_series.list(data_set_external_ids="my-use-case") + >>> queries = [ + ... DatapointsQuery( + ... external_id=ts.external_id, + ... include_status=True, + ... ignore_bad_datapoints=False, + ... start=MIN_TIMESTAMP_MS, + ... end=MAX_TIMESTAMP_MS + 1, # end is exclusive + ... ) + ... for ts in ts_to_copy + ... ] + >>> for dps_chunk in client.time_series.data( + ... queries, # may be several thousand time series... + ... chunk_size_time_series=20, # control memory usage by specifying how many to iterate at a time + ... chunk_size_datapoints=100_000, + ... ): + ... target_client.time_series.data.insert_multiple( + ... [{"external_id": dps.external_id, "datapoints": dps} for dps in dps_chunk] + ... ) + """ + yield from SyncIterator( + self.__async_client.time_series.data( + queries=queries, + chunk_size_datapoints=chunk_size_datapoints, + chunk_size_time_series=chunk_size_time_series, + return_arrays=return_arrays, + ) + ) + + @overload + def retrieve( + self, + *, + id: int | DatapointsQuery, + start: int | str | datetime.datetime | None = None, + end: int | str | datetime.datetime | None = None, + aggregates: Aggregate | str | list[Aggregate | str] | None = None, + granularity: str | None = None, + timezone: str | datetime.timezone | ZoneInfo | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + limit: int | None = None, + include_outside_points: bool = False, + ignore_unknown_ids: bool = False, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ) -> Datapoints | None: ... + + @overload + def retrieve( + self, + *, + id: Sequence[int | DatapointsQuery], + start: int | str | datetime.datetime | None = None, + end: int | str | datetime.datetime | None = None, + aggregates: Aggregate | str | list[Aggregate | str] | None = None, + granularity: str | None = None, + timezone: str | datetime.timezone | ZoneInfo | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + limit: int | None = None, + include_outside_points: bool = False, + ignore_unknown_ids: bool = False, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ) -> DatapointsList: ... + + @overload + def retrieve( + self, + *, + external_id: str | DatapointsQuery, + start: int | str | datetime.datetime | None = None, + end: int | str | datetime.datetime | None = None, + aggregates: Aggregate | str | list[Aggregate | str] | None = None, + granularity: str | None = None, + timezone: str | datetime.timezone | ZoneInfo | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + limit: int | None = None, + include_outside_points: bool = False, + ignore_unknown_ids: bool = False, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ) -> Datapoints | None: ... 
+ + @overload + def retrieve( + self, + *, + external_id: SequenceNotStr[str | DatapointsQuery], + start: int | str | datetime.datetime | None = None, + end: int | str | datetime.datetime | None = None, + aggregates: Aggregate | str | list[Aggregate | str] | None = None, + granularity: str | None = None, + timezone: str | datetime.timezone | ZoneInfo | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + limit: int | None = None, + include_outside_points: bool = False, + ignore_unknown_ids: bool = False, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ) -> DatapointsList: ... + + @overload + def retrieve( + self, + *, + instance_id: NodeId | DatapointsQuery, + start: int | str | datetime.datetime | None = None, + end: int | str | datetime.datetime | None = None, + aggregates: Aggregate | str | list[Aggregate | str] | None = None, + granularity: str | None = None, + timezone: str | datetime.timezone | ZoneInfo | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + limit: int | None = None, + include_outside_points: bool = False, + ignore_unknown_ids: bool = False, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ) -> Datapoints | None: ... + + @overload + def retrieve( + self, + *, + instance_id: Sequence[NodeId | DatapointsQuery], + start: int | str | datetime.datetime | None = None, + end: int | str | datetime.datetime | None = None, + aggregates: Aggregate | str | list[Aggregate | str] | None = None, + granularity: str | None = None, + timezone: str | datetime.timezone | ZoneInfo | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + limit: int | None = None, + include_outside_points: bool = False, + ignore_unknown_ids: bool = False, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ) -> DatapointsList: ... + + @overload + def retrieve( + self, + *, + id: None | int | DatapointsQuery | Sequence[int | DatapointsQuery], + external_id: None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery], + start: int | str | datetime.datetime | None = None, + end: int | str | datetime.datetime | None = None, + aggregates: Aggregate | str | list[Aggregate | str] | None = None, + granularity: str | None = None, + timezone: str | datetime.timezone | ZoneInfo | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + limit: int | None = None, + include_outside_points: bool = False, + ignore_unknown_ids: bool = False, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ) -> DatapointsList: ... 
+ + @overload + def retrieve( + self, + *, + id: None | int | DatapointsQuery | Sequence[int | DatapointsQuery], + instance_id: None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery], + start: int | str | datetime.datetime | None = None, + end: int | str | datetime.datetime | None = None, + aggregates: Aggregate | str | list[Aggregate | str] | None = None, + granularity: str | None = None, + timezone: str | datetime.timezone | ZoneInfo | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + limit: int | None = None, + include_outside_points: bool = False, + ignore_unknown_ids: bool = False, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ) -> DatapointsList: ... + + @overload + def retrieve( + self, + *, + external_id: None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery], + instance_id: None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery], + start: int | str | datetime.datetime | None = None, + end: int | str | datetime.datetime | None = None, + aggregates: Aggregate | str | list[Aggregate | str] | None = None, + granularity: str | None = None, + timezone: str | datetime.timezone | ZoneInfo | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + limit: int | None = None, + include_outside_points: bool = False, + ignore_unknown_ids: bool = False, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ) -> DatapointsList: ... + + @overload + def retrieve( + self, + *, + id: None | int | DatapointsQuery | Sequence[int | DatapointsQuery], + external_id: None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery], + instance_id: None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery], + start: int | str | datetime.datetime | None = None, + end: int | str | datetime.datetime | None = None, + aggregates: Aggregate | str | list[Aggregate | str] | None = None, + granularity: str | None = None, + timezone: str | datetime.timezone | ZoneInfo | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + limit: int | None = None, + include_outside_points: bool = False, + ignore_unknown_ids: bool = False, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ) -> DatapointsList: ... + + def retrieve( + self, + *, + id: None | int | DatapointsQuery | Sequence[int | DatapointsQuery] = None, + external_id: None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery] = None, + instance_id: None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery] = None, + start: int | str | datetime.datetime | None = None, + end: int | str | datetime.datetime | None = None, + aggregates: Aggregate | str | list[Aggregate | str] | None = None, + granularity: str | None = None, + timezone: str | datetime.timezone | ZoneInfo | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + limit: int | None = None, + include_outside_points: bool = False, + ignore_unknown_ids: bool = False, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ) -> Datapoints | DatapointsList | None: + """ + `Retrieve datapoints for one or more time series. `_ + + **Performance guide**: + In order to retrieve millions of datapoints as efficiently as possible, here are a few guidelines: + + 1. 
Make *one* call to retrieve and fetch all time series in one go, rather than making multiple calls (if your memory allows it). The SDK will optimize retrieval strategy for you!
+            2. For best speed, and significantly lower memory usage, consider using ``retrieve_arrays(...)`` which uses ``numpy.ndarrays`` for data storage.
+            3. Unlimited queries (``limit=None``) are most performant as they are always fetched in parallel, for any number of requested time series, even one.
+            4. Limited queries (e.g. ``limit=500_000``) are much less performant, at least for large limits, as each individual time series is fetched serially (we can't predict where on the timeline the datapoints are). Thus parallelisation is only used when asking for multiple "limited" time series.
+            5. Try to avoid specifying `start` and `end` to be very far from the actual data: If you have data from 2000 to 2015, don't use start=0 (1970).
+            6. Using ``timezone`` and/or calendar granularities like month/quarter/year in aggregate queries comes at a penalty as they are expensive for the API to compute.
+
+        Warning:
+            When using the AsyncCogniteClient, always ``await`` the result of this method and never run multiple calls concurrently (e.g. using asyncio.gather).
+            You can pass as many queries as you like to a single call, and the SDK will optimize the retrieval strategy for you intelligently.
+
+        Tip:
+            To read datapoints efficiently, while keeping a low memory footprint e.g. to copy from one project to another, check out :py:meth:`~DatapointsAPI.__call__`.
+            It allows you to iterate through datapoints in chunks, and also control how many time series to iterate at the same time.
+
+            Time series support status codes like Good, Uncertain and Bad. You can read more in the Cognite Data Fusion developer documentation on
+            `status codes. `_
+
+        Args:
+            id (None | int | DatapointsQuery | Sequence[int | DatapointsQuery]): Id, dict (with id) or (mixed) sequence of these. See examples below.
+            external_id (None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery]): External id, dict (with external id) or (mixed) sequence of these. See examples below.
+            instance_id (None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery]): Instance id or sequence of instance ids.
+            start (int | str | datetime.datetime | None): Inclusive start. Default: 1970-01-01 UTC.
+            end (int | str | datetime.datetime | None): Exclusive end. Default: "now"
+            aggregates (Aggregate | str | list[Aggregate | str] | None): Single aggregate or list of aggregates to retrieve. Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned)
+            granularity (str | None): The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None.
+            timezone (str | datetime.timezone | ZoneInfo | None): For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, day or month.
For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute. + target_unit (str | None): The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system. + target_unit_system (str | None): The unit system of the datapoints returned. Cannot be used with target_unit. + limit (int | None): Maximum number of datapoints to return for each time series. Default: None (no limit) + include_outside_points (bool): Whether to include outside points. Not allowed when fetching aggregates. Default: False + ignore_unknown_ids (bool): Whether to ignore missing time series rather than raising an exception. Default: False + include_status (bool): Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``. + ignore_bad_datapoints (bool): Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True. + treat_uncertain_as_bad (bool): Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True. + + Returns: + Datapoints | DatapointsList | None: A ``Datapoints`` object containing the requested data, or a ``DatapointsList`` if multiple time series were asked for (the ordering is ids first, then external_ids). If `ignore_unknown_ids` is `True`, a single time series is requested and it is not found, the function will return `None`. + + Examples: + + You can specify the identifiers of the datapoints you wish to retrieve in a number of ways. In this example + we are using the time-ago format, ``"2w-ago"`` to get raw data for the time series with id=42 from 2 weeks ago up until now. + You can also use the time-ahead format, like ``"3d-ahead"``, to specify a relative time in the future. + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> dps = client.time_series.data.retrieve(id=42, start="2w-ago") + >>> # You can also use instance_id: + >>> from cognite.client.data_classes.data_modeling import NodeId + >>> dps = client.time_series.data.retrieve(instance_id=NodeId("ts-space", "foo")) + + Although raw datapoints are returned by default, you can also get aggregated values, such as `max` or `average`. You may also fetch more than one time series simultaneously. Here we are + getting daily averages and maximum values for all of 2018, for two different time series, where we're specifying `start` and `end` as integers + (milliseconds after epoch). 
In the below example, we fetch them using their external ids: + + >>> dps_lst = client.time_series.data.retrieve( + ... external_id=["foo", "bar"], + ... start=1514764800000, + ... end=1546300800000, + ... aggregates=["max", "average"], + ... granularity="1d") + + In the two code examples above, we have a `dps` object (an instance of ``Datapoints``), and a `dps_lst` object (an instance of ``DatapointsList``). + On `dps`, which in this case contains raw datapoints, you may access the underlying data directly by using the `.value` attribute. This works for + both numeric and string (raw) datapoints, but not aggregates - they must be accessed by their respective names, because you're allowed to fetch + all available aggregates simultaneously, and they are stored on the same object: + + >>> raw_data = dps.value + >>> first_dps = dps_lst[0] # optionally: `dps_lst.get(external_id="foo")` + >>> avg_data = first_dps.average + >>> max_data = first_dps.max + + You may also slice a ``Datapoints`` object (you get ``Datapoints`` back), or ask for "a row of data" at a single index in same way you would do with a + built-in `list` (you get a `Datapoint` object back, note the singular name). You'll also get `Datapoint` objects when iterating through a ``Datapoints`` + object, but this should generally be avoided (consider this a performance warning): + + >>> dps_slice = dps[-10:] # Last ten values + >>> dp = dps[3] # The third value + >>> for dp in dps_slice: + ... pass # do something! + + All parameters can be individually set if you use and pass ``DatapointsQuery`` objects (even ``ignore_unknown_ids``, contrary to the API). + If you also pass top-level parameters, these will be overruled by the individual parameters (where both exist, so think of these as defaults). + You are free to mix any kind of ids and external ids: Single identifiers, single DatapointsQuery objects and (mixed) lists of these. + + Let's say you want different aggregates and end-times for a few time series (when only fetching a single aggregate, you may pass + the string directly for convenience): + + >>> from cognite.client.data_classes import DatapointsQuery + >>> dps_lst = client.time_series.data.retrieve( + ... id=[ + ... DatapointsQuery(id=42, end="1d-ago", aggregates="average"), + ... DatapointsQuery(id=69, end="2d-ahead", aggregates=["average"]), + ... DatapointsQuery(id=96, end="3d-ago", aggregates=["min", "max", "count"]), + ... ], + ... external_id=DatapointsQuery(external_id="foo", aggregates="max"), + ... start="5d-ago", + ... granularity="1h") + + Certain aggregates are very useful when they follow the calendar, for example electricity consumption per day, week, month + or year. You may request such calendar-based aggregates in a specific timezone to make them even more useful: daylight savings (DST) + will be taken care of automatically and the datapoints will be aligned to the timezone. Note: Calendar granularities and timezone + can be used independently. To get monthly local aggregates in Oslo, Norway you can do: + + >>> dps = client.time_series.data.retrieve( + ... id=123, + ... aggregates="sum", + ... granularity="1month", + ... timezone="Europe/Oslo") + + When requesting multiple time series, an easy way to get the datapoints of a specific one is to use the `.get` method + on the returned ``DatapointsList`` object, then specify if you want `id` or `external_id`. 
Note: If you fetch a time series + by using `id`, you can still access it with its `external_id` (and the opposite way around), if you know it: + + >>> from datetime import datetime, timezone + >>> utc = timezone.utc + >>> dps_lst = client.time_series.data.retrieve( + ... start=datetime(1907, 10, 14, tzinfo=utc), + ... end=datetime(1907, 11, 6, tzinfo=utc), + ... id=[42, 43, 44, ..., 499, 500], + ... ) + >>> ts_350 = dps_lst.get(id=350) # ``Datapoints`` object + + ...but what happens if you request some duplicate ids or external_ids? In this example we will show how to get data from + multiple disconnected periods. Let's say you're tasked to train a machine learning model to recognize a specific failure mode + of a system, and you want the training data to only be from certain periods (when an alarm was on/high). Assuming these alarms + are stored as events in CDF, with both start- and end times, we can use these directly in the query. + + After fetching, the `.get` method will return a list of ``Datapoints`` instead, (assuming we have more than one event) in the + same order, similar to how slicing works with non-unique indices on Pandas DataFrames: + + >>> periods = client.events.list(type="alarm", subtype="pressure") + >>> sensor_xid = "foo-pressure-bar" + >>> dps_lst = client.time_series.data.retrieve( + ... id=[42, 43, 44], + ... external_id=[ + ... DatapointsQuery(external_id=sensor_xid, start=ev.start_time, end=ev.end_time) + ... for ev in periods + ... ]) + >>> ts_44 = dps_lst.get(id=44) # Single ``Datapoints`` object + >>> ts_lst = dps_lst.get(external_id=sensor_xid) # List of ``len(periods)`` ``Datapoints`` objects + + The API has an endpoint to :py:meth:`~DatapointsAPI.retrieve_latest`, i.e. "before", but not "after". Luckily, we can emulate that behaviour easily. + Let's say we have a very dense time series and do not want to fetch all of the available raw data (or fetch less precise + aggregate data), just to get the very first datapoint of every month (from e.g. the year 2000 through 2010): + + >>> import itertools + >>> month_starts = [ + ... datetime(year, month, 1, tzinfo=utc) + ... for year, month in itertools.product(range(2000, 2011), range(1, 13))] + >>> dps_lst = client.time_series.data.retrieve( + ... external_id=[DatapointsQuery(external_id="foo", start=start) for start in month_starts], + ... limit=1) + + To get *all* historic and future datapoints for a time series, e.g. to do a backup, you may want to import the two integer + constants: ``MIN_TIMESTAMP_MS`` and ``MAX_TIMESTAMP_MS``, to make sure you do not miss any. **Performance warning**: This pattern of + fetching datapoints from the entire valid time domain is slower and shouldn't be used for regular "day-to-day" queries: + + >>> from cognite.client.utils import MIN_TIMESTAMP_MS, MAX_TIMESTAMP_MS + >>> dps_backup = client.time_series.data.retrieve( + ... id=123, + ... start=MIN_TIMESTAMP_MS, + ... end=MAX_TIMESTAMP_MS + 1) # end is exclusive + + If you have a time series with 'unit_external_id' set, you can use the 'target_unit' parameter to convert the datapoints + to the desired unit. In the example below, we are converting temperature readings from a sensor measured and stored in Celsius, + to Fahrenheit (we're assuming that the time series has e.g. ``unit_external_id="temperature:deg_c"`` ): + + >>> client.time_series.data.retrieve( + ... 
id=42, start="2w-ago", target_unit="temperature:deg_f") + + Or alternatively, you can use the 'target_unit_system' parameter to convert the datapoints to the desired unit system: + + >>> client.time_series.data.retrieve( + ... id=42, start="2w-ago", target_unit_system="Imperial") + + To retrieve status codes for a time series, pass ``include_status=True``. This is only possible for raw datapoint queries. + You would typically also pass ``ignore_bad_datapoints=False`` to not hide all the datapoints that are marked as uncertain or bad, + which is the API's default behaviour. You may also use ``treat_uncertain_as_bad`` to control how uncertain values are interpreted. + + >>> dps = client.time_series.data.retrieve( + ... id=42, include_status=True, ignore_bad_datapoints=False) + >>> dps.status_code # list of integer codes, e.g.: [0, 1073741824, 2147483648] + >>> dps.status_symbol # list of symbolic representations, e.g. [Good, Uncertain, Bad] + + There are six aggregates directly related to status codes, three for count: 'count_good', 'count_uncertain' and 'count_bad', and + three for duration: 'duration_good', 'duration_uncertain' and 'duration_bad'. These may be fetched as any other aggregate. + It is important to note that status codes may influence how other aggregates are computed: Aggregates will in general omit the + time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good + datapoint will be considered constant. To put simply, what 'average' may return depends on your setting for 'ignore_bad_datapoints' + and 'treat_uncertain_as_bad' (in the presence of uncertain/bad datapoints). + """ + return run_sync( + self.__async_client.time_series.data.retrieve( + id=id, + external_id=external_id, + instance_id=instance_id, + start=start, + end=end, + aggregates=aggregates, + granularity=granularity, + timezone=timezone, + target_unit=target_unit, + target_unit_system=target_unit_system, + limit=limit, + include_outside_points=include_outside_points, + ignore_unknown_ids=ignore_unknown_ids, + include_status=include_status, + ignore_bad_datapoints=ignore_bad_datapoints, + treat_uncertain_as_bad=treat_uncertain_as_bad, + ) + ) + + @overload + def retrieve_arrays( + self, + *, + id: int | DatapointsQuery, + start: int | str | datetime.datetime | None = None, + end: int | str | datetime.datetime | None = None, + aggregates: Aggregate | str | list[Aggregate | str] | None = None, + granularity: str | None = None, + timezone: str | datetime.timezone | ZoneInfo | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + limit: int | None = None, + include_outside_points: bool = False, + ignore_unknown_ids: bool = False, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ) -> DatapointsArray | None: ... 
+ + @overload + def retrieve_arrays( + self, + *, + id: Sequence[int | DatapointsQuery], + start: int | str | datetime.datetime | None = None, + end: int | str | datetime.datetime | None = None, + aggregates: Aggregate | str | list[Aggregate | str] | None = None, + granularity: str | None = None, + timezone: str | datetime.timezone | ZoneInfo | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + limit: int | None = None, + include_outside_points: bool = False, + ignore_unknown_ids: bool = False, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ) -> DatapointsArrayList: ... + + @overload + def retrieve_arrays( + self, + *, + external_id: str | DatapointsQuery, + start: int | str | datetime.datetime | None = None, + end: int | str | datetime.datetime | None = None, + aggregates: Aggregate | str | list[Aggregate | str] | None = None, + granularity: str | None = None, + timezone: str | datetime.timezone | ZoneInfo | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + limit: int | None = None, + include_outside_points: bool = False, + ignore_unknown_ids: bool = False, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ) -> DatapointsArray | None: ... + + @overload + def retrieve_arrays( + self, + *, + external_id: SequenceNotStr[str | DatapointsQuery], + start: int | str | datetime.datetime | None = None, + end: int | str | datetime.datetime | None = None, + aggregates: Aggregate | str | list[Aggregate | str] | None = None, + granularity: str | None = None, + timezone: str | datetime.timezone | ZoneInfo | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + limit: int | None = None, + include_outside_points: bool = False, + ignore_unknown_ids: bool = False, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ) -> DatapointsArrayList: ... + + @overload + def retrieve_arrays( + self, + *, + instance_id: NodeId | DatapointsQuery, + start: int | str | datetime.datetime | None = None, + end: int | str | datetime.datetime | None = None, + aggregates: Aggregate | str | list[Aggregate | str] | None = None, + granularity: str | None = None, + timezone: str | datetime.timezone | ZoneInfo | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + limit: int | None = None, + include_outside_points: bool = False, + ignore_unknown_ids: bool = False, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ) -> DatapointsArray | None: ... + + @overload + def retrieve_arrays( + self, + *, + instance_id: Sequence[NodeId | DatapointsQuery], + start: int | str | datetime.datetime | None = None, + end: int | str | datetime.datetime | None = None, + aggregates: Aggregate | str | list[Aggregate | str] | None = None, + granularity: str | None = None, + timezone: str | datetime.timezone | ZoneInfo | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + limit: int | None = None, + include_outside_points: bool = False, + ignore_unknown_ids: bool = False, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ) -> DatapointsArrayList: ... 
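+ # The overloads above only differ in how the identifiers are given (single value vs. sequence of
+ # id/external_id/instance_id) and in the narrowed return type; the implementation below simply
+ # delegates to the wrapped AsyncCogniteClient and resolves the returned coroutine with run_sync.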
+ + def retrieve_arrays( + self, + *, + id: None | int | DatapointsQuery | Sequence[int | DatapointsQuery] = None, + external_id: None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery] = None, + instance_id: None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery] = None, + start: int | str | datetime.datetime | None = None, + end: int | str | datetime.datetime | None = None, + aggregates: Aggregate | str | list[Aggregate | str] | None = None, + granularity: str | None = None, + timezone: str | datetime.timezone | ZoneInfo | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + limit: int | None = None, + include_outside_points: bool = False, + ignore_unknown_ids: bool = False, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ) -> DatapointsArray | DatapointsArrayList | None: + """ + `Retrieve datapoints for one or more time series. `_ + + Note: + This method requires ``numpy`` to be installed. + + Time series support status codes like Good, Uncertain and Bad. You can read more in the Cognite Data Fusion developer documentation on + `status codes. `_ + + Args: + id (None | int | DatapointsQuery | Sequence[int | DatapointsQuery]): Id, dict (with id) or (mixed) sequence of these. See examples below. + external_id (None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery]): External id, dict (with external id) or (mixed) sequence of these. See examples below. + instance_id (None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery]): Instance id or sequence of instance ids. + start (int | str | datetime.datetime | None): Inclusive start. Default: 1970-01-01 UTC. + end (int | str | datetime.datetime | None): Exclusive end. Default: "now" + aggregates (Aggregate | str | list[Aggregate | str] | None): Single aggregate or list of aggregates to retrieve. Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned) + granularity (str | None): The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None. + timezone (str | datetime.timezone | ZoneInfo | None): For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, day or month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute. + target_unit (str | None): The unit_external_id of the datapoints returned. 
If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system. + target_unit_system (str | None): The unit system of the datapoints returned. Cannot be used with target_unit. + limit (int | None): Maximum number of datapoints to return for each time series. Default: None (no limit) + include_outside_points (bool): Whether to include outside points. Not allowed when fetching aggregates. Default: False + ignore_unknown_ids (bool): Whether to ignore missing time series rather than raising an exception. Default: False + include_status (bool): Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``. + ignore_bad_datapoints (bool): Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True. + treat_uncertain_as_bad (bool): Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True. + + Returns: + DatapointsArray | DatapointsArrayList | None: A ``DatapointsArray`` object containing the requested data, or a ``DatapointsArrayList`` if multiple time series were asked for (the ordering is ids first, then external_ids). If `ignore_unknown_ids` is `True`, a single time series is requested and it is not found, the function will return `None`. + + Note: + For many more usage examples, check out the :py:meth:`~DatapointsAPI.retrieve` method which accepts exactly the same arguments. + + When retrieving raw datapoints with ``ignore_bad_datapoints=False``, bad datapoints with the value NaN can not be distinguished from those + missing a value (due to being stored in a numpy array). To solve this, all missing values have their timestamp recorded in a set you may access: + ``dps.null_timestamps``. If you chose to pass a ``DatapointsArray`` to an insert method, this will be inspected automatically to replicate correctly + (inserting status codes will soon be supported). + + Examples: + + Get weekly ``min`` and ``max`` aggregates for a time series with id=42 since the year 2000, then compute the range of values: + + >>> from cognite.client import CogniteClient + >>> from datetime import datetime, timezone + >>> client = CogniteClient() + >>> dps = client.time_series.data.retrieve_arrays( + ... id=42, + ... start=datetime(2020, 1, 1, tzinfo=timezone.utc), + ... aggregates=["min", "max"], + ... granularity="7d") + >>> weekly_range = dps.max - dps.min + + Get up-to 2 million raw datapoints for the last 48 hours for a noisy time series with external_id="ts-noisy", + then use a small and wide moving average filter to smooth it out: + + >>> import numpy as np + >>> dps = client.time_series.data.retrieve_arrays( + ... external_id="ts-noisy", + ... start="2d-ago", + ... 
limit=2_000_000) + >>> smooth = np.convolve(dps.value, np.ones(5) / 5) # doctest: +SKIP + >>> smoother = np.convolve(dps.value, np.ones(20) / 20) # doctest: +SKIP + + Get raw datapoints for multiple time series, that may or may not exist, from the last 2 hours, then find the + largest gap between two consecutive values for all time series, also taking the previous value into account (outside point). + + >>> id_lst = [42, 43, 44] + >>> dps_lst = client.time_series.data.retrieve_arrays( + ... id=id_lst, + ... start="2h-ago", + ... include_outside_points=True, + ... ignore_unknown_ids=True) + >>> largest_gaps = [np.max(np.diff(dps.timestamp)) for dps in dps_lst] + + Get raw datapoints for a time series with external_id="bar" from the last 10 weeks, then convert to a ``pandas.Series`` + (you can of course also use the ``to_pandas()`` convenience method if you want a ``pandas.DataFrame``): + + >>> import pandas as pd + >>> dps = client.time_series.data.retrieve_arrays(external_id="bar", start="10w-ago") + >>> series = pd.Series(dps.value, index=dps.timestamp) + """ + return run_sync( + self.__async_client.time_series.data.retrieve_arrays( + id=id, + external_id=external_id, + instance_id=instance_id, + start=start, + end=end, + aggregates=aggregates, + granularity=granularity, + timezone=timezone, + target_unit=target_unit, + target_unit_system=target_unit_system, + limit=limit, + include_outside_points=include_outside_points, + ignore_unknown_ids=ignore_unknown_ids, + include_status=include_status, + ignore_bad_datapoints=ignore_bad_datapoints, + treat_uncertain_as_bad=treat_uncertain_as_bad, + ) + ) + + def retrieve_dataframe( + self, + *, + id: None | int | DatapointsQuery | Sequence[int | DatapointsQuery] = None, + external_id: None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery] = None, + instance_id: None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery] = None, + start: int | str | datetime.datetime | None = None, + end: int | str | datetime.datetime | None = None, + aggregates: Aggregate | str | list[Aggregate | str] | None = None, + granularity: str | None = None, + timezone: str | datetime.timezone | ZoneInfo | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + limit: int | None = None, + include_outside_points: bool = False, + ignore_unknown_ids: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + uniform_index: bool = False, + include_status: bool = False, + include_unit: bool = True, + include_aggregate_name: bool = True, + include_granularity_name: bool = False, + ) -> pd.DataFrame: + """ + Get datapoints directly in a pandas dataframe. + + Time series support status codes like Good, Uncertain and Bad. You can read more in the Cognite Data Fusion developer documentation on + `status codes. `_ + + Note: + For many more usage examples, check out the :py:meth:`~DatapointsAPI.retrieve` method which accepts exactly the same arguments. + + Args: + id (None | int | DatapointsQuery | Sequence[int | DatapointsQuery]): Id, DatapointsQuery or (mixed) sequence of these. See examples. + external_id (None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery]): External id, DatapointsQuery or (mixed) sequence of these. See examples. + instance_id (None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery]): Instance id, DatapointsQuery or (mixed) sequence of these. See examples. + start (int | str | datetime.datetime | None): Inclusive start. Default: 1970-01-01 UTC. 
+
+ end (int | str | datetime.datetime | None): Exclusive end. Default: "now"
+ aggregates (Aggregate | str | list[Aggregate | str] | None): Single aggregate or list of aggregates to retrieve. Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned)
+ granularity (str | None): The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None.
+ timezone (str | datetime.timezone | ZoneInfo | None): For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, day or month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute.
+ target_unit (str | None): The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system.
+ target_unit_system (str | None): The unit system of the datapoints returned. Cannot be used with target_unit.
+ limit (int | None): Maximum number of datapoints to return for each time series. Default: None (no limit)
+ include_outside_points (bool): Whether to include outside points. Not allowed when fetching aggregates. Default: False
+ ignore_unknown_ids (bool): Whether to ignore missing time series rather than raising an exception. Default: False
+ ignore_bad_datapoints (bool): Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True.
+ treat_uncertain_as_bad (bool): Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True.
+ uniform_index (bool): If only querying aggregates AND a single granularity is used (that's NOT a calendar granularity like month/quarter/year) AND no limit is used AND no timezone is used, specifying `uniform_index=True` will return a dataframe with an equidistant datetime index from the earliest `start` to the latest `end` (missing values will be NaNs). If these requirements are not met, a ValueError is raised. Default: False
+ include_status (bool): Also return the status code, an integer, for each datapoint in the response.
Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``. Also adds the status info as a separate level in the columns (MultiIndex). + include_unit (bool): Include the unit_external_id in the dataframe columns, if present (separate MultiIndex level) + include_aggregate_name (bool): Include aggregate in the dataframe columns, if present (separate MultiIndex level) + include_granularity_name (bool): Include granularity in the dataframe columns, if present (separate MultiIndex level) + + Returns: + pd.DataFrame: A pandas DataFrame containing the requested time series. The ordering of columns is ids first, then external_ids, and lastly instance_ids. For time series with multiple aggregates, they will be sorted in alphabetical order ("average" before "max"). + + Tip: + Pandas DataFrames have one shared index, so when you fetch datapoints from multiple time series, the final index will be + the union of all the timestamps. Thus, unless all time series have the exact same timestamps, the various columns will contain + NaNs to fill the "missing" values. For lower memory usage on unaligned data, use the :py:meth:`~DatapointsAPI.retrieve_arrays` method. + + Warning: + If you have duplicated time series in your query, the dataframe columns will also contain duplicates. + + When retrieving raw datapoints with ``ignore_bad_datapoints=False``, bad datapoints with the value NaN can not be distinguished from those + missing a value (due to being stored in a numpy array); all will become NaNs in the dataframe. + + Examples: + + Get a pandas dataframe using a single id, and use this id as column name, with no more than 100 datapoints: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> df = client.time_series.data.retrieve_dataframe( + ... id=12345, + ... start="2w-ago", + ... end="now", + ... limit=100, + ... column_names="id") + + Get the pandas dataframe with a uniform index (fixed spacing between points) of 1 day, for two time series with + individually specified aggregates, from 1990 through 2020: + + >>> from datetime import datetime, timezone + >>> from cognite.client.data_classes import DatapointsQuery + >>> df = client.time_series.data.retrieve_dataframe( + ... external_id=[ + ... DatapointsQuery(external_id="foo", aggregates="discrete_variance"), + ... DatapointsQuery(external_id="bar", aggregates=["total_variation", "continuous_variance"]), + ... ], + ... granularity="1d", + ... start=datetime(1990, 1, 1, tzinfo=timezone.utc), + ... end=datetime(2020, 12, 31, tzinfo=timezone.utc), + ... uniform_index=True) + + Get a pandas dataframe containing the 'average' aggregate for two time series using a monthly granularity, + starting Jan 1, 1970 all the way up to present, without having the aggregate name in the columns: + + >>> df = client.time_series.data.retrieve_dataframe( + ... external_id=["foo", "bar"], + ... aggregates="average", + ... granularity="1mo", + ... include_aggregate_name=False) + + You may also use ``pandas.Timestamp`` to define start and end. Here we fetch using instance_id: + + >>> import pandas as pd + >>> df = client.time_series.data.retrieve_dataframe( + ... instance_id=NodeId("my-space", "my-ts-xid"), + ... start=pd.Timestamp("2023-01-01"), + ... 
end=pd.Timestamp("2023-02-01")) + """ + return run_sync( + self.__async_client.time_series.data.retrieve_dataframe( + id=id, + external_id=external_id, + instance_id=instance_id, + start=start, + end=end, + aggregates=aggregates, + granularity=granularity, + timezone=timezone, + target_unit=target_unit, + target_unit_system=target_unit_system, + limit=limit, + include_outside_points=include_outside_points, + ignore_unknown_ids=ignore_unknown_ids, + ignore_bad_datapoints=ignore_bad_datapoints, + treat_uncertain_as_bad=treat_uncertain_as_bad, + uniform_index=uniform_index, + include_status=include_status, + include_unit=include_unit, + include_aggregate_name=include_aggregate_name, + include_granularity_name=include_granularity_name, + ) + ) + + @overload + def retrieve_latest( + self, + id: int | LatestDatapointQuery, + *, + before: None | int | str | datetime.datetime = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ignore_unknown_ids: bool = False, + ) -> Datapoints | None: ... + + @overload + def retrieve_latest( + self, + id: Sequence[int | LatestDatapointQuery], + *, + before: None | int | str | datetime.datetime = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ignore_unknown_ids: bool = False, + ) -> DatapointsList: ... + + @overload + def retrieve_latest( + self, + *, + id: int | LatestDatapointQuery, + before: None | int | str | datetime.datetime = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ignore_unknown_ids: bool = False, + ) -> Datapoints | None: ... + + @overload + def retrieve_latest( + self, + *, + id: Sequence[int | LatestDatapointQuery], + before: None | int | str | datetime.datetime = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ignore_unknown_ids: bool = False, + ) -> DatapointsList: ... + + @overload + def retrieve_latest( + self, + *, + external_id: str | LatestDatapointQuery, + before: None | int | str | datetime.datetime = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ignore_unknown_ids: bool = False, + ) -> Datapoints | None: ... + + @overload + def retrieve_latest( + self, + *, + external_id: SequenceNotStr[str | LatestDatapointQuery], + before: None | int | str | datetime.datetime = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ignore_unknown_ids: bool = False, + ) -> DatapointsList: ... + + @overload + def retrieve_latest( + self, + *, + instance_id: NodeId | LatestDatapointQuery, + before: None | int | str | datetime.datetime = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ignore_unknown_ids: bool = False, + ) -> Datapoints | None: ... 
+ + @overload + def retrieve_latest( + self, + *, + instance_id: Sequence[NodeId | LatestDatapointQuery], + external_id: None = None, + before: None | int | str | datetime.datetime = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ignore_unknown_ids: bool = False, + ) -> DatapointsList: ... + + @overload + def retrieve_latest( + self, + *, + id: int | LatestDatapointQuery | Sequence[int | LatestDatapointQuery] | None, + external_id: str | LatestDatapointQuery | SequenceNotStr[str | LatestDatapointQuery] | None, + instance_id: NodeId | LatestDatapointQuery | Sequence[NodeId | LatestDatapointQuery] | None, + before: None | int | str | datetime.datetime = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ignore_unknown_ids: bool = False, + ) -> DatapointsList: ... + + @overload + def retrieve_latest( + self, + *, + id: int | LatestDatapointQuery | Sequence[int | LatestDatapointQuery] | None, + external_id: str | LatestDatapointQuery | SequenceNotStr[str | LatestDatapointQuery] | None, + before: None | int | str | datetime.datetime = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ignore_unknown_ids: bool = False, + ) -> DatapointsList: ... + + @overload + def retrieve_latest( + self, + *, + id: int | LatestDatapointQuery | Sequence[int | LatestDatapointQuery] | None, + instance_id: NodeId | LatestDatapointQuery | Sequence[NodeId | LatestDatapointQuery] | None, + before: None | int | str | datetime.datetime = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ignore_unknown_ids: bool = False, + ) -> DatapointsList: ... + + @overload + def retrieve_latest( + self, + *, + external_id: str | LatestDatapointQuery | SequenceNotStr[str | LatestDatapointQuery] | None, + instance_id: NodeId | LatestDatapointQuery | Sequence[NodeId | LatestDatapointQuery] | None, + before: None | int | str | datetime.datetime = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ignore_unknown_ids: bool = False, + ) -> DatapointsList: ... + + def retrieve_latest( + self, + id: int | LatestDatapointQuery | Sequence[int | LatestDatapointQuery] | None = None, + external_id: str | LatestDatapointQuery | SequenceNotStr[str | LatestDatapointQuery] | None = None, + instance_id: NodeId | LatestDatapointQuery | Sequence[NodeId | LatestDatapointQuery] | None = None, + before: None | int | str | datetime.datetime = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ignore_unknown_ids: bool = False, + ) -> Datapoints | DatapointsList | None: + """ + `Get the latest datapoint for one or more time series `_ + + Time series support status codes like Good, Uncertain and Bad. You can read more in the Cognite Data Fusion developer documentation on + `status codes. 
`_ + + Args: + id (int | LatestDatapointQuery | Sequence[int | LatestDatapointQuery] | None): Id or list of ids. + external_id (str | LatestDatapointQuery | SequenceNotStr[str | LatestDatapointQuery] | None): External id or list of external ids. + instance_id (NodeId | LatestDatapointQuery | Sequence[NodeId | LatestDatapointQuery] | None): Instance id or list of instance ids. + before (None | int | str | datetime.datetime): Get latest datapoint before this time. Not used when passing 'LatestDatapointQuery'. + target_unit (str | None): The unit_external_id of the datapoint returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system. + target_unit_system (str | None): The unit system of the datapoint returned. Cannot be used with target_unit. + include_status (bool): Also return the status code, an integer, for each datapoint in the response. + ignore_bad_datapoints (bool): Prevent datapoints with a bad status code to be returned. Default: True. + treat_uncertain_as_bad (bool): Treat uncertain status codes as bad. If false, treat uncertain as good. Default: True. + ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + + Returns: + Datapoints | DatapointsList | None: A Datapoints object containing the requested data, or a DatapointsList if multiple were requested. If `ignore_unknown_ids` is `True`, a single time series is requested and it is not found, the function will return `None`. + + Examples: + + Getting the latest datapoint in a time series. This method returns a Datapoints object, so the datapoint + (if it exists) will be the first element: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.time_series.data.retrieve_latest(id=1)[0] + + You can also use external_id or instance_id; single identifier or list of identifiers: + + >>> from cognite.client.data_classes.data_modeling import NodeId + >>> res = client.time_series.data.retrieve_latest( + ... external_id=["foo", "bar"], + ... instance_id=NodeId("my-space", "my-ts-xid")) + + You can also get the first datapoint before a specific time: + + >>> res = client.time_series.data.retrieve_latest(id=1, before="2d-ago")[0] + + You can also get the first datapoint before a specific time in the future e.g. forecast data: + + >>> res = client.time_series.data.retrieve_latest(id=1, before="2d-ahead")[0] + + You can also retrieve the datapoint in a different unit or unit system: + + >>> res = client.time_series.data.retrieve_latest(id=1, target_unit="temperature:deg_f")[0] + >>> res = client.time_series.data.retrieve_latest(id=1, target_unit_system="Imperial")[0] + + You may also pass an instance of LatestDatapointQuery: + + >>> from cognite.client.data_classes import LatestDatapointQuery + >>> res = client.time_series.data.retrieve_latest(id=LatestDatapointQuery(id=1, before=60_000))[0] + + If you need the latest datapoint for multiple time series, simply give a list of ids. Note that we are + using external ids here, but either will work: + + >>> res = client.time_series.data.retrieve_latest(external_id=["abc", "def"]) + >>> latest_abc = res[0][0] + >>> latest_def = res[1][0] + + If you for example need to specify a different value of 'before' for each time series, you may pass several + LatestDatapointQuery objects. 
These will override any parameter passed directly to the function and also allow
+ for individual customisation of 'target_unit', 'target_unit_system', 'include_status', 'ignore_bad_datapoints'
+ and 'treat_uncertain_as_bad'.
+
+ >>> from datetime import datetime, timezone
+ >>> id_queries = [
+ ... 123,
+ ... LatestDatapointQuery(id=456, before="1w-ago"),
+ ... LatestDatapointQuery(id=789, before=datetime(2018,1,1, tzinfo=timezone.utc)),
+ ... LatestDatapointQuery(id=987, target_unit="temperature:deg_f")]
+ >>> ext_id_queries = [
+ ... "foo",
+ ... LatestDatapointQuery(external_id="abc", before="3h-ago", target_unit_system="Imperial"),
+ ... LatestDatapointQuery(external_id="def", include_status=True),
+ ... LatestDatapointQuery(external_id="ghi", treat_uncertain_as_bad=False),
+ ... LatestDatapointQuery(external_id="jkl", include_status=True, ignore_bad_datapoints=False)]
+ >>> res = client.time_series.data.retrieve_latest(
+ ... id=id_queries, external_id=ext_id_queries)
+ """
+ return run_sync(
+ self.__async_client.time_series.data.retrieve_latest(
+ id=id,
+ external_id=external_id,
+ instance_id=instance_id,
+ before=before,
+ target_unit=target_unit,
+ target_unit_system=target_unit_system,
+ include_status=include_status,
+ ignore_bad_datapoints=ignore_bad_datapoints,
+ treat_uncertain_as_bad=treat_uncertain_as_bad,
+ ignore_unknown_ids=ignore_unknown_ids,
+ )
+ )
+
+ def insert(
+ self,
+ datapoints: Datapoints
+ | DatapointsArray
+ | Sequence[dict[str, int | float | str | datetime.datetime]]
+ | Sequence[
+ tuple[int | float | datetime.datetime, int | float | str]
+ | tuple[int | float | datetime.datetime, int | float | str, int]
+ ],
+ id: int | None = None,
+ external_id: str | None = None,
+ instance_id: NodeId | None = None,
+ ) -> None:
+ """
+ Insert datapoints into a time series
+
+ Timestamps can be represented as milliseconds since epoch or datetime objects. Note that naive datetimes
+ are interpreted to be in the local timezone (not UTC), adhering to Python conventions for datetime handling.
+
+ Time series support status codes like Good, Uncertain and Bad. You can read more in the Cognite Data Fusion developer documentation on
+ `status codes. `_
+
+ Args:
+ datapoints (Datapoints | DatapointsArray | Sequence[dict[str, int | float | str | datetime.datetime]] | Sequence[tuple[int | float | datetime.datetime, int | float | str] | tuple[int | float | datetime.datetime, int | float | str, int]]): The datapoints you wish to insert. Can either be a list of tuples, a list of dictionaries, a Datapoints object or a DatapointsArray object. See examples below.
+ id (int | None): Id of time series to insert datapoints into.
+ external_id (str | None): External id of time series to insert datapoints into.
+ instance_id (NodeId | None): Instance ID of time series to insert datapoints into.
+
+ Note:
+ All datapoints inserted without a status code (or symbol) are assumed to be good (code 0). To mark a value, pass
+ either the status code (int) or status symbol (str). Only one of code and symbol is required. If both are given,
+ they must match or an API error will be raised.
+
+ Datapoints marked bad can take on any of the following values: None (missing), NaN, and +/- Infinity. They are also not
+ restricted by the normal numeric range [-1e100, 1e100] (i.e. they can be any valid float64).
+
+ Examples:
+
+ Your datapoints can be a list of tuples where the first element is the timestamp and the second element is the value.
+ The third element is optional and may contain the status code for the datapoint. To pass by symbol, a dictionary must be used. + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import StatusCode + >>> from datetime import datetime, timezone + >>> client = CogniteClient() + >>> datapoints = [ + ... (datetime(2018,1,1, tzinfo=timezone.utc), 1000), + ... (datetime(2018,1,2, tzinfo=timezone.utc), 2000, StatusCode.Good), + ... (datetime(2018,1,3, tzinfo=timezone.utc), 3000, StatusCode.Uncertain), + ... (datetime(2018,1,4, tzinfo=timezone.utc), None, StatusCode.Bad), + ... ] + >>> client.time_series.data.insert(datapoints, id=1) + + The timestamp can be given by datetime as above, or in milliseconds since epoch. Status codes can also be + passed as normal integers; this is necessary if a subcategory or modifier flag is needed, e.g. 3145728: 'GoodClamped': + + >>> from cognite.client.data_classes.data_modeling import NodeId + >>> datapoints = [ + ... (150000000000, 1000), + ... (160000000000, 2000, 3145728), + ... (170000000000, 2000, 2147483648), # Same as StatusCode.Bad + ... ] + >>> client.time_series.data.insert(datapoints, instance_id=NodeId("my-space", "my-ts-xid")) + + Or they can be a list of dictionaries: + + >>> import math + >>> datapoints = [ + ... {"timestamp": 150000000000, "value": 1000}, + ... {"timestamp": 160000000000, "value": 2000}, + ... {"timestamp": 170000000000, "value": 3000, "status": {"code": 0}}, + ... {"timestamp": 180000000000, "value": 4000, "status": {"symbol": "Uncertain"}}, + ... {"timestamp": 190000000000, "value": math.nan, "status": {"code": StatusCode.Bad, "symbol": "Bad"}}, + ... ] + >>> client.time_series.data.insert(datapoints, external_id="abcd") + + Or they can be a Datapoints or DatapointsArray object (with raw datapoints only). Note that the id or external_id + set on these objects are not inspected/used (as they belong to the "from-time-series", and not the "to-time-series"), + and so you must explicitly pass the identifier of the time series you want to insert into, which in this example is + `external_id="foo"`. + + If the Datapoints or DatapointsArray are fetched with status codes, these will be automatically used in the insert: + + >>> data = client.time_series.data.retrieve( + ... external_id="abc", + ... start="1w-ago", + ... end="now", + ... include_status=True, + ... ignore_bad_datapoints=False, + ... ) + >>> client.time_series.data.insert(data, external_id="foo") + """ + return run_sync( + self.__async_client.time_series.data.insert( + datapoints=datapoints, id=id, external_id=external_id, instance_id=instance_id + ) + ) + + def insert_multiple( + self, datapoints: list[dict[str, str | int | list | Datapoints | DatapointsArray | NodeId]] + ) -> None: + """ + `Insert datapoints into multiple time series `_ + + Timestamps can be represented as milliseconds since epoch or datetime objects. Note that naive datetimes + are interpreted to be in the local timezone (not UTC), adhering to Python conventions for datetime handling. + + Time series support status codes like Good, Uncertain and Bad. You can read more in the Cognite Data Fusion developer documentation on + `status codes. `_ + + Args: + datapoints (list[dict[str, str | int | list | Datapoints | DatapointsArray | NodeId]]): The datapoints you wish to insert along with the ids of the time series. See examples below. + + Note: + All datapoints inserted without a status code (or symbol) is assumed to be good (code 0). 
To mark a value, pass + either the status code (int) or status symbol (str). Only one of code and symbol is required. If both are given, + they must match or an API error will be raised. + + Datapoints marked bad can take on any of the following values: None (missing), NaN, and +/- Infinity. It is also not + restricted by the normal numeric range [-1e100, 1e100] (i.e. can be any valid float64). + + Examples: + + Your datapoints can be a list of dictionaries, each containing datapoints for a different (presumably) time series. These dictionaries + must have the key "datapoints" (containing the data) specified as a ``Datapoints`` object, a ``DatapointsArray`` object, or list of either + tuples `(timestamp, value)` or dictionaries, `{"timestamp": ts, "value": value}`. + + When passing tuples, the third element is optional and may contain the status code for the datapoint. To pass by symbol, a dictionary must be used. + + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.data_modeling import NodeId + >>> from cognite.client.data_classes import StatusCode + >>> from datetime import datetime, timezone + >>> client = CogniteClient() + >>> to_insert = [ + ... {"id": 1, "datapoints": [ + ... (datetime(2018,1,1, tzinfo=timezone.utc), 1000), + ... (datetime(2018,1,2, tzinfo=timezone.utc), 2000, StatusCode.Good)], + ... }, + ... {"external_id": "foo", "datapoints": [ + ... (datetime(2018,1,3, tzinfo=timezone.utc), 3000), + ... (datetime(2018,1,4, tzinfo=timezone.utc), 4000, StatusCode.Uncertain)], + ... }, + ... {"instance_id": NodeId("my-space", "my-ts-xid"), "datapoints": [ + ... (datetime(2018,1,5, tzinfo=timezone.utc), 5000), + ... (datetime(2018,1,6, tzinfo=timezone.utc), None, StatusCode.Bad)], + ... } + ... ] + + Passing datapoints using the dictionary format with timestamp given in milliseconds since epoch: + + >>> import math + >>> to_insert.append( + ... {"external_id": "bar", "datapoints": [ + ... {"timestamp": 170000000, "value": 7000}, + ... {"timestamp": 180000000, "value": 8000, "status": {"symbol": "Uncertain"}}, + ... {"timestamp": 190000000, "value": None, "status": {"code": StatusCode.Bad}}, + ... {"timestamp": 200000000, "value": math.inf, "status": {"code": StatusCode.Bad, "symbol": "Bad"}}, + ... ]}) + + If the Datapoints or DatapointsArray are fetched with status codes, these will be automatically used in the insert: + + >>> data_to_clone = client.time_series.data.retrieve( + ... external_id="bar", include_status=True, ignore_bad_datapoints=False) + >>> to_insert.append({"external_id": "bar-clone", "datapoints": data_to_clone}) + >>> client.time_series.data.insert_multiple(to_insert) + """ + return run_sync(self.__async_client.time_series.data.insert_multiple(datapoints=datapoints)) + + def delete_range( + self, + start: int | str | datetime.datetime, + end: int | str | datetime.datetime, + id: int | None = None, + external_id: str | None = None, + instance_id: NodeId | None = None, + ) -> None: + """ + Delete a range of datapoints from a time series. 
+
+ Args:
+ start (int | str | datetime.datetime): Inclusive start of delete range
+ end (int | str | datetime.datetime): Exclusive end of delete range
+ id (int | None): Id of time series to delete data from
+ external_id (str | None): External id of time series to delete data from
+ instance_id (NodeId | None): Instance ID of time series to delete data from
+
+ Examples:
+
+ Deleting the last week of data from a time series:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.time_series.data.delete_range(start="1w-ago", end="now", id=1)
+
+ Deleting the data from now until 2 days in the future from a time series containing e.g. forecasted data:
+
+ >>> client.time_series.data.delete_range(start="now", end="2d-ahead", id=1)
+ """
+ return run_sync(
+ self.__async_client.time_series.data.delete_range(
+ start=start, end=end, id=id, external_id=external_id, instance_id=instance_id
+ )
+ )
+
+ def delete_ranges(self, ranges: list[dict[str, Any]]) -> None:
+ """
+ `Delete a range of datapoints from multiple time series. `_
+
+ Args:
+ ranges (list[dict[str, Any]]): The list of datapoint ids along with time range to delete. See examples below.
+
+ Examples:
+
+ Each element in the list ranges must specify either id or external_id, and a range:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> ranges = [{"id": 1, "start": "2d-ago", "end": "now"},
+ ... {"external_id": "abc", "start": "2d-ago", "end": "2d-ahead"}]
+ >>> client.time_series.data.delete_ranges(ranges)
+ """
+ return run_sync(self.__async_client.time_series.data.delete_ranges(ranges=ranges))
+
+ def insert_dataframe(self, df: pd.DataFrame, dropna: bool = True) -> None:
+ """
+ Insert a dataframe containing datapoints to one or more time series.
+
+ The index of the dataframe must contain the timestamps (pd.DatetimeIndex). The column identifiers
+ must contain the IDs (``int``), external IDs (``str``) or instance IDs (``NodeId`` or 2-tuple (space, ext. ID))
+ of the already existing time series to which the datapoints from that particular column will be written.
+
+ Note:
+ The column identifiers must be unique.
+
+ Args:
+ df (pd.DataFrame): Pandas DataFrame object containing the time series.
+ dropna (bool): Set to True to ignore NaNs in the given DataFrame, applied per column. Default: True.
+
+ Warning:
+ You cannot insert datapoints with status codes using this method (``insert_dataframe``), you'll need
+ to use the :py:meth:`~DatapointsAPI.insert` method instead (or :py:meth:`~DatapointsAPI.insert_multiple`)!
+
+ Examples:
+ Post a dataframe with white noise to three time series, one using ID, one using external id
+ and one using instance id:
+
+ >>> import numpy as np
+ >>> import pandas as pd
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> from cognite.client.data_classes.data_modeling import NodeId
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> node_id = NodeId("my-space", "my-ts-xid")
+ >>> df = pd.DataFrame(
+ ... {
+ ... 123: np.random.normal(0, 1, 100),
+ ... "foo": np.random.normal(0, 1, 100),
+ ... node_id: np.random.normal(0, 1, 100),
+ ... },
+ ... index=pd.date_range(start="2018-01-01", periods=100, freq="1d")
+ ...
) + >>> client.time_series.data.insert_dataframe(df) + """ + return run_sync(self.__async_client.time_series.data.insert_dataframe(df=df, dropna=dropna)) diff --git a/cognite/client/_sync_api/datapoints_subscriptions.py b/cognite/client/_sync_api/datapoints_subscriptions.py new file mode 100644 index 0000000000..b7f33333f3 --- /dev/null +++ b/cognite/client/_sync_api/datapoints_subscriptions.py @@ -0,0 +1,314 @@ +""" +=============================================================================== +78f40e7a27949090a0e6c70356c9f300 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator +from typing import TYPE_CHECKING, Literal, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.datapoints_subscriptions import ( + DatapointSubscription, + DatapointSubscriptionBatch, + DatapointSubscriptionList, + DataPointSubscriptionUpdate, + DataPointSubscriptionWrite, + TimeSeriesIDList, +) +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncDatapointsSubscriptionAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[DatapointSubscription]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[DatapointSubscriptionList]: ... + + def __call__( + self, chunk_size: int | None = None, limit: int | None = None + ) -> Iterator[DatapointSubscription | DatapointSubscriptionList]: + """ + Iterate over all datapoint subscriptions. + + Args: + chunk_size (int | None): The number of datapoint subscriptions to fetch per request. Defaults to yielding one datapoint subscription at a time. + limit (int | None): Maximum number of items to return. Defaults to return all datapoint subscriptions. + + Yields: + DatapointSubscription | DatapointSubscriptionList: Yields datapoint subscriptions one by one if chunk is not specified, otherwise returns a list of datapoint subscriptions. + """ + yield from SyncIterator(self.__async_client.time_series.subscriptions(chunk_size=chunk_size, limit=limit)) + + def create(self, subscription: DataPointSubscriptionWrite) -> DatapointSubscription: + """ + `Create a subscription `_ + + Create a subscription that can be used to listen for changes in data points for a set of time series. + + Args: + subscription (DataPointSubscriptionWrite): Subscription to create. + + Returns: + DatapointSubscription: Created subscription + + Examples: + + Create a subscription with explicit time series IDs: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import DataPointSubscriptionWrite + >>> client = CogniteClient() + >>> sub = DataPointSubscriptionWrite( + ... external_id="my_subscription", + ... name="My subscription", + ... partition_count=1, + ... 
time_series_ids=["myFistTimeSeries", "mySecondTimeSeries"]) + >>> created = client.time_series.subscriptions.create(sub) + + Create a subscription with explicit time series IDs given as Node IDs + either from CogniteTimeSeries or an extension of CogniteTimeseries: + + >>> from cognite.client.data_classes import DataPointSubscriptionWrite + >>> from cognite.client.data_classes.data_modeling import NodeId + >>> sub = DataPointSubscriptionWrite( + ... external_id="my_subscription", + ... name="My subscription with Data Model Ids", + ... partition_count=1, + ... instance_ids=[NodeId("my_space", "myFistTimeSeries"), NodeId("my_space", "mySecondTimeSeries")]) + >>> created = client.time_series.subscriptions.create(sub) + + Create a filter defined subscription for all numeric time series that are stepwise: + + >>> from cognite.client.data_classes import DataPointSubscriptionWrite + >>> from cognite.client.data_classes import filters as flt + >>> from cognite.client.data_classes.datapoints_subscriptions import DatapointSubscriptionProperty + >>> is_numeric_stepwise = flt.And( + ... flt.Equals(DatapointSubscriptionProperty.is_string, False), + ... flt.Equals(DatapointSubscriptionProperty.is_step, True)) + >>> sub = DataPointSubscriptionWrite( + ... external_id="my_subscription", + ... name="My subscription for numeric, stepwise time series", + ... partition_count=1, + ... filter=is_numeric_stepwise) + >>> created = client.time_series.subscriptions.create(sub) + """ + return run_sync(self.__async_client.time_series.subscriptions.create(subscription=subscription)) + + def delete(self, external_id: str | SequenceNotStr[str], ignore_unknown_ids: bool = False) -> None: + """ + `Delete subscription(s). This operation cannot be undone. `_ + + Args: + external_id (str | SequenceNotStr[str]): External ID or list of external IDs of subscriptions to delete. + ignore_unknown_ids (bool): Whether to ignore IDs and external IDs that are not found rather than throw an exception. + + Examples: + + Delete a subscription by external ID: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.time_series.subscriptions.delete("my_subscription") + """ + return run_sync( + self.__async_client.time_series.subscriptions.delete( + external_id=external_id, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + def retrieve(self, external_id: str) -> DatapointSubscription | None: + """ + `Retrieve one subscription by external ID. `_ + + Args: + external_id (str): External ID of the subscription to retrieve. + + Returns: + DatapointSubscription | None: The requested subscription. + + Examples: + + Retrieve a subscription by external ID: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.time_series.subscriptions.retrieve("my_subscription") + """ + return run_sync(self.__async_client.time_series.subscriptions.retrieve(external_id=external_id)) + + def list_member_time_series(self, external_id: str, limit: int | None = DEFAULT_LIMIT_READ) -> TimeSeriesIDList: + """ + `List time series in a subscription `_ + + Retrieve a list of time series (IDs) that the subscription is currently retrieving updates from + + Args: + external_id (str): External ID of the subscription to retrieve members of. + limit (int | None): Maximum number of time series to return. Defaults to 25. 
Set to -1, float("inf") or None to return all items. + + Returns: + TimeSeriesIDList: List of time series in the subscription. + + Examples: + + List time series in a subscription: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import DataPointSubscriptionUpdate + >>> client = CogniteClient() + >>> members = client.time_series.subscriptions.list_member_time_series("my_subscription") + >>> timeseries_external_ids = members.as_external_ids() + """ + return run_sync( + self.__async_client.time_series.subscriptions.list_member_time_series(external_id=external_id, limit=limit) + ) + + def update( + self, + update: DataPointSubscriptionUpdate | DataPointSubscriptionWrite, + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> DatapointSubscription: + """ + `Update a subscriptions `_ + + Update a subscription. Note that Fields that are not included in the request are not changed. + Furthermore, the subscription partition cannot be changed. + + Args: + update (DataPointSubscriptionUpdate | DataPointSubscriptionWrite): The subscription update. + mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (DataPointSubscriptionWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. + + Returns: + DatapointSubscription: Updated subscription. + + Examples: + + Change the name of a preexisting subscription: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import DataPointSubscriptionUpdate + >>> client = CogniteClient() + >>> update = DataPointSubscriptionUpdate("my_subscription").name.set("My New Name") + >>> updated = client.time_series.subscriptions.update(update) + + + Add a time series to a preexisting subscription: + + >>> from cognite.client.data_classes import DataPointSubscriptionUpdate + >>> update = DataPointSubscriptionUpdate("my_subscription").time_series_ids.add(["MyNewTimeSeriesExternalId"]) + >>> updated = client.time_series.subscriptions.update(update) + """ + return run_sync(self.__async_client.time_series.subscriptions.update(update=update, mode=mode)) + + def iterate_data( + self, + external_id: str, + start: str | None = None, + limit: int = DEFAULT_LIMIT_READ, + partition: int = 0, + poll_timeout: int = 5, + cursor: str | None = None, + include_status: bool = False, + ignore_bad_datapoints: bool = True, + treat_uncertain_as_bad: bool = True, + ) -> Iterator[DatapointSubscriptionBatch]: + """ + `Iterate over data from a given subscription. `_ + + Data can be ingested datapoints and time ranges where data is deleted. This endpoint will also return changes to + the subscription itself, that is, if time series are added or removed from the subscription. + + Warning: + This endpoint will store updates from when the subscription was created, but updates + older than 7 days may be discarded. + + Args: + external_id (str): The external ID of the subscription. + start (str | None): When to start the iteration. If set to None, the iteration will start from the beginning. The format is "N[timeunit]-ago", where timeunit is w,d,h,m (week, day, hour, minute). For example, "12h-ago" will start the iteration from 12 hours ago. 
You can also set it to "now" to jump straight to the end. Defaults to None. + limit (int): Approximate number of results to return across all partitions. + partition (int): The partition to iterate over. Defaults to 0. + poll_timeout (int): How many seconds to wait for new data, until an empty response is sent. Defaults to 5. + cursor (str | None): Optional cursor to start iterating from. + include_status (bool): Also return the status code, an integer, for each datapoint in the response. + ignore_bad_datapoints (bool): Do not return bad datapoints. Default: True. + treat_uncertain_as_bad (bool): Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Default: True. + + Yields: + DatapointSubscriptionBatch: Changes to the subscription and data in the subscribed time series. + + Examples: + + Iterate over changes to subscription timeseries since the beginning until there is no more data: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> for batch in client.time_series.subscriptions.iterate_data("my_subscription"): + ... # Changes to the subscription itself: + ... print(f"Added {len(batch.subscription_changes.added)} timeseries") + ... print(f"Removed {len(batch.subscription_changes.removed)} timeseries") + ... print(f"Changed timeseries data in {len(batch.updates)} updates") + ... # Changes to datapoints for time series in the subscription: + ... for update in batch.updates: + ... upserts.time_series # The time series the update belongs to + ... upserts.upserts # The upserted datapoints, if any + ... upserts.deletes # Ranges of deleted periods, if any + ... if not batch.has_next: + ... break + + Iterate continuously over all changes to the subscription newer than 3 days: + + >>> for batch in client.time_series.subscriptions.iterate_data("my_subscription", "3d-ago"): + ... pass # do something + """ + yield from SyncIterator( + self.__async_client.time_series.subscriptions.iterate_data( + external_id=external_id, + start=start, + limit=limit, + partition=partition, + poll_timeout=poll_timeout, + cursor=cursor, + include_status=include_status, + ignore_bad_datapoints=ignore_bad_datapoints, + treat_uncertain_as_bad=treat_uncertain_as_bad, + ) + ) + + def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> DatapointSubscriptionList: + """ + `List data point subscriptions `_ + + Args: + limit (int | None): Maximum number of subscriptions to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + Returns: + DatapointSubscriptionList: List of requested datapoint subscriptions + + Examples: + + List 5 subscriptions: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> subscriptions = client.time_series.subscriptions.list(limit=5) + """ + return run_sync(self.__async_client.time_series.subscriptions.list(limit=limit)) diff --git a/cognite/client/_sync_api/diagrams.py b/cognite/client/_sync_api/diagrams.py new file mode 100644 index 0000000000..ffba1034c1 --- /dev/null +++ b/cognite/client/_sync_api/diagrams.py @@ -0,0 +1,217 @@ +""" +=============================================================================== +0abf6d100ed5bb2cc4a7c6be6ecff503 +This file is auto-generated from the Async API modules, - do not edit manually! 
+=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import TYPE_CHECKING, Any, Literal, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes._base import CogniteResource +from cognite.client.data_classes.contextualization import ( + DetectJobBundle, + DiagramConvertResults, + DiagramDetectConfig, + DiagramDetectResults, + FileReference, +) +from cognite.client.data_classes.data_modeling import NodeId +from cognite.client.utils._async_helpers import run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncDiagramsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def detect( + self, + entities: Sequence[dict | CogniteResource], + search_field: str = "name", + partial_match: bool = False, + min_tokens: int = 2, + file_ids: int | Sequence[int] | None = None, + file_external_ids: str | SequenceNotStr[str] | None = None, + file_instance_ids: NodeId | Sequence[NodeId] | None = None, + file_references: list[FileReference] | FileReference | None = None, + pattern_mode: bool = False, + configuration: DiagramDetectConfig | None = None, + *, + multiple_jobs: Literal[False] = False, + ) -> DiagramDetectResults: ... + + @overload + def detect( + self, + entities: Sequence[dict | CogniteResource], + search_field: str = "name", + partial_match: bool = False, + min_tokens: int = 2, + file_ids: int | Sequence[int] | None = None, + file_external_ids: str | SequenceNotStr[str] | None = None, + file_instance_ids: NodeId | Sequence[NodeId] | None = None, + file_references: list[FileReference] | FileReference | None = None, + pattern_mode: bool = False, + configuration: DiagramDetectConfig | None = None, + *, + multiple_jobs: Literal[True], + ) -> tuple[DetectJobBundle, list[dict[str, Any]]]: ... + + def detect( + self, + entities: Sequence[dict | CogniteResource], + search_field: str = "name", + partial_match: bool = False, + min_tokens: int = 2, + file_ids: int | Sequence[int] | None = None, + file_external_ids: str | SequenceNotStr[str] | None = None, + file_instance_ids: NodeId | Sequence[NodeId] | None = None, + file_references: list[FileReference] | FileReference | None = None, + pattern_mode: bool | None = None, + configuration: DiagramDetectConfig | None = None, + *, + multiple_jobs: bool = False, + ) -> DiagramDetectResults | tuple[DetectJobBundle, list[dict[str, Any]]]: + """ + `Detect annotations in engineering diagrams `_ + + Note: + All users on this CDF subscription with assets read-all and files read-all capabilities in the project, + are able to access the data sent to this endpoint. + + Args: + entities (Sequence[dict | CogniteResource]): List of entities to detect + search_field (str): If entities is a list of dictionaries, this is the key to the values to detect in the PnId + partial_match (bool): Allow for a partial match (e.g. missing prefix). + min_tokens (int): Minimal number of tokens a match must be based on + file_ids (int | Sequence[int] | None): ID of the files, should already be uploaded in the same tenant. + file_external_ids (str | SequenceNotStr[str] | None): File external ids, alternative to file_ids and file_references. 
+ file_instance_ids (NodeId | Sequence[NodeId] | None): Files to detect in, specified by instance id. + file_references (list[FileReference] | FileReference | None): File references (id, external_id or instance_id), and first_page and last_page to specify page ranges per file. Each reference can specify up to 50 pages. Providing a page range will also make the page count of the document a part of the response. + pattern_mode (bool | None): If True, entities must be provided with a sample field. This enables detecting tags that are similar to the sample, but not necessarily identical. Defaults to None. + configuration (DiagramDetectConfig | None): Additional configuration for the detect algorithm. See `DiagramDetectConfig` class documentation and `beta API docs `_. + multiple_jobs (bool): Enables you to publish multiple jobs. If True the method returns a tuple of DetectJobBundle and list of potentially unposted files. If False it will return a single DiagramDetectResults. Defaults to False. + Returns: + DiagramDetectResults | tuple[DetectJobBundle, list[dict[str, Any]]]: Resulting queued job or a bundle of jobs and a list of unposted files. Note that the .result property of the job or job bundle will block waiting for results. + + Note: + The results are not written to CDF, to create annotations based on detected entities use `AnnotationsAPI`. + + Examples: + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.contextualization import FileReference + >>> client = CogniteClient() + >>> detect_job = client.diagrams.detect( + ... entities=[ + ... {"userDefinedField": "21PT1017","ignoredField": "AA11"}, + ... {"userDefinedField": "21PT1018"}], + ... search_field="userDefinedField", + ... partial_match=True, + ... min_tokens=2, + ... file_ids=[101], + ... file_external_ids=["Test1"], + ... file_references=[ + ... FileReference(id=20, first_page=1, last_page=10), + ... FileReference(external_id="ext_20", first_page=11, last_page=20) + ... ]) + >>> result = detect_job.get_result() + >>> print(result) + + { + 'items': [ + {'fileId': 101, 'annotations': []}, + {'fileExternalId': 'Test1', 'fileId: 1, 'annotations': []}, + {'fileId': 20, 'fileExternalId': 'ext_20', 'annotations': [], 'pageCount': 17}, + { + 'fileId': 20, + 'fileExternalId': 'ext_20', + 'annotations': [ + { + 'text': '21PT1017', + 'entities': [{"userDefinedField": "21PT1017","ignoredField": "AA11"}], + 'region': { + 'page': 12, + 'shape': 'rectangle', + 'vertices': [ + {'x': 0.01, 'y': 0.01}, + {'x': 0.01, 'y': 0.02}, + {'x': 0.02, 'y': 0.02}, + {'x': 0.02, 'y': 0.01} + ] + } + } + ], + 'pageCount': 17 + } + ] + } + + + To use beta configuration options you can use a dictionary or `DiagramDetectConfig` object for convenience: + + >>> from cognite.client.data_classes.contextualization import ConnectionFlags, DiagramDetectConfig + >>> config = DiagramDetectConfig( + ... remove_leading_zeros=True, + ... connection_flags=ConnectionFlags( + ... no_text_inbetween=True, + ... natural_reading_order=True, + ... ) + ... ) + >>> job = client.diagrams.detect(entities=[{"name": "A1"}], file_id=123, config=config) + + Check the documentation for `DiagramDetectConfig` for more information on the available options. 
+ """ + return run_sync( + self.__async_client.diagrams.detect( + entities=entities, + search_field=search_field, + partial_match=partial_match, + min_tokens=min_tokens, + file_ids=file_ids, + file_external_ids=file_external_ids, + file_instance_ids=file_instance_ids, + file_references=file_references, + pattern_mode=pattern_mode, + configuration=configuration, + multiple_jobs=multiple_jobs, + ) + ) + + def get_detect_jobs(self, job_ids: list[int]) -> list[DiagramDetectResults]: + return run_sync(self.__async_client.diagrams.get_detect_jobs(job_ids=job_ids)) + + def convert(self, detect_job: DiagramDetectResults) -> DiagramConvertResults: + """ + Convert a P&ID to interactive SVGs where the provided annotations are highlighted. + + Note: + Will automatically wait for the detect job to complete before starting the conversion. + + Args: + detect_job (DiagramDetectResults): detect job + + Returns: + DiagramConvertResults: Resulting queued job. + + Examples: + + Run a detection job, then convert the results: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> detect_job = client.diagrams.detect(...) + >>> client.diagrams.convert(detect_job=detect_job) + """ + return run_sync(self.__async_client.diagrams.convert(detect_job=detect_job)) diff --git a/cognite/client/_sync_api/document_preview.py b/cognite/client/_sync_api/document_preview.py new file mode 100644 index 0000000000..5f691428d4 --- /dev/null +++ b/cognite/client/_sync_api/document_preview.py @@ -0,0 +1,148 @@ +""" +=============================================================================== +a613b724c1f2c9708b74ff1e9f99b3aa +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from pathlib import Path +from typing import IO + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.documents import TemporaryLink +from cognite.client.utils._async_helpers import run_sync + + +class SyncDocumentPreviewAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def download_page_as_png_bytes(self, id: int, page_number: int = 1) -> bytes: + """ + `Downloads an image preview for a specific page of the specified document. `_ + + Args: + id (int): The server-generated ID for the document you want to retrieve the preview of. + page_number (int): Page number to preview. Starting at 1 for first page. + + Returns: + bytes: The png preview of the document. 
+ + Examples: + + Download image preview of page 5 of file with id 123: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> content = client.documents.previews.download_page_as_png_bytes(id=123, page_number=5) + + Download an image preview and display using IPython.display.Image (for example in a Jupyter Notebook): + + >>> from IPython.display import Image + >>> binary_png = client.documents.previews.download_page_as_png_bytes(id=123, page_number=5) + >>> Image(binary_png) + """ + return run_sync( + self.__async_client.documents.previews.download_page_as_png_bytes(id=id, page_number=page_number) + ) + + def download_page_as_png( + self, path: Path | str | IO, id: int, page_number: int = 1, overwrite: bool = False + ) -> None: + """ + `Downloads an image preview for a specific page of the specified document. `_ + + Args: + path (Path | str | IO): The path to save the png preview of the document. If the path is a directory, the file name will be '[id]_page[page_number].png'. + id (int): The server-generated ID for the document you want to retrieve the preview of. + page_number (int): Page number to preview. Starting at 1 for first page. + overwrite (bool): Whether to overwrite existing file at the given path. Defaults to False. + + Examples: + + Download Image preview of page 5 of file with id 123 to folder "previews": + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.documents.previews.download_page_as_png("previews", id=123, page_number=5) + """ + return run_sync( + self.__async_client.documents.previews.download_page_as_png( + path=path, id=id, page_number=page_number, overwrite=overwrite + ) + ) + + def download_document_as_pdf_bytes(self, id: int) -> bytes: + """ + `Downloads a pdf preview of the specified document. `_ + + Previews will be rendered if necessary during the request. Be prepared for the request to take a few seconds to complete. + + Args: + id (int): The server-generated ID for the document you want to retrieve the preview of. + + Returns: + bytes: The pdf preview of the document. + + Examples: + + Download PDF preview of file with id 123: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> content = client.documents.previews.download_document_as_pdf_bytes(id=123) + """ + return run_sync(self.__async_client.documents.previews.download_document_as_pdf_bytes(id=id)) + + def download_document_as_pdf(self, path: Path | str | IO, id: int, overwrite: bool = False) -> None: + """ + `Downloads a pdf preview of the specified document. `_ + + Previews will be rendered if necessary during the request. Be prepared for the request to take a few seconds to complete. + + Args: + path (Path | str | IO): The path to save the pdf preview of the document. If the path is a directory, the file name will be '[id].pdf'. + id (int): The server-generated ID for the document you want to retrieve the preview of. + overwrite (bool): Whether to overwrite existing file at the given path. Defaults to False. 
+ + Examples: + + Download PDF preview of file with id 123 to folder "previews": + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.documents.previews.download_document_as_pdf("previews", id=123) + """ + return run_sync( + self.__async_client.documents.previews.download_document_as_pdf(path=path, id=id, overwrite=overwrite) + ) + + def retrieve_pdf_link(self, id: int) -> TemporaryLink: + """ + `Retrieve a Temporary link to download pdf preview `_ + + Args: + id (int): The server-generated ID for the document you want to retrieve the preview of. + + Returns: + TemporaryLink: A temporary link to download the pdf preview. + + Examples: + + Retrieve the PDF preview download link for document with id 123: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> link = client.documents.previews.retrieve_pdf_link(id=123) + """ + return run_sync(self.__async_client.documents.previews.retrieve_pdf_link(id=id)) diff --git a/cognite/client/_sync_api/documents.py b/cognite/client/_sync_api/documents.py new file mode 100644 index 0000000000..3624dacb56 --- /dev/null +++ b/cognite/client/_sync_api/documents.py @@ -0,0 +1,466 @@ +""" +=============================================================================== +a09b74dddecf74bb06f572649b34a8bc +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator +from typing import TYPE_CHECKING, Any, BinaryIO, Literal, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api.document_preview import SyncDocumentPreviewAPI +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.aggregations import AggregationFilter, UniqueResultList +from cognite.client.data_classes.documents import ( + Document, + DocumentHighlightList, + DocumentList, + DocumentProperty, + DocumentSort, + SortableProperty, + SourceFileProperty, +) +from cognite.client.data_classes.filters import Filter +from cognite.client.utils._async_helpers import SyncIterator, run_sync + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncDocumentsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + self.previews = SyncDocumentPreviewAPI(async_client) + + @overload + def __call__(self, chunk_size: int) -> Iterator[DocumentList]: ... + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[Document]: ... + + def __call__( + self, + chunk_size: int | None = None, + filter: Filter | dict[str, Any] | None = None, + sort: DocumentSort | SortableProperty | tuple[SortableProperty, Literal["asc", "desc"]] | None = None, + limit: int | None = None, + ) -> Iterator[Document | DocumentList]: + """ + Iterate over documents + + Fetches documents as they are iterated over, so you keep a limited number of documents in memory. + + Args: + chunk_size (int | None): Number of documents to return in each chunk. Defaults to yielding one document at a time. 
+ filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to return. + sort (DocumentSort | SortableProperty | tuple[SortableProperty, Literal['asc', 'desc']] | None): The property to sort by. The default order is ascending. + limit (int | None): Maximum number of documents to return. Default to return all items. + + Yields: + Document | DocumentList: yields Documents one by one if chunk_size is not specified, else DocumentList objects. + """ + yield from SyncIterator( + self.__async_client.documents(chunk_size=chunk_size, filter=filter, sort=sort, limit=limit) + ) + + def aggregate_count(self, query: str | None = None, filter: Filter | dict[str, Any] | None = None) -> int: + """ + `Count of documents matching the specified filters and search. `_ + + Args: + query (str | None): The free text search query, for details see the documentation referenced above. + filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count. + + Returns: + int: The number of documents matching the specified filters and search. + + Examples: + + Count the number of documents in your CDF project: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> count = client.documents.aggregate_count() + + Count the number of PDF documents in your CDF project: + + >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes.documents import DocumentProperty + >>> is_pdf = filters.Equals(DocumentProperty.mime_type, "application/pdf") + >>> pdf_count = client.documents.aggregate_count(filter=is_pdf) + + Count the number of documents with a related asset in a subtree rooted at any of + the specified external IDs, e.g. 'Plant_1' and 'Plant_2': + + >>> client.documents.aggregate_count( + ... filter=filters.InAssetSubtree( + ... property=DocumentProperty.asset_external_ids, + ... values=['Plant_1', 'Plant_2'], + ... ) + ... ) + """ + return run_sync(self.__async_client.documents.aggregate_count(query=query, filter=filter)) + + def aggregate_cardinality_values( + self, + property: DocumentProperty | SourceFileProperty | list[str] | str, + query: str | None = None, + filter: Filter | dict[str, Any] | None = None, + aggregate_filter: AggregationFilter | dict[str, Any] | None = None, + ) -> int: + """ + `Find approximate property count for documents. `_ + + Args: + property (DocumentProperty | SourceFileProperty | list[str] | str): The property to count the cardinality of. + query (str | None): The free text search query, for details see the documentation referenced above. + filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count cardinality. + aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. + + Returns: + int: The number of documents matching the specified filters and search. 
+ + Examples: + + Count the number of types of documents in your CDF project: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.documents import DocumentProperty + >>> client = CogniteClient() + >>> count = client.documents.aggregate_cardinality_values(DocumentProperty.type) + + Count the number of authors of plain/text documents in your CDF project: + + >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes.documents import DocumentProperty + >>> is_plain_text = filters.Equals(DocumentProperty.mime_type, "text/plain") + >>> plain_text_author_count = client.documents.aggregate_cardinality_values(DocumentProperty.author, filter=is_plain_text) + + Count the number of types of documents in your CDF project but exclude documents that start with "text": + + >>> from cognite.client.data_classes.documents import DocumentProperty + >>> from cognite.client.data_classes import aggregations + >>> agg = aggregations + >>> is_not_text = agg.Not(agg.Prefix("text")) + >>> type_count_excluded_text = client.documents.aggregate_cardinality_values(DocumentProperty.type, aggregate_filter=is_not_text) + """ + return run_sync( + self.__async_client.documents.aggregate_cardinality_values( + property=property, query=query, filter=filter, aggregate_filter=aggregate_filter + ) + ) + + def aggregate_cardinality_properties( + self, + path: SourceFileProperty | list[str] = SourceFileProperty.metadata, + query: str | None = None, + filter: Filter | dict[str, Any] | None = None, + aggregate_filter: AggregationFilter | dict[str, Any] | None = None, + ) -> int: + """ + `Find approximate paths count for documents. `_ + + Args: + path (SourceFileProperty | list[str]): The scope in every document to aggregate properties. The only value allowed now is ["sourceFile", "metadata"]. It means to aggregate only metadata properties (aka keys). + query (str | None): The free text search query, for details see the documentation referenced above. + filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count cardinality. + aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. + + Returns: + int: The number of documents matching the specified filters and search. + + Examples: + + Count the number metadata keys for documents in your CDF project: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> count = client.documents.aggregate_cardinality_properties() + """ + return run_sync( + self.__async_client.documents.aggregate_cardinality_properties( + path=path, query=query, filter=filter, aggregate_filter=aggregate_filter + ) + ) + + def aggregate_unique_values( + self, + property: DocumentProperty | SourceFileProperty | list[str] | str, + query: str | None = None, + filter: Filter | dict[str, Any] | None = None, + aggregate_filter: AggregationFilter | dict[str, Any] | None = None, + limit: int = DEFAULT_LIMIT_READ, + ) -> UniqueResultList: + """ + `Get unique properties with counts for documents. `_ + + Args: + property (DocumentProperty | SourceFileProperty | list[str] | str): The property to group by. + query (str | None): The free text search query, for details see the documentation referenced above. + filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count cardinality. 
+ aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. + limit (int): Maximum number of items. Defaults to 25. + + Returns: + UniqueResultList: List of unique values of documents matching the specified filters and search. + + Examples: + + Get the unique types with count of documents in your CDF project: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.documents import DocumentProperty + >>> client = CogniteClient() + >>> result = client.documents.aggregate_unique_values(DocumentProperty.mime_type) + >>> unique_types = result.unique + + Get the different languages with count for documents with external id prefix "abc": + + >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes.documents import DocumentProperty + >>> is_abc = filters.Prefix(DocumentProperty.external_id, "abc") + >>> result = client.documents.aggregate_unique_values(DocumentProperty.language, filter=is_abc) + >>> unique_languages = result.unique + + Get the unique mime types with count of documents, but exclude mime types that start with text: + + >>> from cognite.client.data_classes.documents import DocumentProperty + >>> from cognite.client.data_classes import aggregations + >>> agg = aggregations + >>> is_not_text = agg.Not(agg.Prefix("text")) + >>> result = client.documents.aggregate_unique_values(DocumentProperty.mime_type, aggregate_filter=is_not_text) + >>> unique_mime_types = result.unique + """ + return run_sync( + self.__async_client.documents.aggregate_unique_values( + property=property, query=query, filter=filter, aggregate_filter=aggregate_filter, limit=limit + ) + ) + + def aggregate_unique_properties( + self, + path: DocumentProperty | SourceFileProperty | list[str] | str, + query: str | None = None, + filter: Filter | dict[str, Any] | None = None, + aggregate_filter: AggregationFilter | dict[str, Any] | None = None, + limit: int = DEFAULT_LIMIT_READ, + ) -> UniqueResultList: + """ + `Get unique paths with counts for documents. `_ + + Args: + path (DocumentProperty | SourceFileProperty | list[str] | str): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). + query (str | None): The free text search query, for details see the documentation referenced above. + filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count cardinality. + aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. + limit (int): Maximum number of items. Defaults to 25. + + Returns: + UniqueResultList: List of unique values of documents matching the specified filters and search. + + Examples: + + Get the unique metadata keys with count of documents in your CDF project: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.documents import SourceFileProperty + >>> client = CogniteClient() + >>> result = client.documents.aggregate_unique_values(SourceFileProperty.metadata) + """ + return run_sync( + self.__async_client.documents.aggregate_unique_properties( + path=path, query=query, filter=filter, aggregate_filter=aggregate_filter, limit=limit + ) + ) + + def retrieve_content(self, id: int) -> bytes: + """ + `Retrieve document content `_ + + Returns extracted textual information for the given document. 
+ + The document pipeline extracts up to 1MiB of textual information from each processed document. + The search and list endpoints truncate the textual content of each document, + in order to reduce the size of the returned payload. If you want the whole text for a document, + you can use this endpoint. + + Args: + id (int): The server-generated ID for the document you want to retrieve the content of. + + Returns: + bytes: The content of the document. + + Examples: + + Retrieve the content of a document with id 123: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> content = client.documents.retrieve_content(id=123) + """ + return run_sync(self.__async_client.documents.retrieve_content(id=id)) + + def retrieve_content_buffer(self, id: int, buffer: BinaryIO) -> None: + """ + `Retrieve document content into buffer `_ + + Returns extracted textual information for the given document. + + The document pipeline extracts up to 1MiB of textual information from each processed document. + The search and list endpoints truncate the textual content of each document, + in order to reduce the size of the returned payload. If you want the whole text for a document, + you can use this endpoint. + + Args: + id (int): The server-generated ID for the document you want to retrieve the content of. + buffer (BinaryIO): The document content is streamed directly into the buffer. This is useful for retrieving large documents. + + Examples: + + Retrieve the content of a document with id 123 into local file "my_text.txt": + + >>> from cognite.client import CogniteClient + >>> from pathlib import Path + >>> client = CogniteClient() + >>> with Path("my_file.txt").open("wb") as buffer: + ... client.documents.retrieve_content_buffer(id=123, buffer=buffer) + """ + return run_sync(self.__async_client.documents.retrieve_content_buffer(id=id, buffer=buffer)) + + @overload + def search( + self, + query: str, + highlight: Literal[False] = False, + filter: Filter | dict[str, Any] | None = None, + sort: DocumentSort | SortableProperty | tuple[SortableProperty, Literal["asc", "desc"]] | None = None, + limit: int = DEFAULT_LIMIT_READ, + ) -> DocumentList: ... + + @overload + def search( + self, + query: str, + highlight: Literal[True], + filter: Filter | dict[str, Any] | None = None, + sort: DocumentSort | SortableProperty | tuple[SortableProperty, Literal["asc", "desc"]] | None = None, + limit: int = DEFAULT_LIMIT_READ, + ) -> DocumentHighlightList: ... + + def search( + self, + query: str, + highlight: bool = False, + filter: Filter | dict[str, Any] | None = None, + sort: DocumentSort | SortableProperty | tuple[SortableProperty, Literal["asc", "desc"]] | None = None, + limit: int = DEFAULT_LIMIT_READ, + ) -> DocumentList | DocumentHighlightList: + """ + `Search documents `_ + + This endpoint lets you search for documents by using advanced filters and free text queries. + Free text queries are matched against the documents' filenames and contents. For more information, see + endpoint documentation referenced above. + + Args: + query (str): The free text search query. + highlight (bool): Whether or not matches in search results should be highlighted. + filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to search. + sort (DocumentSort | SortableProperty | tuple[SortableProperty, Literal['asc', 'desc']] | None): The property to sort by. The default order is ascending. 
+ limit (int): Maximum number of items to return. When using highlights, the maximum value is reduced to 20. Defaults to 25. + + Returns: + DocumentList | DocumentHighlightList: List of search results. If highlight is True, a DocumentHighlightList is returned, otherwise a DocumentList is returned. + + Examples: + + Search for text "pump 123" in PDF documents in your CDF project: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes.documents import DocumentProperty + >>> client = CogniteClient() + >>> is_pdf = filters.Equals(DocumentProperty.mime_type, "application/pdf") + >>> documents = client.documents.search("pump 123", filter=is_pdf) + + Find all documents with exact text 'CPLEX Error 1217: No Solution exists.' + in plain text files created the last week in your CDF project and highlight the matches: + + >>> from datetime import datetime, timedelta + >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes.documents import DocumentProperty + >>> from cognite.client.utils import timestamp_to_ms + >>> is_plain_text = filters.Equals(DocumentProperty.mime_type, "text/plain") + >>> last_week = filters.Range(DocumentProperty.created_time, + ... gt=timestamp_to_ms(datetime.now() - timedelta(days=7))) + >>> documents = client.documents.search('"CPLEX Error 1217: No Solution exists."', + ... highlight=True, + ... filter=filters.And(is_plain_text, last_week)) + """ + return run_sync( + self.__async_client.documents.search( + query=query, highlight=highlight, filter=filter, sort=sort, limit=limit + ) + ) + + def list( + self, + filter: Filter | dict[str, Any] | None = None, + sort: DocumentSort | SortableProperty | tuple[SortableProperty, Literal["asc", "desc"]] | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + ) -> DocumentList: + """ + `List documents `_ + + You can use filters to narrow down the list. Unlike the search method, list does not restrict the number + of documents to return, meaning that setting the limit to -1 will return all the documents in your + project. + + Args: + filter (Filter | dict[str, Any] | None): Filter | dict[str, Any] | None): The filter to narrow down the documents to return. + sort (DocumentSort | SortableProperty | tuple[SortableProperty, Literal['asc', 'desc']] | None): The property to sort by. The default order is ascending. + limit (int | None): Maximum number of documents to return. Defaults to 25. Set to None or -1 to return all documents. + + Returns: + DocumentList: List of documents + + Examples: + + List all PDF documents in your CDF project: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes.documents import DocumentProperty + >>> client = CogniteClient() + >>> is_pdf = filters.Equals(DocumentProperty.mime_type, "application/pdf") + >>> pdf_documents = client.documents.list(filter=is_pdf) + + List documents in your CDF project: + + >>> documents = client.documents.list(limit=100) + + Iterate over documents, one-by-one: + + >>> for document in client.documents(): + ... document # do something with the document + + Iterate over chunks of documents to reduce memory load: + + >>> for document_list in client.documents(chunk_size=250): + ... 
document_list # do something with the document + + List all documents in your CDF project sorted by mime/type in descending order: + + >>> from cognite.client.data_classes.documents import SortableDocumentProperty + >>> documents = client.documents.list(sort=(SortableDocumentProperty.mime_type, "desc")) + """ + return run_sync(self.__async_client.documents.list(filter=filter, sort=sort, limit=limit)) diff --git a/cognite/client/_sync_api/entity_matching.py b/cognite/client/_sync_api/entity_matching.py new file mode 100644 index 0000000000..73dbddaf1a --- /dev/null +++ b/cognite/client/_sync_api/entity_matching.py @@ -0,0 +1,318 @@ +""" +=============================================================================== +547f2afb342b36199f74baa8a4115a2f +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import Literal + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes._base import CogniteResource +from cognite.client.data_classes.contextualization import ( + ContextualizationJobList, + EntityMatchingModel, + EntityMatchingModelList, + EntityMatchingModelUpdate, + EntityMatchingPredictionResult, +) +from cognite.client.utils._async_helpers import run_sync +from cognite.client.utils.useful_types import SequenceNotStr + + +class SyncEntityMatchingAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def retrieve(self, id: int | None = None, external_id: str | None = None) -> EntityMatchingModel | None: + """ + `Retrieve model `_ + + Args: + id (int | None): id of the model to retrieve. + external_id (str | None): external id of the model to retrieve. + + Returns: + EntityMatchingModel | None: Model requested. + + Examples: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> retrieved_model = client.entity_matching.retrieve(id=1) + """ + return run_sync(self.__async_client.entity_matching.retrieve(id=id, external_id=external_id)) + + def retrieve_multiple( + self, ids: Sequence[int] | None = None, external_ids: SequenceNotStr[str] | None = None + ) -> EntityMatchingModelList: + """ + `Retrieve models `_ + + Args: + ids (Sequence[int] | None): ids of the model to retrieve. + external_ids (SequenceNotStr[str] | None): external ids of the model to retrieve. + + Returns: + EntityMatchingModelList: Models requested. 
+ + Examples: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> retrieved_models = client.entity_matching.retrieve_multiple([1,2,3]) + """ + return run_sync(self.__async_client.entity_matching.retrieve_multiple(ids=ids, external_ids=external_ids)) + + def update( + self, + item: EntityMatchingModel + | EntityMatchingModelUpdate + | Sequence[EntityMatchingModel | EntityMatchingModelUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> EntityMatchingModelList | EntityMatchingModel: + """ + `Update model `_ + + Args: + item (EntityMatchingModel | EntityMatchingModelUpdate | Sequence[EntityMatchingModel | EntityMatchingModelUpdate]): Model(s) to update + mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (EntityMatchingModel). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + + Returns: + EntityMatchingModelList | EntityMatchingModel: No description. + + Examples: + >>> from cognite.client.data_classes.contextualization import EntityMatchingModelUpdate + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.entity_matching.update(EntityMatchingModelUpdate(id=1).name.set("New name")) + """ + return run_sync(self.__async_client.entity_matching.update(item=item, mode=mode)) + + def list( + self, + name: str | None = None, + description: str | None = None, + original_id: int | None = None, + feature_type: str | None = None, + classifier: str | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + ) -> EntityMatchingModelList: + """ + `List models `_ + + Args: + name (str | None): Optional user-defined name of model. + description (str | None): Optional user-defined description of model. + original_id (int | None): id of the original model for models that were created with refit. + feature_type (str | None): feature type that defines the combination of features used. + classifier (str | None): classifier used in training. + limit (int | None): Maximum number of items to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + + Returns: + EntityMatchingModelList: List of models. + + Examples: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.entity_matching.list(limit=1, name="test") + """ + return run_sync( + self.__async_client.entity_matching.list( + name=name, + description=description, + original_id=original_id, + feature_type=feature_type, + classifier=classifier, + limit=limit, + ) + ) + + def list_jobs(self) -> ContextualizationJobList: + """ + List jobs, typically model fit and predict runs. + Returns: + ContextualizationJobList: List of jobs. 
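+
+        Examples:
+
+            List all contextualization jobs (a minimal sketch):
+
+            >>> from cognite.client import CogniteClient
+            >>> client = CogniteClient()
+            >>> jobs = client.entity_matching.list_jobs()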
+ """ + return run_sync(self.__async_client.entity_matching.list_jobs()) + + def delete( + self, id: int | Sequence[int] | None = None, external_id: str | SequenceNotStr[str] | None = None + ) -> None: + """ + `Delete models `_ + + https://api-docs.cognite.com/20230101/tag/Entity-matching/operation/entityMatchingDelete + + + Args: + id (int | Sequence[int] | None): Id or list of ids + external_id (str | SequenceNotStr[str] | None): External ID or list of external ids + Examples: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.entity_matching.delete(id=1) + """ + return run_sync(self.__async_client.entity_matching.delete(id=id, external_id=external_id)) + + def fit( + self, + sources: Sequence[dict | CogniteResource], + targets: Sequence[dict | CogniteResource], + true_matches: Sequence[dict | tuple[int | str, int | str]] | None = None, + match_fields: dict | Sequence[tuple[str, str]] | None = None, + feature_type: str | None = None, + classifier: str | None = None, + ignore_missing_fields: bool = False, + name: str | None = None, + description: str | None = None, + external_id: str | None = None, + ) -> EntityMatchingModel: + """ + Fit entity matching model. + + Note: + All users on this CDF subscription with assets read-all and entitymatching read-all and write-all + capabilities in the project, are able to access the data sent to this endpoint. + + Args: + sources (Sequence[dict | CogniteResource]): entities to match from, should have an 'id' field. Tolerant to passing more than is needed or used (e.g. json dump of time series list). Metadata fields are automatically flattened to "metadata.key" entries, such that they can be used in match_fields. + targets (Sequence[dict | CogniteResource]): entities to match to, should have an 'id' field. Tolerant to passing more than is needed or used. + true_matches (Sequence[dict | tuple[int | str, int | str]] | None): Known valid matches given as a list of dicts with keys 'sourceId', 'sourceExternalId', 'targetId', 'targetExternalId'). If omitted, uses an unsupervised model. A tuple can be used instead of the dictionary for convenience, interpreted as id/externalId based on type. + match_fields (dict | Sequence[tuple[str, str]] | None): List of (from,to) keys to use in matching. Default in the API is [('name','name')]. Also accepts {"source": .., "target": ..}. + feature_type (str | None): feature type that defines the combination of features used, see API docs for details. + classifier (str | None): classifier used in training. + ignore_missing_fields (bool): whether missing data in match_fields should return error or be filled in with an empty string. + name (str | None): Optional user-defined name of model. + description (str | None): Optional user-defined description of model. + external_id (str | None): Optional external id. Must be unique within the project. + Returns: + EntityMatchingModel: Resulting queued model. + + Example: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> sources = [{'id': 101, 'name': 'ChildAsset1', 'description': 'Child of ParentAsset1'}] + >>> targets = [{'id': 1, 'name': 'ParentAsset1', 'description': 'Parent to ChildAsset1'}] + >>> true_matches = [(1, 101)] + >>> model = client.entity_matching.fit( + ... sources=sources, + ... targets=targets, + ... true_matches=true_matches, + ... 
description="AssetMatchingJob1" + ... ) + """ + return run_sync( + self.__async_client.entity_matching.fit( + sources=sources, + targets=targets, + true_matches=true_matches, + match_fields=match_fields, + feature_type=feature_type, + classifier=classifier, + ignore_missing_fields=ignore_missing_fields, + name=name, + description=description, + external_id=external_id, + ) + ) + + def predict( + self, + sources: Sequence[dict] | None = None, + targets: Sequence[dict] | None = None, + num_matches: int = 1, + score_threshold: float | None = None, + id: int | None = None, + external_id: str | None = None, + ) -> EntityMatchingPredictionResult: + """ + `Predict entity matching. `_ + + Warning: + Blocks and waits for the model to be ready if it has been recently created. + + Note: + All users on this CDF subscription with assets read-all and entitymatching read-all and write-all + capabilities in the project, are able to access the data sent to this endpoint. + + Args: + sources (Sequence[dict] | None): entities to match from, does not need an 'id' field. Tolerant to passing more than is needed or used (e.g. json dump of time series list). If omitted, will use data from fit. + targets (Sequence[dict] | None): entities to match to, does not need an 'id' field. Tolerant to passing more than is needed or used. If omitted, will use data from fit. + num_matches (int): number of matches to return for each item. + score_threshold (float | None): only return matches with a score above this threshold + id (int | None): ids of the model to use. + external_id (str | None): external ids of the model to use. + + Returns: + EntityMatchingPredictionResult: object which can be used to wait for and retrieve results. + + Examples: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> sources = {'id': 101, 'name': 'ChildAsset1', 'description': 'Child of ParentAsset1'} + >>> targets = {'id': 1, 'name': 'ParentAsset1', 'description': 'Parent to ChildAsset1'} + >>> true_matches = [(1, 101)] + >>> model = client.entity_matching.predict( + ... sources = sources, + ... targets = targets, + ... num_matches = 1, + ... score_threshold = 0.6, + ... id=1 + ... ) + """ + return run_sync( + self.__async_client.entity_matching.predict( + sources=sources, + targets=targets, + num_matches=num_matches, + score_threshold=score_threshold, + id=id, + external_id=external_id, + ) + ) + + def refit( + self, + true_matches: Sequence[dict | tuple[int | str, int | str]], + id: int | None = None, + external_id: str | None = None, + ) -> EntityMatchingModel: + """ + `Re-fits an entity matching model, using the combination of the old and new true matches. `_ + + Note: + All users on this CDF subscription with assets read-all and entitymatching read-all and write-all + capabilities in the project, are able to access the data sent to this endpoint. + + Args: + true_matches (Sequence[dict | tuple[int | str, int | str]]): Updated known valid matches given as a list of dicts with keys 'fromId', 'fromExternalId', 'toId', 'toExternalId'). A tuple can be used instead of the dictionary for convenience, interpreted as id/externalId based on type. + id (int | None): ids of the model to use. + external_id (str | None): external ids of the model to use. + Returns: + EntityMatchingModel: new model refitted to true_matches. 
+
+
+        Examples:
+            >>> from cognite.client import CogniteClient, AsyncCogniteClient
+            >>> client = CogniteClient()
+            >>> # async_client = AsyncCogniteClient() # another option
+            >>> sources = [{'id': 101, 'name': 'ChildAsset1', 'description': 'Child of ParentAsset1'}]
+            >>> targets = [{'id': 1, 'name': 'ParentAsset1', 'description': 'Parent to ChildAsset1'}]
+            >>> true_matches = [(1, 101)]
+            >>> model = client.entity_matching.refit(true_matches=true_matches, id=1)
+        """
+        return run_sync(
+            self.__async_client.entity_matching.refit(true_matches=true_matches, id=id, external_id=external_id)
+        )
diff --git a/cognite/client/_sync_api/events.py b/cognite/client/_sync_api/events.py
new file mode 100644
index 0000000000..28d85a7481
--- /dev/null
+++ b/cognite/client/_sync_api/events.py
@@ -0,0 +1,688 @@
+"""
+===============================================================================
+bfc59b87972253161926e6c07a36e18e
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import Any, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._api.events import SortSpec
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import (
+    EndTimeFilter,
+    Event,
+    EventFilter,
+    EventList,
+    EventUpdate,
+    TimestampRange,
+)
+from cognite.client.data_classes.aggregations import AggregationFilter, UniqueResultList
+from cognite.client.data_classes.events import EventPropertyLike, EventWrite
+from cognite.client.data_classes.filters import Filter
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+
+class SyncEventsAPI(SyncAPIClient):
+    """Auto-generated, do not modify manually."""
+
+    def __init__(self, async_client: AsyncCogniteClient):
+        self.__async_client = async_client
+
+    @overload
+    def __call__(self, chunk_size: None = None) -> Iterator[Event]: ...
+
+    @overload
+    def __call__(self, chunk_size: int) -> Iterator[EventList]: ...
+
+    def __call__(
+        self,
+        chunk_size: int | None = None,
+        start_time: dict[str, Any] | TimestampRange | None = None,
+        end_time: dict[str, Any] | EndTimeFilter | None = None,
+        active_at_time: dict[str, Any] | TimestampRange | None = None,
+        type: str | None = None,
+        subtype: str | None = None,
+        metadata: dict[str, str] | None = None,
+        asset_ids: Sequence[int] | None = None,
+        asset_external_ids: SequenceNotStr[str] | None = None,
+        asset_subtree_ids: int | Sequence[int] | None = None,
+        asset_subtree_external_ids: str | SequenceNotStr[str] | None = None,
+        data_set_ids: int | Sequence[int] | None = None,
+        data_set_external_ids: str | SequenceNotStr[str] | None = None,
+        source: str | None = None,
+        created_time: dict[str, Any] | TimestampRange | None = None,
+        last_updated_time: dict[str, Any] | TimestampRange | None = None,
+        external_id_prefix: str | None = None,
+        sort: SortSpec | list[SortSpec] | None = None,
+        limit: int | None = None,
+        advanced_filter: Filter | dict[str, Any] | None = None,
+    ) -> Iterator[Event | EventList]:
+        """
+        Iterate over events
+
+        Fetches events as they are iterated over, so you keep a limited number of events in memory.
+ + Args: + chunk_size (int | None): Number of events to return in each chunk. Defaults to yielding one event a time. + start_time (dict[str, Any] | TimestampRange | None): Range between two timestamps + end_time (dict[str, Any] | EndTimeFilter | None): Range between two timestamps + active_at_time (dict[str, Any] | TimestampRange | None): Event is considered active from its startTime to endTime inclusive. If startTime is null, event is never active. If endTime is null, event is active from startTime onwards. activeAtTime filter will match all events that are active at some point from min to max, from min, or to max, depending on which of min and max parameters are specified. + type (str | None): Type of the event, e.g 'failure'. + subtype (str | None): Subtype of the event, e.g 'electrical'. + metadata (dict[str, str] | None): Customizable extra data about the event. String key -> String value. + asset_ids (Sequence[int] | None): Asset IDs of related equipments that this event relates to. + asset_external_ids (SequenceNotStr[str] | None): Asset External IDs of related equipment that this event relates to. + asset_subtree_ids (int | Sequence[int] | None): Only include events that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include events that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids (int | Sequence[int] | None): Return only events in the specified data set(s) with this id / these ids. + data_set_external_ids (str | SequenceNotStr[str] | None): Return only events in the specified data set(s) with this external id / these external ids. + source (str | None): The source of this event. + created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + external_id_prefix (str | None): External Id provided by client. Should be unique within the project + sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + limit (int | None): Maximum number of events to return. Defaults to return all items. + advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. + + Yields: + Event | EventList: yields Event one by one if chunk_size is not specified, else EventList objects. 
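+
+        Examples:
+
+            A minimal usage sketch, iterating over events one-by-one (mirrors the iteration examples shown for ``list`` below):
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> for event in client.events():
+                ...     event # do something with the event
+
+            Iterate over chunks of events to reduce memory load:
+
+                >>> for event_list in client.events(chunk_size=2500):
+                ...     event_list # do something with the events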
+ """ + yield from SyncIterator( + self.__async_client.events( + chunk_size=chunk_size, + start_time=start_time, + end_time=end_time, + active_at_time=active_at_time, + type=type, + subtype=subtype, + metadata=metadata, + asset_ids=asset_ids, + asset_external_ids=asset_external_ids, + asset_subtree_ids=asset_subtree_ids, + asset_subtree_external_ids=asset_subtree_external_ids, + data_set_ids=data_set_ids, + data_set_external_ids=data_set_external_ids, + source=source, + created_time=created_time, + last_updated_time=last_updated_time, + external_id_prefix=external_id_prefix, + sort=sort, + limit=limit, + advanced_filter=advanced_filter, + ) + ) + + def retrieve(self, id: int | None = None, external_id: str | None = None) -> Event | None: + """ + `Retrieve a single event by id. `_ + + Args: + id (int | None): ID + external_id (str | None): External ID + + Returns: + Event | None: Requested event or None if it does not exist. + + Examples: + + Get event by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.events.retrieve(id=1) + + Get event by external id: + + >>> res = client.events.retrieve(external_id="1") + """ + return run_sync(self.__async_client.events.retrieve(id=id, external_id=external_id)) + + def retrieve_multiple( + self, + ids: Sequence[int] | None = None, + external_ids: SequenceNotStr[str] | None = None, + ignore_unknown_ids: bool = False, + ) -> EventList: + """ + `Retrieve multiple events by id. `_ + + Args: + ids (Sequence[int] | None): IDs + external_ids (SequenceNotStr[str] | None): External IDs + ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + + Returns: + EventList: The requested events. + + Examples: + + Get events by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.events.retrieve_multiple(ids=[1, 2, 3]) + + Get events by external id: + + >>> res = client.events.retrieve_multiple(external_ids=["abc", "def"]) + """ + return run_sync( + self.__async_client.events.retrieve_multiple( + ids=ids, external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + def aggregate_unique_values( + self, + filter: EventFilter | dict[str, Any] | None = None, + property: EventPropertyLike | None = None, + advanced_filter: Filter | dict[str, Any] | None = None, + aggregate_filter: AggregationFilter | dict[str, Any] | None = None, + ) -> UniqueResultList: + """ + `Get unique properties with counts for events. `_ + + Args: + filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match. + property (EventPropertyLike | None): The property name(s) to apply the aggregation on. + advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to consider. + aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. + + Returns: + UniqueResultList: List of unique values of events matching the specified filters and search. 
+ + Examples: + + Get the unique types with count of events in your CDF project: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.events import EventProperty + >>> client = CogniteClient() + >>> result = client.events.aggregate_unique_values(property=EventProperty.type) + >>> print(result.unique) + + Get the unique types of events after 2020-01-01 in your CDF project: + + >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes.events import EventProperty + >>> from cognite.client.utils import timestamp_to_ms + >>> from datetime import datetime + >>> is_after_2020 = filters.Range(EventProperty.start_time, gte=timestamp_to_ms(datetime(2020, 1, 1))) + >>> result = client.events.aggregate_unique_values(EventProperty.type, advanced_filter=is_after_2020) + >>> print(result.unique) + + Get the unique types of events after 2020-01-01 in your CDF project, but exclude all types that start with + "planned": + + >>> from cognite.client.data_classes.events import EventProperty + >>> from cognite.client.data_classes import aggregations + >>> agg = aggregations + >>> not_planned = agg.Not(agg.Prefix("planned")) + >>> is_after_2020 = filters.Range(EventProperty.start_time, gte=timestamp_to_ms(datetime(2020, 1, 1))) + >>> result = client.events.aggregate_unique_values(EventProperty.type, advanced_filter=is_after_2020, aggregate_filter=not_planned) + >>> print(result.unique) + """ + return run_sync( + self.__async_client.events.aggregate_unique_values( + filter=filter, property=property, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter + ) + ) + + def aggregate_count( + self, + property: EventPropertyLike | None = None, + advanced_filter: Filter | dict[str, Any] | None = None, + filter: EventFilter | dict[str, Any] | None = None, + ) -> int: + """ + `Count of event matching the specified filters. `_ + + Args: + property (EventPropertyLike | None): If specified, Get an approximate number of Events with a specific property + (property is not null) and matching the filters. + advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to count. + filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match. + + Returns: + int: The number of events matching the specified filters and search. + + Examples: + + Count the number of events in your CDF project: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> count = client.events.aggregate_count() + + Count the number of workorder events in your CDF project: + + >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes.events import EventProperty + >>> is_workorder = filters.Equals(EventProperty.type, "workorder") + >>> workorder_count = client.events.aggregate_count(advanced_filter=is_workorder) + """ + return run_sync( + self.__async_client.events.aggregate_count( + property=property, advanced_filter=advanced_filter, filter=filter + ) + ) + + def aggregate_cardinality_values( + self, + property: EventPropertyLike, + advanced_filter: Filter | dict[str, Any] | None = None, + aggregate_filter: AggregationFilter | dict[str, Any] | None = None, + filter: EventFilter | dict[str, Any] | None = None, + ) -> int: + """ + `Find approximate property count for events. `_ + + Args: + property (EventPropertyLike): The property to count the cardinality of. 
+ advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to count cardinality. + aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. + filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match. + Returns: + int: The number of properties matching the specified filter. + + Examples: + + Count the number of types of events in your CDF project: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.events import EventProperty + >>> client = CogniteClient() + >>> type_count = client.events.aggregate_cardinality_values(EventProperty.type) + + Count the number of types of events linked to asset 123 in your CDF project: + + >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes.events import EventProperty + >>> is_asset = filters.ContainsAny(EventProperty.asset_ids, 123) + >>> plain_text_author_count = client.events.aggregate_cardinality_values(EventProperty.type, advanced_filter=is_asset) + """ + return run_sync( + self.__async_client.events.aggregate_cardinality_values( + property=property, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter + ) + ) + + def aggregate_cardinality_properties( + self, + path: EventPropertyLike, + advanced_filter: Filter | dict[str, Any] | None = None, + aggregate_filter: AggregationFilter | dict[str, Any] | None = None, + filter: EventFilter | dict[str, Any] | None = None, + ) -> int: + """ + `Find approximate paths count for events. `_ + + Args: + path (EventPropertyLike): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. + It means to aggregate only metadata properties (aka keys). + advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to count cardinality. + aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. + filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match. + Returns: + int: The number of properties matching the specified filters and search. + + Examples: + + Count the number of metadata keys for events in your CDF project: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.events import EventProperty + >>> client = CogniteClient() + >>> type_count = client.events.aggregate_cardinality_properties(EventProperty.metadata) + """ + return run_sync( + self.__async_client.events.aggregate_cardinality_properties( + path=path, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter + ) + ) + + def aggregate_unique_properties( + self, + path: EventPropertyLike, + advanced_filter: Filter | dict[str, Any] | None = None, + aggregate_filter: AggregationFilter | dict[str, Any] | None = None, + filter: EventFilter | dict[str, Any] | None = None, + ) -> UniqueResultList: + """ + `Get unique paths with counts for events. `_ + + Args: + path (EventPropertyLike): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. + It means to aggregate only metadata properties (aka keys). + advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to count cardinality. + aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. 
+ filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match. + + Returns: + UniqueResultList: List of unique values of events matching the specified filters and search. + + Examples: + + Get the unique metadata keys with count of events in your CDF project: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.events import EventProperty + >>> client = CogniteClient() + >>> result = client.events.aggregate_unique_properties(EventProperty.metadata) + >>> print(result.unique) + """ + return run_sync( + self.__async_client.events.aggregate_unique_properties( + path=path, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter + ) + ) + + @overload + def create(self, event: Sequence[Event] | Sequence[EventWrite]) -> EventList: ... + + @overload + def create(self, event: Event | EventWrite) -> Event: ... + + def create(self, event: Event | EventWrite | Sequence[Event] | Sequence[EventWrite]) -> Event | EventList: + """ + `Create one or more events. `_ + + Args: + event (Event | EventWrite | Sequence[Event] | Sequence[EventWrite]): Event or list of events to create. + + Returns: + Event | EventList: Created event(s) + + Examples: + + Create new events: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import EventWrite + >>> client = CogniteClient() + >>> events = [EventWrite(start_time=0, end_time=1), EventWrite(start_time=2, end_time=3)] + >>> res = client.events.create(events) + """ + return run_sync(self.__async_client.events.create(event=event)) + + def delete( + self, + id: int | Sequence[int] | None = None, + external_id: str | SequenceNotStr[str] | None = None, + ignore_unknown_ids: bool = False, + ) -> None: + """ + `Delete one or more events `_ + + Args: + id (int | Sequence[int] | None): Id or list of ids + external_id (str | SequenceNotStr[str] | None): External ID or list of external ids + ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + + Examples: + + Delete events by id or external id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.events.delete(id=[1,2,3], external_id="3") + """ + return run_sync( + self.__async_client.events.delete(id=id, external_id=external_id, ignore_unknown_ids=ignore_unknown_ids) + ) + + @overload + def update( + self, + item: Sequence[Event | EventWrite | EventUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> EventList: ... + + @overload + def update( + self, + item: Event | EventWrite | EventUpdate, + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> Event: ... + + def update( + self, + item: Event | EventWrite | EventUpdate | Sequence[Event | EventWrite | EventUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> Event | EventList: + """ + `Update one or more events `_ + + Args: + item (Event | EventWrite | EventUpdate | Sequence[Event | EventWrite | EventUpdate]): Event(s) to update + mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Event or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). 
Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + + Returns: + Event | EventList: Updated event(s) + + Examples: + + Update an event that you have fetched. This will perform a full update of the event: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> event = client.events.retrieve(id=1) + >>> event.description = "New description" + >>> res = client.events.update(event) + + Perform a partial update on a event, updating the description and adding a new field to metadata: + + >>> from cognite.client.data_classes import EventUpdate + >>> my_update = EventUpdate(id=1).description.set("New description").metadata.add({"key": "value"}) + >>> res = client.events.update(my_update) + """ + return run_sync(self.__async_client.events.update(item=item, mode=mode)) + + def search( + self, + description: str | None = None, + filter: EventFilter | dict[str, Any] | None = None, + limit: int = DEFAULT_LIMIT_READ, + ) -> EventList: + """ + `Search for events `_ + Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required. + + Args: + description (str | None): Fuzzy match on description. + filter (EventFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields. + limit (int): Maximum number of results to return. + + Returns: + EventList: List of requested events + + Examples: + + Search for events: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.events.search(description="some description") + """ + return run_sync(self.__async_client.events.search(description=description, filter=filter, limit=limit)) + + @overload + def upsert(self, item: Sequence[Event | EventWrite], mode: Literal["patch", "replace"] = "patch") -> EventList: ... + + @overload + def upsert(self, item: Event | EventWrite, mode: Literal["patch", "replace"] = "patch") -> Event: ... + + def upsert( + self, item: Event | EventWrite | Sequence[Event | EventWrite], mode: Literal["patch", "replace"] = "patch" + ) -> Event | EventList: + """ + Upsert events, i.e., update if it exists, and create if it does not exist. + Note this is a convenience method that handles the upserting for you by first calling update on all items, + and if any of them fail because they do not exist, it will create them instead. + + For more details, see :ref:`appendix-upsert`. + + Args: + item (Event | EventWrite | Sequence[Event | EventWrite]): Event or list of events to upsert. + mode (Literal['patch', 'replace']): Whether to patch or replace in the case the events are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. + + Returns: + Event | EventList: The upserted event(s). 
+ + Examples: + + Upsert for events: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import EventWrite + >>> client = CogniteClient() + >>> existing_event = client.events.retrieve(id=1) + >>> existing_event.description = "New description" + >>> new_event = EventWrite(external_id="new_event", description="New event") + >>> res = client.events.upsert([existing_event, new_event], mode="replace") + """ + return run_sync(self.__async_client.events.upsert(item=item, mode=mode)) + + def list( + self, + start_time: dict[str, Any] | TimestampRange | None = None, + end_time: dict[str, Any] | EndTimeFilter | None = None, + active_at_time: dict[str, Any] | TimestampRange | None = None, + type: str | None = None, + subtype: str | None = None, + metadata: dict[str, str] | None = None, + asset_ids: Sequence[int] | None = None, + asset_external_ids: SequenceNotStr[str] | None = None, + asset_subtree_ids: int | Sequence[int] | None = None, + asset_subtree_external_ids: str | SequenceNotStr[str] | None = None, + data_set_ids: int | Sequence[int] | None = None, + data_set_external_ids: str | SequenceNotStr[str] | None = None, + source: str | None = None, + created_time: dict[str, Any] | TimestampRange | None = None, + last_updated_time: dict[str, Any] | TimestampRange | None = None, + external_id_prefix: str | None = None, + sort: SortSpec | list[SortSpec] | None = None, + partitions: int | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + advanced_filter: Filter | dict[str, Any] | None = None, + ) -> EventList: + """ + `List events `_ + + Args: + start_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. + end_time (dict[str, Any] | EndTimeFilter | None): Range between two timestamps. + active_at_time (dict[str, Any] | TimestampRange | None): Event is considered active from its startTime to endTime inclusive. If startTime is null, event is never active. If endTime is null, event is active from startTime onwards. activeAtTime filter will match all events that are active at some point from min to max, from min, or to max, depending on which of min and max parameters are specified. + type (str | None): Type of the event, e.g 'failure'. + subtype (str | None): Subtype of the event, e.g 'electrical'. + metadata (dict[str, str] | None): Customizable extra data about the event. String key -> String value. + asset_ids (Sequence[int] | None): Asset IDs of related equipments that this event relates to. + asset_external_ids (SequenceNotStr[str] | None): Asset External IDs of related equipment that this event relates to. + asset_subtree_ids (int | Sequence[int] | None): Only include events that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include events that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids (int | Sequence[int] | None): Return only events in the specified data set(s) with this id / these ids. + data_set_external_ids (str | SequenceNotStr[str] | None): Return only events in the specified data set(s) with this external id / these external ids. + source (str | None): The source of this event. + created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. 
Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + external_id_prefix (str | None): External Id provided by client. Should be unique within the project. + sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). + limit (int | None): Maximum number of events to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage. + + Returns: + EventList: List of requested events + + .. note:: + When using `partitions`, there are few considerations to keep in mind: + * `limit` has to be set to `None` (or `-1`). + * API may reject requests if you specify more than 10 partitions. When Cognite enforces this behavior, the requests result in a 400 Bad Request status. + * Partitions are done independently of sorting: there's no guarantee of the sort order between elements from different partitions. For this reason providing a `sort` parameter when using `partitions` is not allowed. + + + Examples: + + List events and filter on max start time: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> event_list = client.events.list(limit=5, start_time={"max": 1500000000}) + + Iterate over events, one-by-one: + + >>> for event in client.events(): + ... event # do something with the event + + Iterate over chunks of events to reduce memory load: + + >>> for event_list in client.events(chunk_size=2500): + ... event_list # do something with the events + + Using advanced filter, find all events that have a metadata key 'timezone' starting with 'Europe', + and sort by external id ascending: + + >>> from cognite.client.data_classes import filters + >>> in_timezone = filters.Prefix(["metadata", "timezone"], "Europe") + >>> res = client.events.list(advanced_filter=in_timezone, sort=("external_id", "asc")) + + Note that you can check the API documentation above to see which properties you can filter on + with which filters. + + To make it easier to avoid spelling mistakes and easier to look up available properties + for filtering and sorting, you can also use the `EventProperty` and `SortableEventProperty` Enums. + + >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes.events import EventProperty, SortableEventProperty + >>> in_timezone = filters.Prefix(EventProperty.metadata_key("timezone"), "Europe") + >>> res = client.events.list( + ... advanced_filter=in_timezone, + ... sort=(SortableEventProperty.external_id, "asc")) + + Combine filter and advanced filter: + + >>> from cognite.client.data_classes import filters + >>> not_instrument_lvl5 = filters.And( + ... filters.ContainsAny("labels", ["Level5"]), + ... filters.Not(filters.ContainsAny("labels", ["Instrument"])) + ... 
) + >>> res = client.events.list(asset_subtree_ids=[123456], advanced_filter=not_instrument_lvl5) + """ + return run_sync( + self.__async_client.events.list( + start_time=start_time, + end_time=end_time, + active_at_time=active_at_time, + type=type, + subtype=subtype, + metadata=metadata, + asset_ids=asset_ids, + asset_external_ids=asset_external_ids, + asset_subtree_ids=asset_subtree_ids, + asset_subtree_external_ids=asset_subtree_external_ids, + data_set_ids=data_set_ids, + data_set_external_ids=data_set_external_ids, + source=source, + created_time=created_time, + last_updated_time=last_updated_time, + external_id_prefix=external_id_prefix, + sort=sort, + partitions=partitions, + limit=limit, + advanced_filter=advanced_filter, + ) + ) diff --git a/cognite/client/_sync_api/extractionpipelines/__init__.py b/cognite/client/_sync_api/extractionpipelines/__init__.py new file mode 100644 index 0000000000..01e329ddcb --- /dev/null +++ b/cognite/client/_sync_api/extractionpipelines/__init__.py @@ -0,0 +1,237 @@ +""" +=============================================================================== +48dfd1c74433af19a2a33d145cae12bb +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, Literal, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api.extractionpipelines.configs import SyncExtractionPipelineConfigsAPI +from cognite.client._sync_api.extractionpipelines.runs import SyncExtractionPipelineRunsAPI +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import ExtractionPipeline, ExtractionPipelineList, ExtractionPipelineUpdate +from cognite.client.data_classes.extractionpipelines import ExtractionPipelineWrite +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncExtractionPipelinesAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + self.runs = SyncExtractionPipelineRunsAPI(async_client) + self.config = SyncExtractionPipelineConfigsAPI(async_client) + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[ExtractionPipeline]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[ExtractionPipelineList]: ... + + def __call__( + self, chunk_size: int | None = None, limit: int | None = None + ) -> Iterator[ExtractionPipeline | ExtractionPipelineList]: + """ + Iterate over extraction pipelines + + Args: + chunk_size (int | None): Number of extraction pipelines to yield per chunk. Defaults to yielding extraction pipelines one by one. + limit (int | None): Limits the number of results to be returned. Defaults to yielding all extraction pipelines. + + Yields: + ExtractionPipeline | ExtractionPipelineList: Yields extraction pipelines one by one or in chunks up to the chunk size. 
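+
+        Examples:
+
+            A minimal usage sketch, iterating over extraction pipelines one-by-one (follows the same iteration pattern as the other resource APIs):
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> for pipeline in client.extraction_pipelines():
+                ...     pipeline # do something with the extraction pipeline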
+ """ + yield from SyncIterator(self.__async_client.extraction_pipelines(chunk_size=chunk_size, limit=limit)) + + def retrieve(self, id: int | None = None, external_id: str | None = None) -> ExtractionPipeline | None: + """ + `Retrieve a single extraction pipeline by id. `_ + + Args: + id (int | None): ID + external_id (str | None): External ID + + Returns: + ExtractionPipeline | None: Requested extraction pipeline or None if it does not exist. + + Examples: + + Get extraction pipeline by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.extraction_pipelines.retrieve(id=1) + + Get extraction pipeline by external id: + + >>> res = client.extraction_pipelines.retrieve(external_id="1") + """ + return run_sync(self.__async_client.extraction_pipelines.retrieve(id=id, external_id=external_id)) + + def retrieve_multiple( + self, + ids: Sequence[int] | None = None, + external_ids: SequenceNotStr[str] | None = None, + ignore_unknown_ids: bool = False, + ) -> ExtractionPipelineList: + """ + `Retrieve multiple extraction pipelines by ids and external ids. `_ + + Args: + ids (Sequence[int] | None): IDs + external_ids (SequenceNotStr[str] | None): External IDs + ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + + Returns: + ExtractionPipelineList: The requested ExtractionPipelines. + + Examples: + + Get ExtractionPipelines by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.extraction_pipelines.retrieve_multiple(ids=[1, 2, 3]) + + Get assets by external id: + + >>> res = client.extraction_pipelines.retrieve_multiple(external_ids=["abc", "def"], ignore_unknown_ids=True) + """ + return run_sync( + self.__async_client.extraction_pipelines.retrieve_multiple( + ids=ids, external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> ExtractionPipelineList: + """ + `List extraction pipelines `_ + + Args: + limit (int | None): Maximum number of ExtractionPipelines to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + + Returns: + ExtractionPipelineList: List of requested ExtractionPipelines + + Examples: + + List ExtractionPipelines: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> ep_list = client.extraction_pipelines.list(limit=5) + """ + return run_sync(self.__async_client.extraction_pipelines.list(limit=limit)) + + @overload + def create(self, extraction_pipeline: ExtractionPipeline | ExtractionPipelineWrite) -> ExtractionPipeline: ... + + @overload + def create( + self, extraction_pipeline: Sequence[ExtractionPipeline] | Sequence[ExtractionPipelineWrite] + ) -> ExtractionPipelineList: ... + + def create( + self, + extraction_pipeline: ExtractionPipeline + | ExtractionPipelineWrite + | Sequence[ExtractionPipeline] + | Sequence[ExtractionPipelineWrite], + ) -> ExtractionPipeline | ExtractionPipelineList: + """ + `Create one or more extraction pipelines. `_ + + You can create an arbitrary number of extraction pipelines, and the SDK will split the request into multiple requests if necessary. 
+ + Args: + extraction_pipeline (ExtractionPipeline | ExtractionPipelineWrite | Sequence[ExtractionPipeline] | Sequence[ExtractionPipelineWrite]): Extraction pipeline or list of extraction pipelines to create. + + Returns: + ExtractionPipeline | ExtractionPipelineList: Created extraction pipeline(s) + + Examples: + + Create new extraction pipeline: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import ExtractionPipelineWrite + >>> client = CogniteClient() + >>> extpipes = [ExtractionPipelineWrite(name="extPipe1",...), ExtractionPipelineWrite(name="extPipe2",...)] + >>> res = client.extraction_pipelines.create(extpipes) + """ + return run_sync(self.__async_client.extraction_pipelines.create(extraction_pipeline=extraction_pipeline)) + + def delete( + self, id: int | Sequence[int] | None = None, external_id: str | SequenceNotStr[str] | None = None + ) -> None: + """ + `Delete one or more extraction pipelines `_ + + Args: + id (int | Sequence[int] | None): Id or list of ids + external_id (str | SequenceNotStr[str] | None): External ID or list of external ids + + Examples: + + Delete extraction pipelines by id or external id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.extraction_pipelines.delete(id=[1,2,3], external_id="3") + """ + return run_sync(self.__async_client.extraction_pipelines.delete(id=id, external_id=external_id)) + + @overload + def update( + self, item: ExtractionPipeline | ExtractionPipelineWrite | ExtractionPipelineUpdate + ) -> ExtractionPipeline: ... + + @overload + def update( + self, item: Sequence[ExtractionPipeline | ExtractionPipelineWrite | ExtractionPipelineUpdate] + ) -> ExtractionPipelineList: ... + + def update( + self, + item: ExtractionPipeline + | ExtractionPipelineWrite + | ExtractionPipelineUpdate + | Sequence[ExtractionPipeline | ExtractionPipelineWrite | ExtractionPipelineUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> ExtractionPipeline | ExtractionPipelineList: + """ + `Update one or more extraction pipelines `_ + + Args: + item (ExtractionPipeline | ExtractionPipelineWrite | ExtractionPipelineUpdate | Sequence[ExtractionPipeline | ExtractionPipelineWrite | ExtractionPipelineUpdate]): Extraction pipeline(s) to update + mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (ExtractionPipeline or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + + Returns: + ExtractionPipeline | ExtractionPipelineList: Updated extraction pipeline(s) + + Examples: + + Update an extraction pipeline that you have fetched. 
This will perform a full update of the extraction pipeline: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import ExtractionPipelineUpdate + >>> client = CogniteClient() + >>> update = ExtractionPipelineUpdate(id=1) + >>> update.description.set("Another new extpipe") + >>> res = client.extraction_pipelines.update(update) + """ + return run_sync(self.__async_client.extraction_pipelines.update(item=item, mode=mode)) diff --git a/cognite/client/_sync_api/extractionpipelines/configs.py b/cognite/client/_sync_api/extractionpipelines/configs.py new file mode 100644 index 0000000000..2eef92cf3f --- /dev/null +++ b/cognite/client/_sync_api/extractionpipelines/configs.py @@ -0,0 +1,118 @@ +""" +=============================================================================== +28c18023b9534cbfe75f43d798262161 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import ExtractionPipelineConfig, ExtractionPipelineConfigRevisionList +from cognite.client.data_classes.extractionpipelines import ExtractionPipelineConfigWrite +from cognite.client.utils._async_helpers import run_sync + + +class SyncExtractionPipelineConfigsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def retrieve( + self, external_id: str, revision: int | None = None, active_at_time: int | None = None + ) -> ExtractionPipelineConfig: + """ + `Retrieve a specific configuration revision, or the latest by default ` + + By default the latest configuration revision is retrieved, or you can specify a timestamp or a revision number. + + Args: + external_id (str): External id of the extraction pipeline to retrieve config from. + revision (int | None): Optionally specify a revision number to retrieve. + active_at_time (int | None): Optionally specify a timestamp the configuration revision should be active. + + Returns: + ExtractionPipelineConfig: Retrieved extraction pipeline configuration revision + + Examples: + + Retrieve latest config revision: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.extraction_pipelines.config.retrieve("extId") + """ + return run_sync( + self.__async_client.extraction_pipelines.config.retrieve( + external_id=external_id, revision=revision, active_at_time=active_at_time + ) + ) + + def list(self, external_id: str) -> ExtractionPipelineConfigRevisionList: + """ + `Retrieve all configuration revisions from an extraction pipeline ` + + Args: + external_id (str): External id of the extraction pipeline to retrieve config from. 
+ + Returns: + ExtractionPipelineConfigRevisionList: Retrieved extraction pipeline configuration revisions + + Examples: + + Retrieve a list of config revisions: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.extraction_pipelines.config.list("extId") + """ + return run_sync(self.__async_client.extraction_pipelines.config.list(external_id=external_id)) + + def create(self, config: ExtractionPipelineConfig | ExtractionPipelineConfigWrite) -> ExtractionPipelineConfig: + """ + `Create a new configuration revision ` + + Args: + config (ExtractionPipelineConfig | ExtractionPipelineConfigWrite): Configuration revision to create. + + Returns: + ExtractionPipelineConfig: Created extraction pipeline configuration revision + + Examples: + + Create a config revision: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import ExtractionPipelineConfigWrite + >>> client = CogniteClient() + >>> res = client.extraction_pipelines.config.create(ExtractionPipelineConfigWrite(external_id="extId", config="my config contents")) + """ + return run_sync(self.__async_client.extraction_pipelines.config.create(config=config)) + + def revert(self, external_id: str, revision: int) -> ExtractionPipelineConfig: + """ + `Revert to a previous configuration revision ` + + Args: + external_id (str): External id of the extraction pipeline to revert revision for. + revision (int): Revision to revert to. + + Returns: + ExtractionPipelineConfig: New latest extraction pipeline configuration revision. + + Examples: + + Revert a config revision: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.extraction_pipelines.config.revert("extId", 5) + """ + return run_sync( + self.__async_client.extraction_pipelines.config.revert(external_id=external_id, revision=revision) + ) diff --git a/cognite/client/_sync_api/extractionpipelines/runs.py b/cognite/client/_sync_api/extractionpipelines/runs.py new file mode 100644 index 0000000000..74e5062d84 --- /dev/null +++ b/cognite/client/_sync_api/extractionpipelines/runs.py @@ -0,0 +1,126 @@ +""" +=============================================================================== +c7e0250a7afdf41370a375942043efcf +This file is auto-generated from the Async API modules, - do not edit manually! 
+=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import Any, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._api.extractionpipelines import RunStatus +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import ( + ExtractionPipelineRun, + ExtractionPipelineRunList, + TimestampRange, +) +from cognite.client.data_classes.extractionpipelines import ( + ExtractionPipelineRunWrite, +) +from cognite.client.utils._async_helpers import run_sync +from cognite.client.utils.useful_types import SequenceNotStr + + +class SyncExtractionPipelineRunsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def list( + self, + external_id: str, + statuses: RunStatus | Sequence[RunStatus] | SequenceNotStr[str] | None = None, + message_substring: str | None = None, + created_time: dict[str, Any] | TimestampRange | str | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + ) -> ExtractionPipelineRunList: + """ + `List runs for an extraction pipeline with given external_id `_ + + Args: + external_id (str): Extraction pipeline external Id. + statuses (RunStatus | Sequence[RunStatus] | SequenceNotStr[str] | None): One or more among "success" / "failure" / "seen". + message_substring (str | None): Failure message part. + created_time (dict[str, Any] | TimestampRange | str | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as timestamps in ms. + If a string is passed, it is assumed to be the minimum value. + limit (int | None): Maximum number of ExtractionPipelines to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + + Returns: + ExtractionPipelineRunList: List of requested extraction pipeline runs + + Tip: + The ``created_time`` parameter can also be passed as a string, to support the most typical usage pattern + of fetching the most recent runs, meaning it is implicitly assumed to be the minimum created time. The + format is "N[timeunit]-ago", where timeunit is w,d,h,m (week, day, hour, minute), e.g. "12d-ago". + + Examples: + + List extraction pipeline runs: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> runsList = client.extraction_pipelines.runs.list(external_id="test ext id", limit=5) + + Filter extraction pipeline runs on a given status: + + >>> runs_list = client.extraction_pipelines.runs.list(external_id="test ext id", statuses=["seen"], limit=5) + + Get all failed pipeline runs in the last 24 hours for pipeline 'extId': + + >>> from cognite.client.data_classes import ExtractionPipelineRun + >>> res = client.extraction_pipelines.runs.list(external_id="extId", statuses="failure", created_time="24h-ago") + """ + return run_sync( + self.__async_client.extraction_pipelines.runs.list( + external_id=external_id, + statuses=statuses, + message_substring=message_substring, + created_time=created_time, + limit=limit, + ) + ) + + @overload + def create(self, run: ExtractionPipelineRun | ExtractionPipelineRunWrite) -> ExtractionPipelineRun: ... 
+
+
+    @overload
+    def create(
+        self, run: Sequence[ExtractionPipelineRun] | Sequence[ExtractionPipelineRunWrite]
+    ) -> ExtractionPipelineRunList: ...
+
+    def create(
+        self,
+        run: ExtractionPipelineRun
+        | ExtractionPipelineRunWrite
+        | Sequence[ExtractionPipelineRun]
+        | Sequence[ExtractionPipelineRunWrite],
+    ) -> ExtractionPipelineRun | ExtractionPipelineRunList:
+        """
+        `Create one or more extraction pipeline runs. `_
+
+        You can create an arbitrary number of extraction pipeline runs, and the SDK will split the request into multiple requests.
+
+        Args:
+            run (ExtractionPipelineRun | ExtractionPipelineRunWrite | Sequence[ExtractionPipelineRun] | Sequence[ExtractionPipelineRunWrite]): Extraction pipeline run or list of extraction pipeline runs to create.
+
+        Returns:
+            ExtractionPipelineRun | ExtractionPipelineRunList: Created extraction pipeline run(s)
+
+        Examples:
+
+            Report a new extraction pipeline run:
+
+                >>> from cognite.client import CogniteClient
+                >>> from cognite.client.data_classes import ExtractionPipelineRunWrite
+                >>> client = CogniteClient()
+                >>> res = client.extraction_pipelines.runs.create(
+                ...     ExtractionPipelineRunWrite(status="success", extpipe_external_id="extId"))
+        """
+        return run_sync(self.__async_client.extraction_pipelines.runs.create(run=run))
diff --git a/cognite/client/_sync_api/files.py b/cognite/client/_sync_api/files.py
new file mode 100644
index 0000000000..99ad8305a9
--- /dev/null
+++ b/cognite/client/_sync_api/files.py
@@ -0,0 +1,964 @@
+"""
+===============================================================================
+8ff3a2a3fc25ed22905564ffee81a017
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import AsyncIterator, Iterator, Sequence
+from pathlib import Path
+from typing import Any, BinaryIO, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import (
+    FileMetadata,
+    FileMetadataFilter,
+    FileMetadataList,
+    FileMetadataUpdate,
+    FileMetadataWrite,
+    FileMultipartUploadSession,
+    GeoLocation,
+    GeoLocationFilter,
+    Label,
+    LabelFilter,
+    TimestampRange,
+)
+from cognite.client.data_classes.data_modeling import NodeId
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+
+class SyncFilesAPI(SyncAPIClient):
+    """Auto-generated, do not modify manually."""
+
+    def __init__(self, async_client: AsyncCogniteClient):
+        self.__async_client = async_client
+
+    @overload
+    def __call__(self, chunk_size: None = None) -> Iterator[FileMetadata]: ...
+
+    @overload
+    def __call__(self, chunk_size: int) -> Iterator[FileMetadataList]: ...
+ + def __call__( + self, + chunk_size: int | None = None, + name: str | None = None, + mime_type: str | None = None, + metadata: dict[str, str] | None = None, + asset_ids: Sequence[int] | None = None, + asset_external_ids: SequenceNotStr[str] | None = None, + asset_subtree_ids: int | Sequence[int] | None = None, + asset_subtree_external_ids: str | SequenceNotStr[str] | None = None, + data_set_ids: int | Sequence[int] | None = None, + data_set_external_ids: str | SequenceNotStr[str] | None = None, + labels: LabelFilter | None = None, + geo_location: GeoLocationFilter | None = None, + source: str | None = None, + created_time: dict[str, Any] | TimestampRange | None = None, + last_updated_time: dict[str, Any] | TimestampRange | None = None, + source_created_time: dict[str, Any] | TimestampRange | None = None, + source_modified_time: dict[str, Any] | TimestampRange | None = None, + uploaded_time: dict[str, Any] | TimestampRange | None = None, + external_id_prefix: str | None = None, + directory_prefix: str | None = None, + uploaded: bool | None = None, + limit: int | None = None, + ) -> Iterator[FileMetadata | FileMetadataList]: + """ + Iterate over files + + Fetches file metadata objects as they are iterated over, so you keep a limited number of metadata objects in memory. + + Args: + chunk_size (int | None): Number of files to return in each chunk. Defaults to yielding one event a time. + name (str | None): Name of the file. + mime_type (str | None): File type. E.g. text/plain, application/pdf, .. + metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value + asset_ids (Sequence[int] | None): Only include files that reference these specific asset IDs. + asset_external_ids (SequenceNotStr[str] | None): No description. + asset_subtree_ids (int | Sequence[int] | None): Only include files that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include files that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids (int | Sequence[int] | None): Return only files in the specified data set(s) with this id / these ids. + data_set_external_ids (str | SequenceNotStr[str] | None): Return only files in the specified data set(s) with this external id / these external ids. + labels (LabelFilter | None): Return only the files matching the specified label(s). + geo_location (GeoLocationFilter | None): Only include files matching the specified geographic relation. + source (str | None): The source of this event. + created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + source_created_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceCreatedTime field has been set and is within the specified range. + source_modified_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceModifiedTime field has been set and is within the specified range. 
+ uploaded_time (dict[str, Any] | TimestampRange | None): Range between two timestamps + external_id_prefix (str | None): External Id provided by client. Should be unique within the project. + directory_prefix (str | None): Filter by this (case-sensitive) prefix for the directory provided by the client. + uploaded (bool | None): Whether or not the actual file is uploaded. This field is returned only by the API, it has no effect in a post body. + limit (int | None): Maximum number of files to return. Defaults to return all items. + + Yields: + FileMetadata | FileMetadataList: yields FileMetadata one by one if chunk_size is not specified, else FileMetadataList objects. + """ + yield from SyncIterator( + self.__async_client.files( + chunk_size=chunk_size, + name=name, + mime_type=mime_type, + metadata=metadata, + asset_ids=asset_ids, + asset_external_ids=asset_external_ids, + asset_subtree_ids=asset_subtree_ids, + asset_subtree_external_ids=asset_subtree_external_ids, + data_set_ids=data_set_ids, + data_set_external_ids=data_set_external_ids, + labels=labels, + geo_location=geo_location, + source=source, + created_time=created_time, + last_updated_time=last_updated_time, + source_created_time=source_created_time, + source_modified_time=source_modified_time, + uploaded_time=uploaded_time, + external_id_prefix=external_id_prefix, + directory_prefix=directory_prefix, + uploaded=uploaded, + limit=limit, + ) + ) + + def create( + self, file_metadata: FileMetadata | FileMetadataWrite, overwrite: bool = False + ) -> tuple[FileMetadata, str]: + """ + Create file without uploading content. + + Args: + file_metadata (FileMetadata | FileMetadataWrite): File metadata for the file to create. + overwrite (bool): If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings. + + Returns: + tuple[FileMetadata, str]: Tuple containing the file metadata and upload url of the created file. + + Examples: + + Create a file: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import FileMetadataWrite + >>> client = CogniteClient() + >>> file_metadata = FileMetadataWrite(name="MyFile") + >>> res = client.files.create(file_metadata) + """ + return run_sync(self.__async_client.files.create(file_metadata=file_metadata, overwrite=overwrite)) + + def retrieve( + self, id: int | None = None, external_id: str | None = None, instance_id: NodeId | None = None + ) -> FileMetadata | None: + """ + `Retrieve a single file metadata by id. `_ + + Args: + id (int | None): ID + external_id (str | None): External ID + instance_id (NodeId | None): Instance ID + + Returns: + FileMetadata | None: Requested file metadata or None if it does not exist. 
+ + Examples: + + Get file metadata by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.files.retrieve(id=1) + + Get file metadata by external id: + + >>> res = client.files.retrieve(external_id="1") + """ + return run_sync(self.__async_client.files.retrieve(id=id, external_id=external_id, instance_id=instance_id)) + + def retrieve_multiple( + self, + ids: Sequence[int] | None = None, + external_ids: SequenceNotStr[str] | None = None, + instance_ids: Sequence[NodeId] | None = None, + ignore_unknown_ids: bool = False, + ) -> FileMetadataList: + """ + `Retrieve multiple file metadatas by id. `_ + + Args: + ids (Sequence[int] | None): IDs + external_ids (SequenceNotStr[str] | None): External IDs + instance_ids (Sequence[NodeId] | None): Instance IDs + ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + + Returns: + FileMetadataList: The requested file metadatas. + + Examples: + + Get file metadatas by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.files.retrieve_multiple(ids=[1, 2, 3]) + + Get file_metadatas by external id: + + >>> res = client.files.retrieve_multiple(external_ids=["abc", "def"]) + """ + return run_sync( + self.__async_client.files.retrieve_multiple( + ids=ids, external_ids=external_ids, instance_ids=instance_ids, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + def aggregate_count(self, filter: FileMetadataFilter | dict[str, Any] | None = None) -> int: + """ + `Aggregate files `_ + + Args: + filter (FileMetadataFilter | dict[str, Any] | None): Filter on file metadata filter with exact match + + Returns: + int: Count of files matching the filter. + + Examples: + + Get the count of files that have been uploaded: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> aggregate_uploaded = client.files.aggregate(filter={"uploaded": True}) + """ + return run_sync(self.__async_client.files.aggregate_count(filter=filter)) + + def delete( + self, + id: int | Sequence[int] | None = None, + external_id: str | SequenceNotStr[str] | None = None, + ignore_unknown_ids: bool = False, + ) -> None: + """ + `Delete files `_ + + Args: + id (int | Sequence[int] | None): Id or list of ids + external_id (str | SequenceNotStr[str] | None): str or list of str + ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + + Examples: + + Delete files by id or external id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.files.delete(id=[1,2,3], external_id="3") + """ + return run_sync( + self.__async_client.files.delete(id=id, external_id=external_id, ignore_unknown_ids=ignore_unknown_ids) + ) + + @overload + def update( + self, + item: FileMetadata | FileMetadataWrite | FileMetadataUpdate, + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> FileMetadata: ... 
+ + @overload + def update( + self, + item: Sequence[FileMetadata | FileMetadataWrite | FileMetadataUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> FileMetadataList: ... + + def update( + self, + item: FileMetadata + | FileMetadataWrite + | FileMetadataUpdate + | Sequence[FileMetadata | FileMetadataWrite | FileMetadataUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> FileMetadata | FileMetadataList: + """ + `Update files `_ + Currently, a full replacement of labels on a file is not supported (only partial add/remove updates). See the example below on how to perform partial labels update. + + Args: + item (FileMetadata | FileMetadataWrite | FileMetadataUpdate | Sequence[FileMetadata | FileMetadataWrite | FileMetadataUpdate]): file(s) to update. + mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (FilesMetadata or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + + Returns: + FileMetadata | FileMetadataList: The updated files. + + Examples: + + Update file metadata that you have fetched. This will perform a full update of the file metadata: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> file_metadata = client.files.retrieve(id=1) + >>> file_metadata.description = "New description" + >>> res = client.files.update(file_metadata) + + Perform a partial update on file metadata, updating the source and adding a new field to metadata: + + >>> from cognite.client.data_classes import FileMetadataUpdate + >>> my_update = FileMetadataUpdate(id=1).source.set("new source").metadata.add({"key": "value"}) + >>> res = client.files.update(my_update) + + Attach labels to a files: + + >>> from cognite.client.data_classes import FileMetadataUpdate + >>> my_update = FileMetadataUpdate(id=1).labels.add(["PUMP", "VERIFIED"]) + >>> res = client.files.update(my_update) + + Detach a single label from a file: + + >>> from cognite.client.data_classes import FileMetadataUpdate + >>> my_update = FileMetadataUpdate(id=1).labels.remove("PUMP") + >>> res = client.files.update(my_update) + """ + return run_sync(self.__async_client.files.update(item=item, mode=mode)) + + def search( + self, + name: str | None = None, + filter: FileMetadataFilter | dict[str, Any] | None = None, + limit: int = DEFAULT_LIMIT_READ, + ) -> FileMetadataList: + """ + `Search for files. `_ + Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required. + + Args: + name (str | None): Prefix and fuzzy search on name. + filter (FileMetadataFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields. + limit (int): Max number of results to return. + + Returns: + FileMetadataList: List of requested files metadata. 
+ + Examples: + + Search for a file: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.files.search(name="some name") + + Search for an asset with an attached label: + + >>> my_label_filter = LabelFilter(contains_all=["WELL LOG"]) + >>> res = client.assets.search(name="xyz",filter=FileMetadataFilter(labels=my_label_filter)) + """ + return run_sync(self.__async_client.files.search(name=name, filter=filter, limit=limit)) + + def upload_content( + self, path: Path, external_id: str | None = None, instance_id: NodeId | None = None + ) -> FileMetadata: + """ + `Upload a file content `_ + + Args: + path (Path): Path to the file you wish to upload. + external_id (str | None): The external ID provided by the client. Must be unique within the project. + instance_id (NodeId | None): Instance ID of the file. + Returns: + FileMetadata: No description. + """ + return run_sync( + self.__async_client.files.upload_content(path=path, external_id=external_id, instance_id=instance_id) + ) + + def upload( + self, + path: Path, + external_id: str | None = None, + name: str | None = None, + source: str | None = None, + mime_type: str | None = None, + metadata: dict[str, str] | None = None, + directory: str | None = None, + asset_ids: Sequence[int] | None = None, + source_created_time: int | None = None, + source_modified_time: int | None = None, + data_set_id: int | None = None, + labels: Sequence[Label] | None = None, + geo_location: GeoLocation | None = None, + security_categories: Sequence[int] | None = None, + recursive: bool = False, + overwrite: bool = False, + ) -> FileMetadata | FileMetadataList: + """ + `Upload a file `_ + + Args: + path (Path): Path to the file you wish to upload. If path is a directory, this method will upload all files in that directory. + external_id (str | None): The external ID provided by the client. Must be unique within the project. + name (str | None): Name of the file. + source (str | None): The source of the file. + mime_type (str | None): File type. E.g. text/plain, application/pdf, ... + metadata (dict[str, str] | None): Customizable extra data about the file. String key -> String value. + directory (str | None): The directory to be associated with this file. Must be an absolute, unix-style path. + asset_ids (Sequence[int] | None): No description. + source_created_time (int | None): The timestamp for when the file was originally created in the source system. + source_modified_time (int | None): The timestamp for when the file was last modified in the source system. + data_set_id (int | None): ID of the data set. + labels (Sequence[Label] | None): A list of the labels associated with this resource item. + geo_location (GeoLocation | None): The geographic metadata of the file. + security_categories (Sequence[int] | None): Security categories to attach to this file. + recursive (bool): If path is a directory, upload all contained files recursively. + overwrite (bool): If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. 
File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings. + + Returns: + FileMetadata | FileMetadataList: The file metadata of the uploaded file(s). + + Examples: + + Upload a file in a given path: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> from pathlib import Path + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> my_file = Path("/path/to/file.txt") + >>> res = client.files.upload(my_file, name="my_file") + + If name is omitted, this method will use the name of the file (file.txt in the example above): + + >>> res = client.files.upload(my_file) + + You can also upload all files in a directory by setting path to the path of a directory + (filenames will be automatically used for `name`): + + >>> upload_dir = Path("/path/to/my/directory") + >>> res = client.files.upload(upload_dir) + + You can also upload all files in a directory recursively by passing `recursive=True`: + + >>> res = client.files.upload(upload_dir, recursive=True) + + Upload a file with a label: + + >>> from cognite.client.data_classes import Label + >>> res = client.files.upload(my_file, name="my_file", labels=[Label(external_id="WELL LOG")]) + + Upload a file with a geo_location: + + >>> from cognite.client.data_classes import GeoLocation, Geometry + >>> geometry = Geometry(type="LineString", coordinates=[[30, 10], [10, 30], [40, 40]]) + >>> res = client.files.upload(my_file, geo_location=GeoLocation(type="Feature", geometry=geometry)) + """ + return run_sync( + self.__async_client.files.upload( + path=path, + external_id=external_id, + name=name, + source=source, + mime_type=mime_type, + metadata=metadata, + directory=directory, + asset_ids=asset_ids, + source_created_time=source_created_time, + source_modified_time=source_modified_time, + data_set_id=data_set_id, + labels=labels, + geo_location=geo_location, + security_categories=security_categories, + recursive=recursive, + overwrite=overwrite, + ) + ) + + def upload_content_bytes( + self, content: str | bytes | BinaryIO, external_id: str | None = None, instance_id: NodeId | None = None + ) -> FileMetadata: + """ + Upload bytes or string (UTF-8 assumed). + + Note that the maximum file size is 5GiB. In order to upload larger files use `multipart_upload_content_session`. + + Args: + content (str | bytes | BinaryIO): The content to upload. + external_id (str | None): The external ID provided by the client. Must be unique within the project. + instance_id (NodeId | None): Instance ID of the file. + + Returns: + FileMetadata: No description. + + Examples: + + Finish a file creation by uploading the content using external_id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.files.upload_content_bytes( + ... b"some content", external_id="my_file_xid") + + ...or by using instance_id: + + >>> from cognite.client.data_classes.data_modeling import NodeId + >>> res = client.files.upload_content_bytes( + ... 
b"some content", instance_id=NodeId("my-space", "my_file_xid")) + """ + return run_sync( + self.__async_client.files.upload_content_bytes( + content=content, external_id=external_id, instance_id=instance_id + ) + ) + + def upload_bytes( + self, + content: str | bytes | BinaryIO | AsyncIterator[bytes], + name: str, + external_id: str | None = None, + source: str | None = None, + mime_type: str | None = None, + metadata: dict[str, str] | None = None, + directory: str | None = None, + asset_ids: Sequence[int] | None = None, + data_set_id: int | None = None, + labels: Sequence[Label] | None = None, + geo_location: GeoLocation | None = None, + source_created_time: int | None = None, + source_modified_time: int | None = None, + security_categories: Sequence[int] | None = None, + overwrite: bool = False, + ) -> FileMetadata: + """ + Upload bytes or string. + + You can also pass a file handle to 'content'. The file must be opened in binary mode or an error will be raised. + + Note that the maximum file size is 5GiB. In order to upload larger files use `multipart_upload_session`. + + Args: + content (str | bytes | BinaryIO | AsyncIterator[bytes]): The content to upload. + name (str): Name of the file. + external_id (str | None): The external ID provided by the client. Must be unique within the project. + source (str | None): The source of the file. + mime_type (str | None): File type. E.g. text/plain, application/pdf,... + metadata (dict[str, str] | None): Customizable extra data about the file. String key -> String value. + directory (str | None): The directory to be associated with this file. Must be an absolute, unix-style path. + asset_ids (Sequence[int] | None): No description. + data_set_id (int | None): Id of the data set. + labels (Sequence[Label] | None): A list of the labels associated with this resource item. + geo_location (GeoLocation | None): The geographic metadata of the file. + source_created_time (int | None): The timestamp for when the file was originally created in the source system. + source_modified_time (int | None): The timestamp for when the file was last modified in the source system. + security_categories (Sequence[int] | None): Security categories to attach to this file. + overwrite (bool): If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings. + + Returns: + FileMetadata: The metadata of the uploaded file. 
+ + Examples: + + Upload a file from memory: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.files.upload_bytes(b"some content", name="my_file", asset_ids=[1,2,3]) + """ + return run_sync( + self.__async_client.files.upload_bytes( + content=content, + name=name, + external_id=external_id, + source=source, + mime_type=mime_type, + metadata=metadata, + directory=directory, + asset_ids=asset_ids, + data_set_id=data_set_id, + labels=labels, + geo_location=geo_location, + source_created_time=source_created_time, + source_modified_time=source_modified_time, + security_categories=security_categories, + overwrite=overwrite, + ) + ) + + def multipart_upload_session( + self, + name: str, + parts: int, + external_id: str | None = None, + source: str | None = None, + mime_type: str | None = None, + metadata: dict[str, str] | None = None, + directory: str | None = None, + asset_ids: Sequence[int] | None = None, + data_set_id: int | None = None, + labels: Sequence[Label] | None = None, + geo_location: GeoLocation | None = None, + source_created_time: int | None = None, + source_modified_time: int | None = None, + security_categories: Sequence[int] | None = None, + overwrite: bool = False, + ) -> FileMultipartUploadSession: + """ + Begin uploading a file in multiple parts. This allows uploading files larger than 5GiB. + Note that the size of each part may not exceed 4000MiB, and the size of each part except the last + must be greater than 5MiB. + + The file chunks may be uploaded in any order, and in parallel, but the client must ensure that + the parts are stored in the correct order by uploading each chunk to the correct upload URL. + + This returns a context manager you must enter (using the `with` keyword), then call `upload_part` + for each part before exiting. + + Args: + name (str): Name of the file. + parts (int): The number of parts to upload, must be between 1 and 250. + external_id (str | None): The external ID provided by the client. Must be unique within the project. + source (str | None): The source of the file. + mime_type (str | None): File type. E.g. text/plain, application/pdf,... + metadata (dict[str, str] | None): Customizable extra data about the file. String key -> String value. + directory (str | None): The directory to be associated with this file. Must be an absolute, unix-style path. + asset_ids (Sequence[int] | None): No description. + data_set_id (int | None): Id of the data set. + labels (Sequence[Label] | None): A list of the labels associated with this resource item. + geo_location (GeoLocation | None): The geographic metadata of the file. + source_created_time (int | None): The timestamp for when the file was originally created in the source system. + source_modified_time (int | None): The timestamp for when the file was last modified in the source system. + security_categories (Sequence[int] | None): Security categories to attach to this file. + overwrite (bool): If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. 
File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings. + + Returns: + FileMultipartUploadSession: Object containing metadata about the created file, and information needed to upload the file content. Use this object to manage the file upload, and `exit` it once all parts are uploaded. + + Examples: + + Upload binary data in two chunks: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> with client.files.multipart_upload_session("my_file.txt", parts=2) as session: + ... # Note that the minimum chunk size is 5 MiB. + ... session.upload_part(0, "hello" * 1_200_000) + ... session.upload_part(1, " world") + """ + return run_sync( + self.__async_client.files.multipart_upload_session( + name=name, + parts=parts, + external_id=external_id, + source=source, + mime_type=mime_type, + metadata=metadata, + directory=directory, + asset_ids=asset_ids, + data_set_id=data_set_id, + labels=labels, + geo_location=geo_location, + source_created_time=source_created_time, + source_modified_time=source_modified_time, + security_categories=security_categories, + overwrite=overwrite, + ) + ) + + def multipart_upload_content_session( + self, parts: int, external_id: str | None = None, instance_id: NodeId | None = None + ) -> FileMultipartUploadSession: + """ + Begin uploading a file in multiple parts whose metadata is already created in CDF. This allows uploading files larger than 5GiB. + Note that the size of each part may not exceed 4000MiB, and the size of each part except the last + must be greater than 5MiB. + + The file chunks may be uploaded in any order, and in parallel, but the client must ensure that + the parts are stored in the correct order by uploading each chunk to the correct upload URL. + + This returns a context manager (that also supports async) you must enter (using the `with` keyword, or `async with`), then call `upload_part` + for each part before exiting, which will automatically finalize the multipart upload. + + Args: + parts (int): The number of parts to upload, must be between 1 and 250. + external_id (str | None): The external ID provided by the client. Must be unique within the project. + instance_id (NodeId | None): Instance ID of the file. + + Returns: + FileMultipartUploadSession: Object containing metadata about the created file, and information needed to upload the file content. Use this object to manage the file upload, and `exit` it once all parts are uploaded. + + Examples: + + Upload binary data in two chunks: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> with client.files.multipart_upload_content_session(external_id="external-id", parts=2) as session: + ... # Note that the minimum chunk size is 5 MiB. + ... session.upload_part(0, "hello" * 1_200_000) + ... 
session.upload_part(1, " world") + """ + return run_sync( + self.__async_client.files.multipart_upload_content_session( + parts=parts, external_id=external_id, instance_id=instance_id + ) + ) + + def retrieve_download_urls( + self, + id: int | Sequence[int] | None = None, + external_id: str | SequenceNotStr[str] | None = None, + instance_id: NodeId | Sequence[NodeId] | None = None, + extended_expiration: bool = False, + ) -> dict[int | str | NodeId, str]: + """ + Get download links by id or external id + + Args: + id (int | Sequence[int] | None): Id or list of ids. + external_id (str | SequenceNotStr[str] | None): External id or list of external ids. + instance_id (NodeId | Sequence[NodeId] | None): Instance id or list of instance ids. + extended_expiration (bool): Extend expiration time of download url to 1 hour. Defaults to false. + + Returns: + dict[int | str | NodeId, str]: Dictionary containing download urls. + """ + return run_sync( + self.__async_client.files.retrieve_download_urls( + id=id, external_id=external_id, instance_id=instance_id, extended_expiration=extended_expiration + ) + ) + + def download( + self, + directory: str | Path, + id: int | Sequence[int] | None = None, + external_id: str | SequenceNotStr[str] | None = None, + instance_id: NodeId | Sequence[NodeId] | None = None, + keep_directory_structure: bool = False, + resolve_duplicate_file_names: bool = False, + ) -> None: + """ + `Download files by id or external id. `_ + + This method will stream all files to disk, never keeping more than 2MB in memory per worker. + The files will be stored in the provided directory using the file name retrieved from the file metadata in CDF. + You can also choose to keep the directory structure from CDF so that the files will be stored in subdirectories + matching the directory attribute on the files. When missing, the (root) directory is used. + By default, duplicate file names to the same local folder will be resolved by only keeping one of the files. + You can choose to resolve this by appending a number to the file name using the resolve_duplicate_file_names argument. + + Warning: + If you are downloading several files at once, be aware that file name collisions lead to all-but-one of + the files missing. A warning is issued when this happens, listing the affected files. + + Args: + directory (str | Path): Directory to download the file(s) to. + id (int | Sequence[int] | None): Id or list of ids + external_id (str | SequenceNotStr[str] | None): External ID or list of external ids. + instance_id (NodeId | Sequence[NodeId] | None): Instance ID or list of instance ids. + keep_directory_structure (bool): Whether or not to keep the directory hierarchy in CDF, + creating subdirectories as needed below the given directory. 
+            resolve_duplicate_file_names (bool): Whether or not to resolve duplicate file names by appending a number on duplicate file names
+
+        Examples:
+
+            Download files by id and external id into directory 'my_directory':
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> client.files.download(directory="my_directory", id=[1,2,3], external_id=["abc", "def"])
+
+            Download files by id to the current directory:
+
+                >>> client.files.download(directory=".", id=[1,2,3])
+        """
+        return run_sync(
+            self.__async_client.files.download(
+                directory=directory,
+                id=id,
+                external_id=external_id,
+                instance_id=instance_id,
+                keep_directory_structure=keep_directory_structure,
+                resolve_duplicate_file_names=resolve_duplicate_file_names,
+            )
+        )
+
+    def download_to_path(
+        self, path: Path, id: int | None = None, external_id: str | None = None, instance_id: NodeId | None = None
+    ) -> None:
+        """
+        Download a file to a specific target.
+
+        Args:
+            path (Path): The path in which to place the file.
+            id (int | None): Id of the file to download.
+            external_id (str | None): External id of the file to download.
+            instance_id (NodeId | None): Instance id of the file to download.
+
+        Examples:
+
+            Download a file by id:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> client.files.download_to_path("~/mydir/my_downloaded_file.txt", id=123)
+        """
+        return run_sync(
+            self.__async_client.files.download_to_path(
+                path=path, id=id, external_id=external_id, instance_id=instance_id
+            )
+        )
+
+    def download_bytes(
+        self, id: int | None = None, external_id: str | None = None, instance_id: NodeId | None = None
+    ) -> bytes:
+        """
+        Download a file as bytes.
+
+        Args:
+            id (int | None): Id of the file
+            external_id (str | None): External id of the file
+            instance_id (NodeId | None): Instance id of the file
+
+        Examples:
+
+            Download a file's content into memory:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> file_content = client.files.download_bytes(id=1)
+
+        Returns:
+            bytes: The file in binary format
+        """
+        return run_sync(
+            self.__async_client.files.download_bytes(id=id, external_id=external_id, instance_id=instance_id)
+        )
+
+    def list(
+        self,
+        name: str | None = None,
+        mime_type: str | None = None,
+        metadata: dict[str, str] | None = None,
+        asset_ids: Sequence[int] | None = None,
+        asset_external_ids: SequenceNotStr[str] | None = None,
+        asset_subtree_ids: int | Sequence[int] | None = None,
+        asset_subtree_external_ids: str | SequenceNotStr[str] | None = None,
+        data_set_ids: int | Sequence[int] | None = None,
+        data_set_external_ids: str | SequenceNotStr[str] | None = None,
+        labels: LabelFilter | None = None,
+        geo_location: GeoLocationFilter | None = None,
+        source: str | None = None,
+        created_time: dict[str, Any] | TimestampRange | None = None,
+        last_updated_time: dict[str, Any] | TimestampRange | None = None,
+        source_created_time: dict[str, Any] | TimestampRange | None = None,
+        source_modified_time: dict[str, Any] | TimestampRange | None = None,
+        uploaded_time: dict[str, Any] | TimestampRange | None = None,
+        external_id_prefix: str | None = None,
+        directory_prefix: str | None = None,
+        uploaded: bool | None = None,
+        limit: int | None = DEFAULT_LIMIT_READ,
+        partitions: int | None = None,
+    ) -> FileMetadataList:
+        """
+        `List files `_
+
+        Args:
+            name (str | None): Name of the file.
+            mime_type (str | None): File type. E.g. text/plain, application/pdf, ..
+            metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value
+            asset_ids (Sequence[int] | None): Only include files that reference these specific asset IDs.
+            asset_external_ids (SequenceNotStr[str] | None): No description.
+            asset_subtree_ids (int | Sequence[int] | None): Only include files that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+            asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include files that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+            data_set_ids (int | Sequence[int] | None): Return only files in the specified data set(s) with this id / these ids.
+            data_set_external_ids (str | SequenceNotStr[str] | None): Return only files in the specified data set(s) with this external id / these external ids.
+            labels (LabelFilter | None): Return only the files matching the specified label filter(s).
+            geo_location (GeoLocationFilter | None): Only include files matching the specified geographic relation.
+            source (str | None): The source of this file.
+            created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+            last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ source_created_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceCreatedTime field has been set and is within the specified range. + source_modified_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceModifiedTime field has been set and is within the specified range. + uploaded_time (dict[str, Any] | TimestampRange | None): Range between two timestamps + external_id_prefix (str | None): External Id provided by client. Should be unique within the project. + directory_prefix (str | None): Filter by this (case-sensitive) prefix for the directory provided by the client. + uploaded (bool | None): Whether or not the actual file is uploaded. This field is returned only by the API, it has no effect in a post body. + limit (int | None): Max number of files to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). + + Returns: + FileMetadataList: The requested files. + + Examples: + + List files metadata and filter on external id prefix: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> file_list = client.files.list(limit=5, external_id_prefix="prefix") + + Iterate over files metadata, one-by-one: + + >>> for file_metadata in client.files(): + ... file_metadata # do something with the file metadata + + Iterate over chunks of files metadata to reduce memory load: + + >>> for file_list in client.files(chunk_size=2500): + ... file_list # do something with the files + + Filter files based on labels: + + >>> from cognite.client.data_classes import LabelFilter + >>> my_label_filter = LabelFilter(contains_all=["WELL LOG", "VERIFIED"]) + >>> file_list = client.files.list(labels=my_label_filter) + + Filter files based on geoLocation: + + >>> from cognite.client.data_classes import GeoLocationFilter, GeometryFilter + >>> my_geo_location_filter = GeoLocationFilter(relation="intersects", shape=GeometryFilter(type="Point", coordinates=[35,10])) + >>> file_list = client.files.list(geo_location=my_geo_location_filter) + """ + return run_sync( + self.__async_client.files.list( + name=name, + mime_type=mime_type, + metadata=metadata, + asset_ids=asset_ids, + asset_external_ids=asset_external_ids, + asset_subtree_ids=asset_subtree_ids, + asset_subtree_external_ids=asset_subtree_external_ids, + data_set_ids=data_set_ids, + data_set_external_ids=data_set_external_ids, + labels=labels, + geo_location=geo_location, + source=source, + created_time=created_time, + last_updated_time=last_updated_time, + source_created_time=source_created_time, + source_modified_time=source_modified_time, + uploaded_time=uploaded_time, + external_id_prefix=external_id_prefix, + directory_prefix=directory_prefix, + uploaded=uploaded, + limit=limit, + partitions=partitions, + ) + ) diff --git a/cognite/client/_sync_api/functions/__init__.py b/cognite/client/_sync_api/functions/__init__.py new file mode 100644 index 0000000000..5b8b53e461 --- /dev/null +++ b/cognite/client/_sync_api/functions/__init__.py @@ -0,0 +1,448 @@ +""" +=============================================================================== +f9f881cace1e6f653087b987c4df3c92 +This file is auto-generated from the Async API modules, - do not edit manually! 
+=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, Literal, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api.functions.calls import SyncFunctionCallsAPI +from cognite.client._sync_api.functions.schedules import SyncFunctionSchedulesAPI +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import ( + Function, + FunctionCall, + FunctionList, + FunctionsLimits, + TimestampRange, +) +from cognite.client.data_classes.functions import ( + HANDLER_FILE_NAME, + FunctionHandle, + FunctionsStatus, + FunctionStatus, + FunctionWrite, + RunTime, +) +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncFunctionsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + self.calls = SyncFunctionCallsAPI(async_client) + self.schedules = SyncFunctionSchedulesAPI(async_client) + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[Function]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[FunctionList]: ... + + def __call__( + self, + chunk_size: int | None = None, + name: str | None = None, + owner: str | None = None, + file_id: int | None = None, + status: FunctionStatus | None = None, + external_id_prefix: str | None = None, + created_time: dict[Literal["min", "max"], int] | TimestampRange | None = None, + metadata: dict[str, str] | None = None, + limit: int | None = None, + ) -> Iterator[Function | FunctionList]: + """ + Iterate over functions. + + Args: + chunk_size (int | None): Number of functions to yield per chunk. Defaults to yielding functions one by one. + name (str | None): The name of the function. + owner (str | None): Owner of the function. + file_id (int | None): The file ID of the zip-file used to create the function. + status (FunctionStatus | None): Status of the function. Possible values: ["Queued", "Deploying", "Ready", "Failed"]. + external_id_prefix (str | None): External ID prefix to filter on. + created_time (dict[Literal['min', 'max'], int] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + metadata (dict[str, str] | None): No description. + limit (int | None): Maximum number of functions to return. Defaults to yielding all functions. + + Yields: + Function | FunctionList: An iterator over functions. 
+ """ + yield from SyncIterator( + self.__async_client.functions( + chunk_size=chunk_size, + name=name, + owner=owner, + file_id=file_id, + status=status, + external_id_prefix=external_id_prefix, + created_time=created_time, + metadata=metadata, + limit=limit, + ) + ) + + def create( + self, + name: str | FunctionWrite, + folder: str | None = None, + file_id: int | None = None, + function_path: str = HANDLER_FILE_NAME, + function_handle: FunctionHandle | None = None, + external_id: str | None = None, + description: str | None = None, + owner: str | None = None, + secrets: dict[str, str] | None = None, + env_vars: dict[str, str] | None = None, + cpu: float | None = None, + memory: float | None = None, + runtime: RunTime | None = None, + metadata: dict[str, str] | None = None, + index_url: str | None = None, + extra_index_urls: list[str] | None = None, + skip_folder_validation: bool = False, + data_set_id: int | None = None, + ) -> Function: + """ + `When creating a function, `_ + the source code can be specified in one of three ways: + + - Via the `folder` argument, which is the path to the folder where the source code is located. `function_path` must point to a python file in the folder within which a function named `handle` must be defined. + - Via the `file_id` argument, which is the ID of a zip-file uploaded to the files API. `function_path` must point to a python file in the zipped folder within which a function named `handle` must be defined. + - Via the `function_handle` argument, which is a reference to a function object, which must be named `handle`. + + The function named `handle` is the entrypoint of the created function. Valid arguments to `handle` are `data`, `client`, `secrets` and `function_call_info`: + - If the user calls the function with input data, this is passed through the `data` argument. + - If the user gives one or more secrets when creating the function, these are passed through the `secrets` argument. + - Data about the function call can be accessed via the argument `function_call_info`, which is a dictionary with keys `function_id`, `call_id`, and, if the call is scheduled, `schedule_id` and `scheduled_time`. + + By default, the function is deployed with the latest version of cognite-sdk. If a specific version is desired, it can be specified either in a requirements.txt file when deploying via the `folder` argument or between `[requirements]` tags when deploying via the `function_handle` argument (see example below). + + For help with troubleshooting, please see `this page. `_ + + Args: + name (str | FunctionWrite): The name of the function or a FunctionWrite object. If a FunctionWrite + object is passed, all other arguments are ignored. + folder (str | None): Path to the folder where the function source code is located. + file_id (int | None): File ID of the code uploaded to the Files API. + function_path (str): Relative path from the root folder to the file containing the `handle` function. Defaults to `handler.py`. Must be on POSIX path format. + function_handle (FunctionHandle | None): Reference to a function object, which must be named `handle`. + external_id (str | None): External id of the function. + description (str | None): Description of the function. + owner (str | None): Owner of this function. Typically used to know who created it. + secrets (dict[str, str] | None): Additional secrets as key/value pairs. These can e.g. password to simulators or other data sources. Keys must be lowercase characters, numbers or dashes (-) and at most 15 characters. 
You can create at most 30 secrets, all keys must be unique. + env_vars (dict[str, str] | None): Environment variables as key/value pairs. Keys can contain only letters, numbers or the underscore character. You can create at most 100 environment variables. + cpu (float | None): Number of CPU cores per function. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used. + memory (float | None): Memory per function measured in GB. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used. + runtime (RunTime | None): The function runtime. Valid values are ["py310", "py311", "py312", `None`], and `None` translates to the API default which will change over time. The runtime "py312" resolves to the latest version of the Python 3.12 series. + metadata (dict[str, str] | None): Metadata for the function as key/value pairs. Key & values can be at most 32, 512 characters long respectively. You can have at the most 16 key-value pairs, with a maximum size of 512 bytes. + index_url (str | None): Index URL for Python Package Manager to use. Be aware of the intrinsic security implications of using the `index_url` option. `More information can be found on official docs, `_ + extra_index_urls (list[str] | None): Extra Index URLs for Python Package Manager to use. Be aware of the intrinsic security implications of using the `extra_index_urls` option. `More information can be found on official docs, `_ + skip_folder_validation (bool): When creating a function using the 'folder' argument, pass True to skip the extra validation step that attempts to import the module. Skipping can be useful when your function requires several heavy packages to already be installed locally. Defaults to False. + data_set_id (int | None): Data set to upload the function code to. Note: Does not affect the function itself. + + Returns: + Function: The created function. + + Examples: + + Create function with source code in folder: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> function = client.functions.create( + ... name="myfunction", + ... folder="path/to/code", + ... function_path="path/to/function.py") + + Create function with file_id from already uploaded source code: + + >>> function = client.functions.create( + ... name="myfunction", file_id=123, function_path="path/to/function.py") + + Create function with predefined function object named `handle`: + + >>> function = client.functions.create(name="myfunction", function_handle=handle) + + Create function with predefined function object named `handle` with dependencies: + + >>> def handle(client, data): + >>> ''' + >>> [requirements] + >>> numpy + >>> [/requirements] + >>> ''' + >>> pass + >>> + >>> function = client.functions.create(name="myfunction", function_handle=handle) + + .. note: + When using a predefined function object, you can list dependencies between the tags `[requirements]` and `[/requirements]` in the function's docstring. + The dependencies will be parsed and validated in accordance with requirement format specified in `PEP 508 `_. 
+ """ + return run_sync( + self.__async_client.functions.create( + name=name, + folder=folder, + file_id=file_id, + function_path=function_path, + function_handle=function_handle, + external_id=external_id, + description=description, + owner=owner, + secrets=secrets, + env_vars=env_vars, + cpu=cpu, + memory=memory, + runtime=runtime, + metadata=metadata, + index_url=index_url, + extra_index_urls=extra_index_urls, + skip_folder_validation=skip_folder_validation, + data_set_id=data_set_id, + ) + ) + + def delete( + self, id: int | Sequence[int] | None = None, external_id: str | SequenceNotStr[str] | None = None + ) -> None: + """ + `Delete one or more functions. `_ + + Args: + id (int | Sequence[int] | None): Id or list of ids. + external_id (str | SequenceNotStr[str] | None): External ID or list of external ids. + + Example: + + Delete functions by id or external id:: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.functions.delete(id=[1,2,3], external_id="function3") + """ + return run_sync(self.__async_client.functions.delete(id=id, external_id=external_id)) + + def list( + self, + name: str | None = None, + owner: str | None = None, + file_id: int | None = None, + status: FunctionStatus | None = None, + external_id_prefix: str | None = None, + created_time: dict[Literal["min", "max"], int] | TimestampRange | None = None, + metadata: dict[str, str] | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + ) -> FunctionList: + """ + `List all functions. `_ + + Args: + name (str | None): The name of the function. + owner (str | None): Owner of the function. + file_id (int | None): The file ID of the zip-file used to create the function. + status (FunctionStatus | None): Status of the function. Possible values: ["Queued", "Deploying", "Ready", "Failed"]. + external_id_prefix (str | None): External ID prefix to filter on. + created_time (dict[Literal['min', 'max'], int] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32, value 512 characters, up to 16 key-value pairs. Maximum size of entire metadata is 4096 bytes. + limit (int | None): Maximum number of functions to return. Pass in -1, float('inf') or None to list all. + + Returns: + FunctionList: List of functions + + Example: + + List functions:: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> functions_list = client.functions.list() + """ + return run_sync( + self.__async_client.functions.list( + name=name, + owner=owner, + file_id=file_id, + status=status, + external_id_prefix=external_id_prefix, + created_time=created_time, + metadata=metadata, + limit=limit, + ) + ) + + def retrieve(self, id: int | None = None, external_id: str | None = None) -> Function | None: + """ + `Retrieve a single function by id. `_ + + Args: + id (int | None): ID + external_id (str | None): External ID + + Returns: + Function | None: Requested function or None if it does not exist. 
+ + Examples: + + Get function by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.functions.retrieve(id=1) + + Get function by external id: + + >>> res = client.functions.retrieve(external_id="abc") + """ + return run_sync(self.__async_client.functions.retrieve(id=id, external_id=external_id)) + + def retrieve_multiple( + self, + ids: Sequence[int] | None = None, + external_ids: SequenceNotStr[str] | None = None, + ignore_unknown_ids: bool = False, + ) -> FunctionList: + """ + `Retrieve multiple functions by id. `_ + + Args: + ids (Sequence[int] | None): IDs + external_ids (SequenceNotStr[str] | None): External IDs + ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + + Returns: + FunctionList: The requested functions. + + Examples: + + Get function by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.functions.retrieve_multiple(ids=[1, 2, 3]) + + Get functions by external id: + + >>> res = client.functions.retrieve_multiple(external_ids=["func1", "func2"]) + """ + return run_sync( + self.__async_client.functions.retrieve_multiple( + ids=ids, external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + def call( + self, + id: int | None = None, + external_id: str | None = None, + data: dict[str, object] | None = None, + wait: bool = True, + nonce: str | None = None, + ) -> FunctionCall: + """ + `Call a function by its ID or external ID. `_. + + Args: + id (int | None): ID + external_id (str | None): External ID + data (dict[str, object] | None): Input data to the function (JSON serializable). This data is passed deserialized into the function through one of the arguments called data. **WARNING:** Secrets or other confidential information should not be passed via this argument. There is a dedicated `secrets` argument in FunctionsAPI.create() for this purpose.' + wait (bool): Wait until the function call is finished. Defaults to True. + nonce (str | None): Nonce retrieved from sessions API when creating a session. This will be used to bind the session before executing the function. If not provided, a new session will be created based on the client credentials. + + Tip: + You can create a session via the Sessions API, using the client.iam.session.create() method. + + Returns: + FunctionCall: A function call object. + + Examples: + + Call a function by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> call = client.functions.call(id=1) + + Call a function directly on the `Function` object: + + >>> func = client.functions.retrieve(id=1) + >>> call = func.call() + """ + return run_sync( + self.__async_client.functions.call(id=id, external_id=external_id, data=data, wait=wait, nonce=nonce) + ) + + def limits(self) -> FunctionsLimits: + """ + `Get service limits. `_. + + Returns: + FunctionsLimits: A function limits object. 
+ + Examples: + + Call a function by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> limits = client.functions.limits() + """ + return run_sync(self.__async_client.functions.limits()) + + def activate(self) -> FunctionsStatus: + """ + `Activate functions for the Project. `_. + + Note: + May take some time to take effect (hours). + + Returns: + FunctionsStatus: A function activation status. + + Examples: + + Call activate: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> status = client.functions.activate() + """ + return run_sync(self.__async_client.functions.activate()) + + def status(self) -> FunctionsStatus: + """ + `Functions activation status for the Project. `_. + + Returns: + FunctionsStatus: A function activation status. + + Examples: + + Call status: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> status = client.functions.status() + """ + return run_sync(self.__async_client.functions.status()) diff --git a/cognite/client/_sync_api/functions/calls.py b/cognite/client/_sync_api/functions/calls.py new file mode 100644 index 0000000000..1c3a526a31 --- /dev/null +++ b/cognite/client/_sync_api/functions/calls.py @@ -0,0 +1,174 @@ +""" +=============================================================================== +7fed9b140979eb9672e692ff65c9dba7 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import FunctionCall, FunctionCallList, FunctionCallLog +from cognite.client.utils._async_helpers import run_sync + + +class SyncFunctionCallsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def list( + self, + function_id: int | None = None, + function_external_id: str | None = None, + status: str | None = None, + schedule_id: int | None = None, + start_time: dict[str, int] | None = None, + end_time: dict[str, int] | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + ) -> FunctionCallList: + """ + `List all calls associated with a specific function id. `_ Either function_id or function_external_id must be specified. + + Args: + function_id (int | None): ID of the function on which the calls were made. + function_external_id (str | None): External ID of the function on which the calls were made. + status (str | None): Status of the call. Possible values ["Running", "Failed", "Completed", "Timeout"]. + schedule_id (int | None): Schedule id from which the call belongs (if any). + start_time (dict[str, int] | None): Start time of the call. Possible keys are `min` and `max`, with values given as time stamps in ms. + end_time (dict[str, int] | None): End time of the call. Possible keys are `min` and `max`, with values given as time stamps in ms. + limit (int | None): Maximum number of function calls to list. Pass in -1, float('inf') or None to list all Function Calls. 
+ + Returns: + FunctionCallList: List of function calls + + Examples: + + List function calls: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> calls = client.functions.calls.list(function_id=1) + + List function calls directly on a function object: + + >>> func = client.functions.retrieve(id=1) + >>> calls = func.list_calls() + """ + return run_sync( + self.__async_client.functions.calls.list( + function_id=function_id, + function_external_id=function_external_id, + status=status, + schedule_id=schedule_id, + start_time=start_time, + end_time=end_time, + limit=limit, + ) + ) + + def retrieve( + self, call_id: int, function_id: int | None = None, function_external_id: str | None = None + ) -> FunctionCall | None: + """ + `Retrieve a single function call by id. `_ + + Args: + call_id (int): ID of the call. + function_id (int | None): ID of the function on which the call was made. + function_external_id (str | None): External ID of the function on which the call was made. + + Returns: + FunctionCall | None: Requested function call or None if either call ID or function identifier is not found. + + Examples: + + Retrieve single function call by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> call = client.functions.calls.retrieve(call_id=2, function_id=1) + + Retrieve function call directly on a function object: + + >>> func = client.functions.retrieve(id=1) + >>> call = func.retrieve_call(id=2) + """ + return run_sync( + self.__async_client.functions.calls.retrieve( + call_id=call_id, function_id=function_id, function_external_id=function_external_id + ) + ) + + def get_response( + self, call_id: int, function_id: int | None = None, function_external_id: str | None = None + ) -> dict[str, object] | None: + """ + `Retrieve the response from a function call. `_ + + Args: + call_id (int): ID of the call. + function_id (int | None): ID of the function on which the call was made. + function_external_id (str | None): External ID of the function on which the call was made. + + Returns: + dict[str, object] | None: Response from the function call. + + Examples: + + Retrieve function call response by call ID: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> response = client.functions.calls.get_response(call_id=2, function_id=1) + + Retrieve function call response directly on a call object: + + >>> call = client.functions.calls.retrieve(call_id=2, function_id=1) + >>> response = call.get_response() + """ + return run_sync( + self.__async_client.functions.calls.get_response( + call_id=call_id, function_id=function_id, function_external_id=function_external_id + ) + ) + + def get_logs( + self, call_id: int, function_id: int | None = None, function_external_id: str | None = None + ) -> FunctionCallLog: + """ + `Retrieve logs for function call. `_ + + Args: + call_id (int): ID of the call. + function_id (int | None): ID of the function on which the call was made. + function_external_id (str | None): External ID of the function on which the call was made. + + Returns: + FunctionCallLog: Log for the function call. 
+ + Examples: + + Retrieve function call logs by call ID: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> logs = client.functions.calls.get_logs(call_id=2, function_id=1) + + Retrieve function call logs directly on a call object: + + >>> call = client.functions.calls.retrieve(call_id=2, function_id=1) + >>> logs = call.get_logs() + """ + return run_sync( + self.__async_client.functions.calls.get_logs( + call_id=call_id, function_id=function_id, function_external_id=function_external_id + ) + ) diff --git a/cognite/client/_sync_api/functions/schedules.py b/cognite/client/_sync_api/functions/schedules.py new file mode 100644 index 0000000000..f6da07b012 --- /dev/null +++ b/cognite/client/_sync_api/functions/schedules.py @@ -0,0 +1,286 @@ +""" +=============================================================================== +e069532ec367104abca73d4fad0704ab +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import ( + ClientCredentials, + FunctionSchedule, + FunctionSchedulesList, + TimestampRange, +) +from cognite.client.data_classes.functions import FunctionScheduleWrite +from cognite.client.utils._async_helpers import SyncIterator, run_sync + + +class SyncFunctionSchedulesAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[FunctionSchedule]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[FunctionSchedulesList]: ... + + def __call__( + self, + chunk_size: int | None = None, + name: str | None = None, + function_id: int | None = None, + function_external_id: str | None = None, + created_time: dict[str, int] | TimestampRange | None = None, + cron_expression: str | None = None, + limit: int | None = None, + ) -> Iterator[FunctionSchedule | FunctionSchedulesList]: + """ + Iterate over function schedules + + Args: + chunk_size (int | None): The number of schedules to return in each chunk. Defaults to yielding one schedule a time. + name (str | None): Name of the function schedule. + function_id (int | None): ID of the function the schedules are linked to. + function_external_id (str | None): External ID of the function the schedules are linked to. + created_time (dict[str, int] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + cron_expression (str | None): Cron expression. + limit (int | None): Maximum schedules to return. Defaults to return all schedules. + + Yields: + FunctionSchedule | FunctionSchedulesList: Function schedules. 
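+
+        Examples:
+
+            Iterate over function schedules, one at a time (illustrative sketch only; assumes a configured client, analogous to the other examples in this module):
+
+                >>> from cognite.client import CogniteClient
+                >>> client = CogniteClient()
+                >>> for schedule in client.functions.schedules():
+                ...     schedule  # do something with each schedule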
+ """ + yield from SyncIterator( + self.__async_client.functions.schedules( + chunk_size=chunk_size, + name=name, + function_id=function_id, + function_external_id=function_external_id, + created_time=created_time, + cron_expression=cron_expression, + limit=limit, + ) + ) + + @overload + def retrieve(self, id: int, ignore_unknown_ids: bool = False) -> FunctionSchedule | None: ... + + @overload + def retrieve(self, id: Sequence[int], ignore_unknown_ids: bool = False) -> FunctionSchedulesList: ... + + def retrieve( + self, id: int | Sequence[int], ignore_unknown_ids: bool = False + ) -> FunctionSchedule | None | FunctionSchedulesList: + """ + `Retrieve a single function schedule by id. `_ + + Args: + id (int | Sequence[int]): Schedule ID + ignore_unknown_ids (bool): Ignore IDs that are not found rather than throw an exception. + + Returns: + FunctionSchedule | None | FunctionSchedulesList: Requested function schedule or None if not found. + + Examples: + + Get function schedule by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.functions.schedules.retrieve(id=1) + """ + return run_sync(self.__async_client.functions.schedules.retrieve(id=id, ignore_unknown_ids=ignore_unknown_ids)) + + def list( + self, + name: str | None = None, + function_id: int | None = None, + function_external_id: str | None = None, + created_time: dict[str, int] | TimestampRange | None = None, + cron_expression: str | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + ) -> FunctionSchedulesList: + """ + `List all schedules associated with a specific project. `_ + + Args: + name (str | None): Name of the function schedule. + function_id (int | None): ID of the function the schedules are linked to. + function_external_id (str | None): External ID of the function the schedules are linked to. + created_time (dict[str, int] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + cron_expression (str | None): Cron expression. + limit (int | None): Maximum number of schedules to list. Pass in -1, float('inf') or None to list all. + + Returns: + FunctionSchedulesList: List of function schedules + + Examples: + + List function schedules: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> schedules = client.functions.schedules.list() + + List schedules directly on a function object to get only schedules associated with this particular function: + + >>> func = client.functions.retrieve(id=1) + >>> schedules = func.list_schedules(limit=None) + """ + return run_sync( + self.__async_client.functions.schedules.list( + name=name, + function_id=function_id, + function_external_id=function_external_id, + created_time=created_time, + cron_expression=cron_expression, + limit=limit, + ) + ) + + def create( + self, + name: str | FunctionScheduleWrite, + cron_expression: str | None = None, + function_id: int | None = None, + function_external_id: str | None = None, + client_credentials: dict[str, str] | ClientCredentials | None = None, + description: str | None = None, + data: dict[str, object] | None = None, + ) -> FunctionSchedule: + """ + `Create a schedule associated with a specific project. `_ + + Args: + name (str | FunctionScheduleWrite): Name of the schedule or FunctionSchedule object. 
If a function schedule object is passed, the other arguments are ignored except for the client_credentials argument.
+            cron_expression (str | None): Cron expression.
+            function_id (int | None): Id of the function to attach the schedule to.
+            function_external_id (str | None): (DEPRECATED) External id of the function to attach the schedule to.
+                Note: Will be automatically converted to (internal) ID, as schedules must be bound to an ID.
+            client_credentials (dict[str, str] | ClientCredentials | None): Instance of ClientCredentials
+                or a dictionary containing client credentials: 'client_id' and 'client_secret'.
+            description (str | None): Description of the schedule.
+            data (dict[str, object] | None): Data to be passed to the scheduled run.
+
+        Returns:
+            FunctionSchedule: Created function schedule.
+
+        Note:
+            There are several ways to authenticate the function schedule — the order of priority is as follows:
+            1. ``nonce`` (if provided in the ``FunctionScheduleWrite`` object)
+            2. ``client_credentials`` (if provided)
+            3. The credentials of *this* AsyncCogniteClient.
+
+        Warning:
+            Do not pass secrets or other confidential information via the ``data`` argument. There is a dedicated
+            ``secrets`` argument in FunctionsAPI.create() for this purpose.
+
+            Passing the reference to the Function by ``function_external_id`` is just here as a convenience to the user.
+            The API requires that all schedules *must* be attached to a Function by (internal) ID for authentication
+            and security purposes. This means that the lookup to get the ID is first done on behalf of the user.
+
+        Examples:
+
+            Create a function schedule that runs using specified client credentials (**recommended**):
+
+                >>> import os
+                >>> from cognite.client import CogniteClient
+                >>> from cognite.client.data_classes import ClientCredentials
+                >>> client = CogniteClient()
+                >>> schedule = client.functions.schedules.create(
+                ...     name="My schedule",
+                ...     function_id=123,
+                ...     cron_expression="*/5 * * * *",
+                ...     client_credentials=ClientCredentials("my-client-id", os.environ["MY_CLIENT_SECRET"]),
+                ...     description="This schedule does magic stuff.",
+                ...     data={"magic": "stuff"},
+                ... )
+
+            You may also create a schedule that runs with your *current* credentials, i.e. the same credentials you used
+            to instantiate the ``AsyncCogniteClient`` (that you're using right now). **Note**: Unless you happen to already use
+            client credentials, *this is not a recommended way to create schedules*, as it will create an explicit dependency
+            on your user account, which the schedule will run the function "on behalf of" (until the schedule is eventually removed):
+
+                >>> schedule = client.functions.schedules.create(
+                ...     name="My schedule",
+                ...     function_id=456,
+                ...     cron_expression="*/5 * * * *",
+                ...     description="A schedule just used for some temporary testing.",
+                ... )
+
+            Create a function schedule with a oneshot session (typically used for testing purposes):
+
+                >>> from cognite.client.data_classes.functions import FunctionScheduleWrite
+                >>> session = client.iam.sessions.create(session_type="ONESHOT_TOKEN_EXCHANGE")
+                >>> schedule = client.functions.schedules.create(
+                ...     FunctionScheduleWrite(
+                ...         name="My schedule",
+                ...         function_id=456,
+                ...         cron_expression="*/5 * * * *",
+                ...         description="A schedule just used for some temporary testing.",
+                ...         nonce=session.nonce
+                ...     ),
+                ... 
) + """ + return run_sync( + self.__async_client.functions.schedules.create( + name=name, + cron_expression=cron_expression, + function_id=function_id, + function_external_id=function_external_id, + client_credentials=client_credentials, + description=description, + data=data, + ) + ) + + def delete(self, id: int) -> None: + """ + `Delete a schedule associated with a specific project. `_ + + Args: + id (int): Id of the schedule + + Examples: + + Delete function schedule: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.functions.schedules.delete(id = 123) + """ + return run_sync(self.__async_client.functions.schedules.delete(id=id)) + + def get_input_data(self, id: int) -> dict[str, object] | None: + """ + `Retrieve the input data to the associated function. `_ + + Args: + id (int): Id of the schedule + + Returns: + dict[str, object] | None: Input data to the associated function or None if not set. This data is passed deserialized into the function through the data argument. + + Examples: + + Get schedule input data: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.functions.schedules.get_input_data(id=123) + """ + return run_sync(self.__async_client.functions.schedules.get_input_data(id=id)) diff --git a/cognite/client/_sync_api/geospatial.py b/cognite/client/_sync_api/geospatial.py new file mode 100644 index 0000000000..0078030a19 --- /dev/null +++ b/cognite/client/_sync_api/geospatial.py @@ -0,0 +1,1014 @@ +""" +=============================================================================== +2278ee58d5848b6f27ba865ac38c4d47 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from pathlib import Path +from typing import Any, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.geospatial import ( + CoordinateReferenceSystem, + CoordinateReferenceSystemList, + CoordinateReferenceSystemWrite, + Feature, + FeatureAggregateList, + FeatureList, + FeatureType, + FeatureTypeList, + FeatureTypePatch, + FeatureTypeWrite, + FeatureWrite, + FeatureWriteList, + GeospatialComputedResponse, + GeospatialComputeFunction, + OrderSpec, + RasterMetadata, +) +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + + +class SyncGeospatialAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def create_feature_types(self, feature_type: FeatureType | FeatureTypeWrite) -> FeatureType: ... + + @overload + def create_feature_types( + self, feature_type: Sequence[FeatureType] | Sequence[FeatureTypeWrite] + ) -> FeatureTypeList: ... 
+
+    def create_feature_types(
+        self, feature_type: FeatureType | FeatureTypeWrite | Sequence[FeatureType] | Sequence[FeatureTypeWrite]
+    ) -> FeatureType | FeatureTypeList:
+        """
+        `Creates feature types`
+
+
+        Args:
+            feature_type (FeatureType | FeatureTypeWrite | Sequence[FeatureType] | Sequence[FeatureTypeWrite]): feature type definition or list of feature type definitions to create.
+
+        Returns:
+            FeatureType | FeatureTypeList: Created feature type definition(s)
+
+        Examples:
+
+            Create new type definitions:
+
+                >>> from cognite.client import CogniteClient
+                >>> from cognite.client.data_classes.geospatial import FeatureTypeWrite
+                >>> client = CogniteClient()
+                >>> feature_types = [
+                ...     FeatureTypeWrite(external_id="wells", properties={"location": {"type": "POINT", "srid": 4326}}),
+                ...     FeatureTypeWrite(
+                ...         external_id="cities",
+                ...         properties={"name": {"type": "STRING", "size": 10}},
+                ...         search_spec={"name_index": {"properties": ["name"]}}
+                ...     )
+                ... ]
+                >>> res = client.geospatial.create_feature_types(feature_types)
+        """
+        return run_sync(self.__async_client.geospatial.create_feature_types(feature_type=feature_type))
+
+    def delete_feature_types(self, external_id: str | SequenceNotStr[str], recursive: bool = False) -> None:
+        """
+        `Delete one or more feature types`
+
+
+        Args:
+            external_id (str | SequenceNotStr[str]): External ID or list of external ids
+            recursive (bool): if `true` the features will also be dropped
+
+        Examples:
+
+            Delete feature type definitions by external id:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> client.geospatial.delete_feature_types(external_id=["wells", "cities"])
+        """
+        return run_sync(
+            self.__async_client.geospatial.delete_feature_types(external_id=external_id, recursive=recursive)
+        )
+
+    def list_feature_types(self) -> FeatureTypeList:
+        """
+        `List feature types`
+
+
+        Returns:
+            FeatureTypeList: List of feature types
+
+        Examples:
+
+            Iterate over feature type definitions:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> for feature_type in client.geospatial.list_feature_types():
+                ...     feature_type # do something with the feature type definition
+        """
+        return run_sync(self.__async_client.geospatial.list_feature_types())
+
+    @overload
+    def retrieve_feature_types(self, external_id: str) -> FeatureType: ...
+
+    @overload
+    def retrieve_feature_types(self, external_id: list[str]) -> FeatureTypeList: ...
+
+    def retrieve_feature_types(self, external_id: str | list[str]) -> FeatureType | FeatureTypeList:
+        """
+        `Retrieve feature types`
+
+
+        Args:
+            external_id (str | list[str]): External ID
+
+        Returns:
+            FeatureType | FeatureTypeList: Requested feature type definition(s).
+ + Examples: + + Get Type by external id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.geospatial.retrieve_feature_types(external_id="1") + """ + return run_sync(self.__async_client.geospatial.retrieve_feature_types(external_id=external_id)) + + def patch_feature_types(self, patch: FeatureTypePatch | Sequence[FeatureTypePatch]) -> FeatureTypeList: + """ + `Patch feature types` + + + Args: + patch (FeatureTypePatch | Sequence[FeatureTypePatch]): the patch to apply + + Returns: + FeatureTypeList: The patched feature types. + + Examples: + + Add one property to a feature type and add indexes + + >>> from cognite.client.data_classes.geospatial import Patches + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.geospatial.patch_feature_types( + ... patch=FeatureTypePatch( + ... external_id="wells", + ... property_patches=Patches(add={"altitude": {"type": "DOUBLE"}}), + ... search_spec_patches=Patches( + ... add={ + ... "altitude_idx": {"properties": ["altitude"]}, + ... "composite_idx": {"properties": ["location", "altitude"]} + ... } + ... ) + ... ) + ... ) + + Add an additional index to an existing property + + >>> from cognite.client.data_classes.geospatial import Patches + >>> res = client.geospatial.patch_feature_types( + ... patch=FeatureTypePatch( + ... external_id="wells", + ... search_spec_patches=Patches(add={"location_idx": {"properties": ["location"]}}) + ... )) + """ + return run_sync(self.__async_client.geospatial.patch_feature_types(patch=patch)) + + @overload + def create_features( + self, + feature_type_external_id: str, + feature: Feature | FeatureWrite, + allow_crs_transformation: bool = False, + chunk_size: int | None = None, + ) -> Feature: ... + + @overload + def create_features( + self, + feature_type_external_id: str, + feature: Sequence[Feature] | Sequence[FeatureWrite] | FeatureList | FeatureWriteList, + allow_crs_transformation: bool = False, + chunk_size: int | None = None, + ) -> FeatureList: ... + + def create_features( + self, + feature_type_external_id: str, + feature: Feature | FeatureWrite | Sequence[Feature] | Sequence[FeatureWrite] | FeatureList | FeatureWriteList, + allow_crs_transformation: bool = False, + chunk_size: int | None = None, + ) -> Feature | FeatureList: + """ + `Creates features` + + + Args: + feature_type_external_id (str): Feature type definition for the features to create. + feature (Feature | FeatureWrite | Sequence[Feature] | Sequence[FeatureWrite] | FeatureList | FeatureWriteList): one feature or a list of features to create or a FeatureList object + allow_crs_transformation (bool): If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. + chunk_size (int | None): maximum number of items in a single request to the api + + Returns: + Feature | FeatureList: Created features + + Examples: + + Create a new feature type and corresponding feature: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.geospatial import FeatureTypeWrite, FeatureWrite + >>> client = CogniteClient() + >>> feature_types = [ + ... 
FeatureTypeWrite( + ... external_id="my_feature_type", + ... properties={ + ... "location": {"type": "POINT", "srid": 4326}, + ... "temperature": {"type": "DOUBLE"} + ... } + ... ) + ... ] + >>> res = client.geospatial.create_feature_types(feature_types) + >>> res = client.geospatial.create_features( + ... feature_type_external_id="my_feature_type", + ... feature=FeatureWrite( + ... external_id="my_feature", + ... location={"wkt": "POINT(1 1)"}, + ... temperature=12.4 + ... ) + ... ) + """ + return run_sync( + self.__async_client.geospatial.create_features( + feature_type_external_id=feature_type_external_id, + feature=feature, + allow_crs_transformation=allow_crs_transformation, + chunk_size=chunk_size, + ) + ) + + def delete_features( + self, feature_type_external_id: str, external_id: str | SequenceNotStr[str] | None = None + ) -> None: + """ + `Delete one or more feature` + + + Args: + feature_type_external_id (str): No description. + external_id (str | SequenceNotStr[str] | None): External ID or list of external ids + + Examples: + + Delete feature type definitions external id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.geospatial.delete_features( + ... feature_type_external_id="my_feature_type", + ... external_id=my_feature + ... ) + """ + return run_sync( + self.__async_client.geospatial.delete_features( + feature_type_external_id=feature_type_external_id, external_id=external_id + ) + ) + + @overload + def retrieve_features( + self, feature_type_external_id: str, external_id: str, properties: dict[str, Any] | None = None + ) -> Feature: ... + + @overload + def retrieve_features( + self, feature_type_external_id: str, external_id: list[str], properties: dict[str, Any] | None = None + ) -> FeatureList: ... + + def retrieve_features( + self, feature_type_external_id: str, external_id: str | list[str], properties: dict[str, Any] | None = None + ) -> FeatureList | Feature: + """ + `Retrieve features` + + + Args: + feature_type_external_id (str): No description. + external_id (str | list[str]): External ID or list of external ids + properties (dict[str, Any] | None): the output property selection + + Returns: + FeatureList | Feature: Requested features or None if it does not exist. + + Examples: + + Retrieve one feature by its external id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.geospatial.retrieve_features( + ... feature_type_external_id="my_feature_type", + ... external_id="my_feature" + ... ) + """ + return run_sync( + self.__async_client.geospatial.retrieve_features( + feature_type_external_id=feature_type_external_id, external_id=external_id, properties=properties + ) + ) + + @overload + def update_features( + self, + feature_type_external_id: str, + feature: Feature | FeatureWrite, + allow_crs_transformation: bool = False, + chunk_size: int | None = None, + ) -> Feature: ... + + @overload + def update_features( + self, + feature_type_external_id: str, + feature: Sequence[Feature] | Sequence[FeatureWrite], + allow_crs_transformation: bool = False, + chunk_size: int | None = None, + ) -> FeatureList: ... 
+ + def update_features( + self, + feature_type_external_id: str, + feature: Feature | FeatureWrite | Sequence[Feature] | Sequence[FeatureWrite], + allow_crs_transformation: bool = False, + chunk_size: int | None = None, + ) -> Feature | FeatureList: + """ + `Update features` + + + Args: + feature_type_external_id (str): No description. + feature (Feature | FeatureWrite | Sequence[Feature] | Sequence[FeatureWrite]): feature or list of features. + allow_crs_transformation (bool): If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. + chunk_size (int | None): maximum number of items in a single request to the api + + Returns: + Feature | FeatureList: Updated features + + Examples: + + Update one feature: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> my_feature = client.geospatial.create_features( + ... feature_type_external_id="my_feature_type", + ... feature=Feature(external_id="my_feature", temperature=12.4) + ... ) + >>> my_updated_feature = client.geospatial.update_features( + ... feature_type_external_id="my_feature_type", + ... feature=Feature(external_id="my_feature", temperature=6.237) + ... ) + """ + return run_sync( + self.__async_client.geospatial.update_features( + feature_type_external_id=feature_type_external_id, + feature=feature, + allow_crs_transformation=allow_crs_transformation, + chunk_size=chunk_size, + ) + ) + + def list_features( + self, + feature_type_external_id: str, + filter: dict[str, Any] | None = None, + properties: dict[str, Any] | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + allow_crs_transformation: bool = False, + ) -> FeatureList: + """ + `List features` + + + This method allows to filter all features. + + Args: + feature_type_external_id (str): the feature type to list features for + filter (dict[str, Any] | None): the list filter + properties (dict[str, Any] | None): the output property selection + limit (int | None): Maximum number of features to return. Defaults to 25. Set to -1, float("inf") or None to return all features. + allow_crs_transformation (bool): If true, then input geometries if existing in the filter will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. + + Returns: + FeatureList: The filtered features + + Examples: + + List features: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> my_feature_type = client.geospatial.retrieve_feature_types( + ... external_id="my_feature_type" + ... ) + >>> my_feature = client.geospatial.create_features( + ... feature_type_external_id=my_feature_type, + ... feature=Feature( + ... external_id="my_feature", + ... temperature=12.4, + ... location={"wkt": "POINT(0 1)"} + ... ) + ... ) + >>> res = client.geospatial.list_features( + ... feature_type_external_id="my_feature_type", + ... filter={"range": {"property": "temperature", "gt": 12.0}} + ... ) + >>> for f in res: + ... 
# do something with the features + + Search for features and select output properties: + + >>> res = client.geospatial.list_features( + ... feature_type_external_id=my_feature_type, + ... filter={}, + ... properties={"temperature": {}, "pressure": {}} + ... ) + + Search for features with spatial filters: + + >>> res = client.geospatial.list_features( + ... feature_type_external_id=my_feature_type, + ... filter={"stWithin": { + ... "property": "location", + ... "value": {"wkt": "POLYGON((0 0, 0 1, 1 1, 0 0))"} + ... }} + ... ) + """ + return run_sync( + self.__async_client.geospatial.list_features( + feature_type_external_id=feature_type_external_id, + filter=filter, + properties=properties, + limit=limit, + allow_crs_transformation=allow_crs_transformation, + ) + ) + + def search_features( + self, + feature_type_external_id: str, + filter: dict[str, Any] | None = None, + properties: dict[str, Any] | None = None, + limit: int = DEFAULT_LIMIT_READ, + order_by: Sequence[OrderSpec] | None = None, + allow_crs_transformation: bool = False, + allow_dimensionality_mismatch: bool = False, + ) -> FeatureList: + """ + `Search for features` + + + This method allows to order the result by one or more of the properties of the feature type. + However, the number of items returned is limited to 1000 and there is no support for cursors yet. + If you need to return more than 1000 items, use the `stream_features(...)` method instead. + + Args: + feature_type_external_id (str): The feature type to search for + filter (dict[str, Any] | None): The search filter + properties (dict[str, Any] | None): The output property selection + limit (int): Maximum number of results + order_by (Sequence[OrderSpec] | None): The order specification + allow_crs_transformation (bool): If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. + allow_dimensionality_mismatch (bool): Indicating if the spatial filter operators allow input geometries with a different dimensionality than the properties they are applied to. Defaults to False. + + Returns: + FeatureList: the filtered features + + Examples: + + Search for features: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> my_feature_type = client.geospatial.retrieve_feature_types( + ... external_id="my_feature_type" + ... ) + >>> my_feature = client.geospatial.create_features( + ... feature_type_external_id=my_feature_type, + ... feature=Feature( + ... external_id="my_feature", + ... temperature=12.4, + ... location={"wkt": "POINT(0 1)"} + ... ) + ... ) + >>> res = client.geospatial.search_features( + ... feature_type_external_id="my_feature_type", + ... filter={"range": {"property": "temperature", "gt": 12.0}} + ... ) + >>> for f in res: + ... # do something with the features + + Search for features and select output properties: + + >>> res = client.geospatial.search_features( + ... feature_type_external_id=my_feature_type, + ... filter={}, + ... properties={"temperature": {}, "pressure": {}} + ... ) + + Search for features and do CRS conversion on an output property: + + >>> res = client.geospatial.search_features( + ... feature_type_external_id=my_feature_type, + ... filter={}, + ... 
properties={"location": {"srid": 3995}} + ... ) + + Search for features and order results: + + >>> res = client.geospatial.search_features( + ... feature_type_external_id=my_feature_type, + ... filter={}, + ... order_by=[ + ... OrderSpec("temperature", "ASC"), + ... OrderSpec("pressure", "DESC")] + ... ) + + Search for features with spatial filters: + + >>> res = client.geospatial.search_features( + ... feature_type_external_id=my_feature_type, + ... filter={"stWithin": { + ... "property": "location", + ... "value": {"wkt": "POLYGON((0 0, 0 1, 1 1, 0 0))"} + ... }} + ... ) + + Combining multiple filters: + + >>> res = client.geospatial.search_features( + ... feature_type_external_id=my_feature_type, + ... filter={"and": [ + ... {"range": {"property": "temperature", "gt": 12.0}}, + ... {"stWithin": { + ... "property": "location", + ... "value": {"wkt": "POLYGON((0 0, 0 1, 1 1, 0 0))"} + ... }} + ... ]} + ... ) + + >>> res = client.geospatial.search_features( + ... feature_type_external_id=my_feature_type, + ... filter={"or": [ + ... {"range": {"property": "temperature", "gt": 12.0}}, + ... {"stWithin": { + ... "property": "location", + ... "value": {"wkt": "POLYGON((0 0, 0 1, 1 1, 0 0))"} + ... }} + ... ]} + ... ) + """ + return run_sync( + self.__async_client.geospatial.search_features( + feature_type_external_id=feature_type_external_id, + filter=filter, + properties=properties, + limit=limit, + order_by=order_by, + allow_crs_transformation=allow_crs_transformation, + allow_dimensionality_mismatch=allow_dimensionality_mismatch, + ) + ) + + def stream_features( + self, + feature_type_external_id: str, + filter: dict[str, Any] | None = None, + properties: dict[str, Any] | None = None, + allow_crs_transformation: bool = False, + allow_dimensionality_mismatch: bool = False, + ) -> Iterator[Feature]: + """ + `Stream features` + + + This method allows to return any number of items until the underlying + api calls times out. The order of the result items is not deterministic. + If you need to order the results, use the `search_features(...)` method instead. + + Args: + feature_type_external_id (str): the feature type to search for + filter (dict[str, Any] | None): the search filter + properties (dict[str, Any] | None): the output property selection + allow_crs_transformation (bool): If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. + allow_dimensionality_mismatch (bool): Indicating if the spatial filter operators allow input geometries with a different dimensionality than the properties they are applied to. Defaults to False. + + Yields: + Feature: a generator for the filtered features + + Examples: + + Stream features: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> my_feature = client.geospatial.create_features( + ... feature_type_external_id="my_feature_type", + ... feature=Feature(external_id="my_feature", temperature=12.4) + ... ) + >>> features = client.geospatial.stream_features( + ... feature_type_external_id="my_feature_type", + ... filter={"range": {"property": "temperature", "gt": 12.0}} + ... ) + >>> for f in features: + ... 
# do something with the features + + Stream features and select output properties: + + >>> features = client.geospatial.stream_features( + ... feature_type_external_id="my_feature_type", + ... filter={}, + ... properties={"temperature": {}, "pressure": {}} + ... ) + >>> for f in features: + ... # do something with the features + """ + yield from SyncIterator( + self.__async_client.geospatial( + feature_type_external_id=feature_type_external_id, + filter=filter, + properties=properties, + allow_crs_transformation=allow_crs_transformation, + allow_dimensionality_mismatch=allow_dimensionality_mismatch, + ) + ) + + def aggregate_features( + self, + feature_type_external_id: str, + filter: dict[str, Any] | None = None, + group_by: SequenceNotStr[str] | None = None, + order_by: Sequence[OrderSpec] | None = None, + output: dict[str, Any] | None = None, + ) -> FeatureAggregateList: + """ + `Aggregate filtered features` + + + Args: + feature_type_external_id (str): the feature type to filter features from + filter (dict[str, Any] | None): the search filter + group_by (SequenceNotStr[str] | None): list of properties to group by with + order_by (Sequence[OrderSpec] | None): the order specification + output (dict[str, Any] | None): the aggregate output + + Returns: + FeatureAggregateList: the filtered features + + Examples: + + Aggregate property of features: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> my_feature = client.geospatial.create_features( + ... feature_type_external_id="my_feature_type", + ... feature=Feature(external_id="my_feature", temperature=12.4) + ... ) + >>> res = client.geospatial.aggregate_features( + ... feature_type_external_id="my_feature_type", + ... filter={"range": {"property": "temperature", "gt": 12.0}}, + ... group_by=["category"], + ... order_by=[OrderSpec("category", "ASC")], + ... output={"min_temperature": {"min": {"property": "temperature"}}, + ... "max_volume": {"max": {"property": "volume"}} + ... } + ... ) + >>> for a in res: + ... # loop over aggregates in different groups + """ + return run_sync( + self.__async_client.geospatial.aggregate_features( + feature_type_external_id=feature_type_external_id, + filter=filter, + group_by=group_by, + order_by=order_by, + output=output, + ) + ) + + def get_coordinate_reference_systems(self, srids: int | Sequence[int]) -> CoordinateReferenceSystemList: + """ + `Get Coordinate Reference Systems` + + + Args: + srids (int | Sequence[int]): (Union[int, Sequence[int]]): SRID or list of SRIDs + + Returns: + CoordinateReferenceSystemList: Requested CRSs. + + Examples: + + Get two CRS definitions: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> crs = client.geospatial.get_coordinate_reference_systems(srids=[4326, 4327]) + """ + return run_sync(self.__async_client.geospatial.get_coordinate_reference_systems(srids=srids)) + + def list_coordinate_reference_systems(self, only_custom: bool = False) -> CoordinateReferenceSystemList: + """ + `List Coordinate Reference Systems` + + + Args: + only_custom (bool): list only custom CRSs or not + + Returns: + CoordinateReferenceSystemList: list of CRSs. 
+ + Examples: + + Fetch all custom CRSs: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> crs = client.geospatial.list_coordinate_reference_systems(only_custom=True) + """ + return run_sync(self.__async_client.geospatial.list_coordinate_reference_systems(only_custom=only_custom)) + + def create_coordinate_reference_systems( + self, + crs: CoordinateReferenceSystem + | CoordinateReferenceSystemWrite + | Sequence[CoordinateReferenceSystem] + | Sequence[CoordinateReferenceSystemWrite], + ) -> CoordinateReferenceSystemList: + """ + `Create Coordinate Reference System` + + + Args: + crs (CoordinateReferenceSystem | CoordinateReferenceSystemWrite | Sequence[CoordinateReferenceSystem] | Sequence[CoordinateReferenceSystemWrite]): a CoordinateReferenceSystem or a list of CoordinateReferenceSystem + + Returns: + CoordinateReferenceSystemList: list of CRSs. + + Examples: + + Create a custom CRS: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import CoordinateReferenceSystemWrite + >>> client = CogniteClient() + >>> custom_crs = CoordinateReferenceSystemWrite( + ... srid = 121111, + ... wkt=( + ... 'PROJCS["NTF (Paris) / Lambert zone II",' + ... ' GEOGCS["NTF (Paris)",' + ... ' DATUM["Nouvelle_Triangulation_Francaise_Paris",' + ... ' SPHEROID["Clarke 1880 (IGN)",6378249.2,293.4660212936265,' + ... ' AUTHORITY["EPSG","7011"]],' + ... ' TOWGS84[-168,-60,320,0,0,0,0],' + ... ' AUTHORITY["EPSG","6807"]],' + ... ' PRIMEM["Paris",2.33722917,' + ... ' AUTHORITY["EPSG","8903"]],' + ... ' UNIT["grad",0.01570796326794897,' + ... ' AUTHORITY["EPSG","9105"]], ' + ... ' AUTHORITY["EPSG","4807"]],' + ... ' PROJECTION["Lambert_Conformal_Conic_1SP"],' + ... ' PARAMETER["latitude_of_origin",52],' + ... ' PARAMETER["central_meridian",0],' + ... ' PARAMETER["scale_factor",0.99987742],' + ... ' PARAMETER["false_easting",600000],' + ... ' PARAMETER["false_northing",2200000],' + ... ' UNIT["metre",1,' + ... ' AUTHORITY["EPSG","9001"]],' + ... ' AXIS["X",EAST],' + ... ' AXIS["Y",NORTH],' + ... ' AUTHORITY["EPSG","27572"]]' + ... ), + ... proj_string=( + ... '+proj=lcc +lat_1=46.8 +lat_0=46.8 +lon_0=0 +k_0=0.99987742 ' + ... '+x_0=600000 +y_0=2200000 +a=6378249.2 +b=6356515 ' + ... '+towgs84=-168,-60,320,0,0,0,0 +pm=paris +units=m +no_defs' + ... ) + ... 
) + >>> crs = client.geospatial.create_coordinate_reference_systems(custom_crs) + """ + return run_sync(self.__async_client.geospatial.create_coordinate_reference_systems(crs=crs)) + + def delete_coordinate_reference_systems(self, srids: int | Sequence[int]) -> None: + """ + `Delete Coordinate Reference System` + + + Args: + srids (int | Sequence[int]): (Union[int, Sequence[int]]): SRID or list of SRIDs + + Examples: + + Delete a custom CRS: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> crs = client.geospatial.delete_coordinate_reference_systems(srids=[121111]) + """ + return run_sync(self.__async_client.geospatial.delete_coordinate_reference_systems(srids=srids)) + + def put_raster( + self, + feature_type_external_id: str, + feature_external_id: str, + raster_property_name: str, + raster_format: str, + raster_srid: int, + file: str | Path, + allow_crs_transformation: bool = False, + raster_scale_x: float | None = None, + raster_scale_y: float | None = None, + ) -> RasterMetadata: + """ + `Put raster ` + + Args: + feature_type_external_id (str): No description. + feature_external_id (str): one feature or a list of features to create + raster_property_name (str): the raster property name + raster_format (str): the raster input format + raster_srid (int): the associated SRID for the raster + file (str | Path): the path to the file of the raster + allow_crs_transformation (bool): When the parameter is false, requests with rasters in Coordinate Reference System different from the one defined in the feature type will result in bad request response code. + raster_scale_x (float | None): the X component of the pixel width in units of coordinate reference system + raster_scale_y (float | None): the Y component of the pixel height in units of coordinate reference system + + Returns: + RasterMetadata: the raster metadata if it was ingested successfully + + Examples: + + Put a raster in a feature raster property: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> feature_type = ... + >>> feature = ... + >>> raster_property_name = ... + >>> metadata = client.geospatial.put_raster(feature_type.external_id, feature.external_id, + ... raster_property_name, "XYZ", 3857, file) + """ + return run_sync( + self.__async_client.geospatial.put_raster( + feature_type_external_id=feature_type_external_id, + feature_external_id=feature_external_id, + raster_property_name=raster_property_name, + raster_format=raster_format, + raster_srid=raster_srid, + file=file, + allow_crs_transformation=allow_crs_transformation, + raster_scale_x=raster_scale_x, + raster_scale_y=raster_scale_y, + ) + ) + + def delete_raster(self, feature_type_external_id: str, feature_external_id: str, raster_property_name: str) -> None: + """ + `Delete raster ` + + Args: + feature_type_external_id (str): No description. + feature_external_id (str): one feature or a list of features to create + raster_property_name (str): the raster property name + + Examples: + + Delete a raster in a feature raster property: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> feature_type = ... + >>> feature = ... + >>> raster_property_name = ... 
+ >>> client.geospatial.delete_raster(feature_type.external_id, feature.external_id, raster_property_name) + """ + return run_sync( + self.__async_client.geospatial.delete_raster( + feature_type_external_id=feature_type_external_id, + feature_external_id=feature_external_id, + raster_property_name=raster_property_name, + ) + ) + + def get_raster( + self, + feature_type_external_id: str, + feature_external_id: str, + raster_property_name: str, + raster_format: str, + raster_options: dict[str, Any] | None = None, + raster_srid: int | None = None, + raster_scale_x: float | None = None, + raster_scale_y: float | None = None, + allow_crs_transformation: bool = False, + ) -> bytes: + """ + `Get raster ` + + Args: + feature_type_external_id (str): Feature type definition for the features to create. + feature_external_id (str): one feature or a list of features to create + raster_property_name (str): the raster property name + raster_format (str): the raster output format + raster_options (dict[str, Any] | None): GDAL raster creation key-value options + raster_srid (int | None): the SRID for the output raster + raster_scale_x (float | None): the X component of the output pixel width in units of coordinate reference system + raster_scale_y (float | None): the Y component of the output pixel height in units of coordinate reference system + allow_crs_transformation (bool): When the parameter is false, requests with output rasters in Coordinate Reference System different from the one defined in the feature type will result in bad request response code. + + Returns: + bytes: the raster data + + Examples: + + Get a raster from a feature raster property: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> feature_type = ... + >>> feature = ... + >>> raster_property_name = ... + >>> raster_data = client.geospatial.get_raster(feature_type.external_id, feature.external_id, + ... raster_property_name, "XYZ", {"SIGNIFICANT_DIGITS": "4"}) + """ + return run_sync( + self.__async_client.geospatial.get_raster( + feature_type_external_id=feature_type_external_id, + feature_external_id=feature_external_id, + raster_property_name=raster_property_name, + raster_format=raster_format, + raster_options=raster_options, + raster_srid=raster_srid, + raster_scale_x=raster_scale_x, + raster_scale_y=raster_scale_y, + allow_crs_transformation=allow_crs_transformation, + ) + ) + + def compute(self, output: dict[str, GeospatialComputeFunction]) -> GeospatialComputedResponse: + """ + `Compute ` + + Args: + output (dict[str, GeospatialComputeFunction]): No description. + + Returns: + GeospatialComputedResponse: Mapping of keys to computed items. 
+ + Examples: + + Compute the transformation of an ewkt geometry from one SRID to another: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.geospatial import GeospatialGeometryTransformComputeFunction, GeospatialGeometryValueComputeFunction + >>> client = CogniteClient() + >>> compute_function = GeospatialGeometryTransformComputeFunction(GeospatialGeometryValueComputeFunction("SRID=4326;POLYGON((0 0,10 0,10 10,0 10,0 0))"), srid=23031) + >>> compute_result = client.geospatial.compute(output = {"output": compute_function}) + """ + return run_sync(self.__async_client.geospatial.compute(output=output)) diff --git a/cognite/client/_sync_api/hosted_extractors/__init__.py b/cognite/client/_sync_api/hosted_extractors/__init__.py new file mode 100644 index 0000000000..62603921c0 --- /dev/null +++ b/cognite/client/_sync_api/hosted_extractors/__init__.py @@ -0,0 +1,31 @@ +""" +=============================================================================== +a13269ade1cded310610304c48e405b6 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api.hosted_extractors.destinations import SyncDestinationsAPI +from cognite.client._sync_api.hosted_extractors.jobs import SyncJobsAPI +from cognite.client._sync_api.hosted_extractors.mappings import SyncMappingsAPI +from cognite.client._sync_api.hosted_extractors.sources import SyncSourcesAPI +from cognite.client._sync_api_client import SyncAPIClient + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncHostedExtractorsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + self.sources = SyncSourcesAPI(async_client) + self.destinations = SyncDestinationsAPI(async_client) + self.jobs = SyncJobsAPI(async_client) + self.mappings = SyncMappingsAPI(async_client) diff --git a/cognite/client/_sync_api/hosted_extractors/destinations.py b/cognite/client/_sync_api/hosted_extractors/destinations.py new file mode 100644 index 0000000000..a72d83bba1 --- /dev/null +++ b/cognite/client/_sync_api/hosted_extractors/destinations.py @@ -0,0 +1,219 @@ +""" +=============================================================================== +b9cc50d0274c8bd1cef9f8f73abb5509 +This file is auto-generated from the Async API modules, - do not edit manually! 
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import TYPE_CHECKING, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.hosted_extractors.destinations import (
+    Destination,
+    DestinationList,
+    DestinationUpdate,
+    DestinationWrite,
+)
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+    from cognite.client import AsyncCogniteClient
+
+
+class SyncDestinationsAPI(SyncAPIClient):
+    """Auto-generated, do not modify manually."""
+
+    def __init__(self, async_client: AsyncCogniteClient):
+        self.__async_client = async_client
+
+    @overload
+    def __call__(self, chunk_size: None = None) -> Iterator[Destination]: ...
+
+    @overload
+    def __call__(self, chunk_size: int) -> Iterator[DestinationList]: ...
+
+    def __call__(
+        self, chunk_size: int | None = None, limit: int | None = None
+    ) -> Iterator[Destination | DestinationList]:
+        """
+        Iterate over destinations
+
+        Fetches destinations as they are iterated over, so you keep a limited number of destinations in memory.
+
+        Args:
+            chunk_size (int | None): Number of Destinations to return in each chunk. Defaults to yielding one Destination at a time.
+            limit (int | None): Maximum number of Destinations to return. Defaults to returning all items.
+
+        Yields:
+            Destination | DestinationList: yields Destination one by one if chunk_size is not specified, else DestinationList objects.
+        """
+        yield from SyncIterator(self.__async_client.hosted_extractors.destinations(chunk_size=chunk_size, limit=limit))
+
+    @overload
+    def retrieve(self, external_ids: str, ignore_unknown_ids: bool = False) -> Destination: ...
+
+    @overload
+    def retrieve(self, external_ids: SequenceNotStr[str], ignore_unknown_ids: bool = False) -> DestinationList: ...
+
+    def retrieve(
+        self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bool = False
+    ) -> Destination | DestinationList:
+        """
+        `Retrieve one or more destinations. `_
+
+        Args:
+            external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type.
+            ignore_unknown_ids (bool): Ignore external IDs that are not found
+
+
+        Returns:
+            Destination | DestinationList: Requested destinations
+
+        Examples:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> res = client.hosted_extractors.destinations.retrieve('myDestination')
+
+            Get multiple destinations by id:
+
+                >>> res = client.hosted_extractors.destinations.retrieve(["myDestination", "myDestination2"], ignore_unknown_ids=True)
+        """
+        return run_sync(
+            self.__async_client.hosted_extractors.destinations.retrieve(
+                external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids
+            )
+        )
+
+    def delete(
+        self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bool = False, force: bool = False
+    ) -> None:
+        """
+        `Delete one or more destinations `_
+
+        Args:
+            external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type.
+ ignore_unknown_ids (bool): Ignore external IDs that are not found + force (bool): Delete any jobs associated with each item. + + Examples: + + Delete destinations by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.hosted_extractors.destinations.delete(["myDest", "MyDest2"]) + """ + return run_sync( + self.__async_client.hosted_extractors.destinations.delete( + external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids, force=force + ) + ) + + @overload + def create(self, items: DestinationWrite) -> Destination: ... + + @overload + def create(self, items: Sequence[DestinationWrite]) -> DestinationList: ... + + def create(self, items: DestinationWrite | Sequence[DestinationWrite]) -> Destination | DestinationList: + """ + `Create one or more destinations. `_ + + Args: + items (DestinationWrite | Sequence[DestinationWrite]): Destination(s) to create. + + Returns: + Destination | DestinationList: Created destination(s) + + Examples: + + Create new destination: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.hosted_extractors import DestinationWrite, SessionWrite + >>> client = CogniteClient() + >>> destination = DestinationWrite(external_id='my_dest', credentials=SessionWrite("my_nonce"), target_data_set_id=123) + >>> res = client.hosted_extractors.destinations.create(destination) + """ + return run_sync(self.__async_client.hosted_extractors.destinations.create(items=items)) + + @overload + def update( + self, + items: DestinationWrite | DestinationUpdate, + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> Destination: ... + + @overload + def update( + self, + items: Sequence[DestinationWrite | DestinationUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> DestinationList: ... + + def update( + self, + items: DestinationWrite | DestinationUpdate | Sequence[DestinationWrite | DestinationUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> Destination | DestinationList: + """ + `Update one or more destinations. `_ + + Args: + items (DestinationWrite | DestinationUpdate | Sequence[DestinationWrite | DestinationUpdate]): Destination(s) to update. + mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (DestinationWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. 
+ + Returns: + Destination | DestinationList: Updated destination(s) + + Examples: + + Update destination: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.hosted_extractors import DestinationUpdate + >>> client = CogniteClient() + >>> destination = DestinationUpdate('my_dest').target_data_set_id.set(123) + >>> res = client.hosted_extractors.destinations.update(destination) + """ + return run_sync(self.__async_client.hosted_extractors.destinations.update(items=items, mode=mode)) + + def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> DestinationList: + """ + `List destinations `_ + + Args: + limit (int | None): Maximum number of destinations to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + + Returns: + DestinationList: List of requested destinations + + Examples: + + List destinations: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> destination_list = client.hosted_extractors.destinations.list(limit=5) + + Iterate over destinations, one-by-one: + + >>> for destination in client.hosted_extractors.destinations(): + ... destination # do something with the destination + + Iterate over chunks of destinations to reduce memory load: + + >>> for destination_list in client.hosted_extractors.destinations(chunk_size=25): + ... destination_list # do something with the destinationss + """ + return run_sync(self.__async_client.hosted_extractors.destinations.list(limit=limit)) diff --git a/cognite/client/_sync_api/hosted_extractors/jobs.py b/cognite/client/_sync_api/hosted_extractors/jobs.py new file mode 100644 index 0000000000..4d26031f92 --- /dev/null +++ b/cognite/client/_sync_api/hosted_extractors/jobs.py @@ -0,0 +1,282 @@ +""" +=============================================================================== +6ca49a702912a31decfb46b9c6b61e22 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, Literal, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.hosted_extractors.jobs import ( + Job, + JobList, + JobLogsList, + JobMetricsList, + JobUpdate, + JobWrite, +) +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncJobsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[Job]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[JobList]: ... + + def __call__(self, chunk_size: int | None = None, limit: int | None = None) -> Iterator[Job | JobList]: + """ + Iterate over jobs + + Fetches jobs as they are iterated over, so you keep a limited number of jobs in memory. + + Args: + chunk_size (int | None): Number of jobs to return in each chunk. Defaults to yielding one job a time. + limit (int | None): Maximum number of jobs to return. Defaults to returning all items. 
+ + Yields: + Job | JobList: yields Job one by one if chunk_size is not specified, else JobList objects. + """ + yield from SyncIterator(self.__async_client.hosted_extractors.jobs(chunk_size=chunk_size, limit=limit)) + + @overload + def retrieve(self, external_ids: str, ignore_unknown_ids: bool = False) -> Job | None: ... + + @overload + def retrieve(self, external_ids: SequenceNotStr[str], ignore_unknown_ids: bool = False) -> JobList: ... + + def retrieve( + self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bool = False + ) -> Job | None | JobList: + """ + `Retrieve one or more jobs. `_ + + Args: + external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the job type. + ignore_unknown_ids (bool): Ignore external IDs that are not found + + Returns: + Job | None | JobList: Requested jobs + + Examples: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.hosted_extractors.jobs.retrieve('myJob') + + Get multiple jobs by id: + + >>> res = client.hosted_extractors.jobs.retrieve(["myJob", "myOtherJob"], ignore_unknown_ids=True) + """ + return run_sync( + self.__async_client.hosted_extractors.jobs.retrieve( + external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + def delete(self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bool = False) -> None: + """ + `Delete one or more jobs `_ + + Args: + external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type. + ignore_unknown_ids (bool): Ignore external IDs that are not found + Examples: + + Delete jobs by external id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.hosted_extractors.jobs.delete(["myMQTTJob", "MyEventHubJob"]) + """ + return run_sync( + self.__async_client.hosted_extractors.jobs.delete( + external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + @overload + def create(self, items: JobWrite) -> Job: ... + + @overload + def create(self, items: Sequence[JobWrite]) -> JobList: ... + + def create(self, items: JobWrite | Sequence[JobWrite]) -> Job | JobList: + """ + `Create one or more jobs. `_ + + Args: + items (JobWrite | Sequence[JobWrite]): Job(s) to create. + + Returns: + Job | JobList: Created job(s) + + Examples: + + Create new job: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.hosted_extractors import EventHubSourceWrite + >>> client = CogniteClient() + >>> job_write = EventHubSourceWrite('my_event_hub', 'http://myeventhub.com', "My EventHub", 'my_key', 'my_value') + >>> job = client.hosted_extractors.jobs.create(job_write) + """ + return run_sync(self.__async_client.hosted_extractors.jobs.create(items=items)) + + @overload + def update( + self, + items: JobWrite | JobUpdate, + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> Job: ... + + @overload + def update( + self, + items: Sequence[JobWrite | JobUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> JobList: ... 
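All of the generated wrapper methods in this patch follow the same delegation pattern: build the coroutine on the wrapped AsyncCogniteClient and hand it to run_sync. The helper itself lives in cognite.client.utils._async_helpers and is not part of this patch, so the snippet below is only a rough sketch of the idea, assuming no event loop is already running in the calling thread; the real helper may well use a dedicated background loop instead.

import asyncio
from collections.abc import Coroutine
from typing import Any, TypeVar

_T = TypeVar("_T")


def run_sync_sketch(coro: Coroutine[Any, Any, _T]) -> _T:
    # Drive the coroutine to completion and return its result synchronously.
    # asyncio.run() refuses to nest inside a running loop, which is one reason
    # the real helper is more involved than this one-liner.
    return asyncio.run(coro)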
+
+    def update(
+        self,
+        items: JobWrite | JobUpdate | Sequence[JobWrite | JobUpdate],
+        mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+    ) -> Job | JobList:
+        """
+        `Update one or more jobs. `_
+
+        Args:
+            items (JobWrite | JobUpdate | Sequence[JobWrite | JobUpdate]): Job(s) to update.
+            mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (JobWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+
+        Returns:
+            Job | JobList: Updated job(s)
+
+        Examples:
+
+            Update job:
+
+                >>> from cognite.client import CogniteClient
+                >>> from cognite.client.data_classes.hosted_extractors import EventHubSourceUpdate
+                >>> client = CogniteClient()
+                >>> job = EventHubSourceUpdate('my_event_hub').event_hub_name.set("My Updated EventHub")
+                >>> updated_job = client.hosted_extractors.jobs.update(job)
+        """
+        return run_sync(self.__async_client.hosted_extractors.jobs.update(items=items, mode=mode))
+
+    def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> JobList:
+        """
+        `List jobs `_
+
+        Args:
+            limit (int | None): Maximum number of jobs to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+
+        Returns:
+            JobList: List of requested jobs
+
+        Examples:
+
+            List jobs:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> job_list = client.hosted_extractors.jobs.list(limit=5)
+
+            Iterate over jobs, one-by-one:
+
+                >>> for job in client.hosted_extractors.jobs():
+                ...     job # do something with the job
+
+            Iterate over chunks of jobs to reduce memory load:
+
+                >>> for job_list in client.hosted_extractors.jobs(chunk_size=25):
+                ...     job_list # do something with the jobs
+        """
+        return run_sync(self.__async_client.hosted_extractors.jobs.list(limit=limit))
+
+    def list_logs(
+        self,
+        job: str | None = None,
+        source: str | None = None,
+        destination: str | None = None,
+        limit: int | None = DEFAULT_LIMIT_READ,
+    ) -> JobLogsList:
+        """
+        `List job logs. `_
+
+        Args:
+            job (str | None): Require returned logs to belong to the job given by this external ID.
+            source (str | None): Require returned logs to belong to any job with the source given by this external ID.
+            destination (str | None): Require returned logs to belong to any job with the destination given by this external ID.
+            limit (int | None): Maximum number of logs to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+
+        Returns:
+            JobLogsList: List of requested job logs
+
+        Examples:
+
+            Request logs for a specific job:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> res = client.hosted_extractors.jobs.list_logs(job="myJob")
+        """
+        return run_sync(
+            self.__async_client.hosted_extractors.jobs.list_logs(
+                job=job, source=source, destination=destination, limit=limit
+            )
+        )
+
+    def list_metrics(
+        self,
+        job: str | None = None,
+        source: str | None = None,
+        destination: str | None = None,
+        limit: int | None = DEFAULT_LIMIT_READ,
+    ) -> JobMetricsList:
+        """
+        `List job metrics. `_
+
+        Args:
+            job (str | None): Require returned metrics to belong to the job given by this external ID.
+            source (str | None): Require returned metrics to belong to any job with the source given by this external ID.
+            destination (str | None): Require returned metrics to belong to any job with the destination given by this external ID.
+            limit (int | None): Maximum number of metrics to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+
+        Returns:
+            JobMetricsList: List of requested job metrics
+
+        Examples:
+
+            Request metrics for a specific job:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> res = client.hosted_extractors.jobs.list_metrics(job="myJob")
+        """
+        return run_sync(
+            self.__async_client.hosted_extractors.jobs.list_metrics(
+                job=job, source=source, destination=destination, limit=limit
+            )
+        )
diff --git a/cognite/client/_sync_api/hosted_extractors/mappings.py b/cognite/client/_sync_api/hosted_extractors/mappings.py
new file mode 100644
index 0000000000..8b5eaef658
--- /dev/null
+++ b/cognite/client/_sync_api/hosted_extractors/mappings.py
@@ -0,0 +1,201 @@
+"""
+===============================================================================
+095cf76b161e9f80a1d645fe4494034b
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import TYPE_CHECKING, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.hosted_extractors import Mapping, MappingList, MappingUpdate, MappingWrite
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+    from cognite.client import AsyncCogniteClient
+
+
+class SyncMappingsAPI(SyncAPIClient):
+    """Auto-generated, do not modify manually."""
+
+    def __init__(self, async_client: AsyncCogniteClient):
+        self.__async_client = async_client
+
+    @overload
+    def __call__(self, chunk_size: None = None) -> Iterator[Mapping]: ...
+
+    @overload
+    def __call__(self, chunk_size: int) -> Iterator[MappingList]: ...
+
+    def __call__(self, chunk_size: int | None = None, limit: int | None = None) -> Iterator[Mapping | MappingList]:
+        """
+        Iterate over mappings
+
+        Fetches mappings as they are iterated over, so you keep a limited number of mappings in memory.
+
+        Args:
+            chunk_size (int | None): Number of mappings to return in each chunk. Defaults to yielding one mapping at a time.
+ limit (int | None): Maximum number of mappings to return. Defaults to returning all items. + + Yields: + Mapping | MappingList: yields Mapping one by one if chunk_size is not specified, else MappingList objects. + """ + yield from SyncIterator(self.__async_client.hosted_extractors.mappings(chunk_size=chunk_size, limit=limit)) + + @overload + def retrieve(self, external_ids: str, ignore_unknown_ids: bool = False) -> Mapping: ... + + @overload + def retrieve(self, external_ids: SequenceNotStr[str], ignore_unknown_ids: bool = False) -> MappingList: ... + + def retrieve( + self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bool = False + ) -> Mapping | MappingList: + """ + `Retrieve one or more mappings. `_ + + Args: + external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type. + ignore_unknown_ids (bool): Ignore external IDs that are not found + + + Returns: + Mapping | MappingList: Requested mappings + + Examples: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.hosted_extractors.mappings.retrieve('myMapping') + + Get multiple mappings by id: + + >>> res = client.hosted_extractors.mappings.retrieve(["myMapping", "myMapping2"], ignore_unknown_ids=True) + """ + return run_sync( + self.__async_client.hosted_extractors.mappings.retrieve( + external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + def delete( + self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bool = False, force: bool = False + ) -> None: + """ + `Delete one or more mappings `_ + + Args: + external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type. + ignore_unknown_ids (bool): Ignore external IDs that are not found + force (bool): Delete any jobs associated with each item. + + Examples: + + Delete mappings by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.hosted_extractors.mappings.delete(["myMapping", "MyMapping2"]) + """ + return run_sync( + self.__async_client.hosted_extractors.mappings.delete( + external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids, force=force + ) + ) + + @overload + def create(self, items: MappingWrite) -> Mapping: ... + + @overload + def create(self, items: Sequence[MappingWrite]) -> MappingList: ... + + def create(self, items: MappingWrite | Sequence[MappingWrite]) -> Mapping | MappingList: + """ + `Create one or more mappings. `_ + + Args: + items (MappingWrite | Sequence[MappingWrite]): Mapping(s) to create. + + Returns: + Mapping | MappingList: Created mapping(s) + + Examples: + + Create new mapping: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.hosted_extractors import MappingWrite, CustomMapping + >>> client = CogniteClient() + >>> mapping = MappingWrite(external_id="my_mapping", mapping=CustomMapping("some expression"), published=True, input="json") + >>> res = client.hosted_extractors.mappings.create(mapping) + """ + return run_sync(self.__async_client.hosted_extractors.mappings.create(items=items)) + + @overload + def update(self, items: MappingWrite | MappingUpdate) -> Mapping: ... + + @overload + def update(self, items: Sequence[MappingWrite | MappingUpdate]) -> MappingList: ... 
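The __call__ methods earlier in each of these classes expose the async iterators of the underlying client through "yield from SyncIterator(...)". That helper is also defined in cognite.client.utils._async_helpers and is not shown in this patch; the following is a rough sketch of how such a bridge could work, assuming it is acceptable for the iterator to own a private event loop for its whole lifetime.

import asyncio
from collections.abc import AsyncIterator, Iterator
from typing import TypeVar

_T = TypeVar("_T")


def iterate_sync_sketch(async_iterator: AsyncIterator[_T]) -> Iterator[_T]:
    # Block on each __anext__ call and re-yield the item to the synchronous caller.
    loop = asyncio.new_event_loop()
    try:
        while True:
            try:
                yield loop.run_until_complete(async_iterator.__anext__())
            except StopAsyncIteration:
                return
    finally:
        loop.close()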
+ + def update( + self, items: MappingWrite | MappingUpdate | Sequence[MappingWrite | MappingUpdate] + ) -> Mapping | MappingList: + """ + `Update one or more mappings. `_ + + Args: + items (MappingWrite | MappingUpdate | Sequence[MappingWrite | MappingUpdate]): Mapping(s) to update. + + Returns: + Mapping | MappingList: Updated mapping(s) + + Examples: + + Update mapping: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.hosted_extractors import MappingUpdate + >>> client = CogniteClient() + >>> mapping = MappingUpdate('my_mapping').published.set(False) + >>> res = client.hosted_extractors.mappings.update(mapping) + """ + return run_sync(self.__async_client.hosted_extractors.mappings.update(items=items)) + + def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> MappingList: + """ + `List mappings `_ + + Args: + limit (int | None): Maximum number of mappings to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + + Returns: + MappingList: List of requested mappings + + Examples: + + List mappings: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> mapping_list = client.hosted_extractors.mappings.list(limit=5) + + Iterate over mappings, one-by-one: + + >>> for mapping in client.hosted_extractors.mappings(): + ... mapping # do something with the mapping + + Iterate over chunks of mappings to reduce memory load: + + >>> for mapping_list in client.hosted_extractors.mappings(chunk_size=25): + ... mapping_list # do something with the mappings + """ + return run_sync(self.__async_client.hosted_extractors.mappings.list(limit=limit)) diff --git a/cognite/client/_sync_api/hosted_extractors/sources.py b/cognite/client/_sync_api/hosted_extractors/sources.py new file mode 100644 index 0000000000..23f9a720d3 --- /dev/null +++ b/cognite/client/_sync_api/hosted_extractors/sources.py @@ -0,0 +1,210 @@ +""" +=============================================================================== +05d0853992db787473021dbdd653b725 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, Literal, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.hosted_extractors.sources import Source, SourceList, SourceUpdate, SourceWrite +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncSourcesAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[Source]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[SourceList]: ... + + def __call__(self, chunk_size: int | None = None, limit: int | None = None) -> Iterator[Source | SourceList]: + """ + Iterate over sources + + Fetches sources as they are iterated over, so you keep a limited number of sources in memory. 
+ + Args: + chunk_size (int | None): Number of sources to return in each chunk. Defaults to yielding one source a time. + limit (int | None): Maximum number of sources to return. Defaults to returning all items. + + Yields: + Source | SourceList: yields Source one by one if chunk_size is not specified, else SourceList objects. + """ + yield from SyncIterator(self.__async_client.hosted_extractors.sources(chunk_size=chunk_size, limit=limit)) + + @overload + def retrieve(self, external_ids: str, ignore_unknown_ids: bool = False) -> Source: ... + + @overload + def retrieve(self, external_ids: SequenceNotStr[str], ignore_unknown_ids: bool = False) -> SourceList: ... + + def retrieve( + self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bool = False + ) -> Source | SourceList: + """ + `Retrieve one or more sources. `_ + + Args: + external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type. + ignore_unknown_ids (bool): Ignore external IDs that are not found rather than throw an exception. + + Returns: + Source | SourceList: Requested sources + + Examples: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.hosted_extractors.sources.retrieve('myMQTTSource') + + Get multiple sources by id: + + >>> res = client.hosted_extractors.sources.retrieve(["myMQTTSource", "MyEventHubSource"], ignore_unknown_ids=True) + """ + return run_sync( + self.__async_client.hosted_extractors.sources.retrieve( + external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + def delete( + self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bool = False, force: bool = False + ) -> None: + """ + `Delete one or more sources `_ + + Args: + external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type. + ignore_unknown_ids (bool): Ignore external IDs that are not found rather than throw an exception. + force (bool): Delete any jobs associated with each item. + Examples: + + Delete sources by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.hosted_extractors.sources.delete(["myMQTTSource", "MyEventHubSource"]) + """ + return run_sync( + self.__async_client.hosted_extractors.sources.delete( + external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids, force=force + ) + ) + + @overload + def create(self, items: SourceWrite) -> Source: ... + + @overload + def create(self, items: Sequence[SourceWrite]) -> SourceList: ... + + def create(self, items: SourceWrite | Sequence[SourceWrite]) -> Source | SourceList: + """ + `Create one or more sources. `_ + + Args: + items (SourceWrite | Sequence[SourceWrite]): Source(s) to create. 
+ + Returns: + Source | SourceList: Created source(s) + + Examples: + + Create new source: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.hosted_extractors import EventHubSourceWrite + >>> client = CogniteClient() + >>> source = EventHubSourceWrite('my_event_hub', 'http://myeventhub.com', "My EventHub", 'my_key', 'my_value') + >>> res = client.hosted_extractors.sources.create(source) + """ + return run_sync(self.__async_client.hosted_extractors.sources.create(items=items)) + + @overload + def update( + self, + items: SourceWrite | SourceUpdate, + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> Source: ... + + @overload + def update( + self, + items: Sequence[SourceWrite | SourceUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> SourceList: ... + + def update( + self, + items: SourceWrite | SourceUpdate | Sequence[SourceWrite | SourceUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> Source | SourceList: + """ + `Update one or more sources. `_ + + Args: + items (SourceWrite | SourceUpdate | Sequence[SourceWrite | SourceUpdate]): Source(s) to update. + mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (SourceWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + + Returns: + Source | SourceList: Updated source(s) + + Examples: + + Update source: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.hosted_extractors import EventHubSourceUpdate + >>> client = CogniteClient() + >>> source = EventHubSourceUpdate('my_event_hub').event_hub_name.set("My Updated EventHub") + >>> res = client.hosted_extractors.sources.update(source) + """ + return run_sync(self.__async_client.hosted_extractors.sources.update(items=items, mode=mode)) + + def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> SourceList: + """ + `List sources `_ + + Args: + limit (int | None): Maximum number of sources to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + + Returns: + SourceList: List of requested sources + + Examples: + + List sources: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> source_list = client.hosted_extractors.sources.list(limit=5) + + Iterate over sources, one-by-one: + + >>> for source in client.hosted_extractors.sources(): + ... source # do something with the source + + Iterate over chunks of sources to reduce memory load: + + >>> for source_list in client.hosted_extractors.sources(chunk_size=25): + ... 
source_list # do something with the sources + """ + return run_sync(self.__async_client.hosted_extractors.sources.list(limit=limit)) diff --git a/cognite/client/_sync_api/iam/__init__.py b/cognite/client/_sync_api/iam/__init__.py new file mode 100644 index 0000000000..8bfb27d66d --- /dev/null +++ b/cognite/client/_sync_api/iam/__init__.py @@ -0,0 +1,85 @@ +""" +=============================================================================== +4757d559b846e5066a596fd5502a1c79 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from cognite.client import AsyncCogniteClient +from cognite.client._api.iam import ComparableCapability +from cognite.client._sync_api.iam.groups import SyncGroupsAPI +from cognite.client._sync_api.iam.security_categories import SyncSecurityCategoriesAPI +from cognite.client._sync_api.iam.sessions import SyncSessionsAPI +from cognite.client._sync_api.iam.token import SyncTokenAPI +from cognite.client._sync_api.org_apis.principals import SyncPrincipalsAPI +from cognite.client._sync_api.user_profiles import SyncUserProfilesAPI +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.capabilities import ( + Capability, +) +from cognite.client.utils._async_helpers import run_sync + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncIAMAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + self.groups = SyncGroupsAPI(async_client) + self.security_categories = SyncSecurityCategoriesAPI(async_client) + self.sessions = SyncSessionsAPI(async_client) + self.user_profiles = SyncUserProfilesAPI(async_client) + self.principals = SyncPrincipalsAPI(async_client) + self.token = SyncTokenAPI(async_client) + + def verify_capabilities(self, desired_capabilities: ComparableCapability) -> list[Capability]: + """ + Helper method to compare your current capabilities with a set of desired capabilities and return any missing. + + Args: + desired_capabilities (ComparableCapability): List of desired capabilities to check against existing. + + Returns: + list[Capability]: A flattened list of the missing capabilities, meaning they each have exactly 1 action, 1 scope, 1 id etc. + + Examples: + + Ensure that the user's credentials have access to read- and write assets in all scope, + and write events scoped to a specific dataset with id=123: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.capabilities import AssetsAcl, EventsAcl + >>> client = CogniteClient() + >>> to_check = [ + ... AssetsAcl( + ... actions=[AssetsAcl.Action.Read, AssetsAcl.Action.Write], + ... scope=AssetsAcl.Scope.All()), + ... EventsAcl( + ... actions=[EventsAcl.Action.Write], + ... scope=EventsAcl.Scope.DataSet([123]), + ... )] + >>> if missing := client.iam.verify_capabilities(to_check): + ... pass # do something + + Capabilities can also be passed as dictionaries: + + >>> to_check = [ + ... {'assetsAcl': {'actions': ['READ', 'WRITE'], 'scope': {'all': {}}}}, + ... {'eventsAcl': {'actions': ['WRITE'], 'scope': {'datasetScope': {'ids': [123]}}}}, + ... 
] + >>> missing = client.iam.verify_capabilities(to_check) + + You may also load capabilities from a dict-representation directly into ACLs (access-control list) + by using ``Capability.load``. This will also ensure that the capabilities are valid. + + >>> from cognite.client.data_classes.capabilities import Capability + >>> acls = [Capability.load(cap) for cap in to_check] + """ + return run_sync(self.__async_client.iam.verify_capabilities(desired_capabilities=desired_capabilities)) diff --git a/cognite/client/_sync_api/iam/groups.py b/cognite/client/_sync_api/iam/groups.py new file mode 100644 index 0000000000..d198967141 --- /dev/null +++ b/cognite/client/_sync_api/iam/groups.py @@ -0,0 +1,140 @@ +""" +=============================================================================== +f25241082c8f9272e0bb26f7b5814867 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import TYPE_CHECKING, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import Group, GroupList +from cognite.client.data_classes.iam import GroupWrite +from cognite.client.utils._async_helpers import run_sync + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncGroupsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def list(self, all: bool = False) -> GroupList: + """ + `List groups. `_ + + Args: + all (bool): Whether to get all groups, only available with the groups:list acl. + + Returns: + GroupList: List of groups. + + Example: + + List your own groups: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> my_groups = client.iam.groups.list() + + List all groups: + + >>> all_groups = client.iam.groups.list(all=True) + """ + return run_sync(self.__async_client.iam.groups.list(all=all)) + + @overload + def create(self, group: Group | GroupWrite) -> Group: ... + + @overload + def create(self, group: Sequence[Group] | Sequence[GroupWrite]) -> GroupList: ... + + def create(self, group: Group | GroupWrite | Sequence[Group] | Sequence[GroupWrite]) -> Group | GroupList: + """ + `Create one or more groups. `_ + + Args: + group (Group | GroupWrite | Sequence[Group] | Sequence[GroupWrite]): Group or list of groups to create. + Returns: + Group | GroupList: The created group(s). + + Example: + + Create a group without any members: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import GroupWrite + >>> from cognite.client.data_classes.capabilities import AssetsAcl, EventsAcl + >>> client = CogniteClient() + >>> my_capabilities = [ + ... AssetsAcl([AssetsAcl.Action.Read], AssetsAcl.Scope.All()), + ... EventsAcl([EventsAcl.Action.Write], EventsAcl.Scope.DataSet([123, 456]))] + >>> my_group = GroupWrite(name="My Group", capabilities=my_capabilities) + >>> res = client.iam.groups.create(my_group) + + Create a group whose members are managed externally (by your company's identity provider (IdP)). + This is done by using the ``source_id`` field. 
If this is the same ID as a group in the IdP,
+            a user in that group will implicitly be a part of this group as well.
+
+                >>> grp = GroupWrite(
+                ...     name="Externally managed group",
+                ...     capabilities=my_capabilities,
+                ...     source_id="b7c9a5a4...")
+                >>> res = client.iam.groups.create(grp)
+
+            Create a group whose members are managed internally by Cognite. This group may grant access through
+            listing specific users or include them all. This is done by passing the ``members`` field, either a
+            list of strings with the unique user identifiers or as the constant ``ALL_USER_ACCOUNTS``. To find the
+            user identifiers, you may use the UserProfilesAPI: ``client.iam.user_profiles.list()``.
+
+                >>> from cognite.client.data_classes import ALL_USER_ACCOUNTS
+                >>> all_group = GroupWrite(
+                ...     name="Everyone is welcome!",
+                ...     capabilities=my_capabilities,
+                ...     members=ALL_USER_ACCOUNTS,
+                ... )
+                >>> user_list_group = GroupWrite(
+                ...     name="Specific users only",
+                ...     capabilities=my_capabilities,
+                ...     members=["XRsSD1k3mTIKG", "M0SxY6bM9Jl"])
+                >>> res = client.iam.groups.create([user_list_group, all_group])
+
+            Capabilities are often defined in configuration files, like YAML or JSON. You may convert capabilities
+            from a dict-representation directly into ACLs (access-control list) by using ``Capability.load``.
+            This will also ensure that the capabilities are valid.
+
+                >>> from cognite.client.data_classes.capabilities import Capability
+                >>> unparsed_capabilities = [
+                ...     {'assetsAcl': {'actions': ['READ', 'WRITE'], 'scope': {'all': {}}}},
+                ...     {'eventsAcl': {'actions': ['WRITE'], 'scope': {'datasetScope': {'ids': [123]}}}},
+                ... ]
+                >>> acls = [Capability.load(cap) for cap in unparsed_capabilities]
+                >>> group = GroupWrite(name="Another group", capabilities=acls)
+        """
+        return run_sync(self.__async_client.iam.groups.create(group=group))
+
+    def delete(self, id: int | Sequence[int]) -> None:
+        """
+        `Delete one or more groups. `_
+
+        Args:
+            id (int | Sequence[int]): ID or list of IDs of groups to delete.
+
+        Example:
+
+            Delete group::
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> client.iam.groups.delete(1)
+        """
+        return run_sync(self.__async_client.iam.groups.delete(id=id))
diff --git a/cognite/client/_sync_api/iam/security_categories.py b/cognite/client/_sync_api/iam/security_categories.py
new file mode 100644
index 0000000000..5924edd3a8
--- /dev/null
+++ b/cognite/client/_sync_api/iam/security_categories.py
@@ -0,0 +1,100 @@
+"""
+===============================================================================
+7f0398db4522da948046b7d45c0017ad
+This file is auto-generated from the Async API modules, - do not edit manually!
+=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import SecurityCategory, SecurityCategoryList +from cognite.client.data_classes.iam import SecurityCategoryWrite +from cognite.client.utils._async_helpers import run_sync + + +class SyncSecurityCategoriesAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> SecurityCategoryList: + """ + `List security categories. `_ + + Args: + limit (int | None): Max number of security categories to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + + Returns: + SecurityCategoryList: List of security categories + + Example: + + List security categories:: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.iam.security_categories.list() + """ + return run_sync(self.__async_client.iam.security_categories.list(limit=limit)) + + @overload + def create(self, security_category: SecurityCategory | SecurityCategoryWrite) -> SecurityCategory: ... + + @overload + def create( + self, security_category: Sequence[SecurityCategory] | Sequence[SecurityCategoryWrite] + ) -> SecurityCategoryList: ... + + def create( + self, + security_category: SecurityCategory + | SecurityCategoryWrite + | Sequence[SecurityCategory] + | Sequence[SecurityCategoryWrite], + ) -> SecurityCategory | SecurityCategoryList: + """ + `Create one or more security categories. `_ + + Args: + security_category (SecurityCategory | SecurityCategoryWrite | Sequence[SecurityCategory] | Sequence[SecurityCategoryWrite]): Security category or list of categories to create. + + Returns: + SecurityCategory | SecurityCategoryList: The created security category or categories. + + Example: + + Create security category:: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import SecurityCategoryWrite + >>> client = CogniteClient() + >>> my_category = SecurityCategoryWrite(name="My Category") + >>> res = client.iam.security_categories.create(my_category) + """ + return run_sync(self.__async_client.iam.security_categories.create(security_category=security_category)) + + def delete(self, id: int | Sequence[int]) -> None: + """ + `Delete one or more security categories. `_ + + Args: + id (int | Sequence[int]): ID or list of IDs of security categories to delete. 
+ + Example: + + Delete security category:: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.iam.security_categories.delete(1) + """ + return run_sync(self.__async_client.iam.security_categories.delete(id=id)) diff --git a/cognite/client/_sync_api/iam/sessions.py b/cognite/client/_sync_api/iam/sessions.py new file mode 100644 index 0000000000..e0d609cf86 --- /dev/null +++ b/cognite/client/_sync_api/iam/sessions.py @@ -0,0 +1,109 @@ +""" +=============================================================================== +edd85437841df66e4e84593b2edf78d2 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import TYPE_CHECKING, Literal, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import ClientCredentials, CreatedSession, Session, SessionList +from cognite.client.data_classes.iam import SessionStatus, SessionType +from cognite.client.utils._async_helpers import run_sync + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncSessionsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def create( + self, + client_credentials: ClientCredentials | None = None, + session_type: SessionType | Literal["DEFAULT"] = "DEFAULT", + ) -> CreatedSession: + """ + `Create a session. `_ + + Args: + client_credentials (ClientCredentials | None): The client credentials to create the session. This is required + if session_type is set to 'CLIENT_CREDENTIALS'. + session_type (SessionType | Literal['DEFAULT']): The type of session to create. Can be + either 'CLIENT_CREDENTIALS', 'TOKEN_EXCHANGE', 'ONESHOT_TOKEN_EXCHANGE' or 'DEFAULT'. + Defaults to 'DEFAULT' which will use -this- AsyncCogniteClient object to create the session. + If this client was created using a token, 'TOKEN_EXCHANGE' will be used, and if + this client was created using client credentials, 'CLIENT_CREDENTIALS' will be used. + + Session Types: + + * **client_credentials**: Credentials for a session using client credentials from an identity provider. + * **token_exchange**: Credentials for a session using token exchange to reuse the user's credentials. + * **one_shot_token_exchange**: Credentials for a session using one-shot token exchange to reuse the user's credentials. One-shot sessions are short-lived sessions that are not refreshed and do not require support for token exchange from the identity provider. + + Returns: + CreatedSession: The object with token inspection details. + """ + return run_sync( + self.__async_client.iam.sessions.create(client_credentials=client_credentials, session_type=session_type) + ) + + @overload + def revoke(self, id: int) -> Session: ... + + @overload + def revoke(self, id: Sequence[int]) -> SessionList: ... + + def revoke(self, id: int | Sequence[int]) -> Session | SessionList: + """ + `Revoke access to a session. Revocation of a session may in some cases take up to 1 hour to take effect. 
`_ + + Args: + id (int | Sequence[int]): Id or list of session ids + + Returns: + Session | SessionList: List of revoked sessions. If the user does not have the sessionsAcl:LIST capability, then only the session IDs will be present in the response. + """ + return run_sync(self.__async_client.iam.sessions.revoke(id=id)) + + @overload + def retrieve(self, id: int) -> Session: ... + + @overload + def retrieve(self, id: Sequence[int]) -> SessionList: ... + + def retrieve(self, id: int | Sequence[int]) -> Session | SessionList: + """ + `Retrieves sessions with given IDs. `_ + + The request will fail if any of the IDs does not belong to an existing session. + + Args: + id (int | Sequence[int]): Id or list of session ids + + Returns: + Session | SessionList: Session or list of sessions. + """ + return run_sync(self.__async_client.iam.sessions.retrieve(id=id)) + + def list(self, status: SessionStatus | None = None, limit: int = DEFAULT_LIMIT_READ) -> SessionList: + """ + `List all sessions in the current project. `_ + + Args: + status (SessionStatus | None): If given, only sessions with the given status are returned. + limit (int): Max number of sessions to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + + Returns: + SessionList: a list of sessions in the current project. + """ + return run_sync(self.__async_client.iam.sessions.list(status=status, limit=limit)) diff --git a/cognite/client/_sync_api/iam/token.py b/cognite/client/_sync_api/iam/token.py new file mode 100644 index 0000000000..6be8edab7e --- /dev/null +++ b/cognite/client/_sync_api/iam/token.py @@ -0,0 +1,40 @@ +""" +=============================================================================== +5268479111509d912fc224eb231afa08 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.iam import TokenInspection +from cognite.client.utils._async_helpers import run_sync + + +class SyncTokenAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def inspect(self) -> TokenInspection: + """ + Inspect a token. + + Get details about which projects it belongs to and which capabilities are granted to it. + + Returns: + TokenInspection: The object with token inspection details. + + Example: + + Inspect token:: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.iam.token.inspect() + """ + return run_sync(self.__async_client.iam.token.inspect()) diff --git a/cognite/client/_sync_api/labels.py b/cognite/client/_sync_api/labels.py new file mode 100644 index 0000000000..1a846d1ce6 --- /dev/null +++ b/cognite/client/_sync_api/labels.py @@ -0,0 +1,205 @@ +""" +=============================================================================== +1f594af9469b8c7776ec026b65f20394 +This file is auto-generated from the Async API modules, - do not edit manually! 
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import (
+    LabelDefinition,
+    LabelDefinitionList,
+    LabelDefinitionWrite,
+)
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+
+class SyncLabelsAPI(SyncAPIClient):
+    """Auto-generated, do not modify manually."""
+
+    def __init__(self, async_client: AsyncCogniteClient):
+        self.__async_client = async_client
+
+    @overload
+    def __call__(self, chunk_size: None = None) -> Iterator[LabelDefinition]: ...
+
+    @overload
+    def __call__(self, chunk_size: int) -> Iterator[LabelDefinitionList]: ...
+
+    def __call__(
+        self,
+        chunk_size: int | None = None,
+        name: str | None = None,
+        external_id_prefix: str | None = None,
+        limit: int | None = None,
+        data_set_ids: int | Sequence[int] | None = None,
+        data_set_external_ids: str | SequenceNotStr[str] | None = None,
+    ) -> Iterator[LabelDefinition | LabelDefinitionList]:
+        """
+        Iterate over Labels
+
+        Args:
+            chunk_size (int | None): Number of Labels to return in each chunk. Defaults to yielding one Label at a time.
+            name (str | None): returns the label definitions matching that name
+            external_id_prefix (str | None): filter label definitions with external ids starting with the prefix specified
+            limit (int | None): Maximum number of label definitions to return. Defaults to returning all labels.
+            data_set_ids (int | Sequence[int] | None): return only labels in the data sets with this id / these ids.
+            data_set_external_ids (str | SequenceNotStr[str] | None): return only labels in the data sets with this external id / these external ids.
+
+        Yields:
+            LabelDefinition | LabelDefinitionList: yields Labels one by one or in chunks.
+        """
+        yield from SyncIterator(
+            self.__async_client.labels(
+                chunk_size=chunk_size,
+                name=name,
+                external_id_prefix=external_id_prefix,
+                limit=limit,
+                data_set_ids=data_set_ids,
+                data_set_external_ids=data_set_external_ids,
+            )
+        )
+
+    @overload
+    def retrieve(self, external_id: str, ignore_unknown_ids: Literal[True]) -> LabelDefinition | None: ...
+
+    @overload
+    def retrieve(self, external_id: str, ignore_unknown_ids: Literal[False] = False) -> LabelDefinition: ...
+
+    @overload
+    def retrieve(self, external_id: SequenceNotStr[str], ignore_unknown_ids: bool = False) -> LabelDefinitionList: ...
+
+    def retrieve(
+        self, external_id: str | SequenceNotStr[str], ignore_unknown_ids: bool = False
+    ) -> LabelDefinition | LabelDefinitionList | None:
+        """
+        `Retrieve one or more label definitions by external id. `_
+
+        Args:
+            external_id (str | SequenceNotStr[str]): External ID or list of external ids
+            ignore_unknown_ids (bool): If True, ignore IDs and external IDs that are not found rather than throw an exception.
+ + Returns: + LabelDefinition | LabelDefinitionList | None: The requested label definition(s) + + Examples: + + Get label by external id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.labels.retrieve(external_id="my_label", ignore_unknown_ids=True) + """ + return run_sync( + self.__async_client.labels.retrieve(external_id=external_id, ignore_unknown_ids=ignore_unknown_ids) + ) + + def list( + self, + name: str | None = None, + external_id_prefix: str | None = None, + data_set_ids: int | Sequence[int] | None = None, + data_set_external_ids: str | SequenceNotStr[str] | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + ) -> LabelDefinitionList: + """ + `List Labels `_ + + Args: + name (str | None): returns the label definitions matching that name + external_id_prefix (str | None): filter label definitions with external ids starting with the prefix specified + data_set_ids (int | Sequence[int] | None): return only labels in the data sets with this id / these ids. + data_set_external_ids (str | SequenceNotStr[str] | None): return only labels in the data sets with this external id / these external ids. + limit (int | None): Maximum number of label definitions to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + + Returns: + LabelDefinitionList: List of requested Labels + + Examples: + + List Labels and filter on name: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> label_list = client.labels.list(limit=5, name="Pump") + + Iterate over label definitions, one-by-one: + + >>> for label in client.labels(): + ... label # do something with the label definition + + Iterate over chunks of label definitions to reduce memory load: + + >>> for label_list in client.labels(chunk_size=2500): + ... label_list # do something with the type definitions + """ + return run_sync( + self.__async_client.labels.list( + name=name, + external_id_prefix=external_id_prefix, + data_set_ids=data_set_ids, + data_set_external_ids=data_set_external_ids, + limit=limit, + ) + ) + + @overload + def create(self, label: LabelDefinition | LabelDefinitionWrite) -> LabelDefinition: ... + + @overload + def create(self, label: Sequence[LabelDefinition | LabelDefinitionWrite]) -> LabelDefinitionList: ... + + def create( + self, label: LabelDefinition | LabelDefinitionWrite | Sequence[LabelDefinition | LabelDefinitionWrite] + ) -> LabelDefinition | LabelDefinitionList: + """ + `Create one or more label definitions. `_ + + Args: + label (LabelDefinition | LabelDefinitionWrite | Sequence[LabelDefinition | LabelDefinitionWrite]): The label definition(s) to create. 
+ + Returns: + LabelDefinition | LabelDefinitionList: Created label definition(s) + + Raises: + TypeError: Function input 'label' is of the wrong type + + Examples: + + Create new label definitions: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import LabelDefinitionWrite + >>> client = CogniteClient() + >>> labels = [LabelDefinitionWrite(external_id="ROTATING_EQUIPMENT", name="Rotating equipment"), LabelDefinitionWrite(external_id="PUMP", name="pump")] + >>> res = client.labels.create(labels) + """ + return run_sync(self.__async_client.labels.create(label=label)) + + def delete(self, external_id: str | SequenceNotStr[str] | None = None) -> None: + """ + `Delete one or more label definitions `_ + + Args: + external_id (str | SequenceNotStr[str] | None): One or more label external ids + + Examples: + + Delete label definitions by external id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.labels.delete(external_id=["big_pump", "small_pump"]) + """ + return run_sync(self.__async_client.labels.delete(external_id=external_id)) diff --git a/cognite/client/_sync_api/org_apis/principals.py b/cognite/client/_sync_api/org_apis/principals.py new file mode 100644 index 0000000000..53b3791335 --- /dev/null +++ b/cognite/client/_sync_api/org_apis/principals.py @@ -0,0 +1,116 @@ +""" +=============================================================================== +c58c6fb6020424956202225e2d5994fc +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.principals import Principal, PrincipalList +from cognite.client.utils._async_helpers import run_sync +from cognite.client.utils.useful_types import SequenceNotStr + + +class SyncPrincipalsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def me(self) -> Principal: + """ + `Get the current caller's information. `_ + + Returns: + Principal: The principal of the user running the code, i.e. the + principal *this* AsyncCogniteClient was instantiated with. + + Examples: + Get your own principal: + >>> from cognite.client import CogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.iam.principals.me() + """ + return run_sync(self.__async_client.iam.principals.me()) + + @overload + def retrieve(self, id: str) -> Principal | None: ... + + @overload + def retrieve(self, *, external_id: str) -> Principal | None: ... + + @overload + def retrieve(self, id: SequenceNotStr[str], *, ignore_unknown_ids: bool = False) -> PrincipalList: ... + + @overload + def retrieve(self, *, external_id: SequenceNotStr[str], ignore_unknown_ids: bool = False) -> PrincipalList: ... + + @overload + def retrieve(self, id: None = None, *, ignore_unknown_ids: bool = False) -> PrincipalList: ... + + @overload + def retrieve(self, *, external_id: None = None, ignore_unknown_ids: bool = False) -> PrincipalList: ... 
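The stacked @overload signatures used throughout these files exist only for static type checkers: a call with a single identifier narrows to the single-item return type, while a call with a sequence narrows to the list type, even though one implementation serves both. A minimal, self-contained version of the same pattern is sketched below; the names are made up for illustration.

from __future__ import annotations

from collections.abc import Sequence
from typing import overload


@overload
def lookup(ref: str) -> str | None: ...
@overload
def lookup(ref: Sequence[str]) -> list[str]: ...
def lookup(ref: str | Sequence[str]) -> str | None | list[str]:
    # One implementation backs both overloads: a single reference yields a single
    # (possibly missing) result, a sequence of references yields a list.
    if isinstance(ref, str):
        return ref or None
    return list(ref)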
+
+    def retrieve(
+        self,
+        id: str | Sequence[str] | None = None,
+        external_id: str | Sequence[str] | None = None,
+        ignore_unknown_ids: bool = False,
+    ) -> Principal | PrincipalList | None:
+        """
+        `Retrieve principal by reference in the organization `_
+
+        Args:
+            id (str | Sequence[str] | None): The ID(s) of the principal(s) to retrieve.
+            external_id (str | Sequence[str] | None): The external ID(s) of the principal(s) to retrieve.
+            ignore_unknown_ids (bool): This is only relevant when retrieving multiple principals. If set to True,
+                the method will return the principals that were found and ignore the ones that were not found.
+                If set to False, the method will raise a CogniteAPIError if any of the
+                specified principals were not found. Defaults to False.
+
+        Returns:
+            Principal | PrincipalList | None: The principal(s) with the specified ID(s) or external ID(s).
+
+        Examples:
+            Retrieve a principal by ID:
+                >>> from cognite.client import CogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> res = client.iam.principals.retrieve(id="20u3of8-1234-5678-90ab-cdef12345678")
+
+            Retrieve a principal by external ID:
+                >>> res = client.iam.principals.retrieve(external_id="my_external_id")
+        """
+        return run_sync(
+            self.__async_client.iam.principals.retrieve(
+                id=id, external_id=external_id, ignore_unknown_ids=ignore_unknown_ids
+            )
+        )
+
+    def list(self, types: str | Sequence[str] | None = None, limit: int = DEFAULT_LIMIT_READ) -> PrincipalList:
+        """
+        `List principals in the organization `_
+
+        Args:
+            types (str | Sequence[str] | None): Filter by principal type(s). Defaults to None, which means no filtering.
+            limit (int): The maximum number of principals to return. Defaults to 25.
+
+        Returns:
+            PrincipalList: The principals in the organization that match the filter.
+
+        Examples:
+            List principals in the organization:
+                >>> from cognite.client import CogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> res = client.iam.principals.list(types="USER", limit=10)
+        """
+        return run_sync(self.__async_client.iam.principals.list(types=types, limit=limit))
diff --git a/cognite/client/_sync_api/postgres_gateway/__init__.py b/cognite/client/_sync_api/postgres_gateway/__init__.py
new file mode 100644
index 0000000000..df15957f5b
--- /dev/null
+++ b/cognite/client/_sync_api/postgres_gateway/__init__.py
@@ -0,0 +1,27 @@
+"""
+===============================================================================
+506bda1e5a8fa5d128a4da3ae05bb18b
+This file is auto-generated from the Async API modules, - do not edit manually!
+=============================================================================== +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api.postgres_gateway.tables import SyncTablesAPI +from cognite.client._sync_api.postgres_gateway.users import SyncUsersAPI +from cognite.client._sync_api_client import SyncAPIClient + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncPostgresGatewaysAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + self.users = SyncUsersAPI(async_client) + self.tables = SyncTablesAPI(async_client) diff --git a/cognite/client/_sync_api/postgres_gateway/tables.py b/cognite/client/_sync_api/postgres_gateway/tables.py new file mode 100644 index 0000000000..a1a02d3954 --- /dev/null +++ b/cognite/client/_sync_api/postgres_gateway/tables.py @@ -0,0 +1,193 @@ +""" +=============================================================================== +cee739aab6c29319b2333faea5e5a88d +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, Literal, overload + +import cognite.client.data_classes.postgres_gateway.tables as pg +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncTablesAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[pg.Table]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[pg.TableList]: ... + + def __call__(self, chunk_size: int | None = None, limit: int | None = None) -> Iterator[pg.Table | pg.TableList]: + """ + Iterate over custom tables + + Fetches custom tables as they are iterated over, so you keep a limited number of custom tables in memory. + + Args: + chunk_size (int | None): Number of custom tables to return in each chunk. Defaults to yielding one custom table at a time. + limit (int | None): Maximum number of custom tables to return. Defaults to return all. + + Yields: + pg.Table | pg.TableList: yields Table one by one if chunk_size is not specified, else TableList objects. + """ + yield from SyncIterator(self.__async_client.postgres_gateway.tables(chunk_size=chunk_size, limit=limit)) + + @overload + def create(self, username: str, items: pg.TableWrite) -> pg.Table: ... + + @overload + def create(self, username: str, items: Sequence[pg.TableWrite]) -> pg.TableList: ... + + def create(self, username: str, items: pg.TableWrite | Sequence[pg.TableWrite]) -> pg.Table | pg.TableList: + """ + `Create tables `_ + + Args: + username (str): The name of the username (a.k.a. 
database) to be managed from the API + items (pg.TableWrite | Sequence[pg.TableWrite]): The table(s) to create + + Returns: + pg.Table | pg.TableList: Created tables + + Examples: + + Create custom table: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.data_modeling import ViewId + >>> from cognite.client.data_classes.postgres_gateway import ViewTableWrite + >>> client = CogniteClient() + >>> table = ViewTableWrite(tablename="myCustom", options=ViewId(space="mySpace", external_id="myExternalId", version="v1")) + >>> res = client.postgres_gateway.tables.create("myUserName",table) + """ + return run_sync(self.__async_client.postgres_gateway.tables.create(username=username, items=items)) + + @overload + def retrieve(self, username: str, tablename: str, ignore_unknown_ids: Literal[False] = False) -> pg.Table: ... + + @overload + def retrieve(self, username: str, tablename: str, ignore_unknown_ids: Literal[True]) -> pg.Table | None: ... + + @overload + def retrieve( + self, username: str, tablename: SequenceNotStr[str], ignore_unknown_ids: bool = False + ) -> pg.TableList: ... + + def retrieve( + self, username: str, tablename: str | SequenceNotStr[str], ignore_unknown_ids: bool = False + ) -> pg.Table | pg.TableList | None: + """ + `Retrieve a list of tables by their tables names `_ + + Retrieve a list of Postgres tables for a user by their table names, optionally ignoring unknown table names + + Args: + username (str): The username (a.k.a. database) to be managed from the API + tablename (str | SequenceNotStr[str]): The name of the table(s) to be retrieved + ignore_unknown_ids (bool): Ignore table names not found + + Returns: + pg.Table | pg.TableList | None: Foreign tables + + Examples: + + Retrieve custom table: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.postgres_gateway.tables.retrieve("myUserName", 'myCustom') + + Get multiple custom tables by id: + + >>> res = client.postgres_gateway.tables.retrieve("myUserName", ["myCustom", "myCustom2"]) + """ + return run_sync( + self.__async_client.postgres_gateway.tables.retrieve( + username=username, tablename=tablename, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + def delete(self, username: str, tablename: str | SequenceNotStr[str], ignore_unknown_ids: bool = False) -> None: + """ + `Delete postgres table(s) `_ + + Args: + username (str): The name of the username (a.k.a. database) to be managed from the API + tablename (str | SequenceNotStr[str]): The name of the table(s) to be deleted + ignore_unknown_ids (bool): Ignore table names that are not found + + Examples: + + Delete custom table: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.postgres_gateway.tables.delete("myUserName", ["myCustom", "myCustom2"]) + """ + return run_sync( + self.__async_client.postgres_gateway.tables.delete( + username=username, tablename=tablename, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + def list( + self, + username: str, + include_built_ins: Literal["yes", "no"] | None = "no", + limit: int | None = DEFAULT_LIMIT_READ, + ) -> pg.TableList: + """ + `List postgres tables `_ + + List all tables in a given project. + + Args: + username (str): The name of the username (a.k.a. 
database) to be managed from the API + include_built_ins (Literal['yes', 'no'] | None): Determines if API should return built-in tables or not + limit (int | None): Limits the number of results to be returned. + + Returns: + pg.TableList: Foreign tables + + Examples: + + List tables: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> custom_table_list = client.postgres_gateway.tables.list("myUserName", limit=5) + + Iterate over tables, one-by-one: + + >>> for table in client.postgres_gateway.tables(): + ... table # do something with the custom table + + Iterate over chunks of tables to reduce memory load: + + >>> for table_list in client.postgres_gateway.tables(chunk_size=25): + ... table_list # do something with the custom tables + """ + return run_sync( + self.__async_client.postgres_gateway.tables.list( + username=username, include_built_ins=include_built_ins, limit=limit + ) + ) diff --git a/cognite/client/_sync_api/postgres_gateway/users.py b/cognite/client/_sync_api/postgres_gateway/users.py new file mode 100644 index 0000000000..abe567627f --- /dev/null +++ b/cognite/client/_sync_api/postgres_gateway/users.py @@ -0,0 +1,219 @@ +""" +=============================================================================== +a98416ce52e9dbf7e93c562767fc169e +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.postgres_gateway.users import ( + User, + UserCreated, + UserCreatedList, + UserList, + UserUpdate, + UserWrite, +) +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncUsersAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[User]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[UserList]: ... + + def __call__(self, chunk_size: int | None = None, limit: int | None = None) -> Iterator[User | UserList]: + """ + Iterate over users + + Fetches user as they are iterated over, so you keep a limited number of users in memory. + + Args: + chunk_size (int | None): Number of users to return in each chunk. Defaults to yielding one user at a time. + limit (int | None): Maximum number of users to return. Defaults to return all. + + Yields: + User | UserList: yields User one by one if chunk_size is not specified, else UserList objects. + """ + yield from SyncIterator(self.__async_client.postgres_gateway.users(chunk_size=chunk_size, limit=limit)) + + @overload + def create(self, user: UserWrite) -> UserCreated: ... + + @overload + def create(self, user: Sequence[UserWrite]) -> UserCreatedList: ... + + def create(self, user: UserWrite | Sequence[UserWrite]) -> UserCreated | UserCreatedList: + """ + `Create Users `_ + + Create postgres users. 
+ + Args: + user (UserWrite | Sequence[UserWrite]): The user(s) to create. + + Returns: + UserCreated | UserCreatedList: The created user(s) + + Examples: + + Create user: + + >>> import os + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.postgres_gateway import UserWrite, SessionCredentials + >>> from cognite.client.data_classes import ClientCredentials + >>> client = CogniteClient() + >>> session = client.iam.sessions.create( + ... ClientCredentials(os.environ["IDP_CLIENT_ID"], os.environ["IDP_CLIENT_SECRET"]), + ... session_type="CLIENT_CREDENTIALS" + ... ) + >>> user = UserWrite(credentials=SessionCredentials(nonce=session.nonce)) + >>> res = client.postgres_gateway.users.create(user) + """ + return run_sync(self.__async_client.postgres_gateway.users.create(user=user)) + + @overload + def update(self, items: UserUpdate | UserWrite) -> User: ... + + @overload + def update(self, items: Sequence[UserUpdate | UserWrite]) -> UserList: ... + + def update(self, items: UserUpdate | UserWrite | Sequence[UserUpdate | UserWrite]) -> User | UserList: + """ + `Update users `_ + + Update postgres users + + Args: + items (UserUpdate | UserWrite | Sequence[UserUpdate | UserWrite]): The user(s) to update. + + Returns: + User | UserList: The updated user(s) + + Examples: + + Update user: + + >>> import os + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.postgres_gateway import UserUpdate, SessionCredentials + >>> from cognite.client.data_classes import ClientCredentials + >>> client = CogniteClient() + >>> session = client.iam.sessions.create( + ... ClientCredentials(os.environ["IDP_CLIENT_ID"], os.environ["IDP_CLIENT_SECRET"]), + ... session_type="CLIENT_CREDENTIALS" + ... ) + >>> update = UserUpdate('myUser').credentials.set(SessionCredentials(nonce=session.nonce)) + >>> res = client.postgres_gateway.users.update(update) + """ + return run_sync(self.__async_client.postgres_gateway.users.update(items=items)) + + def delete(self, username: str | SequenceNotStr[str], ignore_unknown_ids: bool = False) -> None: + """ + `Delete postgres user(s) `_ + + Delete postgres users + + Args: + username (str | SequenceNotStr[str]): Usernames of the users to delete. + ignore_unknown_ids (bool): Ignore usernames that are not found + + + Examples: + + Delete users: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.postgres_gateway.users.delete(["myUser", "myUser2"]) + """ + return run_sync( + self.__async_client.postgres_gateway.users.delete(username=username, ignore_unknown_ids=ignore_unknown_ids) + ) + + @overload + def retrieve(self, username: str, ignore_unknown_ids: bool = False) -> User: ... + + @overload + def retrieve(self, username: SequenceNotStr[str], ignore_unknown_ids: bool = False) -> UserList: ... + + def retrieve(self, username: str | SequenceNotStr[str], ignore_unknown_ids: bool = False) -> User | UserList: + """ + `Retrieve a list of users by their usernames `_ + + Retrieve a list of postgres users by their usernames, optionally ignoring unknown usernames + + Args: + username (str | SequenceNotStr[str]): Usernames of the users to retrieve. + ignore_unknown_ids (bool): Ignore usernames that are not found + + Returns: + User | UserList: The retrieved user(s). 
+ + Examples: + + Retrieve user: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.postgres_gateway.users.retrieve("myUser", ignore_unknown_ids=True) + """ + return run_sync( + self.__async_client.postgres_gateway.users.retrieve( + username=username, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + def list(self, limit: int = DEFAULT_LIMIT_READ) -> UserList: + """ + `Fetch scoped users `_ + + List all users in a given project. + + Args: + limit (int): Limits the number of results to be returned. + + Returns: + UserList: A list of users + + Examples: + + List users: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> user_list = client.postgres_gateway.users.list(limit=5) + + Iterate over users, one-by-one: + + >>> for user in client.postgres_gateway.users(): + ... user # do something with the user + + Iterate over chunks of users to reduce memory load: + + >>> for user_list in client.postgres_gateway.users(chunk_size=25): + ... user_list # do something with the users + """ + return run_sync(self.__async_client.postgres_gateway.users.list(limit=limit)) diff --git a/cognite/client/_sync_api/raw/__init__.py b/cognite/client/_sync_api/raw/__init__.py new file mode 100644 index 0000000000..4a7de2f137 --- /dev/null +++ b/cognite/client/_sync_api/raw/__init__.py @@ -0,0 +1,29 @@ +""" +=============================================================================== +c4064290ea82271873bbc2c0a330fc30 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api.raw.databases import SyncRawDatabasesAPI +from cognite.client._sync_api.raw.rows import SyncRawRowsAPI +from cognite.client._sync_api.raw.tables import SyncRawTablesAPI +from cognite.client._sync_api_client import SyncAPIClient + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncRawAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + self.databases = SyncRawDatabasesAPI(async_client) + self.tables = SyncRawTablesAPI(async_client) + self.rows = SyncRawRowsAPI(async_client) diff --git a/cognite/client/_sync_api/raw/databases.py b/cognite/client/_sync_api/raw/databases.py new file mode 100644 index 0000000000..43fe28bd08 --- /dev/null +++ b/cognite/client/_sync_api/raw/databases.py @@ -0,0 +1,123 @@ +""" +=============================================================================== +06bccd4498118fae6c78fda684de0367 +This file is auto-generated from the Async API modules, - do not edit manually! 
+=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator +from typing import overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.raw import Database, DatabaseList +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + + +class SyncRawDatabasesAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[Database]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[DatabaseList]: ... + + def __call__(self, chunk_size: int | None = None, limit: int | None = None) -> Iterator[Database | DatabaseList]: + """ + Iterate over databases + + Fetches dbs as they are iterated over, so you keep a limited number of dbs in memory. + + Args: + chunk_size (int | None): Number of dbs to return in each chunk. Defaults to yielding one db a time. + limit (int | None): Maximum number of dbs to return. Defaults to return all items. + + Yields: + Database | DatabaseList: No description. + """ + yield from SyncIterator(self.__async_client.raw.databases(chunk_size=chunk_size, limit=limit)) + + @overload + def create(self, name: str) -> Database: ... + + @overload + def create(self, name: list[str]) -> DatabaseList: ... + + def create(self, name: str | list[str]) -> Database | DatabaseList: + """ + `Create one or more databases. `_ + + Args: + name (str | list[str]): A db name or list of db names to create. + + Returns: + Database | DatabaseList: Database or list of databases that has been created. + + Examples: + + Create a new database: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.raw.databases.create("db1") + """ + return run_sync(self.__async_client.raw.databases.create(name=name)) + + def delete(self, name: str | SequenceNotStr[str], recursive: bool = False) -> None: + """ + `Delete one or more databases. `_ + + Args: + name (str | SequenceNotStr[str]): A db name or list of db names to delete. + recursive (bool): Recursively delete all tables in the database(s). + + Examples: + + Delete a list of databases: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.raw.databases.delete(["db1", "db2"]) + """ + return run_sync(self.__async_client.raw.databases.delete(name=name, recursive=recursive)) + + def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> DatabaseList: + """ + `List databases `_ + + Args: + limit (int | None): Maximum number of databases to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + + Returns: + DatabaseList: List of requested databases. + + Examples: + + List the first 5 databases: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> db_list = client.raw.databases.list(limit=5) + + Iterate over databases, one-by-one: + + >>> for db in client.raw.databases(): + ... 
db # do something with the db + + Iterate over chunks of databases to reduce memory load: + + >>> for db_list in client.raw.databases(chunk_size=2500): + ... db_list # do something with the dbs + """ + return run_sync(self.__async_client.raw.databases.list(limit=limit)) diff --git a/cognite/client/_sync_api/raw/rows.py b/cognite/client/_sync_api/raw/rows.py new file mode 100644 index 0000000000..d30532e5d5 --- /dev/null +++ b/cognite/client/_sync_api/raw/rows.py @@ -0,0 +1,343 @@ +""" +=============================================================================== +d83244377c50c368e66b22af80198a2c +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.raw import Row, RowList, RowWrite +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + import pandas as pd + + +class SyncRawRowsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, db_name: str, table_name: str, *, chunk_size: None, partitions: None) -> Iterator[Row]: ... + + @overload + def __call__(self, db_name: str, table_name: str, *, chunk_size: None, partitions: int) -> Iterator[RowList]: ... + + @overload + def __call__(self, db_name: str, table_name: str, *, chunk_size: int, partitions: None) -> Iterator[RowList]: ... + + def __call__( + self, + db_name: str, + table_name: str, + chunk_size: int | None = None, + limit: int | None = None, + min_last_updated_time: int | None = None, + max_last_updated_time: int | None = None, + columns: list[str] | None = None, + partitions: int | None = None, + ) -> Iterator[Row | RowList]: + """ + Iterate over rows. + + Fetches rows as they are iterated over, so you keep a limited number of rows in memory. + + Note: + When iterating using partitions > 1, the memory usage is bounded at 2 x partitions x chunk_size. This is implemented + by halting retrieval speed when the callers code can't keep up. + + Args: + db_name (str): Name of the database + table_name (str): Name of the table to iterate over rows for + chunk_size (int | None): Number of rows to return in each chunk (may be lower). Defaults to yielding one row at a time. + Note: When used together with 'partitions' the default is 10000 (matching the API limit) and there's an implicit minimum of 1000 rows. + limit (int | None): Maximum number of rows to return. Can be used with partitions. Defaults to returning all items. + min_last_updated_time (int | None): Rows must have been last updated after this time (exclusive). ms since epoch. + max_last_updated_time (int | None): Rows must have been last updated before this time (inclusive). ms since epoch. + columns (list[str] | None): List of column keys. Set to `None` for retrieving all, use [] to retrieve only row keys. + partitions (int | None): Retrieve rows in parallel using this number of workers. Defaults to not use concurrency. 
+ The setting is capped at ``global_config.max_workers`` and _can_ be used with a finite limit. To prevent unexpected + problems and maximize read throughput, check out `concurrency limits in the API documentation. `_ + + Yields: + Row | RowList: An iterator yielding the requested row or rows. + """ + yield from SyncIterator( + self.__async_client.raw.rows( + db_name=db_name, + table_name=table_name, + chunk_size=chunk_size, + limit=limit, + min_last_updated_time=min_last_updated_time, + max_last_updated_time=max_last_updated_time, + columns=columns, + partitions=partitions, + ) + ) + + def insert( + self, + db_name: str, + table_name: str, + row: Sequence[Row] | Sequence[RowWrite] | Row | RowWrite | dict, + ensure_parent: bool = False, + ) -> None: + """ + `Insert one or more rows into a table. `_ + + Args: + db_name (str): Name of the database. + table_name (str): Name of the table. + row (Sequence[Row] | Sequence[RowWrite] | Row | RowWrite | dict): The row(s) to insert + ensure_parent (bool): Create database/table if they don't already exist. + + Examples: + + Insert new rows into a table: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import RowWrite + >>> client = CogniteClient() + >>> rows = [RowWrite(key="r1", columns={"col1": "val1", "col2": "val1"}), + ... RowWrite(key="r2", columns={"col1": "val2", "col2": "val2"})] + >>> client.raw.rows.insert("db1", "table1", rows) + + You may also insert a dictionary directly: + + >>> rows = { + ... "key-1": {"col1": 1, "col2": 2}, + ... "key-2": {"col1": 3, "col2": 4, "col3": "high five"}, + ... } + >>> client.raw.rows.insert("db1", "table1", rows) + """ + return run_sync( + self.__async_client.raw.rows.insert( + db_name=db_name, table_name=table_name, row=row, ensure_parent=ensure_parent + ) + ) + + def insert_dataframe( + self, db_name: str, table_name: str, dataframe: pd.DataFrame, ensure_parent: bool = False, dropna: bool = True + ) -> None: + """ + `Insert pandas dataframe into a table `_ + + Uses index for row keys. + + Args: + db_name (str): Name of the database. + table_name (str): Name of the table. + dataframe (pd.DataFrame): The dataframe to insert. Index will be used as row keys. + ensure_parent (bool): Create database/table if they don't already exist. + dropna (bool): Remove NaNs (but keep None's in dtype=object columns) before inserting. Done individually per column. Default: True + + Examples: + + Insert new rows into a table: + + >>> import pandas as pd + >>> from cognite.client import CogniteClient + >>> + >>> client = CogniteClient() + >>> df = pd.DataFrame( + ... {"col-a": [1, 3, None], "col-b": [2, -1, 9]}, + ... index=["r1", "r2", "r3"]) + >>> res = client.raw.rows.insert_dataframe( + ... "db1", "table1", df, dropna=True) + """ + return run_sync( + self.__async_client.raw.rows.insert_dataframe( + db_name=db_name, table_name=table_name, dataframe=dataframe, ensure_parent=ensure_parent, dropna=dropna + ) + ) + + def delete(self, db_name: str, table_name: str, key: str | SequenceNotStr[str]) -> None: + """ + `Delete rows from a table. `_ + + Args: + db_name (str): Name of the database. + table_name (str): Name of the table. + key (str | SequenceNotStr[str]): The key(s) of the row(s) to delete. 
+ + Examples: + + Delete rows from table: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> keys_to_delete = ["k1", "k2", "k3"] + >>> client.raw.rows.delete("db1", "table1", keys_to_delete) + """ + return run_sync(self.__async_client.raw.rows.delete(db_name=db_name, table_name=table_name, key=key)) + + def retrieve(self, db_name: str, table_name: str, key: str) -> Row | None: + """ + `Retrieve a single row by key. `_ + + Args: + db_name (str): Name of the database. + table_name (str): Name of the table. + key (str): The key of the row to retrieve. + + Returns: + Row | None: The requested row. + + Examples: + + Retrieve a row with key 'k1' from table 't1' in database 'db1': + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> row = client.raw.rows.retrieve("db1", "t1", "k1") + + You may access the data directly on the row (like a dict), or use '.get' when keys can be missing: + + >>> val1 = row["col1"] + >>> val2 = row.get("col2") + """ + return run_sync(self.__async_client.raw.rows.retrieve(db_name=db_name, table_name=table_name, key=key)) + + def retrieve_dataframe( + self, + db_name: str, + table_name: str, + min_last_updated_time: int | None = None, + max_last_updated_time: int | None = None, + columns: list[str] | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + partitions: int | None = None, + last_updated_time_in_index: bool = False, + infer_dtypes: bool = True, + ) -> pd.DataFrame: + """ + `Retrieve rows in a table as a pandas dataframe. `_ + + Rowkeys are used as the index. + + Args: + db_name (str): Name of the database. + table_name (str): Name of the table. + min_last_updated_time (int | None): Rows must have been last updated after this time. ms since epoch. + max_last_updated_time (int | None): Rows must have been last updated before this time. ms since epoch. + columns (list[str] | None): List of column keys. Set to `None` for retrieving all, use [] to retrieve only row keys. + limit (int | None): The number of rows to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items. + partitions (int | None): Retrieve rows in parallel using this number of workers. Can be used together with a (large) finite limit. + When partitions is not passed, it defaults to 1, i.e. no concurrency for a finite limit and ``global_config.max_workers`` for an unlimited query + (will be capped at this value). To prevent unexpected problems and maximize read throughput, check out + `concurrency limits in the API documentation. `_ + last_updated_time_in_index (bool): Use a MultiIndex with row keys and last_updated_time as index. + infer_dtypes (bool): If True, pandas will try to infer dtypes of the columns. Defaults to True. + + Returns: + pd.DataFrame: The requested rows in a pandas dataframe. 
+ + Examples: + + Get dataframe: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> df = client.raw.rows.retrieve_dataframe("db1", "t1", limit=5) + """ + return run_sync( + self.__async_client.raw.rows.retrieve_dataframe( + db_name=db_name, + table_name=table_name, + min_last_updated_time=min_last_updated_time, + max_last_updated_time=max_last_updated_time, + columns=columns, + limit=limit, + partitions=partitions, + last_updated_time_in_index=last_updated_time_in_index, + infer_dtypes=infer_dtypes, + ) + ) + + def list( + self, + db_name: str, + table_name: str, + min_last_updated_time: int | None = None, + max_last_updated_time: int | None = None, + columns: list[str] | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + partitions: int | None = None, + ) -> RowList: + """ + `List rows in a table. `_ + + Args: + db_name (str): Name of the database. + table_name (str): Name of the table. + min_last_updated_time (int | None): Rows must have been last updated after this time (exclusive). ms since epoch. + max_last_updated_time (int | None): Rows must have been last updated before this time (inclusive). ms since epoch. + columns (list[str] | None): List of column keys. Set to `None` for retrieving all, use [] to retrieve only row keys. + limit (int | None): The number of rows to retrieve. Can be used with partitions. Defaults to 25. Set to -1, float("inf") or None to return all items. + partitions (int | None): Retrieve rows in parallel using this number of workers. Can be used together with a (large) finite limit. + When partitions is not passed, it defaults to 1, i.e. no concurrency for a finite limit and ``global_config.max_workers`` for an unlimited query + (will be capped at this value). To prevent unexpected problems and maximize read throughput, check out + `concurrency limits in the API documentation. `_ + + Returns: + RowList: The requested rows. + + Examples: + + List a few rows: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> row_list = client.raw.rows.list("db1", "tbl1", limit=5) + + Read an entire table efficiently by using concurrency (default behavior when ``limit=None``): + + >>> row_list = client.raw.rows.list("db1", "tbl1", limit=None) + + Iterate through all rows one-by-one to reduce memory load (no concurrency used): + + >>> for row in client.raw.rows("db1", "t1", columns=["col1","col2"]): + ... val1 = row["col1"] # You may access the data directly + ... val2 = row.get("col2") # ...or use '.get' when keys can be missing + + Iterate through all rows, one chunk at a time, to reduce memory load (no concurrency used): + + >>> for row_list in client.raw.rows("db1", "t1", chunk_size=2500): + ... row_list # Do something with the rows + + Iterate through a massive table to reduce memory load while using concurrency for high throughput. + Note: ``partitions`` must be specified for concurrency to be used (this is different from ``list()`` + to keep backward compatibility). Supplying a finite ``limit`` does not affect concurrency settings + (except for very small values). + + >>> rows_iterator = client.raw.rows( + ... db_name="db1", table_name="t1", partitions=5, chunk_size=5000, limit=1_000_000 + ... ) + >>> for row_list in rows_iterator: + ... 
row_list # Do something with the rows + """ + return run_sync( + self.__async_client.raw.rows.list( + db_name=db_name, + table_name=table_name, + min_last_updated_time=min_last_updated_time, + max_last_updated_time=max_last_updated_time, + columns=columns, + limit=limit, + partitions=partitions, + ) + ) diff --git a/cognite/client/_sync_api/raw/tables.py b/cognite/client/_sync_api/raw/tables.py new file mode 100644 index 0000000000..75e96b95b9 --- /dev/null +++ b/cognite/client/_sync_api/raw/tables.py @@ -0,0 +1,128 @@ +""" +=============================================================================== +854dbb194c522a53e14d24961ede0b14 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator +from typing import overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import raw +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + + +class SyncRawTablesAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, db_name: str, chunk_size: None = None) -> Iterator[raw.Table]: ... + + @overload + def __call__(self, db_name: str, chunk_size: int) -> Iterator[raw.TableList]: ... + + def __call__( + self, db_name: str, chunk_size: int | None = None, limit: int | None = None + ) -> Iterator[raw.Table | raw.TableList]: + """ + Iterate over tables + + Fetches tables as they are iterated over, so you keep a limited number of tables in memory. + + Args: + db_name (str): Name of the database to iterate over tables for + chunk_size (int | None): Number of tables to return in each chunk. Defaults to yielding one table a time. + limit (int | None): Maximum number of tables to return. Defaults to return all items. + + Yields: + raw.Table | raw.TableList: No description. + """ + yield from SyncIterator(self.__async_client.raw.tables(db_name=db_name, chunk_size=chunk_size, limit=limit)) + + @overload + def create(self, db_name: str, name: str) -> raw.Table: ... + + @overload + def create(self, db_name: str, name: list[str]) -> raw.TableList: ... + + def create(self, db_name: str, name: str | list[str]) -> raw.Table | raw.TableList: + """ + `Create one or more tables. `_ + + Args: + db_name (str): Database to create the tables in. + name (str | list[str]): A table name or list of table names to create. + + Returns: + raw.Table | raw.TableList: raw.Table or list of tables that has been created. + + Examples: + + Create a new table in a database: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.raw.tables.create("db1", "table1") + """ + return run_sync(self.__async_client.raw.tables.create(db_name=db_name, name=name)) + + def delete(self, db_name: str, name: str | SequenceNotStr[str]) -> None: + """ + `Delete one or more tables. `_ + + Args: + db_name (str): Database to delete tables from. + name (str | SequenceNotStr[str]): A table name or list of table names to delete. 
+ + Examples: + + Delete a list of tables: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.raw.tables.delete("db1", ["table1", "table2"]) + """ + return run_sync(self.__async_client.raw.tables.delete(db_name=db_name, name=name)) + + def list(self, db_name: str, limit: int | None = DEFAULT_LIMIT_READ) -> raw.TableList: + """ + `List tables `_ + + Args: + db_name (str): The database to list tables from. + limit (int | None): Maximum number of tables to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + + Returns: + raw.TableList: List of requested tables. + + Examples: + + List the first 5 tables: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> table_list = client.raw.tables.list("db1", limit=5) + + Iterate over tables, one-by-one: + + >>> for table in client.raw.tables(db_name="db1"): + ... table # do something with the table + + Iterate over chunks of tables to reduce memory load: + + >>> for table_list in client.raw.tables(db_name="db1", chunk_size=25): + ... table_list # do something with the tables + """ + return run_sync(self.__async_client.raw.tables.list(db_name=db_name, limit=limit)) diff --git a/cognite/client/_sync_api/relationships.py b/cognite/client/_sync_api/relationships.py new file mode 100644 index 0000000000..1ff354fb84 --- /dev/null +++ b/cognite/client/_sync_api/relationships.py @@ -0,0 +1,416 @@ +""" +=============================================================================== +78502466323dd0f7badd4b11014e4c65 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, Literal, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import ( + Relationship, + RelationshipList, + RelationshipUpdate, + RelationshipWrite, +) +from cognite.client.data_classes.labels import LabelFilter +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncRelationshipsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[Relationship]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[RelationshipList]: ... 
+ + def __call__( + self, + chunk_size: int | None = None, + source_external_ids: SequenceNotStr[str] | None = None, + source_types: SequenceNotStr[str] | None = None, + target_external_ids: SequenceNotStr[str] | None = None, + target_types: SequenceNotStr[str] | None = None, + data_set_ids: int | Sequence[int] | None = None, + data_set_external_ids: str | SequenceNotStr[str] | None = None, + start_time: dict[str, int] | None = None, + end_time: dict[str, int] | None = None, + confidence: dict[str, int] | None = None, + last_updated_time: dict[str, int] | None = None, + created_time: dict[str, int] | None = None, + active_at_time: dict[str, int] | None = None, + labels: LabelFilter | None = None, + limit: int | None = None, + fetch_resources: bool = False, + ) -> Iterator[Relationship | RelationshipList]: + """ + Iterate over relationships + + Fetches relationships as they are iterated over, so you keep a limited number of relationships in memory. + + Args: + chunk_size (int | None): Number of Relationships to return in each chunk. Defaults to yielding one relationship at a time. + source_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their source External Id field + source_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their source Type field + target_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their target External Id field + target_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their target Type field + data_set_ids (int | Sequence[int] | None): Return only relationships in the specified data set(s) with this id / these ids. + data_set_external_ids (str | SequenceNotStr[str] | None): Return only relationships in the specified data set(s) with this external id / these external ids. + start_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive) + end_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive) + confidence (dict[str, int] | None): Range to filter the field for (inclusive). + last_updated_time (dict[str, int] | None): Range to filter the field for (inclusive). + created_time (dict[str, int] | None): Range to filter the field for (inclusive). + active_at_time (dict[str, int] | None): Limits results to those active at any point within the given time range, i.e. if there is any overlap in the intervals [activeAtTime.min, activeAtTime.max] and [startTime, endTime], where both intervals are inclusive. If a relationship does not have a startTime, it is regarded as active from the beginning of time by this filter. If it does not have an endTime is will be regarded as active until the end of time. Similarly, if a min is not supplied to the filter, the min will be implicitly set to the beginning of time, and if a max is not supplied, the max will be implicitly set to the end of time. + labels (LabelFilter | None): Return only the resource matching the specified label constraints. + limit (int | None): No description. + fetch_resources (bool): No description. + + Yields: + Relationship | RelationshipList: yields Relationship one by one if chunk_size is not specified, else RelationshipList objects. 
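+
+        Examples:
+
+            A minimal sketch of iterating over relationships (assumes a configured CogniteClient;
+            the chunk size below is illustrative only):
+
+            >>> from cognite.client import CogniteClient
+            >>> client = CogniteClient()
+            >>> for relationship in client.relationships(limit=None):
+            ...     relationship  # do something with each relationship
+
+            Iterate over chunks of relationships to reduce memory load:
+
+            >>> for relationship_list in client.relationships(chunk_size=100):
+            ...     relationship_list  # do something with each chunk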
+ """ + yield from SyncIterator( + self.__async_client.relationships( + chunk_size=chunk_size, + source_external_ids=source_external_ids, + source_types=source_types, + target_external_ids=target_external_ids, + target_types=target_types, + data_set_ids=data_set_ids, + data_set_external_ids=data_set_external_ids, + start_time=start_time, + end_time=end_time, + confidence=confidence, + last_updated_time=last_updated_time, + created_time=created_time, + active_at_time=active_at_time, + labels=labels, + limit=limit, + fetch_resources=fetch_resources, + ) + ) + + def retrieve(self, external_id: str, fetch_resources: bool = False) -> Relationship | None: + """ + Retrieve a single relationship by external id. + + Args: + external_id (str): External ID + fetch_resources (bool): If true, will try to return the full resources referenced by the relationship in the source and target fields. + + Returns: + Relationship | None: Requested relationship or None if it does not exist. + + Examples: + + Get relationship by external id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.relationships.retrieve(external_id="1") + """ + return run_sync( + self.__async_client.relationships.retrieve(external_id=external_id, fetch_resources=fetch_resources) + ) + + def retrieve_multiple( + self, external_ids: SequenceNotStr[str], fetch_resources: bool = False, ignore_unknown_ids: bool = False + ) -> RelationshipList: + """ + `Retrieve multiple relationships by external id. `_ + + Args: + external_ids (SequenceNotStr[str]): External IDs + fetch_resources (bool): If true, will try to return the full resources referenced by the relationship in the + source and target fields. + ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + + Returns: + RelationshipList: The requested relationships. + + Examples: + + Get relationships by external id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.relationships.retrieve_multiple(external_ids=["abc", "def"]) + """ + return run_sync( + self.__async_client.relationships.retrieve_multiple( + external_ids=external_ids, fetch_resources=fetch_resources, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + def list( + self, + source_external_ids: SequenceNotStr[str] | None = None, + source_types: SequenceNotStr[str] | None = None, + target_external_ids: SequenceNotStr[str] | None = None, + target_types: SequenceNotStr[str] | None = None, + data_set_ids: int | Sequence[int] | None = None, + data_set_external_ids: str | SequenceNotStr[str] | None = None, + start_time: dict[str, int] | None = None, + end_time: dict[str, int] | None = None, + confidence: dict[str, int] | None = None, + last_updated_time: dict[str, int] | None = None, + created_time: dict[str, int] | None = None, + active_at_time: dict[str, int] | None = None, + labels: LabelFilter | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + partitions: int | None = None, + fetch_resources: bool = False, + ) -> RelationshipList: + """ + `Lists relationships stored in the project based on a query filter given in the payload of this request. Up to 1000 relationships can be retrieved in one operation. 
`_ + + Args: + source_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their source External Id field + source_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their source Type field + target_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their target External Id field + target_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their target Type field + data_set_ids (int | Sequence[int] | None): Return only relationships in the specified data set(s) with this id / these ids. + data_set_external_ids (str | SequenceNotStr[str] | None): Return only relationships in the specified data set(s) with this external id / these external ids. + start_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive) + end_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive) + confidence (dict[str, int] | None): Range to filter the field for (inclusive). + last_updated_time (dict[str, int] | None): Range to filter the field for (inclusive). + created_time (dict[str, int] | None): Range to filter the field for (inclusive). + active_at_time (dict[str, int] | None): Limits results to those active at any point within the given time range, i.e. if there is any overlap in the intervals [activeAtTime.min, activeAtTime.max] and [startTime, endTime], where both intervals are inclusive. If a relationship does not have a startTime, it is regarded as active from the beginning of time by this filter. If it does not have an endTime is will be regarded as active until the end of time. Similarly, if a min is not supplied to the filter, the min will be implicitly set to the beginning of time, and if a max is not supplied, the max will be implicitly set to the end of time. + labels (LabelFilter | None): Return only the resource matching the specified label constraints. + limit (int | None): Maximum number of relationships to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + partitions (int | None): Retrieve relationships in parallel using this number of workers. Also requires `limit=None` to be passed. + fetch_resources (bool): if true, will try to return the full resources referenced by the relationship in the source and target fields. + + Returns: + RelationshipList: List of requested relationships + + Examples: + + List relationships: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> relationship_list = client.relationships.list(limit=5) + + Iterate over relationships, one-by-one: + + >>> for relationship in client.relationships(): + ... 
relationship # do something with the relationship + """ + return run_sync( + self.__async_client.relationships.list( + source_external_ids=source_external_ids, + source_types=source_types, + target_external_ids=target_external_ids, + target_types=target_types, + data_set_ids=data_set_ids, + data_set_external_ids=data_set_external_ids, + start_time=start_time, + end_time=end_time, + confidence=confidence, + last_updated_time=last_updated_time, + created_time=created_time, + active_at_time=active_at_time, + labels=labels, + limit=limit, + partitions=partitions, + fetch_resources=fetch_resources, + ) + ) + + @overload + def create(self, relationship: Relationship | RelationshipWrite) -> Relationship: ... + + @overload + def create(self, relationship: Sequence[Relationship | RelationshipWrite]) -> RelationshipList: ... + + def create( + self, relationship: Relationship | RelationshipWrite | Sequence[Relationship | RelationshipWrite] + ) -> Relationship | RelationshipList: + """ + `Create one or more relationships. `_ + + Args: + relationship (Relationship | RelationshipWrite | Sequence[Relationship | RelationshipWrite]): Relationship or list of relationships to create. + + Returns: + Relationship | RelationshipList: Created relationship(s) + + Note: + - The source_type and target_type field in the Relationship(s) can be any string among "Asset", "TimeSeries", "File", "Event", "Sequence". + - Do not provide the value for the source and target arguments of the Relationship class, only source_external_id / source_type and target_external_id / target_type. These (source and target) are used as part of fetching actual resources specified in other fields. + + Examples: + + Create a new relationship specifying object type and external id for source and target: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import RelationshipWrite + >>> client = CogniteClient() + >>> flowrel1 = RelationshipWrite( + ... external_id="flow_1", + ... source_external_id="source_ext_id", + ... source_type="asset", + ... target_external_id="target_ext_id", + ... target_type="event", + ... confidence=0.1, + ... data_set_id=1234 + ... ) + >>> flowrel2 = RelationshipWrite( + ... external_id="flow_2", + ... source_external_id="source_ext_id", + ... source_type="asset", + ... target_external_id="target_ext_id", + ... target_type="event", + ... confidence=0.1, + ... data_set_id=1234 + ... ) + >>> res = client.relationships.create([flowrel1,flowrel2]) + """ + return run_sync(self.__async_client.relationships.create(relationship=relationship)) + + @overload + def update(self, item: Relationship | RelationshipWrite | RelationshipUpdate) -> Relationship: ... + + @overload + def update(self, item: Sequence[Relationship | RelationshipWrite | RelationshipUpdate]) -> RelationshipList: ... + + def update( + self, + item: Relationship + | RelationshipWrite + | RelationshipUpdate + | Sequence[Relationship | RelationshipWrite | RelationshipUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> Relationship | RelationshipList: + """ + `Update one or more relationships `_ + Currently, a full replacement of labels on a relationship is not supported (only partial add/remove updates). See the example below on how to perform partial labels update. 
+ + Args: + item (Relationship | RelationshipWrite | RelationshipUpdate | Sequence[Relationship | RelationshipWrite | RelationshipUpdate]): Relationship(s) to update + mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Relationship or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + + Returns: + Relationship | RelationshipList: Updated relationship(s) + + Examples: + Update a data set that you have fetched. This will perform a full update of the data set: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> rel = client.relationships.retrieve(external_id="flow1") + >>> rel.confidence = 0.75 + >>> res = client.relationships.update(rel) + + Perform a partial update on a relationship, setting a source_external_id and a confidence: + + >>> from cognite.client.data_classes import RelationshipUpdate + >>> my_update = RelationshipUpdate(external_id="flow_1").source_external_id.set("alternate_source").confidence.set(0.97) + >>> res1 = client.relationships.update(my_update) + >>> # Remove an already set optional field like so + >>> another_update = RelationshipUpdate(external_id="flow_1").confidence.set(None) + >>> res2 = client.relationships.update(another_update) + + Attach labels to a relationship: + + >>> from cognite.client.data_classes import RelationshipUpdate + >>> my_update = RelationshipUpdate(external_id="flow_1").labels.add(["PUMP", "VERIFIED"]) + >>> res = client.relationships.update(my_update) + + Detach a single label from a relationship: + + >>> from cognite.client.data_classes import RelationshipUpdate + >>> my_update = RelationshipUpdate(external_id="flow_1").labels.remove("PUMP") + >>> res = client.relationships.update(my_update) + """ + return run_sync(self.__async_client.relationships.update(item=item, mode=mode)) + + @overload + def upsert( + self, item: Sequence[Relationship | RelationshipWrite], mode: Literal["patch", "replace"] = "patch" + ) -> RelationshipList: ... + + @overload + def upsert( + self, item: Relationship | RelationshipWrite, mode: Literal["patch", "replace"] = "patch" + ) -> Relationship: ... + + def upsert( + self, + item: Relationship | RelationshipWrite | Sequence[Relationship | RelationshipWrite], + mode: Literal["patch", "replace"] = "patch", + ) -> Relationship | RelationshipList: + """ + Upsert relationships, i.e., update if it exists, and create if it does not exist. + Note this is a convenience method that handles the upserting for you by first calling update on all items, + and if any of them fail because they do not exist, it will create them instead. + + For more details, see :ref:`appendix-upsert`. + + Args: + item (Relationship | RelationshipWrite | Sequence[Relationship | RelationshipWrite]): Relationship or list of relationships to upsert. + mode (Literal['patch', 'replace']): Whether to patch or replace in the case the relationships are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. 
+ + Returns: + Relationship | RelationshipList: The upserted relationship(s). + + Examples: + + Upsert for relationships: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import RelationshipWrite + >>> client = CogniteClient() + >>> existing_relationship = client.relationships.retrieve(id=1) + >>> existing_relationship.description = "New description" + >>> new_relationship = RelationshipWrite( + ... external_id="new_relationship", + ... source_external_id="new_source", + ... source_type="asset", + ... target_external_id="new_target", + ... target_type="event" + ... ) + >>> res = client.relationships.upsert([existing_relationship, new_relationship], mode="replace") + """ + return run_sync(self.__async_client.relationships.upsert(item=item, mode=mode)) + + def delete(self, external_id: str | SequenceNotStr[str], ignore_unknown_ids: bool = False) -> None: + """ + `Delete one or more relationships. `_ + + Args: + external_id (str | SequenceNotStr[str]): External ID or list of external ids + ignore_unknown_ids (bool): Ignore external IDs that are not found rather than throw an exception. + Examples: + + Delete relationships by external id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.relationships.delete(external_id=["a","b"]) + """ + return run_sync( + self.__async_client.relationships.delete(external_id=external_id, ignore_unknown_ids=ignore_unknown_ids) + ) diff --git a/cognite/client/_sync_api/sequence_data.py b/cognite/client/_sync_api/sequence_data.py new file mode 100644 index 0000000000..2d145b0ad8 --- /dev/null +++ b/cognite/client/_sync_api/sequence_data.py @@ -0,0 +1,326 @@ +""" +=============================================================================== +9812a01728a87fc7f57b745b3b175322 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +import typing +from typing import TYPE_CHECKING, Any, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import SequenceRows, SequenceRowsList +from cognite.client.utils._async_helpers import run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + import pandas as pd + + +class SyncSequencesDataAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def insert( + self, + rows: SequenceRows + | dict[int, typing.Sequence[int | float | str]] + | typing.Sequence[tuple[int, typing.Sequence[int | float | str]]] + | typing.Sequence[dict[str, Any]], + columns: SequenceNotStr[str] | None = None, + id: int | None = None, + external_id: str | None = None, + ) -> None: + """ + `Insert rows into a sequence `_ + + Args: + rows (SequenceRows | dict[int, typing.Sequence[int | float | str]] | typing.Sequence[tuple[int, typing.Sequence[int | float | str]]] | typing.Sequence[dict[str, Any]]): The rows you wish to insert. Can either be a list of tuples, a list of {"rowNumber":... ,"values": ...} objects, a dictionary of rowNumber: data, or a SequenceData object. See examples below. + columns (SequenceNotStr[str] | None): List of external id for the columns of the sequence. 
+ id (int | None): Id of sequence to insert rows into. + external_id (str | None): External id of sequence to insert rows into. + + Examples: + Your rows of data can be a list of tuples where the first element is the rownumber and the second element is the data to be inserted: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import SequenceWrite, SequenceColumnWrite + >>> client = CogniteClient() + >>> seq = client.sequences.create( + ... SequenceWrite( + ... columns=[ + ... SequenceColumnWrite(value_type="STRING", external_id="col_a"), + ... SequenceColumnWrite(value_type="DOUBLE", external_id ="col_b") + ... ], + ... ) + ... ) + >>> data = [(1, ['pi',3.14]), (2, ['e',2.72]) ] + >>> client.sequences.data.insert(columns=["col_a","col_b"], rows=data, id=1) + + They can also be provided as a list of API-style objects with a rowNumber and values field: + + >>> data = [{"rowNumber": 123, "values": ['str',3]}, {"rowNumber": 456, "values": ["bar",42]} ] + >>> client.sequences.data.insert(data, id=1, columns=["col_a","col_b"]) # implicit columns are retrieved from metadata + + Or they can be a given as a dictionary with row number as the key, and the value is the data to be inserted at that row: + + >>> data = {123 : ['str',3], 456 : ['bar',42] } + >>> client.sequences.data.insert(columns=['stringColumn','intColumn'], rows=data, id=1) + + Finally, they can be a SequenceData object retrieved from another request. In this case columns from this object are used as well. + + >>> data = client.sequences.data.retrieve(id=2,start=0,end=10) + >>> client.sequences.data.insert(rows=data, id=1,columns=None) + """ + return run_sync( + self.__async_client.sequences.data.insert(rows=rows, columns=columns, id=id, external_id=external_id) + ) + + def insert_dataframe( + self, dataframe: pd.DataFrame, id: int | None = None, external_id: str | None = None, dropna: bool = True + ) -> None: + """ + `Insert a Pandas dataframe. `_ + + The index of the dataframe must contain the row numbers. The names of the remaining columns specify the column external ids. + The sequence and columns must already exist. + + Args: + dataframe (pd.DataFrame): Pandas DataFrame object containing the sequence data. + id (int | None): Id of sequence to insert rows into. + external_id (str | None): External id of sequence to insert rows into. + dropna (bool): Whether to drop rows where all values are missing. Default: True. + + Examples: + Insert three rows into columns 'col_a' and 'col_b' of the sequence with id=123: + + >>> from cognite.client import CogniteClient + >>> import pandas as pd + >>> client = CogniteClient() + >>> df = pd.DataFrame({'col_a': [1, 2, 3], 'col_b': [4, 5, 6]}, index=[1, 2, 3]) + >>> client.sequences.data.insert_dataframe(df, id=123) + """ + return run_sync( + self.__async_client.sequences.data.insert_dataframe( + dataframe=dataframe, id=id, external_id=external_id, dropna=dropna + ) + ) + + def delete(self, rows: typing.Sequence[int], id: int | None = None, external_id: str | None = None) -> None: + """ + `Delete rows from a sequence `_ + + Args: + rows (typing.Sequence[int]): List of row numbers. + id (int | None): Id of sequence to delete rows from. + external_id (str | None): External id of sequence to delete rows from. 
+
+        Examples:
+
+            Delete rows from a sequence:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient()  # another option
+                >>> client.sequences.data.delete(id=1, rows=[1,2,42])
+        """
+        return run_sync(self.__async_client.sequences.data.delete(rows=rows, id=id, external_id=external_id))
+
+    def delete_range(self, start: int, end: int | None, id: int | None = None, external_id: str | None = None) -> None:
+        """
+        `Delete a range of rows from a sequence. Note this operation is potentially slow, as it retrieves each row before deleting. `_
+
+        Args:
+            start (int): Row number to start from (inclusive).
+            end (int | None): Upper limit on the row number (exclusive). Set to None or -1 to delete all rows until end of sequence.
+            id (int | None): Id of sequence to delete rows from.
+            external_id (str | None): External id of sequence to delete rows from.
+
+        Examples:
+
+            >>> from cognite.client import CogniteClient, AsyncCogniteClient
+            >>> client = CogniteClient()
+            >>> # async_client = AsyncCogniteClient()  # another option
+            >>> client.sequences.data.delete_range(id=1, start=0, end=None)
+        """
+        return run_sync(
+            self.__async_client.sequences.data.delete_range(start=start, end=end, id=id, external_id=external_id)
+        )
+
+    @overload
+    def retrieve(
+        self,
+        *,
+        external_id: str,
+        start: int = 0,
+        end: int | None = None,
+        columns: SequenceNotStr[str] | None = None,
+        limit: int | None = None,
+    ) -> SequenceRows: ...
+
+    @overload
+    def retrieve(
+        self,
+        *,
+        external_id: SequenceNotStr[str],
+        start: int = 0,
+        end: int | None = None,
+        columns: SequenceNotStr[str] | None = None,
+        limit: int | None = None,
+    ) -> SequenceRowsList: ...
+
+    @overload
+    def retrieve(
+        self,
+        *,
+        id: int,
+        start: int = 0,
+        end: int | None = None,
+        columns: SequenceNotStr[str] | None = None,
+        limit: int | None = None,
+    ) -> SequenceRows: ...
+
+    @overload
+    def retrieve(
+        self,
+        *,
+        id: typing.Sequence[int],
+        start: int = 0,
+        end: int | None = None,
+        columns: SequenceNotStr[str] | None = None,
+        limit: int | None = None,
+    ) -> SequenceRowsList: ...
+
+    @overload
+    def retrieve(
+        self,
+        *,
+        id: typing.Sequence[int] | int,
+        external_id: SequenceNotStr[str] | str,
+        start: int = 0,
+        end: int | None = None,
+        columns: SequenceNotStr[str] | None = None,
+        limit: int | None = None,
+    ) -> SequenceRowsList: ...
+
+    def retrieve(
+        self,
+        external_id: str | SequenceNotStr[str] | None = None,
+        id: int | typing.Sequence[int] | None = None,
+        start: int = 0,
+        end: int | None = None,
+        columns: SequenceNotStr[str] | None = None,
+        limit: int | None = None,
+    ) -> SequenceRows | SequenceRowsList:
+        """
+        `Retrieve data from a sequence `_
+
+        Args:
+            external_id (str | SequenceNotStr[str] | None): The external id of the sequence to retrieve from.
+            id (int | typing.Sequence[int] | None): The internal id of the sequence to retrieve from.
+            start (int): Row number to start from (inclusive).
+            end (int | None): Upper limit on the row number (exclusive). Set to None or -1 to get all rows until end of sequence.
+            columns (SequenceNotStr[str] | None): List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved.
+            limit (int | None): Maximum number of rows to return per sequence. Pass None to fetch all (possibly limited by 'end').
+
+        Returns:
+            SequenceRows | SequenceRowsList: SequenceRows if a single identifier was given, else SequenceRowsList
+
+        Examples:
+
+            >>> from cognite.client import CogniteClient, AsyncCogniteClient
+            >>> client = CogniteClient()
+            >>> # async_client = AsyncCogniteClient()  # another option
+            >>> res = client.sequences.data.retrieve(id=1)
+            >>> tuples = [(r,v) for r,v in res.items()]  # You can use this iterator in for loops and list comprehensions,
+            >>> single_value = res[23]  # ... get the values at a single row number,
+            >>> col = res.get_column(external_id='columnExtId')  # ... get the array of values for a specific column,
+            >>> df = res.to_pandas()  # ... or convert the result to a dataframe
+        """
+        return run_sync(
+            self.__async_client.sequences.data.retrieve(
+                external_id=external_id, id=id, start=start, end=end, columns=columns, limit=limit
+            )
+        )
+
+    def retrieve_last_row(
+        self,
+        id: int | None = None,
+        external_id: str | None = None,
+        columns: SequenceNotStr[str] | None = None,
+        before: int | None = None,
+    ) -> SequenceRows:
+        """
+        `Retrieves the last row (i.e. the row with the highest row number) in a sequence. `_
+
+        Args:
+            id (int | None): Id of sequence.
+            external_id (str | None): External id of sequence.
+            columns (SequenceNotStr[str] | None): List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved.
+            before (int | None): Get latest datapoint before this row number.
+
+        Returns:
+            SequenceRows: A SequenceRows object containing the requested data.
+
+        Examples:
+
+            Getting the latest row in a sequence before row number 1000:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient()  # another option
+                >>> res = client.sequences.data.retrieve_last_row(id=1, before=1000)
+        """
+        return run_sync(
+            self.__async_client.sequences.data.retrieve_last_row(
+                id=id, external_id=external_id, columns=columns, before=before
+            )
+        )
+
+    def retrieve_dataframe(
+        self,
+        start: int,
+        end: int | None,
+        columns: list[str] | None = None,
+        external_id: str | None = None,
+        column_names: str | None = None,
+        id: int | None = None,
+        limit: int | None = None,
+    ) -> pd.DataFrame:
+        """
+        `Retrieve data from a sequence as a pandas dataframe `_
+
+        Args:
+            start (int): (inclusive) row number to start from.
+            end (int | None): (exclusive) upper limit on the row number. Set to None or -1 to get all rows until end of sequence.
+            columns (list[str] | None): List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved.
+            external_id (str | None): External id of sequence.
+            column_names (str | None): Which field(s) to use as column header. Can use "externalId", "id", "columnExternalId", "id|columnExternalId" or "externalId|columnExternalId". Default is "externalId|columnExternalId" for queries on more than one sequence, and "columnExternalId" for queries on a single sequence.
+            id (int | None): Id of sequence
+            limit (int | None): Maximum number of rows to return per sequence.
+
+        Returns:
+            pd.DataFrame: The requested sequence data in a pandas DataFrame
+
+        Examples:
+            >>> from cognite.client import CogniteClient, AsyncCogniteClient
+            >>> client = CogniteClient()
+            >>> # async_client = AsyncCogniteClient()  # another option
+            >>> df = client.sequences.data.retrieve_dataframe(id=1, start=0, end=None)
+        """
+        return run_sync(
+            self.__async_client.sequences.data.retrieve_dataframe(
+                start=start,
+                end=end,
+                columns=columns,
+                external_id=external_id,
+                column_names=column_names,
+                id=id,
+                limit=limit,
+            )
+        )
diff --git a/cognite/client/_sync_api/sequences.py b/cognite/client/_sync_api/sequences.py
new file mode 100644
index 0000000000..257f2d7156
--- /dev/null
+++ b/cognite/client/_sync_api/sequences.py
@@ -0,0 +1,725 @@
+"""
+===============================================================================
+487393dca45db624fbbc00b67ff1d158
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+import typing
+from collections.abc import Iterator
+from typing import TYPE_CHECKING, Any, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._api.sequences import SortSpec
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api.sequence_data import SyncSequencesDataAPI
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import Sequence, SequenceFilter, SequenceList, SequenceUpdate
+from cognite.client.data_classes.aggregations import AggregationFilter, UniqueResultList
+from cognite.client.data_classes.filters import Filter
+from cognite.client.data_classes.sequences import (
+    SequenceProperty,
+    SequenceWrite,
+)
+from cognite.client.data_classes.shared import TimestampRange
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+    from cognite.client import AsyncCogniteClient
+
+
+class SyncSequencesAPI(SyncAPIClient):
+    """Auto-generated, do not modify manually."""
+
+    def __init__(self, async_client: AsyncCogniteClient):
+        self.__async_client = async_client
+        self.data = SyncSequencesDataAPI(async_client)
+
+    @overload
+    def __call__(self, chunk_size: None = None) -> Iterator[Sequence]: ...
+
+    @overload
+    def __call__(self, chunk_size: int) -> Iterator[SequenceList]: ...
+
+    def __call__(
+        self,
+        chunk_size: int | None = None,
+        name: str | None = None,
+        external_id_prefix: str | None = None,
+        metadata: dict[str, str] | None = None,
+        asset_ids: typing.Sequence[int] | None = None,
+        asset_subtree_ids: int | typing.Sequence[int] | None = None,
+        asset_subtree_external_ids: str | SequenceNotStr[str] | None = None,
+        data_set_ids: int | typing.Sequence[int] | None = None,
+        data_set_external_ids: str | SequenceNotStr[str] | None = None,
+        created_time: dict[str, Any] | None = None,
+        last_updated_time: dict[str, Any] | None = None,
+        limit: int | None = None,
+        advanced_filter: Filter | dict[str, Any] | None = None,
+        sort: SortSpec | list[SortSpec] | None = None,
+    ) -> Iterator[Sequence | SequenceList]:
+        """
+        Iterate over sequences
+
+        Fetches sequences as they are iterated over, so you keep a limited number of objects in memory.
+
+        Args:
+            chunk_size (int | None): Number of sequences to return in each chunk. Defaults to yielding one sequence at a time.
+ name (str | None): Filter out sequences that do not have this *exact* name. + external_id_prefix (str | None): Filter out sequences that do not have this string as the start of the externalId + metadata (dict[str, str] | None): Filter out sequences that do not match these metadata fields and values (case-sensitive). Format is {"key1":"value1","key2":"value2"}. + asset_ids (typing.Sequence[int] | None): Filter out sequences that are not linked to any of these assets. + asset_subtree_ids (int | typing.Sequence[int] | None): Only include sequences that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include sequences that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids (int | typing.Sequence[int] | None): Return only sequences in the specified data set(s) with this id / these ids. + data_set_external_ids (str | SequenceNotStr[str] | None): Return only sequences in the specified data set(s) with this external id / these external ids. + created_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + limit (int | None): Max number of sequences to return. Defaults to return all items. + advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. + sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + + Yields: + Sequence | SequenceList: yields Sequence one by one if chunk_size is not specified, else SequenceList objects. + """ + yield from SyncIterator( + self.__async_client.sequences( + chunk_size=chunk_size, + name=name, + external_id_prefix=external_id_prefix, + metadata=metadata, + asset_ids=asset_ids, + asset_subtree_ids=asset_subtree_ids, + asset_subtree_external_ids=asset_subtree_external_ids, + data_set_ids=data_set_ids, + data_set_external_ids=data_set_external_ids, + created_time=created_time, + last_updated_time=last_updated_time, + limit=limit, + advanced_filter=advanced_filter, + sort=sort, + ) + ) + + def retrieve(self, id: int | None = None, external_id: str | None = None) -> Sequence | None: + """ + `Retrieve a single sequence by id. `_ + + Args: + id (int | None): ID + external_id (str | None): External ID + + Returns: + Sequence | None: Requested sequence or None if it does not exist. 
+
+        Examples:
+
+            Get sequence by id:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient()  # another option
+                >>> res = client.sequences.retrieve(id=1)
+
+            Get sequence by external id:
+
+                >>> res = client.sequences.retrieve(external_id="1")
+        """
+        return run_sync(self.__async_client.sequences.retrieve(id=id, external_id=external_id))
+
+    def retrieve_multiple(
+        self,
+        ids: typing.Sequence[int] | None = None,
+        external_ids: SequenceNotStr[str] | None = None,
+        ignore_unknown_ids: bool = False,
+    ) -> SequenceList:
+        """
+        `Retrieve multiple sequences by id. `_
+
+        Args:
+            ids (typing.Sequence[int] | None): IDs
+            external_ids (SequenceNotStr[str] | None): External IDs
+            ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+
+        Returns:
+            SequenceList: The requested sequences.
+
+        Examples:
+
+            Get sequences by id:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient()  # another option
+                >>> res = client.sequences.retrieve_multiple(ids=[1, 2, 3])
+
+            Get sequences by external id:
+
+                >>> res = client.sequences.retrieve_multiple(external_ids=["abc", "def"])
+        """
+        return run_sync(
+            self.__async_client.sequences.retrieve_multiple(
+                ids=ids, external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids
+            )
+        )
+
+    def aggregate_count(
+        self,
+        advanced_filter: Filter | dict[str, Any] | None = None,
+        filter: SequenceFilter | dict[str, Any] | None = None,
+    ) -> int:
+        """
+        `Count of sequences matching the specified filters and search. `_
+
+        Args:
+            advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count.
+            filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down sequences to count requiring exact match.
+
+        Returns:
+            int: The number of sequences matching the specified filters and search.
+
+        Examples:
+
+            Count the number of sequences in your CDF project:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient()  # another option
+                >>> count = client.sequences.aggregate_count()
+
+            Count the number of sequences with external id prefixed with "mapping:" in your CDF project:
+
+                >>> from cognite.client.data_classes import filters
+                >>> from cognite.client.data_classes.sequences import SequenceProperty
+                >>> is_mapping = filters.Prefix(SequenceProperty.external_id, "mapping:")
+                >>> count = client.sequences.aggregate_count(advanced_filter=is_mapping)
+        """
+        return run_sync(self.__async_client.sequences.aggregate_count(advanced_filter=advanced_filter, filter=filter))
+
+    def aggregate_cardinality_values(
+        self,
+        property: SequenceProperty | str | list[str],
+        advanced_filter: Filter | dict[str, Any] | None = None,
+        aggregate_filter: AggregationFilter | dict[str, Any] | None = None,
+        filter: SequenceFilter | dict[str, Any] | None = None,
+    ) -> int:
+        """
+        `Find approximate property count for sequences. `_
+
+        Args:
+            property (SequenceProperty | str | list[str]): The property to count the cardinality of.
+            advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count cardinality.
+            aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
+            filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down the sequences to count requiring exact match.
+
+        Returns:
+            int: The number of properties matching the specified filters and search.
+
+        Examples:
+
+            Count the number of different values for the metadata key "efficiency" used for sequences in your CDF project:
+
+                >>> from cognite.client import CogniteClient
+                >>> from cognite.client.data_classes.sequences import SequenceProperty
+                >>> client = CogniteClient()
+                >>> count = client.sequences.aggregate_cardinality_values(SequenceProperty.metadata_key("efficiency"))
+
+            Count the number of timezones (metadata key) for sequences with the word "critical" in the description
+            in your CDF project, but exclude timezones from america:
+
+                >>> from cognite.client.data_classes import filters, aggregations as aggs
+                >>> from cognite.client.data_classes.sequences import SequenceProperty
+                >>> not_america = aggs.Not(aggs.Prefix("america"))
+                >>> is_critical = filters.Search(SequenceProperty.description, "critical")
+                >>> timezone_count = client.sequences.aggregate_cardinality_values(
+                ...     SequenceProperty.metadata_key("timezone"),
+                ...     advanced_filter=is_critical,
+                ...     aggregate_filter=not_america)
+        """
+        return run_sync(
+            self.__async_client.sequences.aggregate_cardinality_values(
+                property=property, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter
+            )
+        )
+
+    def aggregate_cardinality_properties(
+        self,
+        path: SequenceProperty | str | list[str],
+        advanced_filter: Filter | dict[str, Any] | None = None,
+        aggregate_filter: AggregationFilter | dict[str, Any] | None = None,
+        filter: SequenceFilter | dict[str, Any] | None = None,
+    ) -> int:
+        """
+        `Find approximate paths count for sequences. `_
+
+        Args:
+            path (SequenceProperty | str | list[str]): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys).
+            advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count cardinality.
+            aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
+            filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down the sequences to count requiring exact match.
+
+        Returns:
+            int: The number of properties matching the specified filters and search.
+
+        Examples:
+
+            Count the number of different metadata keys in your CDF project:
+
+                >>> from cognite.client import CogniteClient
+                >>> from cognite.client.data_classes.sequences import SequenceProperty
+                >>> client = CogniteClient()
+                >>> count = client.sequences.aggregate_cardinality_properties(SequenceProperty.metadata)
+        """
+        return run_sync(
+            self.__async_client.sequences.aggregate_cardinality_properties(
+                path=path, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter
+            )
+        )
+
+    def aggregate_unique_values(
+        self,
+        property: SequenceProperty | str | list[str],
+        advanced_filter: Filter | dict[str, Any] | None = None,
+        aggregate_filter: AggregationFilter | dict[str, Any] | None = None,
+        filter: SequenceFilter | dict[str, Any] | None = None,
+    ) -> UniqueResultList:
+        """
+        `Get unique paths with counts for sequences. `_
+
+        Args:
+            property (SequenceProperty | str | list[str]): The property to group by.
+            advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count cardinality.
+ aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. + filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down the sequences to count requiring exact match. + + Returns: + UniqueResultList: List of unique values of sequences matching the specified filters and search. + + Examples: + + Get the timezones (metadata key) with count for your sequences in your CDF project: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.sequences import SequenceProperty + >>> client = CogniteClient() + >>> result = client.sequences.aggregate_unique_values(SequenceProperty.metadata_key("timezone")) + >>> print(result.unique) + + Get the different metadata keys with count used for sequences created after 2020-01-01 in your CDF project: + + >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes.sequences import SequenceProperty + >>> from cognite.client.utils import timestamp_to_ms + >>> from datetime import datetime + >>> created_after_2020 = filters.Range(SequenceProperty.created_time, gte=timestamp_to_ms(datetime(2020, 1, 1))) + >>> result = client.sequences.aggregate_unique_values(SequenceProperty.metadata, advanced_filter=created_after_2020) + >>> print(result.unique) + + Get the different metadata keys with count for sequences updated after 2020-01-01 in your CDF project, but exclude all metadata keys that + starts with "test": + + >>> from cognite.client.data_classes.sequences import SequenceProperty + >>> from cognite.client.data_classes import aggregations as aggs, filters + >>> not_test = aggs.Not(aggs.Prefix("test")) + >>> created_after_2020 = filters.Range(SequenceProperty.last_updated_time, gte=timestamp_to_ms(datetime(2020, 1, 1))) + >>> result = client.sequences.aggregate_unique_values(SequenceProperty.metadata, advanced_filter=created_after_2020, aggregate_filter=not_test) + >>> print(result.unique) + """ + return run_sync( + self.__async_client.sequences.aggregate_unique_values( + property=property, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter + ) + ) + + def aggregate_unique_properties( + self, + path: SequenceProperty | str | list[str], + advanced_filter: Filter | dict[str, Any] | None = None, + aggregate_filter: AggregationFilter | dict[str, Any] | None = None, + filter: SequenceFilter | dict[str, Any] | None = None, + ) -> UniqueResultList: + """ + `Find approximate unique sequence properties. `_ + + Args: + path (SequenceProperty | str | list[str]): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). + advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count cardinality. + aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. + filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down the sequences to count requiring exact match. + + Returns: + UniqueResultList: List of unique values of sequences matching the specified filters and search. 
+ + Examples: + + Get the metadata keys with count for your sequences in your CDF project: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.sequences import SequenceProperty + >>> client = CogniteClient() + >>> result = client.sequences.aggregate_unique_properties(SequenceProperty.metadata) + """ + return run_sync( + self.__async_client.sequences.aggregate_unique_properties( + path=path, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter + ) + ) + + @overload + def create(self, sequence: Sequence | SequenceWrite) -> Sequence: ... + + @overload + def create(self, sequence: typing.Sequence[Sequence] | typing.Sequence[SequenceWrite]) -> SequenceList: ... + + def create( + self, sequence: Sequence | SequenceWrite | typing.Sequence[Sequence] | typing.Sequence[SequenceWrite] + ) -> Sequence | SequenceList: + """ + `Create one or more sequences. `_ + + Args: + sequence (Sequence | SequenceWrite | typing.Sequence[Sequence] | typing.Sequence[SequenceWrite]): Sequence or list of Sequence to create. The Sequence columns parameter is a list of objects with fields `externalId` (external id of the column, when omitted, they will be given ids of 'column0, column1, ...'), `valueType` (data type of the column, either STRING, LONG, or DOUBLE, with default DOUBLE), `name`, `description`, `metadata` (optional fields to describe and store information about the data in the column). Other fields will be removed automatically, so a columns definition from a different sequence object can be passed here. + + Returns: + Sequence | SequenceList: The created sequence(s). + + Examples: + + Create a new sequence: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import SequenceWrite, SequenceColumnWrite + >>> client = CogniteClient() + >>> column_def = [ + ... SequenceColumnWrite(value_type="STRING", external_id="user", description="some description"), + ... SequenceColumnWrite(value_type="DOUBLE", external_id="amount") + ... ] + >>> seq = client.sequences.create(SequenceWrite(external_id="my_sequence", columns=column_def)) + + Create a new sequence with the same column specifications as an existing sequence: + + >>> seq2 = client.sequences.create(SequenceWrite(external_id="my_copied_sequence", columns=column_def)) + """ + return run_sync(self.__async_client.sequences.create(sequence=sequence)) + + def delete( + self, + id: int | typing.Sequence[int] | None = None, + external_id: str | SequenceNotStr[str] | None = None, + ignore_unknown_ids: bool = False, + ) -> None: + """ + `Delete one or more sequences. `_ + + Args: + id (int | typing.Sequence[int] | None): Id or list of ids + external_id (str | SequenceNotStr[str] | None): External ID or list of external ids + ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + + Examples: + + Delete sequences by id or external id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.sequences.delete(id=[1,2,3], external_id="3") + """ + return run_sync( + self.__async_client.sequences.delete(id=id, external_id=external_id, ignore_unknown_ids=ignore_unknown_ids) + ) + + @overload + def update( + self, + item: Sequence | SequenceWrite | SequenceUpdate, + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> Sequence: ... 
+ + @overload + def update( + self, + item: typing.Sequence[Sequence | SequenceWrite | SequenceUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> SequenceList: ... + + def update( + self, + item: Sequence | SequenceWrite | SequenceUpdate | typing.Sequence[Sequence | SequenceWrite | SequenceUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> Sequence | SequenceList: + """ + `Update one or more sequences. `_ + + Args: + item (Sequence | SequenceWrite | SequenceUpdate | typing.Sequence[Sequence | SequenceWrite | SequenceUpdate]): Sequences to update + mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Sequence or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + + Returns: + Sequence | SequenceList: Updated sequences. + + Examples: + + Update a sequence that you have fetched. This will perform a full update of the sequences: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.sequences.retrieve(id=1) + >>> res.description = "New description" + >>> res = client.sequences.update(res) + + Perform a partial update on a sequence, updating the description and adding a new field to metadata: + + >>> from cognite.client.data_classes import SequenceUpdate + >>> my_update = SequenceUpdate(id=1).description.set("New description").metadata.add({"key": "value"}) + >>> res = client.sequences.update(my_update) + + **Updating column definitions** + + Currently, updating the column definitions of a sequence is only supported through partial update, using `add`, `remove` and `modify` methods on the `columns` property. + + Add a single new column: + + >>> from cognite.client.data_classes import SequenceUpdate, SequenceColumnWrite + >>> + >>> my_update = SequenceUpdate(id=1).columns.add( + ... SequenceColumnWrite(value_type ="STRING",external_id="user", description ="some description") + ... ) + >>> res = client.sequences.update(my_update) + + Add multiple new columns: + + >>> from cognite.client.data_classes import SequenceUpdate, SequenceColumnWrite + >>> + >>> column_def = [ + ... SequenceColumnWrite(value_type ="STRING",external_id="user", description ="some description"), + ... SequenceColumnWrite(value_type="DOUBLE", external_id="amount") + ... ] + >>> my_update = SequenceUpdate(id=1).columns.add(column_def) + >>> res = client.sequences.update(my_update) + + Remove a single column: + + >>> from cognite.client.data_classes import SequenceUpdate + >>> + >>> my_update = SequenceUpdate(id=1).columns.remove("col_external_id1") + >>> res = client.sequences.update(my_update) + + Remove multiple columns: + + >>> from cognite.client.data_classes import SequenceUpdate + >>> + >>> my_update = SequenceUpdate(id=1).columns.remove(["col_external_id1","col_external_id2"]) + >>> res = client.sequences.update(my_update) + + Update existing columns: + + >>> from cognite.client.data_classes import SequenceUpdate, SequenceColumnUpdate + >>> + >>> column_updates = [ + ... 
SequenceColumnUpdate(external_id="col_external_id_1").external_id.set("new_col_external_id"), + ... SequenceColumnUpdate(external_id="col_external_id_2").description.set("my new description"), + ... ] + >>> my_update = SequenceUpdate(id=1).columns.modify(column_updates) + >>> res = client.sequences.update(my_update) + """ + return run_sync(self.__async_client.sequences.update(item=item, mode=mode)) + + @overload + def upsert( + self, item: typing.Sequence[Sequence | SequenceWrite], mode: Literal["patch", "replace"] = "patch" + ) -> SequenceList: ... + + @overload + def upsert(self, item: Sequence | SequenceWrite, mode: Literal["patch", "replace"] = "patch") -> Sequence: ... + + def upsert( + self, + item: Sequence | SequenceWrite | typing.Sequence[Sequence | SequenceWrite], + mode: Literal["patch", "replace"] = "patch", + ) -> Sequence | SequenceList: + """ + Upsert sequences, i.e., update if it exists, and create if it does not exist. + Note this is a convenience method that handles the upserting for you by first calling update on all items, + and if any of them fail because they do not exist, it will create them instead. + + For more details, see :ref:`appendix-upsert`. + + Args: + item (Sequence | SequenceWrite | typing.Sequence[Sequence | SequenceWrite]): Sequence or list of sequences to upsert. + mode (Literal['patch', 'replace']): Whether to patch or replace in the case the sequences are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. + + Returns: + Sequence | SequenceList: The upserted sequence(s). + + Examples: + + Upsert for sequences: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import SequenceWrite, SequenceColumnWrite + >>> client = CogniteClient() + >>> existing_sequence = client.sequences.retrieve(id=1) + >>> existing_sequence.description = "New description" + >>> new_sequence = SequenceWrite( + ... external_id="new_sequence", + ... description="New sequence", + ... columns=[SequenceColumnWrite(external_id="col1", value_type="STRING")] + ... ) + >>> res = client.sequences.upsert([existing_sequence, new_sequence], mode="replace") + """ + return run_sync(self.__async_client.sequences.upsert(item=item, mode=mode)) + + def search( + self, + name: str | None = None, + description: str | None = None, + query: str | None = None, + filter: SequenceFilter | dict[str, Any] | None = None, + limit: int = DEFAULT_LIMIT_READ, + ) -> SequenceList: + """ + `Search for sequences. `_ + Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required. + + Args: + name (str | None): Prefix and fuzzy search on name. + description (str | None): Prefix and fuzzy search on description. + query (str | None): Search on name and description using wildcard search on each of the words (separated by spaces). Retrieves results where at least one word must match. Example: 'some other' + filter (SequenceFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields. + limit (int): Max number of results to return. 
+ + Returns: + SequenceList: The search result as a SequenceList + + Examples: + + Search for a sequence: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.sequences.search(name="some name") + """ + return run_sync( + self.__async_client.sequences.search( + name=name, description=description, query=query, filter=filter, limit=limit + ) + ) + + def list( + self, + name: str | None = None, + external_id_prefix: str | None = None, + metadata: dict[str, str] | None = None, + asset_ids: typing.Sequence[int] | None = None, + asset_subtree_ids: int | typing.Sequence[int] | None = None, + asset_subtree_external_ids: str | SequenceNotStr[str] | None = None, + data_set_ids: int | typing.Sequence[int] | None = None, + data_set_external_ids: str | SequenceNotStr[str] | None = None, + created_time: dict[str, Any] | TimestampRange | None = None, + last_updated_time: dict[str, Any] | TimestampRange | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + partitions: int | None = None, + advanced_filter: Filter | dict[str, Any] | None = None, + sort: SortSpec | list[SortSpec] | None = None, + ) -> SequenceList: + """ + `List sequences `_ + + Args: + name (str | None): Filter out sequences that do not have this *exact* name. + external_id_prefix (str | None): Filter out sequences that do not have this string as the start of the externalId + metadata (dict[str, str] | None): Filter out sequences that do not match these metadata fields and values (case-sensitive). Format is {"key1":"value1","key2":"value2"}. + asset_ids (typing.Sequence[int] | None): Filter out sequences that are not linked to any of these assets. + asset_subtree_ids (int | typing.Sequence[int] | None): Only include sequences that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include sequences that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids (int | typing.Sequence[int] | None): Return only sequences in the specified data set(s) with this id / these ids. + data_set_external_ids (str | SequenceNotStr[str] | None): Return only sequences in the specified data set(s) with this external id / these external ids. + created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + limit (int | None): Max number of sequences to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). + advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage. + sort (SortSpec | list[SortSpec] | None): The criteria to sort by. 
Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + + Returns: + SequenceList: The requested sequences. + + .. note:: + When using `partitions`, there are few considerations to keep in mind: + * `limit` has to be set to `None` (or `-1`). + * API may reject requests if you specify more than 10 partitions. When Cognite enforces this behavior, the requests result in a 400 Bad Request status. + * Partitions are done independently of sorting: there's no guarantee of the sort order between elements from different partitions. For this reason providing a `sort` parameter when using `partitions` is not allowed. + + + Examples: + + List sequences: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.sequences.list(limit=5) + + Iterate over sequences, one-by-one: + + >>> for seq in client.sequences(): + ... seq # do something with the sequence + + Iterate over chunks of sequences to reduce memory load: + + >>> for seq_list in client.sequences(chunk_size=2500): + ... seq_list # do something with the sequences + + Using advanced filter, find all sequences that have a metadata key 'timezone' starting with 'Europe', + and sort by external id ascending: + + >>> from cognite.client.data_classes import filters + >>> in_timezone = filters.Prefix(["metadata", "timezone"], "Europe") + >>> res = client.sequences.list(advanced_filter=in_timezone, sort=("external_id", "asc")) + + Note that you can check the API documentation above to see which properties you can filter on + with which filters. + + To make it easier to avoid spelling mistakes and easier to look up available properties + for filtering and sorting, you can also use the `SequenceProperty` and `SortableSequenceProperty` Enums. + + >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes.sequences import SequenceProperty, SortableSequenceProperty + >>> in_timezone = filters.Prefix(SequenceProperty.metadata_key("timezone"), "Europe") + >>> res = client.sequences.list( + ... advanced_filter=in_timezone, + ... sort=(SortableSequenceProperty.external_id, "asc")) + + Combine filter and advanced filter: + + >>> from cognite.client.data_classes import filters + >>> not_instrument_lvl5 = filters.And( + ... filters.ContainsAny("labels", ["Level5"]), + ... filters.Not(filters.ContainsAny("labels", ["Instrument"])) + ... ) + >>> res = client.sequences.list(asset_subtree_ids=[123456], advanced_filter=not_instrument_lvl5) + """ + return run_sync( + self.__async_client.sequences.list( + name=name, + external_id_prefix=external_id_prefix, + metadata=metadata, + asset_ids=asset_ids, + asset_subtree_ids=asset_subtree_ids, + asset_subtree_external_ids=asset_subtree_external_ids, + data_set_ids=data_set_ids, + data_set_external_ids=data_set_external_ids, + created_time=created_time, + last_updated_time=last_updated_time, + limit=limit, + partitions=partitions, + advanced_filter=advanced_filter, + sort=sort, + ) + ) diff --git a/cognite/client/_sync_api/simulators/__init__.py b/cognite/client/_sync_api/simulators/__init__.py new file mode 100644 index 0000000000..a9bf772981 --- /dev/null +++ b/cognite/client/_sync_api/simulators/__init__.py @@ -0,0 +1,81 @@ +""" +=============================================================================== +a7c293e0cee8a18b88bd0f3649a337a3 +This file is auto-generated from the Async API modules, - do not edit manually! 
+=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator +from typing import TYPE_CHECKING, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api.simulators.integrations import SyncSimulatorIntegrationsAPI +from cognite.client._sync_api.simulators.logs import SyncSimulatorLogsAPI +from cognite.client._sync_api.simulators.models import SyncSimulatorModelsAPI +from cognite.client._sync_api.simulators.routines import SyncSimulatorRoutinesAPI +from cognite.client._sync_api.simulators.runs import SyncSimulatorRunsAPI +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.simulators.simulators import Simulator, SimulatorList +from cognite.client.utils._async_helpers import SyncIterator, run_sync + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncSimulatorsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + self.integrations = SyncSimulatorIntegrationsAPI(async_client) + self.models = SyncSimulatorModelsAPI(async_client) + self.runs = SyncSimulatorRunsAPI(async_client) + self.routines = SyncSimulatorRoutinesAPI(async_client) + self.logs = SyncSimulatorLogsAPI(async_client) + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[Simulator]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[SimulatorList]: ... + + def __call__(self, chunk_size: int | None = None, limit: int | None = None) -> Iterator[Simulator | SimulatorList]: + """ + Iterate over simulators + + Fetches simulators as they are iterated over, so you keep a limited number of simulators in memory. + + Args: + chunk_size (int | None): Number of simulators to return in each chunk. Defaults to yielding one simulator a time. + limit (int | None): Maximum number of simulators to return. Defaults to return all items. + + Yields: + Simulator | SimulatorList: yields Simulator one by one if chunk is not specified, else SimulatorList objects. + """ + yield from SyncIterator(self.__async_client.simulators(chunk_size=chunk_size, limit=limit)) + + def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> SimulatorList: + """ + `List all simulators `_ + + Args: + limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. + + Returns: + SimulatorList: List of simulators + + Examples: + List simulators: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.simulators.list(limit=10) + + Iterate over simulators, one-by-one: + >>> for simulator in client.simulators(): + ... simulator # do something with the simulator + """ + return run_sync(self.__async_client.simulators.list(limit=limit)) diff --git a/cognite/client/_sync_api/simulators/integrations.py b/cognite/client/_sync_api/simulators/integrations.py new file mode 100644 index 0000000000..a69ca12c7a --- /dev/null +++ b/cognite/client/_sync_api/simulators/integrations.py @@ -0,0 +1,122 @@ +""" +=============================================================================== +f0ef21e3c2f87b01ca37ee1c89d64b8c +This file is auto-generated from the Async API modules, - do not edit manually! 
+=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.simulators.simulators import SimulatorIntegration, SimulatorIntegrationList +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncSimulatorIntegrationsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, chunk_size: int) -> Iterator[SimulatorIntegrationList]: ... + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[SimulatorIntegration]: ... + + def __call__( + self, + chunk_size: int | None = None, + simulator_external_ids: str | SequenceNotStr[str] | None = None, + active: bool | None = None, + limit: int | None = None, + ) -> Iterator[SimulatorIntegration | SimulatorIntegrationList]: + """ + Iterate over simulator integrations + + Fetches simulator integrations as they are iterated over, so you keep a limited number of simulator integrations in memory. + + Args: + chunk_size (int | None): Number of simulator integrations to return in each chunk. Defaults to yielding one simulator integration a time. + simulator_external_ids (str | SequenceNotStr[str] | None): Filter on simulator external ids. + active (bool | None): Filter on active status of the simulator integration. + limit (int | None): The maximum number of simulator integrations to return, pass None to return all. + + Yields: + SimulatorIntegration | SimulatorIntegrationList: yields SimulatorIntegration one by one if chunk_size is not specified, else SimulatorIntegrationList objects. + """ + yield from SyncIterator( + self.__async_client.simulators.integrations( + chunk_size=chunk_size, simulator_external_ids=simulator_external_ids, active=active, limit=limit + ) + ) + + def list( + self, + limit: int | None = DEFAULT_LIMIT_READ, + simulator_external_ids: str | SequenceNotStr[str] | None = None, + active: bool | None = None, + ) -> SimulatorIntegrationList: + """ + `Filter simulator integrations `_ + + Retrieves a list of simulator integrations that match the given criteria. + + Args: + limit (int | None): The maximum number of simulator integrations to return, pass None to return all. + simulator_external_ids (str | SequenceNotStr[str] | None): Filter on simulator external ids. + active (bool | None): Filter on active status of the simulator integration. + + Returns: + SimulatorIntegrationList: List of simulator integrations + + Examples: + List a few simulator integrations: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.simulators.integrations.list(limit=10) + + Iterate over simulator integrations, one-by-one: + >>> for integration in client.simulators.integrations(): + ... integration # do something with the simulator integration + + Filter simulator integrations by simulator external ids and active status: + >>> res = client.simulators.integrations.list( + ... 
simulator_external_ids=["sim1", "sim2"], + ... active=True, + ... ) + """ + return run_sync( + self.__async_client.simulators.integrations.list( + limit=limit, simulator_external_ids=simulator_external_ids, active=active + ) + ) + + def delete( + self, ids: int | Sequence[int] | None = None, external_ids: str | SequenceNotStr[str] | None = None + ) -> None: + """ + `Delete simulator integrations `_ + + Args: + ids (int | Sequence[int] | None): Id(s) of simulator integrations to delete + external_ids (str | SequenceNotStr[str] | None): External_id(s) of simulator integrations to delete + + Examples: + Delete simulator integrations by id or external id: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.simulators.integrations.delete(ids=[1,2,3], external_ids="foo") + """ + return run_sync(self.__async_client.simulators.integrations.delete(ids=ids, external_ids=external_ids)) diff --git a/cognite/client/_sync_api/simulators/logs.py b/cognite/client/_sync_api/simulators/logs.py new file mode 100644 index 0000000000..8844e3d387 --- /dev/null +++ b/cognite/client/_sync_api/simulators/logs.py @@ -0,0 +1,70 @@ +""" +=============================================================================== +a0924757f3aa2b1e9014f5bc1247ee5f +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import TYPE_CHECKING, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.simulators.logs import SimulatorLog, SimulatorLogList +from cognite.client.utils._async_helpers import run_sync + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncSimulatorLogsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def retrieve(self, ids: int) -> SimulatorLog | None: ... + + @overload + def retrieve(self, ids: Sequence[int]) -> SimulatorLogList | None: ... + + def retrieve(self, ids: int | Sequence[int]) -> SimulatorLogList | SimulatorLog | None: + """ + `Retrieve simulator logs `_ + + Simulator logs track what happens during simulation runs, model parsing, and generic connector logic. + They provide valuable information for monitoring, debugging, and auditing. + + Simulator logs capture important events, messages, and exceptions that occur during the execution of simulations, model parsing, and connector operations. + They help users identify issues, diagnose problems, and gain insights into the behavior of the simulator integrations. + + Args: + ids (int | Sequence[int]): The ids of the simulator log. 
+ + Returns: + SimulatorLogList | SimulatorLog | None: Requested simulator log(s) + + Examples: + Get simulator logs by simulator model id: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> model = client.simulators.models.retrieve(ids=1) + >>> logs = client.simulators.logs.retrieve(ids=model.log_id) + + Get simulator logs by simulator integration id: + >>> integrations = client.simulators.integrations.list() + >>> logs = client.simulators.logs.retrieve(ids=integrations[0].log_id) + + Get simulator logs by simulation run id: + >>> run = client.simulators.runs.retrieve(ids=1) + >>> logs = client.simulators.logs.retrieve(ids=run.log_id) + + Get simulator logs directly on a simulation run object: + >>> run = client.simulators.runs.retrieve(ids=2) + >>> res = run.get_logs() + """ + return run_sync(self.__async_client.simulators.logs.retrieve(ids=ids)) diff --git a/cognite/client/_sync_api/simulators/models.py b/cognite/client/_sync_api/simulators/models.py new file mode 100644 index 0000000000..47492244af --- /dev/null +++ b/cognite/client/_sync_api/simulators/models.py @@ -0,0 +1,251 @@ +""" +=============================================================================== +ca070d285539cd38d07de5a0322d3737 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api.simulators.models_revisions import SyncSimulatorModelRevisionsAPI +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.simulators.filters import PropertySort +from cognite.client.data_classes.simulators.models import ( + SimulatorModel, + SimulatorModelList, + SimulatorModelUpdate, + SimulatorModelWrite, +) +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncSimulatorModelsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + self.revisions = SyncSimulatorModelRevisionsAPI(async_client) + + def list( + self, + limit: int | None = DEFAULT_LIMIT_READ, + simulator_external_ids: str | SequenceNotStr[str] | None = None, + sort: PropertySort | None = None, + ) -> SimulatorModelList: + """ + `Filter simulator models `_ + + Retrieves a list of simulator models that match the given criteria. + + Args: + limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. + simulator_external_ids (str | SequenceNotStr[str] | None): Filter by simulator external id(s). + sort (PropertySort | None): The criteria to sort by. 
+ + Returns: + SimulatorModelList: List of simulator models + + Examples: + List simulator models: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.simulators.models.list(limit=10) + + Iterate over simulator models, one-by-one: + >>> for model in client.simulators.models(): + ... model # do something with the simulator model + + Specify filter and sort order: + >>> from cognite.client.data_classes.simulators.filters import PropertySort + >>> res = client.simulators.models.list( + ... simulator_external_ids=["simulator_external_id"], + ... sort=PropertySort( + ... property="createdTime", + ... order="asc" + ... ) + ... ) + """ + return run_sync( + self.__async_client.simulators.models.list( + limit=limit, simulator_external_ids=simulator_external_ids, sort=sort + ) + ) + + @overload + def retrieve(self, *, ids: int) -> SimulatorModel | None: ... + + @overload + def retrieve(self, *, external_ids: str) -> SimulatorModel | None: ... + + @overload + def retrieve(self, *, ids: Sequence[int]) -> SimulatorModelList: ... + + @overload + def retrieve(self, *, external_ids: SequenceNotStr[str]) -> SimulatorModelList: ... + + def retrieve( + self, *, ids: int | Sequence[int] | None = None, external_ids: str | SequenceNotStr[str] | None = None + ) -> SimulatorModel | SimulatorModelList | None: + """ + `Retrieve simulator models `_ + + Retrieve one or more simulator models by ID(s) or external ID(s). + + Args: + ids (int | Sequence[int] | None): The id of the simulator model(s). + external_ids (str | SequenceNotStr[str] | None): The external id of the simulator model(s). + + Returns: + SimulatorModel | SimulatorModelList | None: Requested simulator model(s) + + Examples: + Get simulator model by id: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.simulators.models.retrieve(ids=1) + + Get simulator model by external id: + >>> res = client.simulators.models.retrieve(external_ids="model_external_id") + + Get multiple simulator models by ids: + >>> res = client.simulators.models.retrieve(ids=[1,2]) + + Get multiple simulator models by external ids: + >>> res = client.simulators.models.retrieve( + ... external_ids=["model_external_id", "model_external_id2"] + ... ) + """ + return run_sync(self.__async_client.simulators.models.retrieve(ids=ids, external_ids=external_ids)) + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[SimulatorModel]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[SimulatorModelList]: ... + + def __call__( + self, + chunk_size: int | None = None, + simulator_external_ids: str | SequenceNotStr[str] | None = None, + sort: PropertySort | None = None, + limit: int | None = None, + ) -> Iterator[SimulatorModel | SimulatorModelList]: + """ + Iterate over simulator simulator models + + Fetches simulator models as they are iterated over, so you keep a limited number of simulator models in memory. + + Args: + chunk_size (int | None): Number of simulator models to return in each chunk. Defaults to yielding one simulator model a time. + simulator_external_ids (str | SequenceNotStr[str] | None): Filter by simulator external id(s). + sort (PropertySort | None): The criteria to sort by. + limit (int | None): Maximum number of results to return. Defaults to 25. 
Set to -1, float(“inf”) or None to return all items. + + Yields: + SimulatorModel | SimulatorModelList: yields SimulatorModel one by one if chunk is not specified, else SimulatorModelList objects. + """ + yield from SyncIterator( + self.__async_client.simulators.models( + chunk_size=chunk_size, simulator_external_ids=simulator_external_ids, sort=sort, limit=limit + ) + ) + + @overload + def create(self, items: SimulatorModelWrite) -> SimulatorModel: ... + + @overload + def create(self, items: Sequence[SimulatorModelWrite]) -> SimulatorModelList: ... + + def create(self, items: SimulatorModelWrite | Sequence[SimulatorModelWrite]) -> SimulatorModel | SimulatorModelList: + """ + `Create simulator models `_ + + Args: + items (SimulatorModelWrite | Sequence[SimulatorModelWrite]): The model(s) to create. + + Returns: + SimulatorModel | SimulatorModelList: Created simulator model(s) + + Examples: + Create new simulator models: + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.simulators import SimulatorModelWrite + >>> client = CogniteClient() + >>> models = [ + ... SimulatorModelWrite( + ... name="model1", simulator_external_id="sim1", type="SteadyState", + ... data_set_id=1, external_id="model_external_id" + ... ), + ... SimulatorModelWrite( + ... name="model2", simulator_external_id="sim2", type="SteadyState", + ... data_set_id=2, external_id="model_external_id2" + ... ) + ... ] + >>> res = client.simulators.models.create(models) + """ + return run_sync(self.__async_client.simulators.models.create(items=items)) + + def delete( + self, ids: int | Sequence[int] | None = None, external_ids: str | SequenceNotStr[str] | None = None + ) -> None: + """ + `Delete simulator models `_ + + Args: + ids (int | Sequence[int] | None): id (or sequence of ids) for the model(s) to delete. + external_ids (str | SequenceNotStr[str] | None): external id (or sequence of external ids) for the model(s) to delete. + + Examples: + Delete simulator models by id or external id: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.simulators.models.delete(ids=[1,2,3], external_ids="model_external_id") + """ + return run_sync(self.__async_client.simulators.models.delete(ids=ids, external_ids=external_ids)) + + @overload + def update( + self, items: Sequence[SimulatorModel | SimulatorModelWrite | SimulatorModelUpdate] + ) -> SimulatorModelList: ... + + @overload + def update(self, items: SimulatorModel | SimulatorModelWrite | SimulatorModelUpdate) -> SimulatorModel: ... + + def update( + self, + items: SimulatorModel + | SimulatorModelWrite + | SimulatorModelUpdate + | Sequence[SimulatorModel | SimulatorModelWrite | SimulatorModelUpdate], + ) -> SimulatorModel | SimulatorModelList: + """ + `Update simulator models `_ + + Args: + items (SimulatorModel | SimulatorModelWrite | SimulatorModelUpdate | Sequence[SimulatorModel | SimulatorModelWrite | SimulatorModelUpdate]): The model to update. + + Returns: + SimulatorModel | SimulatorModelList: Updated simulator model(s) + + Examples: + Update a simulator model that you have fetched. 
This will perform a full update of the model: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> model = client.simulators.models.retrieve(external_ids="model_external_id") + >>> model.name = "new_name" + >>> res = client.simulators.models.update(model) + """ + return run_sync(self.__async_client.simulators.models.update(items=items)) diff --git a/cognite/client/_sync_api/simulators/models_revisions.py b/cognite/client/_sync_api/simulators/models_revisions.py new file mode 100644 index 0000000000..8ca08faa00 --- /dev/null +++ b/cognite/client/_sync_api/simulators/models_revisions.py @@ -0,0 +1,231 @@ +""" +=============================================================================== +7619ac448bf753adf6658c74b2748fd6 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.shared import TimestampRange +from cognite.client.data_classes.simulators.filters import PropertySort +from cognite.client.data_classes.simulators.models import ( + SimulatorModelRevision, + SimulatorModelRevisionList, + SimulatorModelRevisionWrite, +) +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncSimulatorModelRevisionsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def list( + self, + limit: int = DEFAULT_LIMIT_READ, + sort: PropertySort | None = None, + model_external_ids: str | SequenceNotStr[str] | None = None, + all_versions: bool | None = None, + created_time: TimestampRange | None = None, + last_updated_time: TimestampRange | None = None, + ) -> SimulatorModelRevisionList: + """ + `Filter simulator model revisions `_ + + Retrieves a list of simulator model revisions that match the given criteria. + + Args: + limit (int): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. + sort (PropertySort | None): The criteria to sort by. + model_external_ids (str | SequenceNotStr[str] | None): The external ids of the simulator models to filter by. + all_versions (bool | None): If True, all versions of the simulator model revisions are returned. If False, only the latest version is returned. + created_time (TimestampRange | None): Filter by created time. + last_updated_time (TimestampRange | None): Filter by last updated time. 
+ + Returns: + SimulatorModelRevisionList: List of simulator model revisions + + Examples: + List simulator model revisions: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.simulators.models.revisions.list(limit=10) + + Specify filter and sort order: + >>> from cognite.client.data_classes.simulators.filters import PropertySort + >>> from cognite.client.data_classes.shared import TimestampRange + >>> res = client.simulators.models.revisions.list( + ... model_external_ids=["model1", "model2"], + ... all_versions=True, + ... created_time=TimestampRange(min=0, max=1000000), + ... last_updated_time=TimestampRange(min=0, max=1000000), + ... sort=PropertySort(order="asc", property="createdTime"), + ... limit=10 + ... ) + """ + return run_sync( + self.__async_client.simulators.models.revisions.list( + limit=limit, + sort=sort, + model_external_ids=model_external_ids, + all_versions=all_versions, + created_time=created_time, + last_updated_time=last_updated_time, + ) + ) + + @overload + def retrieve(self, *, ids: int) -> SimulatorModelRevision | None: ... + + @overload + def retrieve(self, *, external_ids: str) -> SimulatorModelRevision | None: ... + + @overload + def retrieve(self, *, ids: Sequence[int]) -> SimulatorModelRevisionList: ... + + @overload + def retrieve(self, *, external_ids: SequenceNotStr[str]) -> SimulatorModelRevisionList: ... + + def retrieve( + self, *, ids: int | Sequence[int] | None = None, external_ids: str | SequenceNotStr[str] | None = None + ) -> SimulatorModelRevision | SimulatorModelRevisionList | None: + """ + `Retrieve simulator model revisions `_ + + Retrieve one or more simulator model revisions by ID(s) or external ID(s). + + Args: + ids (int | Sequence[int] | None): The ids of the simulator model revisions. + external_ids (str | SequenceNotStr[str] | None): The external ids of the simulator model revisions. + + Returns: + SimulatorModelRevision | SimulatorModelRevisionList | None: Requested simulator model revision(s) + + Examples: + Get simulator model revision by id: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.simulators.models.revisions.retrieve(ids=1) + + Get simulator model revision by external id: + >>> res = client.simulators.models.revisions.retrieve( + ... external_ids="revision_external_id" + ... ) + + Get multiple simulator model revisions by ids: + >>> res = client.simulators.models.revisions.retrieve(ids=[1,2]) + + Get multiple simulator model revisions by external ids: + >>> res = client.simulators.models.revisions.retrieve( + ... external_ids=["revision1", "revision2"] + ... ) + """ + return run_sync(self.__async_client.simulators.models.revisions.retrieve(ids=ids, external_ids=external_ids)) + + @overload + def __call__(self, chunk_size: int) -> Iterator[SimulatorModelRevisionList]: ... + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[SimulatorModelRevision]: ... 
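+    # The overloads above mirror the async API: with chunk_size=None the iterator yields
+    # SimulatorModelRevision objects one at a time, while an integer chunk_size yields
+    # SimulatorModelRevisionList chunks. The implementation below only bridges the async
+    # iterator on the wrapped AsyncCogniteClient through SyncIterator.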
+ + def __call__( + self, + chunk_size: int | None = None, + sort: PropertySort | None = None, + model_external_ids: str | SequenceNotStr[str] | None = None, + all_versions: bool | None = None, + created_time: TimestampRange | None = None, + last_updated_time: TimestampRange | None = None, + limit: int | None = None, + ) -> Iterator[SimulatorModelRevision | SimulatorModelRevisionList]: + """ + Iterate over simulator simulator model revisions + + Fetches simulator model revisions as they are iterated over, so you keep a limited number of simulator model revisions in memory. + + Args: + chunk_size (int | None): Number of simulator model revisions to return in each chunk. Defaults to yielding one simulator model revision a time. + sort (PropertySort | None): The criteria to sort by. + model_external_ids (str | SequenceNotStr[str] | None): The external ids of the simulator models to filter by. + all_versions (bool | None): If True, all versions of the simulator model revisions are returned. If False, only the latest version is returned. + created_time (TimestampRange | None): Filter by created time. + last_updated_time (TimestampRange | None): Filter by last updated time. + limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. + + Yields: + SimulatorModelRevision | SimulatorModelRevisionList: yields SimulatorModelRevision one by one if chunk is not specified, else SimulatorModelRevisionList objects. + """ + yield from SyncIterator( + self.__async_client.simulators.models.revisions( + chunk_size=chunk_size, + sort=sort, + model_external_ids=model_external_ids, + all_versions=all_versions, + created_time=created_time, + last_updated_time=last_updated_time, + limit=limit, + ) + ) + + @overload + def create(self, items: SimulatorModelRevisionWrite) -> SimulatorModelRevision: ... + + @overload + def create(self, items: Sequence[SimulatorModelRevisionWrite]) -> SimulatorModelRevisionList: ... + + def create( + self, items: SimulatorModelRevisionWrite | Sequence[SimulatorModelRevisionWrite] + ) -> SimulatorModelRevision | SimulatorModelRevisionList: + """ + `Create simulator model revisions `_ + + Args: + items (SimulatorModelRevisionWrite | Sequence[SimulatorModelRevisionWrite]): The model revision(s) to create. + + Returns: + SimulatorModelRevision | SimulatorModelRevisionList: Created simulator model revision(s) + + Examples: + Create new simulator model revisions: + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.simulators import SimulatorModelRevisionWrite, SimulatorModelDependencyFileId, SimulatorModelRevisionDependency + >>> client = CogniteClient() + >>> revisions = [ + ... SimulatorModelRevisionWrite( + ... external_id="revision1", + ... file_id=1, + ... model_external_id="a_1", + ... ), + ... SimulatorModelRevisionWrite( + ... external_id="revision2", + ... file_id=2, + ... model_external_id="a_2", + ... external_dependencies = [ + ... SimulatorModelRevisionDependency( + ... file=SimulatorModelDependencyFileId(id=123), + ... arguments={ + ... "fieldA": "value1", + ... "fieldB": "value2", + ... }, + ... ) + ... ] + ... ), + ... 
] + >>> res = client.simulators.models.revisions.create(revisions) + """ + return run_sync(self.__async_client.simulators.models.revisions.create(items=items)) diff --git a/cognite/client/_sync_api/simulators/routine_revisions.py b/cognite/client/_sync_api/simulators/routine_revisions.py new file mode 100644 index 0000000000..1a27c5426c --- /dev/null +++ b/cognite/client/_sync_api/simulators/routine_revisions.py @@ -0,0 +1,309 @@ +""" +=============================================================================== +a28661302438648ad307b55a1e1a5010 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.shared import TimestampRange +from cognite.client.data_classes.simulators import PropertySort +from cognite.client.data_classes.simulators.routine_revisions import ( + SimulatorRoutineRevision, + SimulatorRoutineRevisionList, + SimulatorRoutineRevisionWrite, +) +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncSimulatorRoutineRevisionsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, chunk_size: int) -> Iterator[SimulatorRoutineRevisionList]: ... + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[SimulatorRoutineRevision]: ... + + def __call__( + self, + chunk_size: int | None = None, + routine_external_ids: SequenceNotStr[str] | None = None, + model_external_ids: SequenceNotStr[str] | None = None, + simulator_integration_external_ids: SequenceNotStr[str] | None = None, + simulator_external_ids: SequenceNotStr[str] | None = None, + created_time: TimestampRange | None = None, + all_versions: bool = False, + include_all_fields: bool = False, + limit: int | None = None, + sort: PropertySort | None = None, + ) -> Iterator[SimulatorRoutineRevision | SimulatorRoutineRevisionList]: + """ + Iterate over simulator routine revisions + + Fetches simulator routine revisions as they are iterated over, so you keep a limited number of simulator routine revisions in memory. + + Args: + chunk_size (int | None): Number of simulator routine revisions to return in each chunk. Defaults to yielding one simulator routine revision a time. + routine_external_ids (SequenceNotStr[str] | None): Filter on routine external ids. + model_external_ids (SequenceNotStr[str] | None): Filter on model external ids. + simulator_integration_external_ids (SequenceNotStr[str] | None): Filter on simulator integration external ids. + simulator_external_ids (SequenceNotStr[str] | None): Filter on simulator external ids. + created_time (TimestampRange | None): Filter on created time. + all_versions (bool): If all versions of the routine should be returned. Defaults to false which only returns the latest version. + include_all_fields (bool): If all fields should be included in the response. Defaults to false which does not include script, configuration.inputs and configuration.outputs in the response. 
+ limit (int | None): Maximum number of simulator routine revisions to return. Defaults to return all items. + sort (PropertySort | None): The criteria to sort by. + + Yields: + SimulatorRoutineRevision | SimulatorRoutineRevisionList: yields SimulatorRoutineRevision one by one if chunk is not specified, else SimulatorRoutineRevisionList objects. + """ + yield from SyncIterator( + self.__async_client.simulators.routines.revisions( + chunk_size=chunk_size, + routine_external_ids=routine_external_ids, + model_external_ids=model_external_ids, + simulator_integration_external_ids=simulator_integration_external_ids, + simulator_external_ids=simulator_external_ids, + created_time=created_time, + all_versions=all_versions, + include_all_fields=include_all_fields, + limit=limit, + sort=sort, + ) + ) + + @overload + def retrieve(self, *, ids: int) -> SimulatorRoutineRevision | None: ... + + @overload + def retrieve(self, *, external_ids: str) -> SimulatorRoutineRevision | None: ... + + @overload + def retrieve(self, *, ids: Sequence[int]) -> SimulatorRoutineRevisionList: ... + + @overload + def retrieve(self, *, external_ids: SequenceNotStr[str]) -> SimulatorRoutineRevisionList: ... + + def retrieve( + self, *, ids: int | Sequence[int] | None = None, external_ids: str | SequenceNotStr[str] | None = None + ) -> SimulatorRoutineRevision | SimulatorRoutineRevisionList | None: + """ + `Retrieve simulator routine revisions `_ + + Retrieve simulator routine revisions by ID or External Id. + + Args: + ids (int | Sequence[int] | None): Simulator routine revision ID or list of IDs + external_ids (str | SequenceNotStr[str] | None): Simulator routine revision External ID or list of external IDs + + Returns: + SimulatorRoutineRevision | SimulatorRoutineRevisionList | None: Requested simulator routine revision + + Examples: + Get simulator routine revision by id: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.simulators.routines.revisions.retrieve(ids=123) + + Get simulator routine revision by external id: + >>> res = client.simulators.routines.revisions.retrieve(external_ids="routine_v1") + """ + return run_sync(self.__async_client.simulators.routines.revisions.retrieve(ids=ids, external_ids=external_ids)) + + @overload + def create(self, items: Sequence[SimulatorRoutineRevisionWrite]) -> SimulatorRoutineRevisionList: ... + + @overload + def create(self, items: SimulatorRoutineRevisionWrite) -> SimulatorRoutineRevision: ... + + def create( + self, items: SimulatorRoutineRevisionWrite | Sequence[SimulatorRoutineRevisionWrite] + ) -> SimulatorRoutineRevision | SimulatorRoutineRevisionList: + """ + `Create simulator routine revisions `_ + + Args: + items (SimulatorRoutineRevisionWrite | Sequence[SimulatorRoutineRevisionWrite]): Simulator routine revisions to create. + + Returns: + SimulatorRoutineRevision | SimulatorRoutineRevisionList: Created simulator routine revision(s) + + Examples: + Create new simulator routine revisions: + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.simulators.routine_revisions import ( + ... SimulatorRoutineRevisionWrite, + ... SimulatorRoutineConfiguration, + ... SimulatorRoutineInputConstant, + ... SimulatorRoutineOutput, + ... SimulatorRoutineDataSampling, + ... SimulatorRoutineStep, + ... SimulatorRoutineStepArguments, + ... SimulatorRoutineStage, + ... SimulationValueUnitInput, + ... 
) + >>> client = CogniteClient() + >>> routine_revisions = [ + ... SimulatorRoutineRevisionWrite( + ... external_id="routine_rev_1", + ... routine_external_id="routine_1", + ... configuration=SimulatorRoutineConfiguration( + ... data_sampling=SimulatorRoutineDataSampling( + ... sampling_window=15, + ... granularity=1, + ... ), + ... inputs=[ + ... SimulatorRoutineInputConstant( + ... name="Tubing Head Pressure", + ... reference_id="THP", + ... value=124.3, + ... value_type="DOUBLE", + ... unit=SimulationValueUnitInput( + ... name="bar", + ... quantity="pressure", + ... ), + ... save_timeseries_external_id="TEST-ROUTINE-INPUT-THP", + ... ), + ... ], + ... outputs=[ + ... SimulatorRoutineOutput( + ... name="Bottom Hole Pressure", + ... reference_id="BHP", + ... unit=SimulationValueUnitInput( + ... name="bar", + ... quantity="pressure", + ... ), + ... value_type="DOUBLE", + ... save_timeseries_external_id="TEST-ROUTINE-OUTPUT-BHP", + ... ), + ... ], + ... ), + ... script=[ + ... SimulatorRoutineStage( + ... order=1, + ... description="Define simulation inputs", + ... steps=[ + ... SimulatorRoutineStep( + ... order=1, + ... step_type="Set", + ... arguments=SimulatorRoutineStepArguments( + ... { + ... "referenceId": "THP", + ... "objectName": "WELL", + ... "objectProperty": "WellHeadPressure", + ... } + ... ), + ... ), + ... ], + ... ), + ... SimulatorRoutineStage( + ... order=2, + ... description="Solve", + ... steps=[ + ... SimulatorRoutineStep( + ... order=1, + ... step_type="Command", + ... arguments=SimulatorRoutineStepArguments( + ... {"command": "Solve"} + ... ), + ... ), + ... ], + ... ), + ... SimulatorRoutineStage( + ... order=3, + ... description="Define simulation outputs", + ... steps=[ + ... SimulatorRoutineStep( + ... order=1, + ... step_type="Get", + ... arguments=SimulatorRoutineStepArguments( + ... { + ... "referenceId": "BHP", + ... "objectName": "WELL", + ... "objectProperty": "BottomHolePressure", + ... } + ... ), + ... ), + ... ], + ... ), + ... ], + ... ), + ... ] + >>> res = client.simulators.routines.revisions.create(routine_revisions) + """ + return run_sync(self.__async_client.simulators.routines.revisions.create(items=items)) + + def list( + self, + routine_external_ids: SequenceNotStr[str] | None = None, + model_external_ids: SequenceNotStr[str] | None = None, + simulator_integration_external_ids: SequenceNotStr[str] | None = None, + simulator_external_ids: SequenceNotStr[str] | None = None, + created_time: TimestampRange | None = None, + all_versions: bool = False, + include_all_fields: bool = False, + limit: int | None = None, + sort: PropertySort | None = None, + ) -> SimulatorRoutineRevisionList: + """ + `Filter simulator routine revisions `_ + + Retrieves a list of simulator routine revisions that match the given criteria. + + Args: + routine_external_ids (SequenceNotStr[str] | None): Filter on routine external ids. + model_external_ids (SequenceNotStr[str] | None): Filter on model external ids. + simulator_integration_external_ids (SequenceNotStr[str] | None): Filter on simulator integration external ids. + simulator_external_ids (SequenceNotStr[str] | None): Filter on simulator external ids. + created_time (TimestampRange | None): Filter on created time. + all_versions (bool): If all versions of the routine should be returned. Defaults to false which only returns the latest version. + include_all_fields (bool): If all fields should be included in the response. 
Defaults to false which does not include script, configuration.inputs and configuration.outputs in the response. + limit (int | None): Maximum number of simulator routine revisions to return. Defaults to return all items. + sort (PropertySort | None): The criteria to sort by. + + Returns: + SimulatorRoutineRevisionList: List of simulator routine revisions + + Examples: + List simulator routine revisions: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.simulators.routines.revisions.list(limit=10) + + List simulator routine revisions with filter: + >>> res = client.simulators.routines.revisions.list( + ... routine_external_ids=["routine_1"], + ... all_versions=True, + ... sort=PropertySort(order="asc", property="createdTime"), + ... include_all_fields=True + ... ) + """ + return run_sync( + self.__async_client.simulators.routines.revisions.list( + routine_external_ids=routine_external_ids, + model_external_ids=model_external_ids, + simulator_integration_external_ids=simulator_integration_external_ids, + simulator_external_ids=simulator_external_ids, + created_time=created_time, + all_versions=all_versions, + include_all_fields=include_all_fields, + limit=limit, + sort=sort, + ) + ) diff --git a/cognite/client/_sync_api/simulators/routines.py b/cognite/client/_sync_api/simulators/routines.py new file mode 100644 index 0000000000..b161a216ff --- /dev/null +++ b/cognite/client/_sync_api/simulators/routines.py @@ -0,0 +1,278 @@ +""" +=============================================================================== +3f63301cb49fd587302d590cf0e11794 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, Literal, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api.simulators.routine_revisions import SyncSimulatorRoutineRevisionsAPI +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.simulators.filters import PropertySort +from cognite.client.data_classes.simulators.routines import ( + SimulatorRoutine, + SimulatorRoutineList, + SimulatorRoutineWrite, +) +from cognite.client.data_classes.simulators.runs import SimulationInputOverride, SimulationRun +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncSimulatorRoutinesAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + self.revisions = SyncSimulatorRoutineRevisionsAPI(async_client) + + @overload + def __call__(self, chunk_size: int) -> Iterator[SimulatorRoutineList]: ... + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[SimulatorRoutine]: ... 
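+    # A hypothetical usage sketch for the iterator below (assumes a configured CogniteClient
+    # named `client`); passing an integer chunk_size selects the list-chunk overload:
+    #
+    #     for chunk in client.simulators.routines(chunk_size=100):
+    #         ...  # chunk is a SimulatorRoutineList with up to 100 routines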
+ + def __call__( + self, + chunk_size: int | None = None, + model_external_ids: Sequence[str] | None = None, + simulator_integration_external_ids: Sequence[str] | None = None, + limit: int | None = None, + ) -> Iterator[SimulatorRoutine | SimulatorRoutineList]: + """ + Iterate over simulator routines + + Fetches simulator routines as they are iterated over, so you keep a limited number of simulator routines in memory. + + Args: + chunk_size (int | None): Number of simulator routines to return in each chunk. Defaults to yielding one simulator routine a time. + model_external_ids (Sequence[str] | None): Filter on model external ids. + simulator_integration_external_ids (Sequence[str] | None): Filter on simulator integration external ids. + limit (int | None): Maximum number of simulator routines to return. Defaults to return all items. + + Yields: + SimulatorRoutine | SimulatorRoutineList: yields SimulatorRoutine one by one if chunk is not specified, else SimulatorRoutineList objects. + """ + yield from SyncIterator( + self.__async_client.simulators.routines( + chunk_size=chunk_size, + model_external_ids=model_external_ids, + simulator_integration_external_ids=simulator_integration_external_ids, + limit=limit, + ) + ) + + @overload + def create(self, routine: Sequence[SimulatorRoutineWrite]) -> SimulatorRoutineList: ... + + @overload + def create(self, routine: SimulatorRoutineWrite) -> SimulatorRoutine: ... + + def create( + self, routine: SimulatorRoutineWrite | Sequence[SimulatorRoutineWrite] + ) -> SimulatorRoutine | SimulatorRoutineList: + """ + `Create simulator routines `_ + + Args: + routine (SimulatorRoutineWrite | Sequence[SimulatorRoutineWrite]): Simulator routine(s) to create. + + Returns: + SimulatorRoutine | SimulatorRoutineList: Created simulator routine(s) + + Examples: + Create new simulator routines: + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.simulators.routines import SimulatorRoutineWrite + >>> client = CogniteClient() + >>> routines = [ + ... SimulatorRoutineWrite( + ... name="routine1", + ... external_id="routine_ext_id", + ... simulator_integration_external_id="integration_ext_id", + ... model_external_id="model_ext_id", + ... ), + ... SimulatorRoutineWrite( + ... name="routine2", + ... external_id="routine_ext_id_2", + ... simulator_integration_external_id="integration_ext_id_2", + ... model_external_id="model_ext_id_2", + ... ) + ... ] + >>> res = client.simulators.routines.create(routines) + """ + return run_sync(self.__async_client.simulators.routines.create(routine=routine)) + + def delete( + self, + ids: int | Sequence[int] | None = None, + external_ids: str | SequenceNotStr[str] | SequenceNotStr[str] | None = None, + ) -> None: + """ + `Delete simulator routines `_ + + Args: + ids (int | Sequence[int] | None): ids (or sequence of ids) for the routine(s) to delete. + external_ids (str | SequenceNotStr[str] | SequenceNotStr[str] | None): external ids (or sequence of external ids) for the routine(s) to delete. 
+ + Examples: + Delete simulator routines by id or external id: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.simulators.routines.delete(ids=[1,2,3], external_ids="foo") + """ + return run_sync(self.__async_client.simulators.routines.delete(ids=ids, external_ids=external_ids)) + + def list( + self, + limit: int = DEFAULT_LIMIT_READ, + model_external_ids: Sequence[str] | None = None, + simulator_integration_external_ids: Sequence[str] | None = None, + sort: PropertySort | None = None, + ) -> SimulatorRoutineList: + """ + `Filter simulator routines `_ + + Retrieves a list of simulator routines that match the given criteria. + + Args: + limit (int): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. + model_external_ids (Sequence[str] | None): Filter on model external ids. + simulator_integration_external_ids (Sequence[str] | None): Filter on simulator integration external ids. + sort (PropertySort | None): The criteria to sort by. + + Returns: + SimulatorRoutineList: List of simulator routines + + Examples: + List simulator routines: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.simulators.routines.list(limit=10) + + Iterate over simulator routines, one-by-one: + >>> for routine in client.simulators.routines(): + ... routine # do something with the simulator routine + + Specify filter and sort order: + >>> from cognite.client.data_classes.simulators.filters import PropertySort + >>> res = client.simulators.routines.list( + ... simulator_integration_external_ids=["integration_ext_id"], + ... sort=PropertySort( + ... property="createdTime", + ... order="desc" + ... ) + ... ) + """ + return run_sync( + self.__async_client.simulators.routines.list( + limit=limit, + model_external_ids=model_external_ids, + simulator_integration_external_ids=simulator_integration_external_ids, + sort=sort, + ) + ) + + @overload + def run( + self, + *, + routine_external_id: str, + inputs: Sequence[SimulationInputOverride] | None = None, + run_time: int | None = None, + queue: bool | None = None, + log_severity: Literal["Debug", "Information", "Warning", "Error"] | None = None, + wait: bool = True, + timeout: float = 60, + ) -> SimulationRun: ... + + @overload + def run( + self, + *, + routine_revision_external_id: str, + model_revision_external_id: str, + inputs: Sequence[SimulationInputOverride] | None = None, + run_time: int | None = None, + queue: bool | None = None, + log_severity: Literal["Debug", "Information", "Warning", "Error"] | None = None, + wait: bool = True, + timeout: float = 60, + ) -> SimulationRun: ... + + def run( + self, + routine_external_id: str | None = None, + routine_revision_external_id: str | None = None, + model_revision_external_id: str | None = None, + inputs: Sequence[SimulationInputOverride] | None = None, + run_time: int | None = None, + queue: bool | None = None, + log_severity: Literal["Debug", "Information", "Warning", "Error"] | None = None, + wait: bool = True, + timeout: float = 60, + ) -> SimulationRun: + """ + `Run a simulation `_ + + Run a simulation for a given simulator routine. Supports two modes: + 1. By routine external ID only + 2. 
By routine revision external ID + model revision external ID + + Args: + routine_external_id (str | None): External id of the simulator routine to run. + Cannot be specified together with routine_revision_external_id and model_revision_external_id. + routine_revision_external_id (str | None): External id of the simulator routine revision to run. + Must be specified together with model_revision_external_id. + model_revision_external_id (str | None): External id of the simulator model revision. + Must be specified together with routine_revision_external_id. + inputs (Sequence[SimulationInputOverride] | None): List of input overrides + run_time (int | None): Run time in milliseconds. Reference timestamp used for data pre-processing and data sampling. + queue (bool | None): Queue the simulation run when connector is down. + log_severity (Literal['Debug', 'Information', 'Warning', 'Error'] | None): Override the minimum severity level for the simulation run logs. If not provided, the minimum severity is read from the connector logger configuration. + wait (bool): Wait until the simulation run is finished. Defaults to True. + timeout (float): Timeout in seconds for waiting for the simulation run to finish. Defaults to 60 seconds. + + Returns: + SimulationRun: Created simulation run + + Examples: + Create new simulation run using routine external ID: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> run = client.simulators.routines.run( + ... routine_external_id="routine1", + ... log_severity="Debug" + ... ) + + Create new simulation run using routine and model revision external IDs: + >>> run = client.simulators.routines.run( + ... routine_revision_external_id="routine_revision1", + ... model_revision_external_id="model_revision1", + ... ) + """ + return run_sync( + self.__async_client.simulators.routines.run( + routine_external_id=routine_external_id, + routine_revision_external_id=routine_revision_external_id, + model_revision_external_id=model_revision_external_id, + inputs=inputs, + run_time=run_time, + queue=queue, + log_severity=log_severity, + wait=wait, + timeout=timeout, + ) + ) diff --git a/cognite/client/_sync_api/simulators/runs.py b/cognite/client/_sync_api/simulators/runs.py new file mode 100644 index 0000000000..6ecb9b22b4 --- /dev/null +++ b/cognite/client/_sync_api/simulators/runs.py @@ -0,0 +1,258 @@ +""" +=============================================================================== +e02d60ffd03fe36c05041b5bbdfd6458 +This file is auto-generated from the Async API modules, - do not edit manually! 
+=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.shared import TimestampRange +from cognite.client.data_classes.simulators.filters import SimulationRunsSort +from cognite.client.data_classes.simulators.runs import ( + SimulationRun, + SimulationRunDataList, + SimulationRunList, + SimulationRunWrite, +) +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncSimulatorRunsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, chunk_size: int) -> Iterator[SimulationRunList]: ... + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[SimulationRun]: ... + + def __call__( + self, + chunk_size: int | None = None, + limit: int | None = None, + status: str | None = None, + run_type: str | None = None, + model_external_ids: SequenceNotStr[str] | None = None, + simulator_integration_external_ids: SequenceNotStr[str] | None = None, + simulator_external_ids: SequenceNotStr[str] | None = None, + routine_external_ids: SequenceNotStr[str] | None = None, + routine_revision_external_ids: SequenceNotStr[str] | None = None, + model_revision_external_ids: SequenceNotStr[str] | None = None, + created_time: TimestampRange | None = None, + simulation_time: TimestampRange | None = None, + sort: SimulationRunsSort | None = None, + ) -> Iterator[SimulationRun | SimulationRunList]: + """ + Iterate over simulation runs + + Fetches simulation runs as they are iterated over, so you keep a limited number of simulation runs in memory. + + Args: + chunk_size (int | None): Number of simulation runs to return in each chunk. Defaults to yielding one simulation run a time. + limit (int | None): The maximum number of simulation runs to return, pass None to return all. + status (str | None): Filter by simulation run status + run_type (str | None): Filter by simulation run type + model_external_ids (SequenceNotStr[str] | None): Filter by simulator model external ids + simulator_integration_external_ids (SequenceNotStr[str] | None): Filter by simulator integration external ids + simulator_external_ids (SequenceNotStr[str] | None): Filter by simulator external ids + routine_external_ids (SequenceNotStr[str] | None): Filter by routine external ids + routine_revision_external_ids (SequenceNotStr[str] | None): Filter by routine revision external ids + model_revision_external_ids (SequenceNotStr[str] | None): Filter by model revision external ids + created_time (TimestampRange | None): Filter by created time + simulation_time (TimestampRange | None): Filter by simulation time + sort (SimulationRunsSort | None): The criteria to sort by. + + Yields: + SimulationRun | SimulationRunList: yields Simulation Run one by one if chunk is not specified, else SimulatorRunsList objects. 
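+
+        Examples:
+            Iterate over simulation runs in chunks (an illustrative sketch; assumes a configured client):
+                >>> from cognite.client import CogniteClient
+                >>> client = CogniteClient()
+                >>> for chunk in client.simulators.runs(chunk_size=100, status="success"):
+                ...     chunk  # a SimulationRunList with up to 100 runs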
+ """ + yield from SyncIterator( + self.__async_client.simulators.runs( + chunk_size=chunk_size, + limit=limit, + status=status, + run_type=run_type, + model_external_ids=model_external_ids, + simulator_integration_external_ids=simulator_integration_external_ids, + simulator_external_ids=simulator_external_ids, + routine_external_ids=routine_external_ids, + routine_revision_external_ids=routine_revision_external_ids, + model_revision_external_ids=model_revision_external_ids, + created_time=created_time, + simulation_time=simulation_time, + sort=sort, + ) + ) + + def list( + self, + limit: int | None = DEFAULT_LIMIT_READ, + status: str | None = None, + run_type: str | None = None, + model_external_ids: SequenceNotStr[str] | None = None, + simulator_integration_external_ids: SequenceNotStr[str] | None = None, + simulator_external_ids: SequenceNotStr[str] | None = None, + routine_external_ids: SequenceNotStr[str] | None = None, + routine_revision_external_ids: SequenceNotStr[str] | None = None, + model_revision_external_ids: SequenceNotStr[str] | None = None, + created_time: TimestampRange | None = None, + simulation_time: TimestampRange | None = None, + sort: SimulationRunsSort | None = None, + ) -> SimulationRunList: + """ + `Filter simulation runs `_ + + Retrieves a list of simulation runs that match the given criteria. + + Args: + limit (int | None): The maximum number of simulation runs to return, pass None to return all. + status (str | None): Filter by simulation run status + run_type (str | None): Filter by simulation run type + model_external_ids (SequenceNotStr[str] | None): Filter by simulator model external ids + simulator_integration_external_ids (SequenceNotStr[str] | None): Filter by simulator integration external ids + simulator_external_ids (SequenceNotStr[str] | None): Filter by simulator external ids + routine_external_ids (SequenceNotStr[str] | None): Filter by routine external ids + routine_revision_external_ids (SequenceNotStr[str] | None): Filter by routine revision external ids + model_revision_external_ids (SequenceNotStr[str] | None): Filter by model revision external ids + created_time (TimestampRange | None): Filter by created time + simulation_time (TimestampRange | None): Filter by simulation time + sort (SimulationRunsSort | None): The criteria to sort by. + + Returns: + SimulationRunList: List of simulation runs + + Examples: + List simulation runs: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.simulators.runs.list() + + Iterate over simulation runs, one-by-one: + >>> for run in client.simulators.runs(): + ... run # do something with the simulation run + + Filter runs by status and simulator external ids: + >>> res = client.simulators.runs.list( + ... simulator_external_ids=["PROSPER", "DWSIM"], + ... status="success" + ... ) + + Filter runs by time ranges: + >>> from cognite.client.data_classes.shared import TimestampRange + >>> res = client.simulators.runs.list( + ... created_time=TimestampRange(min=0, max=1_700_000_000_000), + ... simulation_time=TimestampRange(min=0, max=1_700_000_000_000), + ... 
) + """ + return run_sync( + self.__async_client.simulators.runs.list( + limit=limit, + status=status, + run_type=run_type, + model_external_ids=model_external_ids, + simulator_integration_external_ids=simulator_integration_external_ids, + simulator_external_ids=simulator_external_ids, + routine_external_ids=routine_external_ids, + routine_revision_external_ids=routine_revision_external_ids, + model_revision_external_ids=model_revision_external_ids, + created_time=created_time, + simulation_time=simulation_time, + sort=sort, + ) + ) + + @overload + def retrieve(self, ids: int) -> SimulationRun | None: ... + + @overload + def retrieve(self, ids: Sequence[int]) -> SimulationRunList | None: ... + + def retrieve(self, ids: int | Sequence[int]) -> SimulationRun | SimulationRunList | None: + """ + `Retrieve simulation runs by ID `_ + + Args: + ids (int | Sequence[int]): The ID(s) of the simulation run(s) to retrieve. + + Returns: + SimulationRun | SimulationRunList | None: The simulation run(s) with the given ID(s) + + Examples: + Retrieve a single simulation run by id: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> run = client.simulators.runs.retrieve(ids=2) + """ + return run_sync(self.__async_client.simulators.runs.retrieve(ids=ids)) + + @overload + def create(self, items: SimulationRunWrite) -> SimulationRun: ... + + @overload + def create(self, items: Sequence[SimulationRunWrite]) -> SimulationRunList: ... + + def create(self, items: SimulationRunWrite | Sequence[SimulationRunWrite]) -> SimulationRun | SimulationRunList: + """ + `Create simulation runs `_ + + Args: + items (SimulationRunWrite | Sequence[SimulationRunWrite]): The simulation run(s) to execute. + + Returns: + SimulationRun | SimulationRunList: Created simulation run(s) + + Examples: + Create new simulation run: + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.simulators.runs import SimulationRunWrite + >>> client = CogniteClient() + >>> run = [ + ... SimulationRunWrite( + ... routine_external_id="routine1", + ... log_severity="Debug", + ... run_type="external", + ... ), + ... ] + >>> res = client.simulators.runs.create(run) + """ + return run_sync(self.__async_client.simulators.runs.create(items=items)) + + def list_run_data(self, run_id: int) -> SimulationRunDataList: + """ + `Get simulation run data `_ + + Retrieve data associated with a simulation run by ID. + + Args: + run_id (int): Simulation run id. 
+ + Returns: + SimulationRunDataList: List of simulation run data + + Examples: + Get simulation run data by run id: + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.simulators.runs.list_run_data(run_id=12345) + + Get simulation run data directly on a simulation run object: + >>> run = client.simulators.runs.retrieve(ids=2) + >>> res = run.get_data() + """ + return run_sync(self.__async_client.simulators.runs.list_run_data(run_id=run_id)) diff --git a/cognite/client/_sync_api/synthetic_time_series.py b/cognite/client/_sync_api/synthetic_time_series.py new file mode 100644 index 0000000000..5fb0584301 --- /dev/null +++ b/cognite/client/_sync_api/synthetic_time_series.py @@ -0,0 +1,144 @@ +""" +=============================================================================== +f068713c972aa9edaff075bc232ec4b0 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Mapping, Sequence +from datetime import datetime +from typing import TYPE_CHECKING, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import Datapoints, DatapointsList, TimeSeries, TimeSeriesWrite +from cognite.client.data_classes.data_modeling.ids import NodeId +from cognite.client.utils._async_helpers import run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + import sympy + + +class SyncSyntheticDatapointsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def query( + self, + expressions: SequenceNotStr[str] | SequenceNotStr[sympy.Basic], + start: int | str | datetime, + end: int | str | datetime, + limit: int | None = None, + variables: Mapping[str | sympy.Symbol, str | NodeId | TimeSeries | TimeSeriesWrite] | None = None, + aggregate: str | None = None, + granularity: str | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + ) -> DatapointsList: ... + + @overload + def query( + self, + expressions: str | sympy.Basic, + start: int | str | datetime, + end: int | str | datetime, + limit: int | None = None, + variables: Mapping[str | sympy.Symbol, str | NodeId | TimeSeries | TimeSeriesWrite] | None = None, + aggregate: str | None = None, + granularity: str | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + ) -> Datapoints: ... + + def query( + self, + expressions: str | sympy.Basic | Sequence[str] | Sequence[sympy.Basic], + start: int | str | datetime, + end: int | str | datetime, + limit: int | None = None, + variables: Mapping[str | sympy.Symbol, str | NodeId | TimeSeries | TimeSeriesWrite] | None = None, + aggregate: str | None = None, + granularity: str | None = None, + target_unit: str | None = None, + target_unit_system: str | None = None, + ) -> Datapoints | DatapointsList: + """ + `Calculate the result of a function on time series. `_ + + Args: + expressions (str | sympy.Basic | Sequence[str] | Sequence[sympy.Basic]): Functions to be calculated. Supports both strings and sympy expressions. 
Strings can have either the API `ts{}` syntax, or contain variable names to be replaced using the `variables` parameter. + start (int | str | datetime): Inclusive start. + end (int | str | datetime): Exclusive end. + limit (int | None): Number of datapoints per expression to retrieve. + variables (Mapping[str | sympy.Symbol, str | NodeId | TimeSeries | TimeSeriesWrite] | None): An optional map of symbol replacements. + aggregate (str | None): use this aggregate when replacing entries from `variables`, does not affect time series given in the `ts{}` syntax. + granularity (str | None): use this granularity with the aggregate. + target_unit (str | None): use this target_unit when replacing entries from `variables`, does not affect time series given in the `ts{}` syntax. + target_unit_system (str | None): Same as target_unit, but with unit system (e.g. SI). Only one of target_unit and target_unit_system can be specified. + + Returns: + Datapoints | DatapointsList: A DatapointsList object containing the calculated data. + + Examples: + + Execute a synthetic time series query with an expression. Here we sum three time series plus a constant. The first is referenced by ID, + the second by external ID, and the third by instance ID: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> expression = ''' + ... 123 + ... + ts{id:123} + ... + ts{externalId:'abc'} + ... + ts{space:'my-space',externalId:'my-ts-xid'} + ... ''' + >>> dps = client.time_series.data.synthetic.query( + ... expressions=expression, + ... start="2w-ago", + ... end="now") + + You can also specify variables for an easier query syntax: + + >>> from cognite.client.data_classes.data_modeling.ids import NodeId + >>> ts = client.time_series.retrieve(id=123) + >>> variables = { + ... "A": ts, + ... "B": "my_ts_external_id", + ... "C": NodeId("my-space", "my-ts-xid"), + ... } + >>> dps = client.time_series.data.synthetic.query( + ... expressions="A+B+C", start="2w-ago", end="2w-ahead", variables=variables) + + Use sympy to build complex expressions: + + >>> from sympy import symbols, cos, sin + >>> x, y = symbols("x y") + >>> dps = client.time_series.data.synthetic.query( + ... [sin(x), y*cos(x)], + ... start="2w-ago", + ... end="now", + ... variables={x: "foo", y: "bar"}, + ... aggregate="interpolation", + ... granularity="15m", + ... target_unit="temperature:deg_c") + """ + return run_sync( + self.__async_client.time_series.data.synthetic.query( + expressions=expressions, + start=start, + end=end, + limit=limit, + variables=variables, + aggregate=aggregate, + granularity=granularity, + target_unit=target_unit, + target_unit_system=target_unit_system, + ) + ) diff --git a/cognite/client/_sync_api/three_d/__init__.py b/cognite/client/_sync_api/three_d/__init__.py new file mode 100644 index 0000000000..4ff03d25da --- /dev/null +++ b/cognite/client/_sync_api/three_d/__init__.py @@ -0,0 +1,31 @@ +""" +=============================================================================== +66e3c7c6a5b970455f6ecbbc6efaa5d2 +This file is auto-generated from the Async API modules, - do not edit manually! 
+=============================================================================== +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api.three_d.asset_mapping import Sync3DAssetMappingAPI +from cognite.client._sync_api.three_d.files import Sync3DFilesAPI +from cognite.client._sync_api.three_d.models import Sync3DModelsAPI +from cognite.client._sync_api.three_d.revisions import Sync3DRevisionsAPI +from cognite.client._sync_api_client import SyncAPIClient + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class Sync3DAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + self.models = Sync3DModelsAPI(async_client) + self.revisions = Sync3DRevisionsAPI(async_client) + self.files = Sync3DFilesAPI(async_client) + self.asset_mappings = Sync3DAssetMappingAPI(async_client) diff --git a/cognite/client/_sync_api/three_d/asset_mapping.py b/cognite/client/_sync_api/three_d/asset_mapping.py new file mode 100644 index 0000000000..0c143c2c76 --- /dev/null +++ b/cognite/client/_sync_api/three_d/asset_mapping.py @@ -0,0 +1,155 @@ +""" +=============================================================================== +6f66d1f456b805976e70894bba676c29 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import ( + BoundingBox3D, + ThreeDAssetMapping, + ThreeDAssetMappingList, + ThreeDAssetMappingWrite, +) +from cognite.client.utils._async_helpers import run_sync + + +class Sync3DAssetMappingAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def list( + self, + model_id: int, + revision_id: int, + node_id: int | None = None, + asset_id: int | None = None, + intersects_bounding_box: BoundingBox3D | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + ) -> ThreeDAssetMappingList: + """ + `List 3D node asset mappings. `_ + + Args: + model_id (int): Id of the model. + revision_id (int): Id of the revision. + node_id (int | None): List only asset mappings associated with this node. + asset_id (int | None): List only asset mappings associated with this asset. + intersects_bounding_box (BoundingBox3D | None): If given, only return asset mappings for assets whose bounding box intersects with the given bounding box. + limit (int | None): Maximum number of asset mappings to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + + Returns: + ThreeDAssetMappingList: The list of asset mappings. 
+ + Example: + + List 3d node asset mappings: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.three_d.asset_mappings.list(model_id=1, revision_id=1) + + List 3d node asset mappings for assets whose bounding box intersects with a given bounding box: + + >>> from cognite.client.data_classes import BoundingBox3D + >>> bbox = BoundingBox3D(min=[0.0, 0.0, 0.0], max=[1.0, 1.0, 1.0]) + >>> res = client.three_d.asset_mappings.list( + ... model_id=1, revision_id=1, intersects_bounding_box=bbox) + """ + return run_sync( + self.__async_client.three_d.asset_mappings.list( + model_id=model_id, + revision_id=revision_id, + node_id=node_id, + asset_id=asset_id, + intersects_bounding_box=intersects_bounding_box, + limit=limit, + ) + ) + + @overload + def create( + self, model_id: int, revision_id: int, asset_mapping: ThreeDAssetMapping | ThreeDAssetMappingWrite + ) -> ThreeDAssetMapping: ... + + @overload + def create( + self, + model_id: int, + revision_id: int, + asset_mapping: Sequence[ThreeDAssetMapping] | Sequence[ThreeDAssetMappingWrite], + ) -> ThreeDAssetMappingList: ... + + def create( + self, + model_id: int, + revision_id: int, + asset_mapping: ThreeDAssetMapping + | ThreeDAssetMappingWrite + | Sequence[ThreeDAssetMapping] + | Sequence[ThreeDAssetMappingWrite], + ) -> ThreeDAssetMapping | ThreeDAssetMappingList: + """ + `Create 3d node asset mappings. `_ + + Args: + model_id (int): Id of the model. + revision_id (int): Id of the revision. + asset_mapping (ThreeDAssetMapping | ThreeDAssetMappingWrite | Sequence[ThreeDAssetMapping] | Sequence[ThreeDAssetMappingWrite]): The asset mapping(s) to create. + + Returns: + ThreeDAssetMapping | ThreeDAssetMappingList: The created asset mapping(s). + + Example: + + Create new 3d node asset mapping: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import ThreeDAssetMappingWrite + >>> my_mapping = ThreeDAssetMappingWrite(node_id=1, asset_id=1) + >>> client = CogniteClient() + >>> res = client.three_d.asset_mappings.create(model_id=1, revision_id=1, asset_mapping=my_mapping) + """ + return run_sync( + self.__async_client.three_d.asset_mappings.create( + model_id=model_id, revision_id=revision_id, asset_mapping=asset_mapping + ) + ) + + def delete( + self, model_id: int, revision_id: int, asset_mapping: ThreeDAssetMapping | Sequence[ThreeDAssetMapping] + ) -> None: + """ + `Delete 3d node asset mappings. `_ + + Args: + model_id (int): Id of the model. + revision_id (int): Id of the revision. + asset_mapping (ThreeDAssetMapping | Sequence[ThreeDAssetMapping]): The asset mapping(s) to delete. 
+ + Example: + + Delete 3d node asset mapping: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> mapping_to_delete = client.three_d.asset_mappings.list(model_id=1, revision_id=1)[0] + >>> res = client.three_d.asset_mappings.delete(model_id=1, revision_id=1, asset_mapping=mapping_to_delete) + """ + return run_sync( + self.__async_client.three_d.asset_mappings.delete( + model_id=model_id, revision_id=revision_id, asset_mapping=asset_mapping + ) + ) diff --git a/cognite/client/_sync_api/three_d/files.py b/cognite/client/_sync_api/three_d/files.py new file mode 100644 index 0000000000..ee64cf19c9 --- /dev/null +++ b/cognite/client/_sync_api/three_d/files.py @@ -0,0 +1,40 @@ +""" +=============================================================================== +c23bdbc81be7b11616bd3dae81e7ef44 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.utils._async_helpers import run_sync + + +class Sync3DFilesAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def retrieve(self, id: int) -> bytes: + """ + `Retrieve the contents of a 3d file by id. `_ + + Args: + id (int): The id of the file to retrieve. + + Returns: + bytes: The contents of the file. + + Example: + + Retrieve the contents of a 3d file by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.three_d.files.retrieve(1) + """ + return run_sync(self.__async_client.three_d.files.retrieve(id=id)) diff --git a/cognite/client/_sync_api/three_d/models.py b/cognite/client/_sync_api/three_d/models.py new file mode 100644 index 0000000000..d1654634da --- /dev/null +++ b/cognite/client/_sync_api/three_d/models.py @@ -0,0 +1,224 @@ +""" +=============================================================================== +4284fe831151ba6fb3b13f60847c0c83 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import Literal, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import ThreeDModel, ThreeDModelList, ThreeDModelUpdate, ThreeDModelWrite +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + + +class Sync3DModelsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[ThreeDModel]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[ThreeDModelList]: ... 
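+
+    # The @overload stubs above only narrow the return type for static type checkers:
+    # chunk_size=None yields ThreeDModel items, an int chunk_size yields ThreeDModelList
+    # chunks. The implementation below delegates to the async client and re-yields the
+    # results through SyncIterator, so the generated API can be consumed from a plain
+    # `for` loop without any async/await in user code.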
+ + def __call__( + self, chunk_size: int | None = None, published: bool | None = None, limit: int | None = None + ) -> Iterator[ThreeDModel | ThreeDModelList]: + """ + Iterate over 3d models + + Fetches 3d models as they are iterated over, so you keep a limited number of 3d models in memory. + + Args: + chunk_size (int | None): Number of 3d models to return in each chunk. Defaults to yielding one model a time. + published (bool | None): Filter based on whether or not the model has published revisions. + limit (int | None): Maximum number of 3d models to return. Defaults to return all items. + + Yields: + ThreeDModel | ThreeDModelList: yields ThreeDModel one by one if chunk is not specified, else ThreeDModelList objects. + """ + yield from SyncIterator( + self.__async_client.three_d.models(chunk_size=chunk_size, published=published, limit=limit) + ) + + def retrieve(self, id: int) -> ThreeDModel | None: + """ + `Retrieve a 3d model by id `_ + + Args: + id (int): Get the model with this id. + + Returns: + ThreeDModel | None: The requested 3d model. + + Example: + + Get 3d model by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.three_d.models.retrieve(id=1) + """ + return run_sync(self.__async_client.three_d.models.retrieve(id=id)) + + def list(self, published: bool | None = None, limit: int | None = DEFAULT_LIMIT_READ) -> ThreeDModelList: + """ + `List 3d models. `_ + + Args: + published (bool | None): Filter based on whether or not the model has published revisions. + limit (int | None): Maximum number of models to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items. + + Returns: + ThreeDModelList: The list of 3d models. + + Examples: + + List 3d models: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> model_list = client.three_d.models.list() + + Iterate over 3d models, one-by-one: + + >>> for model in client.three_d.models(): + ... model # do something with the 3d model + + Iterate over chunks of 3d models to reduce memory load: + + >>> for model in client.three_d.models(chunk_size=50): + ... model # do something with the 3d model + """ + return run_sync(self.__async_client.three_d.models.list(published=published, limit=limit)) + + @overload + def create( + self, name: str | ThreeDModelWrite, data_set_id: int | None = None, metadata: dict[str, str] | None = None + ) -> ThreeDModel: ... + + @overload + def create( + self, + name: SequenceNotStr[str | ThreeDModelWrite], + data_set_id: int | None = None, + metadata: dict[str, str] | None = None, + ) -> ThreeDModelList: ... + + def create( + self, + name: str | ThreeDModelWrite | SequenceNotStr[str | ThreeDModelWrite], + data_set_id: int | None = None, + metadata: dict[str, str] | None = None, + ) -> ThreeDModel | ThreeDModelList: + """ + `Create new 3d models. `_ + + Args: + name (str | ThreeDModelWrite | SequenceNotStr[str | ThreeDModelWrite]): The name of the 3d model(s) or 3D + model object to create. If a 3D model object is provided, the other arguments are ignored. + data_set_id (int | None): The id of the dataset this 3D model belongs to. + metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. + Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. 
+ + Returns: + ThreeDModel | ThreeDModelList: The created 3d model(s). + + Example: + + Create new 3d models: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.three_d.models.create(name="My Model", data_set_id=1, metadata={"key1": "value1", "key2": "value2"}) + + Create multiple new 3D Models: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import ThreeDModelWrite + >>> client = CogniteClient() + >>> my_model = ThreeDModelWrite(name="My Model", data_set_id=1, metadata={"key1": "value1", "key2": "value2"}) + >>> my_other_model = ThreeDModelWrite(name="My Other Model", data_set_id=1, metadata={"key1": "value1", "key2": "value2"}) + >>> res = client.three_d.models.create([my_model, my_other_model]) + """ + return run_sync( + self.__async_client.three_d.models.create(name=name, data_set_id=data_set_id, metadata=metadata) + ) + + @overload + def update( + self, + item: ThreeDModel | ThreeDModelUpdate, + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> ThreeDModel: ... + + @overload + def update( + self, + item: Sequence[ThreeDModel | ThreeDModelUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> ThreeDModelList: ... + + def update( + self, + item: ThreeDModel | ThreeDModelUpdate | Sequence[ThreeDModel | ThreeDModelUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> ThreeDModel | ThreeDModelList: + """ + `Update 3d models. `_ + + Args: + item (ThreeDModel | ThreeDModelUpdate | Sequence[ThreeDModel | ThreeDModelUpdate]): ThreeDModel(s) to update + mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (ThreeDModel or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + + Returns: + ThreeDModel | ThreeDModelList: Updated ThreeDModel(s) + + Examples: + + Update 3d model that you have fetched. This will perform a full update of the model: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> model = client.three_d.models.retrieve(id=1) + >>> model.name = "New Name" + >>> res = client.three_d.models.update(model) + + Perform a partial update on a 3d model: + + >>> from cognite.client.data_classes import ThreeDModelUpdate + >>> my_update = ThreeDModelUpdate(id=1).name.set("New Name") + >>> res = client.three_d.models.update(my_update) + """ + return run_sync(self.__async_client.three_d.models.update(item=item, mode=mode)) + + def delete(self, id: int | Sequence[int]) -> None: + """ + `Delete 3d models. `_ + + Args: + id (int | Sequence[int]): ID or list of IDs to delete. 
+ + Example: + + Delete 3d model by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.three_d.models.delete(id=1) + """ + return run_sync(self.__async_client.three_d.models.delete(id=id)) diff --git a/cognite/client/_sync_api/three_d/revisions.py b/cognite/client/_sync_api/three_d/revisions.py new file mode 100644 index 0000000000..835486321f --- /dev/null +++ b/cognite/client/_sync_api/three_d/revisions.py @@ -0,0 +1,343 @@ +""" +=============================================================================== +0d794d9b77af8607e4e0d3217fae3ad8 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import Literal, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import ( + ThreeDModelRevision, + ThreeDModelRevisionList, + ThreeDModelRevisionUpdate, + ThreeDModelRevisionWrite, + ThreeDNodeList, +) +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + + +class Sync3DRevisionsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, model_id: int, chunk_size: None = None) -> Iterator[ThreeDModelRevision]: ... + + @overload + def __call__(self, model_id: int, chunk_size: int) -> Iterator[ThreeDModelRevisionList]: ... + + def __call__( + self, model_id: int, chunk_size: int | None = None, published: bool = False, limit: int | None = None + ) -> Iterator[ThreeDModelRevision | ThreeDModelRevisionList]: + """ + Iterate over 3d model revisions + + Fetches 3d model revisions as they are iterated over, so you keep a limited number of 3d model revisions in memory. + + Args: + model_id (int): Iterate over revisions for the model with this id. + chunk_size (int | None): Number of 3d model revisions to return in each chunk. Defaults to yielding one model a time. + published (bool): Filter based on whether or not the revision has been published. + limit (int | None): Maximum number of 3d model revisions to return. Defaults to return all items. + + Yields: + ThreeDModelRevision | ThreeDModelRevisionList: yields ThreeDModelRevision one by one if chunk is not specified, else ThreeDModelRevisionList objects. + """ + yield from SyncIterator( + self.__async_client.three_d.revisions( + model_id=model_id, chunk_size=chunk_size, published=published, limit=limit + ) + ) + + def retrieve(self, model_id: int, id: int) -> ThreeDModelRevision | None: + """ + `Retrieve a 3d model revision by id `_ + + Args: + model_id (int): Get the revision under the model with this id. + id (int): Get the model revision with this id. + + Returns: + ThreeDModelRevision | None: The requested 3d model revision. 
+ + Example: + + Retrieve 3d model revision by model id and revision id:: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.three_d.revisions.retrieve(model_id=1, id=1) + """ + return run_sync(self.__async_client.three_d.revisions.retrieve(model_id=model_id, id=id)) + + @overload + def create( + self, model_id: int, revision: ThreeDModelRevision | ThreeDModelRevisionWrite + ) -> ThreeDModelRevision: ... + + @overload + def create( + self, model_id: int, revision: Sequence[ThreeDModelRevision] | Sequence[ThreeDModelRevisionWrite] + ) -> ThreeDModelRevisionList: ... + + def create( + self, + model_id: int, + revision: ThreeDModelRevision + | ThreeDModelRevisionWrite + | Sequence[ThreeDModelRevision] + | Sequence[ThreeDModelRevisionWrite], + ) -> ThreeDModelRevision | ThreeDModelRevisionList: + """ + `Create a revisions for a specified 3d model. `_ + + Args: + model_id (int): Create revisions for this model. + revision (ThreeDModelRevision | ThreeDModelRevisionWrite | Sequence[ThreeDModelRevision] | Sequence[ThreeDModelRevisionWrite]): The revision(s) to create. + + Returns: + ThreeDModelRevision | ThreeDModelRevisionList: The created revision(s) + + Example: + + Create 3d model revision:: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import ThreeDModelRevisionWrite + >>> client = CogniteClient() + >>> my_revision = ThreeDModelRevisionWrite(file_id=1) + >>> res = client.three_d.revisions.create(model_id=1, revision=my_revision) + """ + return run_sync(self.__async_client.three_d.revisions.create(model_id=model_id, revision=revision)) + + def list( + self, model_id: int, published: bool = False, limit: int | None = DEFAULT_LIMIT_READ + ) -> ThreeDModelRevisionList: + """ + `List 3d model revisions. `_ + + Args: + model_id (int): List revisions under the model with this id. + published (bool): Filter based on whether or not the revision is published. + limit (int | None): Maximum number of models to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items. + + Returns: + ThreeDModelRevisionList: The list of 3d model revisions. + + Example: + + List 3d model revisions:: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.three_d.revisions.list(model_id=1, published=True, limit=100) + """ + return run_sync(self.__async_client.three_d.revisions.list(model_id=model_id, published=published, limit=limit)) + + def update( + self, + model_id: int, + item: ThreeDModelRevision + | ThreeDModelRevisionUpdate + | Sequence[ThreeDModelRevision | ThreeDModelRevisionUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> ThreeDModelRevision | ThreeDModelRevisionList: + """ + `Update 3d model revisions. `_ + + Args: + model_id (int): Update the revision under the model with this id. + item (ThreeDModelRevision | ThreeDModelRevisionUpdate | Sequence[ThreeDModelRevision | ThreeDModelRevisionUpdate]): ThreeDModelRevision(s) to update + mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (ThreeDModelRevision or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). 
Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + + Returns: + ThreeDModelRevision | ThreeDModelRevisionList: Updated ThreeDModelRevision(s) + + Examples: + + Update a revision that you have fetched. This will perform a full update of the revision: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> revision = client.three_d.revisions.retrieve(model_id=1, id=1) + >>> revision.status = "New Status" + >>> res = client.three_d.revisions.update(model_id=1, item=revision) + + Perform a partial update on a revision, updating the published property and adding a new field to metadata: + + >>> from cognite.client.data_classes import ThreeDModelRevisionUpdate + >>> my_update = ThreeDModelRevisionUpdate(id=1).published.set(False).metadata.add({"key": "value"}) + >>> res = client.three_d.revisions.update(model_id=1, item=my_update) + """ + return run_sync(self.__async_client.three_d.revisions.update(model_id=model_id, item=item, mode=mode)) + + def delete(self, model_id: int, id: int | Sequence[int]) -> None: + """ + `Delete 3d model revisions. `_ + + Args: + model_id (int): Delete the revision under the model with this id. + id (int | Sequence[int]): ID or list of IDs to delete. + + Example: + + Delete 3d model revision by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.three_d.revisions.delete(model_id=1, id=1) + """ + return run_sync(self.__async_client.three_d.revisions.delete(model_id=model_id, id=id)) + + def update_thumbnail(self, model_id: int, revision_id: int, file_id: int) -> None: + """ + `Update a revision thumbnail. `_ + + Args: + model_id (int): Id of the model. + revision_id (int): Id of the revision. + file_id (int): Id of the thumbnail file in the Files API. + + Example: + + Update revision thumbnail:: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.three_d.revisions.update_thumbnail(model_id=1, revision_id=1, file_id=1) + """ + return run_sync( + self.__async_client.three_d.revisions.update_thumbnail( + model_id=model_id, revision_id=revision_id, file_id=file_id + ) + ) + + def list_nodes( + self, + model_id: int, + revision_id: int, + node_id: int | None = None, + depth: int | None = None, + sort_by_node_id: bool = False, + partitions: int | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + ) -> ThreeDNodeList: + """ + `Retrieves a list of nodes from the hierarchy in the 3D Model. `_ + + You can also request a specific subtree with the 'nodeId' query parameter and limit the depth of + the resulting subtree with the 'depth' query parameter. + + Args: + model_id (int): Id of the model. + revision_id (int): Id of the revision. + node_id (int | None): ID of the root node of the subtree you request (default is the root node). + depth (int | None): Get sub nodes up to this many levels below the specified node. Depth 0 is the root node. + sort_by_node_id (bool): Returns the nodes in `nodeId` order. 
+ partitions (int | None): The result is retrieved in this many parts in parallel. Requires `sort_by_node_id` to be set to `true`. + limit (int | None): Maximum number of nodes to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + + Returns: + ThreeDNodeList: The list of 3d nodes. + + Example: + + List nodes from the hierarchy in the 3d model: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.three_d.revisions.list_nodes(model_id=1, revision_id=1, limit=10) + """ + return run_sync( + self.__async_client.three_d.revisions.list_nodes( + model_id=model_id, + revision_id=revision_id, + node_id=node_id, + depth=depth, + sort_by_node_id=sort_by_node_id, + partitions=partitions, + limit=limit, + ) + ) + + def filter_nodes( + self, + model_id: int, + revision_id: int, + properties: dict[str, dict[str, SequenceNotStr[str]]] | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + partitions: int | None = None, + ) -> ThreeDNodeList: + """ + `List nodes in a revision, filtered by node property values. `_ + + Args: + model_id (int): Id of the model. + revision_id (int): Id of the revision. + properties (dict[str, dict[str, SequenceNotStr[str]]] | None): Properties for filtering. The object contains one or more category. Each category references one or more properties. Each property is associated with a list of values. For a node to satisfy the filter, it must, for each category/property in the filter, contain the category+property combination with a value that is contained within the corresponding list in the filter. + limit (int | None): Maximum number of nodes to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + partitions (int | None): The result is retrieved in this many parts in parallel. Requires `sort_by_node_id` to be set to `true`. + + Returns: + ThreeDNodeList: The list of 3d nodes. + + Example: + + Filter nodes from the hierarchy in the 3d model that have one of the values "AB76", "AB77" or "AB78" for property PDMS/Area AND that also have one of the values "PIPE", "BEND" or "PIPESUP" for the property PDMS/Type. + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.three_d.revisions.filter_nodes(model_id=1, revision_id=1, properties={ "PDMS": { "Area": ["AB76", "AB77", "AB78"], "Type": ["PIPE", "BEND", "PIPESUP"] } }, limit=10) + """ + return run_sync( + self.__async_client.three_d.revisions.filter_nodes( + model_id=model_id, revision_id=revision_id, properties=properties, limit=limit, partitions=partitions + ) + ) + + def list_ancestor_nodes( + self, model_id: int, revision_id: int, node_id: int | None = None, limit: int | None = DEFAULT_LIMIT_READ + ) -> ThreeDNodeList: + """ + `Retrieves a list of ancestor nodes of a given node, including itself, in the hierarchy of the 3D model `_ + + Args: + model_id (int): Id of the model. + revision_id (int): Id of the revision. + node_id (int | None): ID of the node to get the ancestors of. + limit (int | None): Maximum number of nodes to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + + Returns: + ThreeDNodeList: The list of 3d nodes. 
+ + Example: + + Get a list of ancestor nodes of a given node: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.three_d.revisions.list_ancestor_nodes(model_id=1, revision_id=1, node_id=5, limit=10) + """ + return run_sync( + self.__async_client.three_d.revisions.list_ancestor_nodes( + model_id=model_id, revision_id=revision_id, node_id=node_id, limit=limit + ) + ) diff --git a/cognite/client/_sync_api/time_series.py b/cognite/client/_sync_api/time_series.py new file mode 100644 index 0000000000..74ce759169 --- /dev/null +++ b/cognite/client/_sync_api/time_series.py @@ -0,0 +1,726 @@ +""" +=============================================================================== +91e6f407c342c348165197e67f08753a +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, Any, Literal, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._api.time_series import SortSpec +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api.datapoints import SyncDatapointsAPI +from cognite.client._sync_api.datapoints_subscriptions import SyncDatapointsSubscriptionAPI +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import TimeSeries, TimeSeriesFilter, TimeSeriesList, TimeSeriesUpdate +from cognite.client.data_classes.aggregations import AggregationFilter, UniqueResultList +from cognite.client.data_classes.data_modeling import NodeId +from cognite.client.data_classes.filters import Filter +from cognite.client.data_classes.time_series import ( + TimeSeriesProperty, + TimeSeriesWrite, +) +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncTimeSeriesAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + self.data = SyncDatapointsAPI(async_client) + self.subscriptions = SyncDatapointsSubscriptionAPI(async_client) + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[TimeSeries]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[TimeSeriesList]: ... 
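+
+    # Same pattern as the other generated iterator APIs: `for ts in client.time_series()`
+    # yields single TimeSeries objects, while `client.time_series(chunk_size=2500)` yields
+    # TimeSeriesList chunks. The implementation below forwards every filter argument
+    # unchanged to the async client's __call__.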
+
+    def __call__(
+        self,
+        chunk_size: int | None = None,
+        name: str | None = None,
+        unit: str | None = None,
+        unit_external_id: str | None = None,
+        unit_quantity: str | None = None,
+        is_string: bool | None = None,
+        is_step: bool | None = None,
+        asset_ids: Sequence[int] | None = None,
+        asset_external_ids: SequenceNotStr[str] | None = None,
+        asset_subtree_ids: int | Sequence[int] | None = None,
+        asset_subtree_external_ids: str | SequenceNotStr[str] | None = None,
+        data_set_ids: int | Sequence[int] | None = None,
+        data_set_external_ids: str | SequenceNotStr[str] | None = None,
+        metadata: dict[str, Any] | None = None,
+        external_id_prefix: str | None = None,
+        created_time: dict[str, Any] | None = None,
+        last_updated_time: dict[str, Any] | None = None,
+        limit: int | None = None,
+        advanced_filter: Filter | dict[str, Any] | None = None,
+        sort: SortSpec | list[SortSpec] | None = None,
+    ) -> Iterator[TimeSeries | TimeSeriesList]:
+        """
+        Iterate over time series
+
+        Fetches time series as they are iterated over, so you keep a limited number of objects in memory.
+
+        Args:
+            chunk_size (int | None): Number of time series to return in each chunk. Defaults to yielding one time series at a time.
+            name (str | None): Name of the time series. Often referred to as tag.
+            unit (str | None): Unit of the time series.
+            unit_external_id (str | None): Filter on unit external ID.
+            unit_quantity (str | None): Filter on unit quantity.
+            is_string (bool | None): Whether the time series is a string time series.
+            is_step (bool | None): Whether the time series is a step (piecewise constant) time series.
+            asset_ids (Sequence[int] | None): List time series related to these assets.
+            asset_external_ids (SequenceNotStr[str] | None): List time series related to these assets.
+            asset_subtree_ids (int | Sequence[int] | None): Only include time series that are related to an asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+            asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include time series that are related to an asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+            data_set_ids (int | Sequence[int] | None): Return only time series in the specified data set(s) with this id / these ids.
+            data_set_external_ids (str | SequenceNotStr[str] | None): Return only time series in the specified data set(s) with this external id / these external ids.
+            metadata (dict[str, Any] | None): Custom, application specific metadata. String key -> String value
+            external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID.
+            created_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+            last_updated_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+            limit (int | None): Maximum number of time series to return. Defaults to return all items.
+            advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not.
+            sort (SortSpec | list[SortSpec] | None): The criteria to sort by.
Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + + Yields: + TimeSeries | TimeSeriesList: yields TimeSeries one by one if chunk_size is not specified, else TimeSeriesList objects. + """ + yield from SyncIterator( + self.__async_client.time_series( + chunk_size=chunk_size, + name=name, + unit=unit, + unit_external_id=unit_external_id, + unit_quantity=unit_quantity, + is_string=is_string, + is_step=is_step, + asset_ids=asset_ids, + asset_external_ids=asset_external_ids, + asset_subtree_ids=asset_subtree_ids, + asset_subtree_external_ids=asset_subtree_external_ids, + data_set_ids=data_set_ids, + data_set_external_ids=data_set_external_ids, + metadata=metadata, + external_id_prefix=external_id_prefix, + created_time=created_time, + last_updated_time=last_updated_time, + limit=limit, + advanced_filter=advanced_filter, + sort=sort, + ) + ) + + def retrieve( + self, id: int | None = None, external_id: str | None = None, instance_id: NodeId | None = None + ) -> TimeSeries | None: + """ + `Retrieve a single time series by id. `_ + + Args: + id (int | None): ID + external_id (str | None): External ID + instance_id (NodeId | None): Instance ID + + Returns: + TimeSeries | None: Requested time series or None if it does not exist. + + Examples: + + Get time series by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.time_series.retrieve(id=1) + + Get time series by external id: + + >>> res = client.time_series.retrieve(external_id="1") + """ + return run_sync( + self.__async_client.time_series.retrieve(id=id, external_id=external_id, instance_id=instance_id) + ) + + def retrieve_multiple( + self, + ids: Sequence[int] | None = None, + external_ids: SequenceNotStr[str] | None = None, + instance_ids: Sequence[NodeId] | None = None, + ignore_unknown_ids: bool = False, + ) -> TimeSeriesList: + """ + `Retrieve multiple time series by id. `_ + + Args: + ids (Sequence[int] | None): IDs + external_ids (SequenceNotStr[str] | None): External IDs + instance_ids (Sequence[NodeId] | None): Instance IDs + ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + + Returns: + TimeSeriesList: The requested time series. + + Examples: + + Get time series by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.time_series.retrieve_multiple(ids=[1, 2, 3]) + + Get time series by external id: + + >>> res = client.time_series.retrieve_multiple(external_ids=["abc", "def"]) + """ + return run_sync( + self.__async_client.time_series.retrieve_multiple( + ids=ids, external_ids=external_ids, instance_ids=instance_ids, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + def aggregate_count( + self, + advanced_filter: Filter | dict[str, Any] | None = None, + filter: TimeSeriesFilter | dict[str, Any] | None = None, + ) -> int: + """ + `Count of time series matching the specified filters and search. `_ + + Args: + advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the time series to count. + filter (TimeSeriesFilter | dict[str, Any] | None): The filter to narrow down time series to count requiring exact match. + + Returns: + int: The number of time series matching the specified filters and search. 
+ + Examples: + + Count the number of time series in your CDF project: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> count = client.time_series.aggregate_count() + + Count the number of numeric time series in your CDF project: + + >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes.time_series import TimeSeriesProperty + >>> is_numeric = filters.Equals(TimeSeriesProperty.is_string, False) + >>> count = client.time_series.aggregate_count(advanced_filter=is_numeric) + """ + return run_sync(self.__async_client.time_series.aggregate_count(advanced_filter=advanced_filter, filter=filter)) + + def aggregate_cardinality_values( + self, + property: TimeSeriesProperty | str | list[str], + advanced_filter: Filter | dict[str, Any] | None = None, + aggregate_filter: AggregationFilter | dict[str, Any] | None = None, + filter: TimeSeriesFilter | dict[str, Any] | None = None, + ) -> int: + """ + `Find approximate property count for time series. `_ + + Args: + property (TimeSeriesProperty | str | list[str]): The property to count the cardinality of. + advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the time series to count cardinality. + aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. + filter (TimeSeriesFilter | dict[str, Any] | None): The filter to narrow down the time series to count requiring exact match. + Returns: + int: The number of properties matching the specified filters and search. + + Examples: + + Count the number of different units used for time series in your CDF project: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.time_series import TimeSeriesProperty + >>> client = CogniteClient() + >>> unit_count = client.time_series.aggregate_cardinality_values(TimeSeriesProperty.unit) + + Count the number of timezones (metadata key) for time series with the word "critical" in the description + in your CDF project, but exclude timezones from america: + + >>> from cognite.client.data_classes import filters, aggregations as aggs + >>> from cognite.client.data_classes.time_series import TimeSeriesProperty + >>> not_america = aggs.Not(aggs.Prefix("america")) + >>> is_critical = filters.Search(TimeSeriesProperty.description, "critical") + >>> timezone_count = client.time_series.aggregate_cardinality_values( + ... TimeSeriesProperty.metadata_key("timezone"), + ... advanced_filter=is_critical, + ... aggregate_filter=not_america) + """ + return run_sync( + self.__async_client.time_series.aggregate_cardinality_values( + property=property, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter + ) + ) + + def aggregate_cardinality_properties( + self, + path: TimeSeriesProperty | str | list[str], + advanced_filter: Filter | dict[str, Any] | None = None, + aggregate_filter: AggregationFilter | dict[str, Any] | None = None, + filter: TimeSeriesFilter | dict[str, Any] | None = None, + ) -> int: + """ + `Find approximate paths count for time series. `_ + + Args: + path (TimeSeriesProperty | str | list[str]): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). + advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the time series to count cardinality. 
+ aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. + filter (TimeSeriesFilter | dict[str, Any] | None): The filter to narrow down the time series to count requiring exact match. + Returns: + int: The number of properties matching the specified filters and search. + + Examples: + + Count the number of metadata keys in your CDF project: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.time_series import TimeSeriesProperty + >>> client = CogniteClient() + >>> key_count = client.time_series.aggregate_cardinality_properties(TimeSeriesProperty.metadata) + """ + return run_sync( + self.__async_client.time_series.aggregate_cardinality_properties( + path=path, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter + ) + ) + + def aggregate_unique_values( + self, + property: TimeSeriesProperty | str | list[str], + advanced_filter: Filter | dict[str, Any] | None = None, + aggregate_filter: AggregationFilter | dict[str, Any] | None = None, + filter: TimeSeriesFilter | dict[str, Any] | None = None, + ) -> UniqueResultList: + """ + `Get unique properties with counts for time series. `_ + + Args: + property (TimeSeriesProperty | str | list[str]): The property to group by. + advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the time series to count cardinality. + aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. + filter (TimeSeriesFilter | dict[str, Any] | None): The filter to narrow down the time series to count requiring exact match. + + Returns: + UniqueResultList: List of unique values of time series matching the specified filters and search. + + Examples: + + Get the timezones (metadata key) with count for your time series in your CDF project: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.time_series import TimeSeriesProperty + >>> client = CogniteClient() + >>> result = client.time_series.aggregate_unique_values(TimeSeriesProperty.metadata_key("timezone")) + >>> print(result.unique) + + Get the different units with count used for time series created after 2020-01-01 in your CDF project: + + >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes.time_series import TimeSeriesProperty + >>> from cognite.client.utils import timestamp_to_ms + >>> from datetime import datetime + >>> created_after_2020 = filters.Range(TimeSeriesProperty.created_time, gte=timestamp_to_ms(datetime(2020, 1, 1))) + >>> result = client.time_series.aggregate_unique_values(TimeSeriesProperty.unit, advanced_filter=created_after_2020) + >>> print(result.unique) + + Get the different units with count for time series updated after 2020-01-01 in your CDF project, but exclude all units that + start with "test": + + >>> from cognite.client.data_classes.time_series import TimeSeriesProperty + >>> from cognite.client.data_classes import aggregations as aggs, filters + >>> not_test = aggs.Not(aggs.Prefix("test")) + >>> created_after_2020 = filters.Range(TimeSeriesProperty.last_updated_time, gte=timestamp_to_ms(datetime(2020, 1, 1))) + >>> result = client.time_series.aggregate_unique_values(TimeSeriesProperty.unit, advanced_filter=created_after_2020, aggregate_filter=not_test) + >>> print(result.unique) + """ + return run_sync( + self.__async_client.time_series.aggregate_unique_values( + property=property, advanced_filter=advanced_filter, 
aggregate_filter=aggregate_filter, filter=filter + ) + ) + + def aggregate_unique_properties( + self, + path: TimeSeriesProperty | str | list[str], + advanced_filter: Filter | dict[str, Any] | None = None, + aggregate_filter: AggregationFilter | dict[str, Any] | None = None, + filter: TimeSeriesFilter | dict[str, Any] | None = None, + ) -> UniqueResultList: + """ + `Get unique paths with counts for time series. `_ + + Args: + path (TimeSeriesProperty | str | list[str]): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). + advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the time series to count cardinality. + aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. + filter (TimeSeriesFilter | dict[str, Any] | None): The filter to narrow down the time series to count requiring exact match. + + Returns: + UniqueResultList: List of unique values of time series matching the specified filters and search. + + Examples: + + Get the metadata keys with count for your time series in your CDF project: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.time_series import TimeSeriesProperty + >>> client = CogniteClient() + >>> result = client.time_series.aggregate_unique_values(TimeSeriesProperty.metadata) + """ + return run_sync( + self.__async_client.time_series.aggregate_unique_properties( + path=path, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter + ) + ) + + @overload + def create(self, time_series: Sequence[TimeSeries] | Sequence[TimeSeriesWrite]) -> TimeSeriesList: ... + + @overload + def create(self, time_series: TimeSeries | TimeSeriesWrite) -> TimeSeries: ... + + def create( + self, time_series: TimeSeries | TimeSeriesWrite | Sequence[TimeSeries] | Sequence[TimeSeriesWrite] + ) -> TimeSeries | TimeSeriesList: + """ + `Create one or more time series. `_ + + Args: + time_series (TimeSeries | TimeSeriesWrite | Sequence[TimeSeries] | Sequence[TimeSeriesWrite]): TimeSeries or list of TimeSeries to create. + + Returns: + TimeSeries | TimeSeriesList: The created time series. + + Examples: + + Create a new time series: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import TimeSeriesWrite + >>> client = CogniteClient() + >>> ts = client.time_series.create(TimeSeriesWrite(name="my_ts", data_set_id=123, external_id="foo")) + """ + return run_sync(self.__async_client.time_series.create(time_series=time_series)) + + def delete( + self, + id: int | Sequence[int] | None = None, + external_id: str | SequenceNotStr[str] | None = None, + ignore_unknown_ids: bool = False, + ) -> None: + """ + `Delete one or more time series. `_ + + Args: + id (int | Sequence[int] | None): Id or list of ids + external_id (str | SequenceNotStr[str] | None): External ID or list of external ids + ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. 
+ + Examples: + + Delete time series by id or external id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.time_series.delete(id=[1,2,3], external_id="3") + """ + return run_sync( + self.__async_client.time_series.delete( + id=id, external_id=external_id, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + @overload + def update( + self, + item: Sequence[TimeSeries | TimeSeriesWrite | TimeSeriesUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> TimeSeriesList: ... + + @overload + def update( + self, + item: TimeSeries | TimeSeriesWrite | TimeSeriesUpdate, + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> TimeSeries: ... + + def update( + self, + item: TimeSeries + | TimeSeriesWrite + | TimeSeriesUpdate + | Sequence[TimeSeries | TimeSeriesWrite | TimeSeriesUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> TimeSeries | TimeSeriesList: + """ + `Update one or more time series. `_ + + Args: + item (TimeSeries | TimeSeriesWrite | TimeSeriesUpdate | Sequence[TimeSeries | TimeSeriesWrite | TimeSeriesUpdate]): Time series to update + mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (TimeSeries or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + + Returns: + TimeSeries | TimeSeriesList: Updated time series. + + Examples: + + Update a time series that you have fetched. This will perform a full update of the time series: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.time_series.retrieve(id=1) + >>> res.description = "New description" + >>> res = client.time_series.update(res) + + Perform a partial update on a time series, updating the description and adding a new field to metadata: + + >>> from cognite.client.data_classes import TimeSeriesUpdate + >>> my_update = TimeSeriesUpdate(id=1).description.set("New description").metadata.add({"key": "value"}) + >>> res = client.time_series.update(my_update) + + Perform a partial update on a time series by instance id: + + >>> from cognite.client.data_classes import TimeSeriesUpdate + >>> from cognite.client.data_classes.data_modeling import NodeId + + >>> my_update = ( + ... TimeSeriesUpdate(instance_id=NodeId("test", "hello")) + ... .external_id.set("test:hello") + ... .metadata.add({"test": "hello"}) + ... ) + >>> client.time_series.update(my_update) + """ + return run_sync(self.__async_client.time_series.update(item=item, mode=mode)) + + @overload + def upsert( + self, item: Sequence[TimeSeries | TimeSeriesWrite], mode: Literal["patch", "replace"] = "patch" + ) -> TimeSeriesList: ... + + @overload + def upsert(self, item: TimeSeries | TimeSeriesWrite, mode: Literal["patch", "replace"] = "patch") -> TimeSeries: ... 
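+
+    # The body below does no work of its own: it forwards to the async upsert and blocks
+    # on the returned coroutine via run_sync() until the result is available. The actual
+    # upsert logic (update first, then create whatever did not exist) lives in the async API.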
+ + def upsert( + self, + item: TimeSeries | TimeSeriesWrite | Sequence[TimeSeries | TimeSeriesWrite], + mode: Literal["patch", "replace"] = "patch", + ) -> TimeSeries | TimeSeriesList: + """ + Upsert time series, i.e., update if it exists, and create if it does not exist. + Note this is a convenience method that handles the upserting for you by first calling update on all items, + and if any of them fail because they do not exist, it will create them instead. + + For more details, see :ref:`appendix-upsert`. + + Args: + item (TimeSeries | TimeSeriesWrite | Sequence[TimeSeries | TimeSeriesWrite]): TimeSeries or list of TimeSeries to upsert. + mode (Literal['patch', 'replace']): Whether to patch or replace in the case the time series are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. + + Returns: + TimeSeries | TimeSeriesList: The upserted time series(s). + + Examples: + + Upsert for TimeSeries: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import TimeSeriesWrite + >>> client = CogniteClient() + >>> existing_time_series = client.time_series.retrieve(id=1) + >>> existing_time_series.description = "New description" + >>> new_time_series = TimeSeriesWrite(external_id="new_timeSeries", description="New timeSeries") + >>> res = client.time_series.upsert([existing_time_series, new_time_series], mode="replace") + """ + return run_sync(self.__async_client.time_series.upsert(item=item, mode=mode)) + + def search( + self, + name: str | None = None, + description: str | None = None, + query: str | None = None, + filter: TimeSeriesFilter | dict[str, Any] | None = None, + limit: int = DEFAULT_LIMIT_READ, + ) -> TimeSeriesList: + """ + `Search for time series. `_ + Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required. + + Args: + name (str | None): Prefix and fuzzy search on name. + description (str | None): Prefix and fuzzy search on description. + query (str | None): Search on name and description using wildcard search on each of the words (separated by spaces). Retrieves results where at least one word must match. Example: 'some other' + filter (TimeSeriesFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields. + limit (int): Max number of results to return. + + Returns: + TimeSeriesList: List of requested time series. 
+ + Examples: + + Search for a time series: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.time_series.search(name="some name") + + Search for all time series connected to asset with id 123: + + >>> res = client.time_series.search(filter={"asset_ids":[123]}) + """ + return run_sync( + self.__async_client.time_series.search( + name=name, description=description, query=query, filter=filter, limit=limit + ) + ) + + def list( + self, + name: str | None = None, + unit: str | None = None, + unit_external_id: str | None = None, + unit_quantity: str | None = None, + is_string: bool | None = None, + is_step: bool | None = None, + asset_ids: Sequence[int] | None = None, + asset_external_ids: SequenceNotStr[str] | None = None, + asset_subtree_ids: int | Sequence[int] | None = None, + asset_subtree_external_ids: str | SequenceNotStr[str] | None = None, + data_set_ids: int | Sequence[int] | None = None, + data_set_external_ids: str | SequenceNotStr[str] | None = None, + metadata: dict[str, Any] | None = None, + external_id_prefix: str | None = None, + created_time: dict[str, Any] | None = None, + last_updated_time: dict[str, Any] | None = None, + partitions: int | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + advanced_filter: Filter | dict[str, Any] | None = None, + sort: SortSpec | list[SortSpec] | TimeSeriesProperty | None = None, + ) -> TimeSeriesList: + """ + `List time series `_ + + Args: + name (str | None): Name of the time series. Often referred to as tag. + unit (str | None): Unit of the time series. + unit_external_id (str | None): Filter on unit external ID. + unit_quantity (str | None): Filter on unit quantity. + is_string (bool | None): Whether the time series is a string time series. + is_step (bool | None): Whether the time series is a step (piecewise constant) time series. + asset_ids (Sequence[int] | None): List time series related to these assets. + asset_external_ids (SequenceNotStr[str] | None): List time series related to these assets. + asset_subtree_ids (int | Sequence[int] | None): Only include time series that are related to an asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include time series that are related to an asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids (int | Sequence[int] | None): Return only time series in the specified data set(s) with this id / these ids. + data_set_external_ids (str | SequenceNotStr[str] | None): Return only time series in the specified data set(s) with this external id / these external ids. + metadata (dict[str, Any] | None): Custom, application specific metadata. String key -> String value + external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. + created_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). 
+ limit (int | None): Maximum number of time series to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage. + sort (SortSpec | list[SortSpec] | TimeSeriesProperty | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + + Returns: + TimeSeriesList: The requested time series. + + .. note:: + When using `partitions`, there are few considerations to keep in mind: + * `limit` has to be set to `None` (or `-1`). + * API may reject requests if you specify more than 10 partitions. When Cognite enforces this behavior, the requests result in a 400 Bad Request status. + * Partitions are done independently of sorting: there's no guarantee of the sort order between elements from different partitions. For this reason providing a `sort` parameter when using `partitions` is not allowed. + + Examples: + + List time series: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.time_series.list(limit=5) + + Iterate over time series, one-by-one: + + >>> for ts in client.time_series(): + ... ts # do something with the time series + + Iterate over chunks of time series to reduce memory load: + + >>> for ts_list in client.time_series(chunk_size=2500): + ... ts_list # do something with the time series + + Using advanced filter, find all time series that have a metadata key 'timezone' starting with 'Europe', + and sort by external id ascending: + + >>> from cognite.client.data_classes import filters + >>> in_timezone = filters.Prefix(["metadata", "timezone"], "Europe") + >>> res = client.time_series.list(advanced_filter=in_timezone, sort=("external_id", "asc")) + + Note that you can check the API documentation above to see which properties you can filter on + with which filters. + + To make it easier to avoid spelling mistakes and easier to look up available properties + for filtering and sorting, you can also use the `TimeSeriesProperty` and `SortableTimeSeriesProperty` Enums. + + >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes.time_series import TimeSeriesProperty, SortableTimeSeriesProperty + >>> in_timezone = filters.Prefix(TimeSeriesProperty.metadata_key("timezone"), "Europe") + >>> res = client.time_series.list( + ... advanced_filter=in_timezone, + ... sort=(SortableTimeSeriesProperty.external_id, "asc")) + + Combine filter and advanced filter: + + >>> from cognite.client.data_classes import filters + >>> not_instrument_lvl5 = filters.And( + ... filters.ContainsAny("labels", ["Level5"]), + ... filters.Not(filters.ContainsAny("labels", ["Instrument"])) + ... 
) + >>> res = client.time_series.list(asset_subtree_ids=[123456], advanced_filter=not_instrument_lvl5) + """ + return run_sync( + self.__async_client.time_series.list( + name=name, + unit=unit, + unit_external_id=unit_external_id, + unit_quantity=unit_quantity, + is_string=is_string, + is_step=is_step, + asset_ids=asset_ids, + asset_external_ids=asset_external_ids, + asset_subtree_ids=asset_subtree_ids, + asset_subtree_external_ids=asset_subtree_external_ids, + data_set_ids=data_set_ids, + data_set_external_ids=data_set_external_ids, + metadata=metadata, + external_id_prefix=external_id_prefix, + created_time=created_time, + last_updated_time=last_updated_time, + partitions=partitions, + limit=limit, + advanced_filter=advanced_filter, + sort=sort, + ) + ) diff --git a/cognite/client/_sync_api/transformations/__init__.py b/cognite/client/_sync_api/transformations/__init__.py new file mode 100644 index 0000000000..3e596de50a --- /dev/null +++ b/cognite/client/_sync_api/transformations/__init__.py @@ -0,0 +1,535 @@ +""" +=============================================================================== +8a906daccdda7758e212ecd40d2ec2c4 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, Any, Literal, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api.transformations.jobs import SyncTransformationJobsAPI +from cognite.client._sync_api.transformations.notifications import SyncTransformationNotificationsAPI +from cognite.client._sync_api.transformations.schedules import SyncTransformationSchedulesAPI +from cognite.client._sync_api.transformations.schema import SyncTransformationSchemaAPI +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import Transformation, TransformationJob, TransformationList +from cognite.client.data_classes.shared import TimestampRange +from cognite.client.data_classes.transformations import ( + TagsFilter, + TransformationPreviewResult, + TransformationUpdate, + TransformationWrite, +) +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncTransformationsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + self.jobs = SyncTransformationJobsAPI(async_client) + self.schedules = SyncTransformationSchedulesAPI(async_client) + self.schema = SyncTransformationSchemaAPI(async_client) + self.notifications = SyncTransformationNotificationsAPI(async_client) + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[Transformation]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[TransformationList]: ... 
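+
+    # Iteration is again bridged with SyncIterator: the implementation below wraps the
+    # async generator from the underlying transformations API and re-yields its items,
+    # so chunk_size decides whether single Transformation objects or TransformationList
+    # chunks are produced.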
+ + def __call__( + self, + chunk_size: int | None = None, + include_public: bool = True, + name_regex: str | None = None, + query_regex: str | None = None, + destination_type: str | None = None, + conflict_mode: str | None = None, + cdf_project_name: str | None = None, + has_blocked_error: bool | None = None, + created_time: dict[str, Any] | TimestampRange | None = None, + last_updated_time: dict[str, Any] | TimestampRange | None = None, + data_set_ids: int | list[int] | None = None, + data_set_external_ids: str | list[str] | None = None, + tags: TagsFilter | None = None, + limit: int | None = None, + ) -> Iterator[Transformation | TransformationList]: + """ + Iterate over transformations + + Args: + chunk_size (int | None): Number of transformations to return in each chunk. Defaults to yielding one transformation a time. + include_public (bool): Whether public transformations should be included in the results. (default true). + name_regex (str | None): Regex expression to match the transformation name + query_regex (str | None): Regex expression to match the transformation query + destination_type (str | None): Transformation destination resource name to filter by. + conflict_mode (str | None): Filters by a selected transformation action type: abort/create, upsert, update, delete + cdf_project_name (str | None): Project name to filter by configured source and destination project + has_blocked_error (bool | None): Whether only the blocked transformations should be included in the results. + created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps + last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps + data_set_ids (int | list[int] | None): Return only transformations in the specified data sets with these id(s). + data_set_external_ids (str | list[str] | None): Return only transformations in the specified data sets with these external id(s). + tags (TagsFilter | None): Return only the resource matching the specified tags constraints. It only supports ContainsAny as of now. + limit (int | None): Limits the number of results to be returned. Defaults to yielding all transformations. + + Yields: + Transformation | TransformationList: Yields transformations in chunks if chunk_size is specified, otherwise one transformation at a time. + """ + yield from SyncIterator( + self.__async_client.transformations( + chunk_size=chunk_size, + include_public=include_public, + name_regex=name_regex, + query_regex=query_regex, + destination_type=destination_type, + conflict_mode=conflict_mode, + cdf_project_name=cdf_project_name, + has_blocked_error=has_blocked_error, + created_time=created_time, + last_updated_time=last_updated_time, + data_set_ids=data_set_ids, + data_set_external_ids=data_set_external_ids, + tags=tags, + limit=limit, + ) + ) + + @overload + def create(self, transformation: Transformation | TransformationWrite) -> Transformation: ... + + @overload + def create( + self, transformation: Sequence[Transformation] | Sequence[TransformationWrite] + ) -> TransformationList: ... + + def create( + self, + transformation: Transformation | TransformationWrite | Sequence[Transformation] | Sequence[TransformationWrite], + ) -> Transformation | TransformationList: + """ + `Create one or more transformations. `_ + + Args: + transformation (Transformation | TransformationWrite | Sequence[Transformation] | Sequence[TransformationWrite]): Transformation or list of transformations to create. 
+ + Returns: + Transformation | TransformationList: Created transformation(s) + + Examples: + + Create new transformations: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import TransformationWrite, TransformationDestination + >>> from cognite.client.data_classes.transformations.common import ViewInfo, EdgeType, DataModelInfo + >>> client = CogniteClient() + >>> transformations = [ + >>> TransformationWrite( + >>> external_id="transformation1", + >>> name="transformation1", + >>> ignore_null_fields=False, + >>> destination=TransformationDestination.assets() + >>> ), + >>> TransformationWrite( + >>> external_id="transformation2", + >>> name="transformation2", + >>> ignore_null_fields=False, + >>> destination=TransformationDestination.raw("myDatabase", "myTable") + >>> ), + >>> TransformationWrite( + >>> external_id="transformation3", + >>> name="transformation3", + >>> ignore_null_fields=False, + >>> view = ViewInfo(space="TypeSpace", external_id="TypeExtId", version="version"), + >>> destination=TransformationDestination.nodes(view, "InstanceSpace") + >>> ), + >>> TransformationWrite( + >>> external_id="transformation4", + >>> name="transformation4", + >>> ignore_null_fields=False, + >>> view = ViewInfo(space="TypeSpace", external_id="TypeExtId", version="version"), + >>> destination=TransformationDestination.edges(view, "InstanceSpace") + >>> ), + >>> TransformationWrite( + >>> external_id="transformation5", + >>> name="transformation5", + >>> ignore_null_fields=False, + >>> edge_type = EdgeType(space="TypeSpace", external_id="TypeExtId"), + >>> destination=TransformationDestination.edges(edge_type,"InstanceSpace") + >>> ), + >>> TransformationWrite( + >>> external_id="transformation6", + >>> name="transformation6", + >>> ignore_null_fields=False, + >>> data_model = DataModelInfo(space="modelSpace", external_id="modelExternalId",version="modelVersion",destination_type="viewExternalId"), + >>> destination=TransformationDestination.instances(data_model,"InstanceSpace") + >>> ), + >>> TransformationWrite( + >>> external_id="transformation7", + >>> name="transformation7", + >>> ignore_null_fields=False, + >>> data_model = DataModelInfo(space="modelSpace", external_id="modelExternalId",version="modelVersion",destination_type="viewExternalId", destination_relationship_from_type="connectionPropertyName"), + >>> destination=TransformationDestination.instances(data_model,"InstanceSpace") + >>> ), + >>> ] + >>> res = client.transformations.create(transformations) + """ + return run_sync(self.__async_client.transformations.create(transformation=transformation)) + + def delete( + self, + id: int | Sequence[int] | None = None, + external_id: str | SequenceNotStr[str] | None = None, + ignore_unknown_ids: bool = False, + ) -> None: + """ + `Delete one or more transformations. `_ + + Args: + id (int | Sequence[int] | None): Id or list of ids. + external_id (str | SequenceNotStr[str] | None): External ID or list of external ids. + ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. 
+ + Example: + + Delete transformations by id or external id:: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.transformations.delete(id=[1,2,3], external_id="function3") + """ + return run_sync( + self.__async_client.transformations.delete( + id=id, external_id=external_id, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + def list( + self, + include_public: bool = True, + name_regex: str | None = None, + query_regex: str | None = None, + destination_type: str | None = None, + conflict_mode: str | None = None, + cdf_project_name: str | None = None, + has_blocked_error: bool | None = None, + created_time: dict[str, Any] | TimestampRange | None = None, + last_updated_time: dict[str, Any] | TimestampRange | None = None, + data_set_ids: int | list[int] | None = None, + data_set_external_ids: str | list[str] | None = None, + tags: TagsFilter | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + ) -> TransformationList: + """ + `List all transformations. `_ + + Args: + include_public (bool): Whether public transformations should be included in the results. (default true). + name_regex (str | None): Regex expression to match the transformation name + query_regex (str | None): Regex expression to match the transformation query + destination_type (str | None): Transformation destination resource name to filter by. + conflict_mode (str | None): Filters by a selected transformation action type: abort/create, upsert, update, delete + cdf_project_name (str | None): Project name to filter by configured source and destination project + has_blocked_error (bool | None): Whether only the blocked transformations should be included in the results. + created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps + last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps + data_set_ids (int | list[int] | None): Return only transformations in the specified data sets with these id(s). + data_set_external_ids (str | list[str] | None): Return only transformations in the specified data sets with these external id(s). + tags (TagsFilter | None): Return only the resource matching the specified tags constraints. It only supports ContainsAny as of now. + limit (int | None): Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25. + + Returns: + TransformationList: List of transformations + + Example: + + List transformations:: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> transformations_list = client.transformations.list() + """ + return run_sync( + self.__async_client.transformations.list( + include_public=include_public, + name_regex=name_regex, + query_regex=query_regex, + destination_type=destination_type, + conflict_mode=conflict_mode, + cdf_project_name=cdf_project_name, + has_blocked_error=has_blocked_error, + created_time=created_time, + last_updated_time=last_updated_time, + data_set_ids=data_set_ids, + data_set_external_ids=data_set_external_ids, + tags=tags, + limit=limit, + ) + ) + + def retrieve(self, id: int | None = None, external_id: str | None = None) -> Transformation | None: + """ + `Retrieve a single transformation by id. `_ + + Args: + id (int | None): ID + external_id (str | None): No description. 
+
+        Returns:
+            Transformation | None: Requested transformation or None if it does not exist.
+
+        Examples:
+
+            Get transformation by id:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> res = client.transformations.retrieve(id=1)
+
+            Get transformation by external id:
+
+                >>> res = client.transformations.retrieve(external_id="1")
+        """
+        return run_sync(self.__async_client.transformations.retrieve(id=id, external_id=external_id))
+
+    def retrieve_multiple(
+        self,
+        ids: Sequence[int] | None = None,
+        external_ids: SequenceNotStr[str] | None = None,
+        ignore_unknown_ids: bool = False,
+    ) -> TransformationList:
+        """
+        `Retrieve multiple transformations. `_
+
+        Args:
+            ids (Sequence[int] | None): List of ids to retrieve.
+            external_ids (SequenceNotStr[str] | None): List of external ids to retrieve.
+            ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+
+        Returns:
+            TransformationList: The requested transformations.
+
+        Examples:
+
+            Get multiple transformations:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> res = client.transformations.retrieve_multiple(ids=[1,2,3], external_ids=['transform-1','transform-2'])
+        """
+        return run_sync(
+            self.__async_client.transformations.retrieve_multiple(
+                ids=ids, external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids
+            )
+        )
+
+    @overload
+    def update(
+        self,
+        item: Transformation | TransformationWrite | TransformationUpdate,
+        mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+    ) -> Transformation: ...
+
+    @overload
+    def update(
+        self,
+        item: Sequence[Transformation | TransformationWrite | TransformationUpdate],
+        mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+    ) -> TransformationList: ...
+
+    def update(
+        self,
+        item: Transformation
+        | TransformationWrite
+        | TransformationUpdate
+        | Sequence[Transformation | TransformationWrite | TransformationUpdate],
+        mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+    ) -> Transformation | TransformationList:
+        """
+        `Update one or more transformations `_
+
+        Args:
+            item (Transformation | TransformationWrite | TransformationUpdate | Sequence[Transformation | TransformationWrite | TransformationUpdate]): Transformation(s) to update
+            mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Transformation or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+
+        Returns:
+            Transformation | TransformationList: Updated transformation(s)
+
+        Examples:
+
+            Update a transformation that you have fetched.
This will perform a full update of the transformation: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> transformation = client.transformations.retrieve(id=1) + >>> transformation.query = "SELECT * FROM _cdf.assets" + >>> res = client.transformations.update(transformation) + + Perform a partial update on a transformation, updating the query and making it private: + + >>> from cognite.client.data_classes import TransformationUpdate + >>> my_update = TransformationUpdate(id=1).query.set("SELECT * FROM _cdf.assets").is_public.set(False) + >>> res = client.transformations.update(my_update) + + Update the session used for reading (source) and writing (destination) when authenticating for all + transformations in a given data set: + + >>> from cognite.client.data_classes import NonceCredentials + >>> to_update = client.transformations.list(data_set_external_ids=["foo"]) + >>> new_session = client.iam.sessions.create() + >>> new_nonce = NonceCredentials( + ... session_id=new_session.id, + ... nonce=new_session.nonce, + ... cdf_project_name=client.config.project + ... ) + >>> for tr in to_update: + ... tr.source_nonce = new_nonce + ... tr.destination_nonce = new_nonce + >>> res = client.transformations.update(to_update) + """ + return run_sync(self.__async_client.transformations.update(item=item, mode=mode)) + + def run( + self, + transformation_id: int | None = None, + transformation_external_id: str | None = None, + wait: bool = True, + timeout: float | None = None, + ) -> TransformationJob: + """ + `Run a transformation. `_ + + Args: + transformation_id (int | None): Transformation internal id + transformation_external_id (str | None): Transformation external id + wait (bool): Wait until the transformation run is finished. Defaults to True. + timeout (float | None): maximum time (s) to wait, default is None (infinite time). Once the timeout is reached, it returns with the current status. Won't have any effect if wait is False. + + Returns: + TransformationJob: Created transformation job + + Examples: + + Run transformation to completion by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> + >>> res = client.transformations.run(transformation_id = 1) + + Start running transformation by id: + + >>> res = client.transformations.run(transformation_id = 1, wait = False) + """ + return run_sync( + self.__async_client.transformations.run( + transformation_id=transformation_id, + transformation_external_id=transformation_external_id, + wait=wait, + timeout=timeout, + ) + ) + + def cancel(self, transformation_id: int | None = None, transformation_external_id: str | None = None) -> None: + """ + `Cancel a running transformation. 
`_
+
+        Args:
+            transformation_id (int | None): Transformation internal id
+            transformation_external_id (str | None): Transformation external id
+
+        Examples:
+
+            Wait for the transformation for 1 minute and cancel it if it is still running:
+
+                >>> from cognite.client import CogniteClient
+                >>> from cognite.client.data_classes import TransformationJobStatus
+                >>> client = CogniteClient()
+                >>>
+                >>> res = client.transformations.run(transformation_id=1, timeout=60.0)
+                >>> if res.status == TransformationJobStatus.RUNNING:
+                ...     res.cancel()
+        """
+        return run_sync(
+            self.__async_client.transformations.cancel(
+                transformation_id=transformation_id, transformation_external_id=transformation_external_id
+            )
+        )
+
+    def preview(
+        self,
+        query: str | None = None,
+        convert_to_string: bool = False,
+        limit: int | None = 100,
+        source_limit: int | None = 100,
+        infer_schema_limit: int | None = 10000,
+        timeout: int | None = 240,
+    ) -> TransformationPreviewResult:
+        """
+        `Preview the result of a query. `_
+
+        Args:
+            query (str | None): SQL query to run for preview.
+            convert_to_string (bool): Stringify values in the query results, default is False.
+            limit (int | None): Maximum number of rows to return in the final result, default is 100.
+            source_limit (int | None): Maximum number of items to read from the data source or None to run without limit, default is 100.
+            infer_schema_limit (int | None): Limit for how many rows are used for inferring result schema, default is 10 000.
+            timeout (int | None): Number of seconds to wait before cancelling a query. The default, and maximum, is 240.
+
+        Returns:
+            TransformationPreviewResult: Result of the executed query
+
+        Examples:
+
+            Preview transformation results as schema and list of rows:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>>
+                >>> query_result = client.transformations.preview(query="select * from _cdf.assets")
+
+            Preview transformation results as pandas dataframe:
+
+                >>> df = client.transformations.preview(query="select * from _cdf.assets").to_pandas()
+
+            Notice that the results are limited both by the `limit` and `source_limit` parameters. If you have
+            a query that converts one source row to one result row, you may need to increase the `source_limit`.
+            For example, given that you have a query that reads from a raw table with 10,903 rows:
+
+                >>> result = client.transformations.preview(query="select * from my_raw_db.my_raw_table", limit=None)
+                >>> print(result.results) # 100
+
+            To get all rows, you also need to set the `source_limit` to None:
+
+                >>> result = client.transformations.preview(query="select * from my_raw_db.my_raw_table", limit=None, source_limit=None)
+                >>> print(result.results) # 10903
+        """
+        return run_sync(
+            self.__async_client.transformations.preview(
+                query=query,
+                convert_to_string=convert_to_string,
+                limit=limit,
+                source_limit=source_limit,
+                infer_schema_limit=infer_schema_limit,
+                timeout=timeout,
+            )
+        )
diff --git a/cognite/client/_sync_api/transformations/jobs.py b/cognite/client/_sync_api/transformations/jobs.py
new file mode 100644
index 0000000000..398baf537d
--- /dev/null
+++ b/cognite/client/_sync_api/transformations/jobs.py
@@ -0,0 +1,132 @@
+"""
+===============================================================================
+a80fccb60645595b2eb2e245964f25fb
+This file is auto-generated from the Async API modules, - do not edit manually!
+=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Sequence + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import ( + TransformationJob, + TransformationJobList, + TransformationJobMetricList, +) +from cognite.client.utils._async_helpers import run_sync + + +class SyncTransformationJobsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def list( + self, + limit: int | None = DEFAULT_LIMIT_READ, + transformation_id: int | None = None, + transformation_external_id: str | None = None, + ) -> TransformationJobList: + """ + `List all running transformation jobs. `_ + + Args: + limit (int | None): Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25. + transformation_id (int | None): Filters the results by the internal transformation id. + transformation_external_id (str | None): Filters the results by the external transformation id. + + Returns: + TransformationJobList: List of transformation jobs + + Example: + + List transformation jobs:: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> transformation_jobs_list = client.transformations.jobs.list() + + List transformation jobs of a single transformation:: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> transformation_jobs_list = client.transformations.jobs.list(transformation_id=1) + """ + return run_sync( + self.__async_client.transformations.jobs.list( + limit=limit, transformation_id=transformation_id, transformation_external_id=transformation_external_id + ) + ) + + def retrieve(self, id: int) -> TransformationJob | None: + """ + `Retrieve a single transformation job by id. `_ + + Args: + id (int): Job internal Id + + Returns: + TransformationJob | None: Requested transformation job or None if it does not exist. + + Examples: + + Get transformation job by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.transformations.jobs.retrieve(id=1) + """ + return run_sync(self.__async_client.transformations.jobs.retrieve(id=id)) + + def list_metrics(self, id: int) -> TransformationJobMetricList: + """ + `List the metrics of a single transformation job. `_ + + Args: + id (int): Job internal Id + + Returns: + TransformationJobMetricList: List of updated metrics of the given job. + + Examples: + + Get metrics by transformation job id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.transformations.jobs.list_metrics(id=1) + """ + return run_sync(self.__async_client.transformations.jobs.list_metrics(id=id)) + + def retrieve_multiple(self, ids: Sequence[int], ignore_unknown_ids: bool = False) -> TransformationJobList: + """ + `Retrieve multiple transformation jobs by id. 
`_ + + Args: + ids (Sequence[int]): Job internal Ids + ignore_unknown_ids (bool): Ignore IDs that are not found rather than throw an exception. + + Returns: + TransformationJobList: Requested transformation jobs. + + Examples: + + Get jobs by id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.transformations.jobs.retrieve_multiple(ids=[1, 2, 3]) + """ + return run_sync( + self.__async_client.transformations.jobs.retrieve_multiple(ids=ids, ignore_unknown_ids=ignore_unknown_ids) + ) diff --git a/cognite/client/_sync_api/transformations/notifications.py b/cognite/client/_sync_api/transformations/notifications.py new file mode 100644 index 0000000000..046fad1913 --- /dev/null +++ b/cognite/client/_sync_api/transformations/notifications.py @@ -0,0 +1,165 @@ +""" +=============================================================================== +cbc3cbea063b7c98d3966a98835d337a +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import ( + TransformationNotification, + TransformationNotificationList, + TransformationNotificationWrite, +) +from cognite.client.utils._async_helpers import SyncIterator, run_sync + + +class SyncTransformationNotificationsAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[TransformationNotification]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[TransformationNotificationList]: ... + + def __call__( + self, + chunk_size: int | None = None, + transformation_id: int | None = None, + transformation_external_id: str | None = None, + destination: str | None = None, + limit: int | None = None, + ) -> Iterator[TransformationNotification | TransformationNotificationList]: + """ + Iterate over transformation notifications + + Args: + chunk_size (int | None): Number of notifications to yield per chunk. Defaults to yielding notifications one by one. + transformation_id (int | None): Filter by transformation internal numeric ID. + transformation_external_id (str | None): Filter by transformation externalId. + destination (str | None): Filter by notification destination. + limit (int | None): Limits the number of results to be returned. Defaults to yielding all notifications. + + Yields: + TransformationNotification | TransformationNotificationList: Yields notifications one by one if chunk_size is None, otherwise yields lists of notifications. + """ + yield from SyncIterator( + self.__async_client.transformations.notifications( + chunk_size=chunk_size, + transformation_id=transformation_id, + transformation_external_id=transformation_external_id, + destination=destination, + limit=limit, + ) + ) + + @overload + def create( + self, notification: TransformationNotification | TransformationNotificationWrite + ) -> TransformationNotification: ... 
+
+    @overload
+    def create(
+        self, notification: Sequence[TransformationNotification] | Sequence[TransformationNotificationWrite]
+    ) -> TransformationNotificationList: ...
+
+    def create(
+        self,
+        notification: TransformationNotification
+        | TransformationNotificationWrite
+        | Sequence[TransformationNotification]
+        | Sequence[TransformationNotificationWrite],
+    ) -> TransformationNotification | TransformationNotificationList:
+        """
+        `Subscribe for notifications on the transformation errors. `_
+
+        Args:
+            notification (TransformationNotification | TransformationNotificationWrite | Sequence[TransformationNotification] | Sequence[TransformationNotificationWrite]): Notification or list of notifications to create.
+
+        Returns:
+            TransformationNotification | TransformationNotificationList: Created notification(s)
+
+        Examples:
+
+            Create new notifications:
+
+                >>> from cognite.client import CogniteClient
+                >>> from cognite.client.data_classes import TransformationNotification
+                >>> client = CogniteClient()
+                >>> notifications = [TransformationNotification(transformation_id = 1, destination="my@email.com"), TransformationNotification(transformation_external_id="transformation2", destination="other@email.com")]
+                >>> res = client.transformations.notifications.create(notifications)
+        """
+        return run_sync(self.__async_client.transformations.notifications.create(notification=notification))
+
+    def list(
+        self,
+        transformation_id: int | None = None,
+        transformation_external_id: str | None = None,
+        destination: str | None = None,
+        limit: int | None = DEFAULT_LIMIT_READ,
+    ) -> TransformationNotificationList:
+        """
+        `List notification subscriptions. `_
+
+        Args:
+            transformation_id (int | None): Filter by transformation internal numeric ID.
+            transformation_external_id (str | None): Filter by transformation externalId.
+            destination (str | None): Filter by notification destination.
+            limit (int | None): Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25.
+
+        Returns:
+            TransformationNotificationList: List of transformation notifications
+
+        Example:
+
+            List all notifications::
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> notifications_list = client.transformations.notifications.list()
+
+            List all notifications by transformation id::
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> notifications_list = client.transformations.notifications.list(transformation_id = 1)
+        """
+        return run_sync(
+            self.__async_client.transformations.notifications.list(
+                transformation_id=transformation_id,
+                transformation_external_id=transformation_external_id,
+                destination=destination,
+                limit=limit,
+            )
+        )
+
+    def delete(self, id: int | Sequence[int] | None = None) -> None:
+        """
+        `Deletes the specified notification subscriptions on the transformation.
Does nothing if the subscriptions do not exist `_
+
+        Args:
+            id (int | Sequence[int] | None): Id or list of transformation notification ids
+
+        Examples:
+
+            Delete notification subscriptions by id:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> client.transformations.notifications.delete(id=[1,2,3])
+        """
+        return run_sync(self.__async_client.transformations.notifications.delete(id=id))
diff --git a/cognite/client/_sync_api/transformations/schedules.py b/cognite/client/_sync_api/transformations/schedules.py
new file mode 100644
index 0000000000..1c05bec005
--- /dev/null
+++ b/cognite/client/_sync_api/transformations/schedules.py
@@ -0,0 +1,259 @@
+"""
+===============================================================================
+4e9b309cc7ad75e8c178dbc87a8a62ac
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import TYPE_CHECKING, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import (
+    TransformationSchedule,
+    TransformationScheduleList,
+    TransformationScheduleUpdate,
+    TransformationScheduleWrite,
+)
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+    from cognite.client import AsyncCogniteClient
+
+
+class SyncTransformationSchedulesAPI(SyncAPIClient):
+    """Auto-generated, do not modify manually."""
+
+    def __init__(self, async_client: AsyncCogniteClient):
+        self.__async_client = async_client
+
+    @overload
+    def __call__(self, chunk_size: None = None) -> Iterator[TransformationSchedule]: ...
+
+    @overload
+    def __call__(self, chunk_size: int) -> Iterator[TransformationScheduleList]: ...
+
+    def __call__(
+        self, chunk_size: int | None = None, include_public: bool = True, limit: int | None = None
+    ) -> Iterator[TransformationSchedule | TransformationScheduleList]:
+        """
+        Iterate over transformation schedules
+
+        Args:
+            chunk_size (int | None): The number of schedules to return in each chunk. Defaults to yielding one schedule at a time.
+            include_public (bool): Whether public transformations should be included in the results. (default true).
+            limit (int | None): Limits the number of results to be returned. Defaults to yielding all schedules.
+
+        Yields:
+            TransformationSchedule | TransformationScheduleList: Yields schedules one by one if chunk_size is None, otherwise yields lists of schedules.
+        """
+        yield from SyncIterator(
+            self.__async_client.transformations.schedules(
+                chunk_size=chunk_size, include_public=include_public, limit=limit
+            )
+        )
+
+    @overload
+    def create(self, schedule: TransformationSchedule | TransformationScheduleWrite) -> TransformationSchedule: ...
+
+    @overload
+    def create(
+        self, schedule: Sequence[TransformationSchedule] | Sequence[TransformationScheduleWrite]
+    ) -> TransformationScheduleList: ...
+
+    def create(
+        self,
+        schedule: TransformationSchedule
+        | TransformationScheduleWrite
+        | Sequence[TransformationSchedule]
+        | Sequence[TransformationScheduleWrite],
+    ) -> TransformationSchedule | TransformationScheduleList:
+        """
+        `Schedule the specified transformation with the specified configuration(s). `_
+
+        Args:
+            schedule (TransformationSchedule | TransformationScheduleWrite | Sequence[TransformationSchedule] | Sequence[TransformationScheduleWrite]): Configuration or list of configurations of the schedules to create.
+
+        Returns:
+            TransformationSchedule | TransformationScheduleList: Created schedule(s)
+
+        Examples:
+
+            Create new schedules:
+
+                >>> from cognite.client import CogniteClient
+                >>> from cognite.client.data_classes import TransformationScheduleWrite
+                >>> client = CogniteClient()
+                >>> schedules = [TransformationScheduleWrite(id = 1, interval = "0 * * * *"), TransformationScheduleWrite(external_id="transformation2", interval = "5 * * * *")]
+                >>> res = client.transformations.schedules.create(schedules)
+        """
+        return run_sync(self.__async_client.transformations.schedules.create(schedule=schedule))
+
+    def retrieve(self, id: int | None = None, external_id: str | None = None) -> TransformationSchedule | None:
+        """
+        `Retrieve a single transformation schedule by the id or external id of its transformation. `_
+
+        Args:
+            id (int | None): transformation ID
+            external_id (str | None): transformation External ID
+
+        Returns:
+            TransformationSchedule | None: Requested transformation schedule or None if it does not exist.
+
+        Examples:
+
+            Get transformation schedule by transformation id:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> res = client.transformations.schedules.retrieve(id=1)
+
+            Get transformation schedule by transformation external id:
+
+                >>> res = client.transformations.schedules.retrieve(external_id="1")
+        """
+        return run_sync(self.__async_client.transformations.schedules.retrieve(id=id, external_id=external_id))
+
+    def retrieve_multiple(
+        self,
+        ids: Sequence[int] | None = None,
+        external_ids: SequenceNotStr[str] | None = None,
+        ignore_unknown_ids: bool = False,
+    ) -> TransformationScheduleList:
+        """
+        `Retrieve multiple transformation schedules by the ids or external ids of the corresponding transformations. `_
+
+        Args:
+            ids (Sequence[int] | None): transformation IDs
+            external_ids (SequenceNotStr[str] | None): transformation External IDs
+            ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+
+        Returns:
+            TransformationScheduleList: Requested transformation schedules.
+
+        Examples:
+
+            Get transformation schedules by transformation ids:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> res = client.transformations.schedules.retrieve_multiple(ids=[1, 2, 3])
+
+            Get transformation schedules by transformation external ids:
+
+                >>> res = client.transformations.schedules.retrieve_multiple(external_ids=["t1", "t2"])
+        """
+        return run_sync(
+            self.__async_client.transformations.schedules.retrieve_multiple(
+                ids=ids, external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids
+            )
+        )
+
+    def list(self, include_public: bool = True, limit: int | None = DEFAULT_LIMIT_READ) -> TransformationScheduleList:
+        """
+        `List all transformation schedules.
`_ + + Args: + include_public (bool): Whether public transformations should be included in the results. (default true). + limit (int | None): Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25. + + Returns: + TransformationScheduleList: List of schedules + + Example: + + List schedules:: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> schedules_list = client.transformations.schedules.list() + """ + return run_sync(self.__async_client.transformations.schedules.list(include_public=include_public, limit=limit)) + + def delete( + self, + id: int | Sequence[int] | None = None, + external_id: str | SequenceNotStr[str] | None = None, + ignore_unknown_ids: bool = False, + ) -> None: + """ + `Unschedule one or more transformations `_ + + Args: + id (int | Sequence[int] | None): Id or list of ids + external_id (str | SequenceNotStr[str] | None): External ID or list of external ids + ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + + Examples: + + Delete schedules by id or external id: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.transformations.schedules.delete(id=[1,2,3], external_id="3") + """ + return run_sync( + self.__async_client.transformations.schedules.delete( + id=id, external_id=external_id, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + @overload + def update( + self, + item: TransformationSchedule | TransformationScheduleWrite | TransformationScheduleUpdate, + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> TransformationSchedule: ... + + @overload + def update( + self, + item: Sequence[TransformationSchedule | TransformationScheduleWrite | TransformationScheduleUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> TransformationScheduleList: ... + + def update( + self, + item: TransformationSchedule + | TransformationScheduleWrite + | TransformationScheduleUpdate + | Sequence[TransformationSchedule | TransformationScheduleWrite | TransformationScheduleUpdate], + mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null", + ) -> TransformationSchedule | TransformationScheduleList: + """ + `Update one or more transformation schedules `_ + + Args: + item (TransformationSchedule | TransformationScheduleWrite | TransformationScheduleUpdate | Sequence[TransformationSchedule | TransformationScheduleWrite | TransformationScheduleUpdate]): Transformation schedule(s) to update + mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (TransformationSchedule or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + + Returns: + TransformationSchedule | TransformationScheduleList: Updated transformation schedule(s) + + Examples: + + Update a transformation schedule that you have fetched. 
This will perform a full update of the schedule: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> transformation_schedule = client.transformations.schedules.retrieve(id=1) + >>> transformation_schedule.is_paused = True + >>> res = client.transformations.schedules.update(transformation_schedule) + + Perform a partial update on a transformation schedule, updating the interval and unpausing it: + + >>> from cognite.client.data_classes import TransformationScheduleUpdate + >>> my_update = TransformationScheduleUpdate(id=1).interval.set("0 * * * *").is_paused.set(False) + >>> res = client.transformations.schedules.update(my_update) + """ + return run_sync(self.__async_client.transformations.schedules.update(item=item, mode=mode)) diff --git a/cognite/client/_sync_api/transformations/schema.py b/cognite/client/_sync_api/transformations/schema.py new file mode 100644 index 0000000000..5efa3fddb6 --- /dev/null +++ b/cognite/client/_sync_api/transformations/schema.py @@ -0,0 +1,49 @@ +""" +=============================================================================== +63827e363f4f15859ad49b1b4bae34e0 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes import ( + TransformationDestination, + TransformationSchemaColumnList, +) +from cognite.client.utils._async_helpers import run_sync + + +class SyncTransformationSchemaAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def retrieve( + self, destination: TransformationDestination, conflict_mode: str | None = None + ) -> TransformationSchemaColumnList: + """ + `Get expected schema for a transformation destination. `_ + + Args: + destination (TransformationDestination): destination for which the schema is requested. + conflict_mode (str | None): conflict mode for which the schema is requested. + + Returns: + TransformationSchemaColumnList: List of column descriptions + + Example: + + Get the schema for a transformation producing assets:: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import TransformationDestination + >>> client = CogniteClient() + >>> columns = client.transformations.schema.retrieve(destination = TransformationDestination.assets()) + """ + return run_sync( + self.__async_client.transformations.schema.retrieve(destination=destination, conflict_mode=conflict_mode) + ) diff --git a/cognite/client/_sync_api/unit_system.py b/cognite/client/_sync_api/unit_system.py new file mode 100644 index 0000000000..f6397335af --- /dev/null +++ b/cognite/client/_sync_api/unit_system.py @@ -0,0 +1,38 @@ +""" +=============================================================================== +ebf94ce24437dd81bab208403099ec24 +This file is auto-generated from the Async API modules, - do not edit manually! 
+=============================================================================== +""" + +from __future__ import annotations + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.units import UnitSystemList +from cognite.client.utils._async_helpers import run_sync + + +class SyncUnitSystemAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def list(self) -> UnitSystemList: + """ + `List all supported unit systems `_ + + Returns: + UnitSystemList: List of unit systems + + Examples: + + List all supported unit systems in CDF: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.units.systems.list() + """ + return run_sync(self.__async_client.units.systems.list()) diff --git a/cognite/client/_sync_api/units.py b/cognite/client/_sync_api/units.py new file mode 100644 index 0000000000..df16b3dee5 --- /dev/null +++ b/cognite/client/_sync_api/units.py @@ -0,0 +1,157 @@ +""" +=============================================================================== +341c216ca7805041eb8b81051219edd4 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Literal, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api.unit_system import SyncUnitSystemAPI +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.units import Unit, UnitList +from cognite.client.utils._async_helpers import run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncUnitAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + self.systems = SyncUnitSystemAPI(async_client) + + @overload + def retrieve(self, external_id: str, ignore_unknown_ids: bool = False) -> None | Unit: ... + + @overload + def retrieve(self, external_id: SequenceNotStr[str], ignore_unknown_ids: bool = False) -> UnitList: ... + + def retrieve( + self, external_id: str | SequenceNotStr[str], ignore_unknown_ids: bool = False + ) -> Unit | UnitList | None: + """ + `Retrieve one or more unit `_ + + Args: + external_id (str | SequenceNotStr[str]): External ID or list of external IDs + ignore_unknown_ids (bool): Ignore external IDs that are not found rather than throw an exception. + + Returns: + Unit | UnitList | None: If a single external ID is specified: the requested unit, or None if it does not exist. If several external IDs are specified: the requested units. 
+
+        Examples:
+
+            Retrieve unit 'temperature:deg_c':
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> res = client.units.retrieve('temperature:deg_c')
+
+            Retrieve units 'temperature:deg_c' and 'pressure:bar':
+
+                >>> res = client.units.retrieve(['temperature:deg_c', 'pressure:bar'])
+        """
+        return run_sync(
+            self.__async_client.units.retrieve(external_id=external_id, ignore_unknown_ids=ignore_unknown_ids)
+        )
+
+    @overload
+    def from_alias(
+        self,
+        alias: str,
+        quantity: str | None = None,
+        *,
+        return_ambiguous: Literal[False] = False,
+        return_closest_matches: Literal[False] = False,
+    ) -> Unit: ...
+
+    @overload
+    def from_alias(
+        self,
+        alias: str,
+        quantity: str | None = None,
+        *,
+        return_ambiguous: bool = False,
+        return_closest_matches: bool = False,
+    ) -> UnitList: ...
+
+    def from_alias(
+        self,
+        alias: str,
+        quantity: str | None = None,
+        *,
+        return_ambiguous: bool = False,
+        return_closest_matches: bool = False,
+    ) -> Unit | UnitList:
+        """
+        Look up a unit by alias, optionally for a given quantity. Aliases and quantities are case-sensitive.
+
+        Note:
+            When just ``alias`` is given (i.e. ``quantity`` is not specified), some aliases are ambiguous as they are used
+            by several quantities, e.g. 'F' which can be both Farad (Capacitance) and Fahrenheit (Temperature). These raise
+            a ValueError by default unless also ``return_ambiguous=True`` is passed, in which case all matching units are returned.
+
+        Tip:
+            You can use ``return_closest_matches=True`` to get the closest matching units if the lookup fails. Note that there
+            may not be any close matches, in which case an empty UnitList is returned.
+
+        Args:
+            alias (str): Alias of the unit, like 'cmol / L' or 'meter per second'.
+            quantity (str | None): Quantity of the unit, like 'Temperature' or 'Pressure'.
+            return_ambiguous (bool): If False (default), when the alias is ambiguous (i.e. no quantity was given), raise a ValueError. If True, return the list of all matching units.
+            return_closest_matches (bool): If False (default), when the lookup fails, raise a ValueError. If True, return the closest matching units (even if empty).
+
+        Returns:
+            Unit | UnitList: The unit if found, else a ValueError is raised. If one or both of ``return_ambiguous`` and ``return_closest_matches`` is passed as True, a UnitList may be returned.
+ + Examples: + + Look up a unit by alias only: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> unit = client.units.from_alias('cmol / L') + + Look up ambiguous alias 'F' by passing quantity 'Temperature': + + >>> unit = client.units.from_alias('F', 'Temperature') + + Search for the closest matching unit of 'kilo watt' (should be 'kilowatt'): + + >>> unit_matches = client.units.from_alias("kilo watt", return_closest_matches=True) + """ + return run_sync( + self.__async_client.units.from_alias( + alias=alias, + quantity=quantity, + return_ambiguous=return_ambiguous, + return_closest_matches=return_closest_matches, + ) + ) + + def list(self) -> UnitList: + """ + `List all supported units `_ + + Returns: + UnitList: List of units + + Examples: + + List all supported units in CDF: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.units.list() + """ + return run_sync(self.__async_client.units.list()) diff --git a/cognite/client/_sync_api/user_profiles.py b/cognite/client/_sync_api/user_profiles.py new file mode 100644 index 0000000000..0df0597d7b --- /dev/null +++ b/cognite/client/_sync_api/user_profiles.py @@ -0,0 +1,141 @@ +""" +=============================================================================== +5c43e7540fde166f93e8444a861270ae +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from typing import overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.user_profiles import UserProfile, UserProfileList, UserProfilesConfiguration +from cognite.client.utils._async_helpers import run_sync +from cognite.client.utils.useful_types import SequenceNotStr + + +class SyncUserProfilesAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def enable(self) -> UserProfilesConfiguration: + """ + Enable user profiles for the project + """ + return run_sync(self.__async_client.iam.user_profiles.enable()) + + def disable(self) -> UserProfilesConfiguration: + """ + Disable user profiles for the project + """ + return run_sync(self.__async_client.iam.user_profiles.disable()) + + def me(self) -> UserProfile: + """ + `Retrieve your own user profile `_ + + Retrieves the user profile of the principal issuing the request, i.e. the principal *this* AsyncCogniteClient was instantiated with. + + Returns: + UserProfile: Your own user profile. + + Raises: + CogniteAPIError: If this principal doesn't have a user profile, you get a not found (404) response code. + + Examples: + + Get your own user profile: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.iam.user_profiles.me() + """ + return run_sync(self.__async_client.iam.user_profiles.me()) + + @overload + def retrieve(self, user_identifier: str) -> UserProfile | None: ... + + @overload + def retrieve(self, user_identifier: SequenceNotStr[str]) -> UserProfileList: ... 
+
+    def retrieve(self, user_identifier: str | SequenceNotStr[str]) -> UserProfile | UserProfileList | None:
+        """
+        `Retrieve user profiles by user identifier. `_
+
+        Retrieves one or more user profiles indexed by the user identifier in the same CDF project.
+
+        Args:
+            user_identifier (str | SequenceNotStr[str]): The single user identifier (or sequence of) to retrieve profile(s) for.
+
+        Returns:
+            UserProfile | UserProfileList | None: UserProfileList if a sequence of user identifiers was requested, else UserProfile. If a single user identifier is requested and it is not found, None is returned.
+
+        Raises:
+            CogniteNotFoundError: A sequence of user identifiers was requested, but one or more do not exist.
+
+        Examples:
+
+            Get a single user profile:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> res = client.iam.user_profiles.retrieve("foo")
+
+            Get multiple user profiles:
+
+                >>> res = client.iam.user_profiles.retrieve(["bar", "baz"])
+        """
+        return run_sync(self.__async_client.iam.user_profiles.retrieve(user_identifier=user_identifier))
+
+    def search(self, name: str, limit: int = DEFAULT_LIMIT_READ) -> UserProfileList:
+        """
+        `Search for user profiles `_
+        Primarily meant for human-centric use-cases and data exploration, not for programs, as the result set ordering and match criteria threshold may change over time.
+
+        Args:
+            name (str): Prefix search on name.
+            limit (int): Maximum number of results to return.
+
+        Returns:
+            UserProfileList: User profiles search result
+
+        Examples:
+
+            Search for users with first (or second...) name starting with "Alex":
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> res = client.iam.user_profiles.search(name="Alex")
+        """
+        return run_sync(self.__async_client.iam.user_profiles.search(name=name, limit=limit))
+
+    def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> UserProfileList:
+        """
+        `List user profiles `_
+
+        List all user profiles in the current CDF project. The results are ordered alphabetically by name.
+
+        Args:
+            limit (int | None): Maximum number of user profiles to return. Defaults to 25. Set to -1, float("inf") or None to return all.
+
+        Returns:
+            UserProfileList: List of user profiles.
+
+        Examples:
+
+            List all user profiles:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> res = client.iam.user_profiles.list(limit=None)
+        """
+        return run_sync(self.__async_client.iam.user_profiles.list(limit=limit))
diff --git a/cognite/client/_sync_api/vision.py b/cognite/client/_sync_api/vision.py
new file mode 100644
index 0000000000..3296e7e9a3
--- /dev/null
+++ b/cognite/client/_sync_api/vision.py
@@ -0,0 +1,82 @@
+"""
+===============================================================================
+40535cb165ab439531b0e145ec36ee1f
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.contextualization import FeatureParameters, VisionExtractJob, VisionFeature
+from cognite.client.utils._async_helpers import run_sync
+
+
+class SyncVisionAPI(SyncAPIClient):
+    """Auto-generated, do not modify manually."""
+
+    def __init__(self, async_client: AsyncCogniteClient):
+        self.__async_client = async_client
+
+    def extract(
+        self,
+        features: VisionFeature | list[VisionFeature],
+        file_ids: list[int] | None = None,
+        file_external_ids: list[str] | None = None,
+        parameters: FeatureParameters | None = None,
+    ) -> VisionExtractJob:
+        """
+        `Start an asynchronous job to extract features from image files. `_
+
+        Args:
+            features (VisionFeature | list[VisionFeature]): The feature(s) to extract from the provided image files.
+            file_ids (list[int] | None): IDs of the image files to analyze. The images must already be uploaded in the same CDF project.
+            file_external_ids (list[str] | None): The external file ids of the image files to analyze.
+            parameters (FeatureParameters | None): No description.
+        Returns:
+            VisionExtractJob: Resulting queued job, which can be used to retrieve the status of the job or the prediction results if the job is finished. Note that the .result property of this job will wait for the job to finish and return the results.
+
+        Examples:
+            Start a job, wait for completion and then get the parsed results:
+
+                >>> from cognite.client import CogniteClient
+                >>> from cognite.client.data_classes.contextualization import VisionFeature
+                >>> client = CogniteClient()
+                >>> extract_job = client.vision.extract(features=VisionFeature.ASSET_TAG_DETECTION, file_ids=[1])
+                >>> extract_job.wait_for_completion()
+                >>> for item in extract_job.items:
+                ...     predictions = item.predictions
+                ...     # do something with the predictions
+                >>> # Save predictions in CDF using Annotations API:
+                >>> extract_job.save_predictions()
+        """
+        return run_sync(
+            self.__async_client.vision.extract(
+                features=features, file_ids=file_ids, file_external_ids=file_external_ids, parameters=parameters
+            )
+        )
+
+    def get_extract_job(self, job_id: int) -> VisionExtractJob:
+        """
+        `Retrieve an existing extract job by ID. `_
+
+        Args:
+            job_id (int): ID of an existing feature extraction job.
+
+        Returns:
+            VisionExtractJob: Vision extract job, which can be used to retrieve the status of the job or the prediction results if the job is finished. Note that the .result property of this job will wait for the job to finish and return the results.
+
+        Examples:
+            Retrieve a vision extract job by ID:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> client = CogniteClient()
+                >>> # async_client = AsyncCogniteClient() # another option
+                >>> extract_job = client.vision.get_extract_job(job_id=1)
+                >>> extract_job.wait_for_completion()
+                >>> for item in extract_job.items:
+                ...     predictions = item.predictions
+                ...
# do something with the predictions + """ + return run_sync(self.__async_client.vision.get_extract_job(job_id=job_id)) diff --git a/cognite/client/_sync_api/workflows/__init__.py b/cognite/client/_sync_api/workflows/__init__.py new file mode 100644 index 0000000000..7a27401c67 --- /dev/null +++ b/cognite/client/_sync_api/workflows/__init__.py @@ -0,0 +1,171 @@ +""" +=============================================================================== +8de1a06d164e2a472aa3c6911e41a991 +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, Sequence +from typing import TYPE_CHECKING, Literal, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api.workflows.executions import SyncWorkflowExecutionAPI +from cognite.client._sync_api.workflows.tasks import SyncWorkflowTaskAPI +from cognite.client._sync_api.workflows.triggers import SyncWorkflowTriggerAPI +from cognite.client._sync_api.workflows.versions import SyncWorkflowVersionAPI +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.workflows import Workflow, WorkflowList, WorkflowUpsert +from cognite.client.utils._async_helpers import SyncIterator, run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncWorkflowAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + self.versions = SyncWorkflowVersionAPI(async_client) + self.executions = SyncWorkflowExecutionAPI(async_client) + self.tasks = SyncWorkflowTaskAPI(async_client) + self.triggers = SyncWorkflowTriggerAPI(async_client) + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[Workflow]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[WorkflowList]: ... + + def __call__(self, chunk_size: int | None = None, limit: int | None = None) -> Iterator[Workflow | WorkflowList]: + """ + Iterate over workflows + + Args: + chunk_size (int | None): The number of workflows to return in each chunk. Defaults to yielding one workflow at a time. + limit (int | None): Maximum number of workflows to return. Defaults to returning all items. + + Yields: + Workflow | WorkflowList: Yields Workflow one by one if chunk_size is None, otherwise yields WorkflowList objects. + """ + yield from SyncIterator(self.__async_client.workflows(chunk_size=chunk_size, limit=limit)) + + @overload + def upsert(self, workflow: WorkflowUpsert, mode: Literal["replace"] = "replace") -> Workflow: ... + + @overload + def upsert(self, workflow: Sequence[WorkflowUpsert], mode: Literal["replace"] = "replace") -> WorkflowList: ... + + def upsert( + self, workflow: WorkflowUpsert | Sequence[WorkflowUpsert], mode: Literal["replace"] = "replace" + ) -> Workflow | WorkflowList: + """ + `Create one or more workflow(s). `_ + + Note this is an upsert endpoint, so workflows that already exist will be updated, and new ones will be created. + + Args: + workflow (WorkflowUpsert | Sequence[WorkflowUpsert]): The workflow(s) to upsert. + mode (Literal['replace']): This is not an option for the API, but is included here to document that the upserts are always done in replace mode. 
+ + Returns: + Workflow | WorkflowList: The created workflow(s). + + Examples: + + Create one workflow with external id "my_workflow": + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import WorkflowUpsert + >>> client = CogniteClient() + >>> wf = WorkflowUpsert(external_id="my_workflow", description="my workflow description") + >>> res = client.workflows.upsert(wf) + + Create multiple workflows: + + >>> wf2 = WorkflowUpsert(external_id="other", data_set_id=123) + >>> res = client.workflows.upsert([wf, wf2]) + """ + return run_sync(self.__async_client.workflows.upsert(workflow=workflow, mode=mode)) + + @overload + def retrieve(self, external_id: str, ignore_unknown_ids: bool = False) -> Workflow | None: ... + + @overload + def retrieve(self, external_id: SequenceNotStr[str], ignore_unknown_ids: bool = False) -> WorkflowList: ... + + def retrieve( + self, external_id: str | SequenceNotStr[str], ignore_unknown_ids: bool = False + ) -> Workflow | WorkflowList | None: + """ + `Retrieve one or more workflows. `_ + + Args: + external_id (str | SequenceNotStr[str]): Identifier (or sequence of identifiers) for a Workflow. Must be unique. + ignore_unknown_ids (bool): When requesting multiple workflows, whether to ignore external IDs that are not found rather than throwing an exception. + + Returns: + Workflow | WorkflowList | None: If a single external ID is specified: the requested workflow, or None if it does not exist. If several external IDs are specified: the requested workflows. + + Examples: + + Retrieve workflow with external ID "my_workflow": + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> workflow = client.workflows.retrieve("my_workflow") + + Retrieve multiple workflows: + + >>> workflow_list = client.workflows.retrieve(["foo", "bar"]) + """ + return run_sync( + self.__async_client.workflows.retrieve(external_id=external_id, ignore_unknown_ids=ignore_unknown_ids) + ) + + def delete(self, external_id: str | SequenceNotStr[str], ignore_unknown_ids: bool = False) -> None: + """ + `Delete one or more workflows with versions. `_ + + Args: + external_id (str | SequenceNotStr[str]): External id or list of external ids to delete. + ignore_unknown_ids (bool): Ignore external ids that are not found rather than throw an exception. + + Examples: + + Delete workflow with external_id "my_workflow": + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.workflows.delete("my_workflow") + """ + return run_sync( + self.__async_client.workflows.delete(external_id=external_id, ignore_unknown_ids=ignore_unknown_ids) + ) + + def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> WorkflowList: + """ + `List workflows in the project. `_ + + Args: + limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None + + Returns: + WorkflowList: Workflows in the CDF project. 
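+
+        Tip:
+            If you expect a large number of workflows, consider iterating over them lazily
+            instead, by calling this API object directly: ``for wf in client.workflows(): ...``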
+ + Examples: + + List all workflows: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.workflows.list(limit=None) + """ + return run_sync(self.__async_client.workflows.list(limit=limit)) diff --git a/cognite/client/_sync_api/workflows/executions.py b/cognite/client/_sync_api/workflows/executions.py new file mode 100644 index 0000000000..d99df6d915 --- /dev/null +++ b/cognite/client/_sync_api/workflows/executions.py @@ -0,0 +1,225 @@ +""" +=============================================================================== +60c7e539a4068a90816b0ff964f3d4ab +This file is auto-generated from the Async API modules, - do not edit manually! +=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import MutableSequence +from typing import TYPE_CHECKING + +from cognite.client import AsyncCogniteClient +from cognite.client._api.workflows import WorkflowVersionIdentifier +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.workflows import ( + WorkflowExecution, + WorkflowExecutionDetailed, + WorkflowExecutionList, + WorkflowStatus, +) +from cognite.client.utils._async_helpers import run_sync + +if TYPE_CHECKING: + from cognite.client._api.workflows import WorkflowVersionIdentifier +from cognite.client.data_classes import ClientCredentials + + +class SyncWorkflowExecutionAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def retrieve_detailed(self, id: str) -> WorkflowExecutionDetailed | None: + """ + `Retrieve a workflow execution with detailed information. `_ + + Args: + id (str): The server-generated id of the workflow execution. + + Returns: + WorkflowExecutionDetailed | None: The requested workflow execution if it exists, None otherwise. + + Examples: + + Retrieve workflow execution with id '000560bc-9080-4286-b242-a27bb4819253': + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.workflows.executions.retrieve_detailed("000560bc-9080-4286-b242-a27bb4819253") + + List workflow executions and retrieve detailed information for the first one: + + >>> res = client.workflows.executions.list() + >>> res = client.workflows.executions.retrieve_detailed(res[0].id) + """ + return run_sync(self.__async_client.workflows.executions.retrieve_detailed(id=id)) + + def run( + self, + workflow_external_id: str, + version: str, + input: dict | None = None, + metadata: dict | None = None, + client_credentials: ClientCredentials | None = None, + nonce: str | None = None, + ) -> WorkflowExecution: + """ + `Run a workflow execution. `_ + + Args: + workflow_external_id (str): External id of the workflow. + version (str): Version of the workflow. + input (dict | None): The input to the workflow execution. This will be available for tasks that have specified it as an input with the string "${workflow.input}" See tip below for more information. + metadata (dict | None): Application specific metadata. Keys have a maximum length of 32 characters, values a maximum of 255, and there can be a maximum of 10 key-value pairs. 
+ client_credentials (ClientCredentials | None): Specific credentials that should be used to trigger the workflow execution. When passed will take precedence over the current credentials. + nonce (str | None): The nonce to use to bind the session. If not provided, a new session will be created using the given 'client_credentials'. If this is not given, the current credentials will be used. + + Tip: + The workflow input can be available in the workflow tasks. For example, if you have a Task with + function parameters then you can specify it as follows + + >>> from cognite.client.data_classes import WorkflowTask, FunctionTaskParameters + >>> task = WorkflowTask( + ... external_id="my_workflow-task1", + ... parameters=FunctionTaskParameters( + ... external_id="cdf_deployed_function:my_function", + ... data={"workflow_data": "${workflow.input}"})) + + Tip: + You can create a session via the Sessions API, using the client.iam.session.create() method. + + Returns: + WorkflowExecution: The created workflow execution. + + Examples: + + Trigger a workflow execution for the workflow "foo", version 1: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.workflows.executions.run("foo", "1") + + Trigger a workflow execution with input data: + + >>> res = client.workflows.executions.run("foo", "1", input={"a": 1, "b": 2}) + + Trigger a workflow execution using a specific set of client credentials (i.e. not your current credentials): + + >>> import os + >>> from cognite.client.data_classes import ClientCredentials + >>> credentials = ClientCredentials("my-client-id", os.environ["MY_CLIENT_SECRET"]) + >>> res = client.workflows.executions.run("foo", "1", client_credentials=credentials) + """ + return run_sync( + self.__async_client.workflows.executions.run( + workflow_external_id=workflow_external_id, + version=version, + input=input, + metadata=metadata, + client_credentials=client_credentials, + nonce=nonce, + ) + ) + + def list( + self, + workflow_version_ids: WorkflowVersionIdentifier | MutableSequence[WorkflowVersionIdentifier] | None = None, + created_time_start: int | None = None, + created_time_end: int | None = None, + statuses: WorkflowStatus | MutableSequence[WorkflowStatus] | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + ) -> WorkflowExecutionList: + """ + `List workflow executions in the project. `_ + + Args: + workflow_version_ids (WorkflowVersionIdentifier | MutableSequence[WorkflowVersionIdentifier] | None): Workflow version id or list of workflow version ids to filter on. + created_time_start (int | None): Filter out executions that was created before this time. Time is in milliseconds since epoch. + created_time_end (int | None): Filter out executions that was created after this time. Time is in milliseconds since epoch. + statuses (WorkflowStatus | MutableSequence[WorkflowStatus] | None): Workflow status or list of workflow statuses to filter on. + limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + + Returns: + WorkflowExecutionList: The requested workflow executions. 
+ + Examples: + + Get all workflow executions for workflows 'my_workflow' version '1': + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.workflows.executions.list(("my_workflow", "1")) + + Get all workflow executions from the last 24 hours: + + >>> from cognite.client.utils import timestamp_to_ms + >>> res = client.workflows.executions.list( + ... created_time_start=timestamp_to_ms("1d-ago")) + """ + return run_sync( + self.__async_client.workflows.executions.list( + workflow_version_ids=workflow_version_ids, + created_time_start=created_time_start, + created_time_end=created_time_end, + statuses=statuses, + limit=limit, + ) + ) + + def cancel(self, id: str, reason: str | None) -> WorkflowExecution: + """ + `Cancel a workflow execution. `_ + + Note: + Cancelling a workflow will immediately cancel the `in_progress` tasks, but not their spawned work in + other services (like transformations and functions). + + Args: + id (str): The server-generated id of the workflow execution. + reason (str | None): The reason for the cancellation, this will be put within the execution's `reasonForIncompletion` field. It is defaulted to 'cancelled' if not provided. + + + Returns: + WorkflowExecution: The canceled workflow execution. + + Examples: + + Trigger a workflow execution for the workflow "foo", version 1 and cancel it: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.workflows.executions.run("foo", "1") + >>> client.workflows.executions.cancel(id="foo", reason="test cancellation") + """ + return run_sync(self.__async_client.workflows.executions.cancel(id=id, reason=reason)) + + def retry(self, id: str, client_credentials: ClientCredentials | None = None) -> WorkflowExecution: + """ + `Retry a workflow execution. `_ + + Args: + id (str): The server-generated id of the workflow execution. + client_credentials (ClientCredentials | None): Specific credentials that should be used to retry the workflow execution. When passed will take precedence over the current credentials. + + Returns: + WorkflowExecution: The retried workflow execution. + + Examples: + Retry a workflow execution that has been cancelled or failed: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.workflows.executions.run("foo", "1") + >>> client.workflows.executions.cancel(id=res.id, reason="test cancellation") + >>> client.workflows.executions.retry(res.id) + """ + return run_sync(self.__async_client.workflows.executions.retry(id=id, client_credentials=client_credentials)) diff --git a/cognite/client/_sync_api/workflows/tasks.py b/cognite/client/_sync_api/workflows/tasks.py new file mode 100644 index 0000000000..800ec2284c --- /dev/null +++ b/cognite/client/_sync_api/workflows/tasks.py @@ -0,0 +1,62 @@ +""" +=============================================================================== +7a6de6f53b84295cc837107d88421ba5 +This file is auto-generated from the Async API modules, - do not edit manually! 
+=============================================================================== +""" + +from __future__ import annotations + +from typing import Literal + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.workflows import WorkflowTaskExecution +from cognite.client.utils._async_helpers import run_sync + + +class SyncWorkflowTaskAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def update( + self, + task_id: str, + status: Literal["completed", "failed", "failed_with_terminal_error"], + output: dict | None = None, + ) -> WorkflowTaskExecution: + """ + `Update status of async task. `_ + + For tasks that has been marked with 'is_async = True', the status must be updated by calling this endpoint with either 'completed', 'failed' or 'failed_with_terminal_error'. + + Args: + task_id (str): The server-generated id of the task. + status (Literal['completed', 'failed', 'failed_with_terminal_error']): The new status of the task. Must be either 'completed', 'failed' or 'failed_with_terminal_error'. + output (dict | None): The output of the task. This will be available for tasks that has specified it as an output with the string "${.output}" + + Returns: + WorkflowTaskExecution: The updated task execution. + + Examples: + + Update task with id '000560bc-9080-4286-b242-a27bb4819253' to status 'completed': + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.workflows.tasks.update("000560bc-9080-4286-b242-a27bb4819253", "completed") + + Update task with id '000560bc-9080-4286-b242-a27bb4819253' to status 'failed' with output '{"a": 1, "b": 2}': + + >>> res = client.workflows.tasks.update("000560bc-9080-4286-b242-a27bb4819253", "failed", output={"a": 1, "b": 2}) + + Trigger workflow, retrieve detailed task execution and update status of the second task (assumed to be async) to 'completed': + + >>> res = client.workflows.executions.run("my workflow", "1") + >>> res = client.workflows.executions.retrieve_detailed(res.id) + >>> res = client.workflows.tasks.update(res.tasks[1].id, "completed") + """ + return run_sync(self.__async_client.workflows.tasks.update(task_id=task_id, status=status, output=output)) diff --git a/cognite/client/_sync_api/workflows/triggers.py b/cognite/client/_sync_api/workflows/triggers.py new file mode 100644 index 0000000000..d09316dc0b --- /dev/null +++ b/cognite/client/_sync_api/workflows/triggers.py @@ -0,0 +1,159 @@ +""" +=============================================================================== +f35a15982cf08e1dfcccc3a7bdcd4ac5 +This file is auto-generated from the Async API modules, - do not edit manually! 
+=============================================================================== +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from cognite.client import AsyncCogniteClient +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.workflows import ( + WorkflowTrigger, + WorkflowTriggerList, + WorkflowTriggerRunList, + WorkflowTriggerUpsert, +) +from cognite.client.utils._async_helpers import run_sync +from cognite.client.utils.useful_types import SequenceNotStr + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient +from cognite.client.data_classes import ClientCredentials + + +class SyncWorkflowTriggerAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + def upsert( + self, workflow_trigger: WorkflowTriggerUpsert, client_credentials: ClientCredentials | dict | None = None + ) -> WorkflowTrigger: + """ + `Create or update a trigger for a workflow. `_ + + Args: + workflow_trigger (WorkflowTriggerUpsert): The workflow trigger specification. + client_credentials (ClientCredentials | dict | None): Specific credentials that should be used to trigger the workflow execution. When passed will take precedence over the current credentials. + + Returns: + WorkflowTrigger: The created or updated workflow trigger specification. + + Examples: + + Create or update a scheduled trigger for a workflow: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.workflows import WorkflowTriggerUpsert, WorkflowScheduledTriggerRule + >>> from zoneinfo import ZoneInfo + >>> client = CogniteClient() + >>> client.workflows.triggers.upsert( + ... WorkflowTriggerUpsert( + ... external_id="my_trigger", + ... trigger_rule=WorkflowScheduledTriggerRule(cron_expression="0 0 * * *", timezone=ZoneInfo("UTC")), + ... workflow_external_id="my_workflow", + ... workflow_version="1", + ... input={"a": 1, "b": 2}, + ... metadata={"key": "value"}, + ... ) + ... ) + + Create or update a data modeling trigger for a workflow: + + >>> from cognite.client.data_classes.workflows import WorkflowDataModelingTriggerRule, WorkflowTriggerDataModelingQuery + >>> from cognite.client.data_classes.data_modeling.query import NodeResultSetExpression, Select, SourceSelector + >>> from cognite.client.data_classes.data_modeling import ViewId + >>> from cognite.client.data_classes.filters import Equals + >>> view_id = ViewId("my_space_id", "view_external_id", "v1") + >>> client.workflows.triggers.upsert( + ... WorkflowTriggerUpsert( + ... external_id="my_trigger", + ... trigger_rule=WorkflowDataModelingTriggerRule( + ... data_modeling_query=WorkflowTriggerDataModelingQuery( + ... with_={"timeseries": NodeResultSetExpression(filter=Equals(view_id.as_property_ref("name"), value="my_name"))}, + ... select={"timeseries": Select([SourceSelector(view_id, ["name"])])}, + ... ), + ... batch_size=500, + ... batch_timeout=300, + ... ), + ... workflow_external_id="my_workflow", + ... workflow_version="1", + ... ) + ... ) + """ + return run_sync( + self.__async_client.workflows.triggers.upsert( + workflow_trigger=workflow_trigger, client_credentials=client_credentials + ) + ) + + def delete(self, external_id: str | SequenceNotStr[str]) -> None: + """ + `Delete one or more triggers for a workflow. 
`_ + + Args: + external_id (str | SequenceNotStr[str]): The external id(s) of the trigger(s) to delete. + + Examples: + + Delete a trigger with external id 'my_trigger': + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.workflows.triggers.delete("my_trigger") + + Delete a list of triggers: + + >>> client.workflows.triggers.delete(["my_trigger", "another_trigger"]) + """ + return run_sync(self.__async_client.workflows.triggers.delete(external_id=external_id)) + + def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> WorkflowTriggerList: + """ + `List the workflow triggers. `_ + + Args: + limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + + Returns: + WorkflowTriggerList: The list of triggers. + + Examples: + + List all triggers: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.workflows.triggers.list(limit=None) + """ + return run_sync(self.__async_client.workflows.triggers.list(limit=limit)) + + def list_runs(self, external_id: str, limit: int | None = DEFAULT_LIMIT_READ) -> WorkflowTriggerRunList: + """ + `List the history of runs for a trigger. `_ + + Args: + external_id (str): The external id of the trigger to list runs for. + limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + + Returns: + WorkflowTriggerRunList: The requested trigger runs. + + Examples: + + Get all runs for a trigger with external id 'my_trigger': + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.workflows.triggers.list_runs("my_trigger", limit=None) + """ + return run_sync(self.__async_client.workflows.triggers.list_runs(external_id=external_id, limit=limit)) diff --git a/cognite/client/_sync_api/workflows/versions.py b/cognite/client/_sync_api/workflows/versions.py new file mode 100644 index 0000000000..cf475e15ca --- /dev/null +++ b/cognite/client/_sync_api/workflows/versions.py @@ -0,0 +1,234 @@ +""" +=============================================================================== +fc7deade2d4c65533947f757d573caca +This file is auto-generated from the Async API modules, - do not edit manually! 
+=============================================================================== +""" + +from __future__ import annotations + +from collections.abc import Iterator, MutableSequence, Sequence +from typing import TYPE_CHECKING, Literal, overload + +from cognite.client import AsyncCogniteClient +from cognite.client._api.workflows import WorkflowIdentifier, WorkflowVersionIdentifier +from cognite.client._constants import DEFAULT_LIMIT_READ +from cognite.client._sync_api_client import SyncAPIClient +from cognite.client.data_classes.workflows import ( + WorkflowIds, + WorkflowVersion, + WorkflowVersionId, + WorkflowVersionList, + WorkflowVersionUpsert, +) +from cognite.client.utils._async_helpers import SyncIterator, run_sync + +if TYPE_CHECKING: + from cognite.client import AsyncCogniteClient + + +class SyncWorkflowVersionAPI(SyncAPIClient): + """Auto-generated, do not modify manually.""" + + def __init__(self, async_client: AsyncCogniteClient): + self.__async_client = async_client + + @overload + def __call__(self, chunk_size: None = None) -> Iterator[WorkflowVersion]: ... + + @overload + def __call__(self, chunk_size: int) -> Iterator[WorkflowVersionList]: ... + + def __call__( + self, + chunk_size: int | None = None, + workflow_version_ids: WorkflowIdentifier | MutableSequence[WorkflowIdentifier] | None = None, + limit: int | None = None, + ) -> Iterator[WorkflowVersion | WorkflowVersionList]: + """ + Iterate over workflow versions + + Args: + chunk_size (int | None): The number of workflow versions to return in each chunk. Defaults to yielding one workflow version at a time. + workflow_version_ids (WorkflowIdentifier | MutableSequence[WorkflowIdentifier] | None): Workflow version id or list of workflow version ids to filter on. + limit (int | None): Maximum number of workflow versions to return. Defaults to returning all. + + Yields: + WorkflowVersion | WorkflowVersionList: Yields WorkflowVersion one by one if chunk_size is None, otherwise yields WorkflowVersionList objects. + """ + yield from SyncIterator( + self.__async_client.workflows.versions( + chunk_size=chunk_size, workflow_version_ids=workflow_version_ids, limit=limit + ) + ) + + @overload + def upsert(self, version: WorkflowVersionUpsert) -> WorkflowVersion: ... + + @overload + def upsert(self, version: Sequence[WorkflowVersionUpsert]) -> WorkflowVersionList: ... + + def upsert( + self, version: WorkflowVersionUpsert | Sequence[WorkflowVersionUpsert], mode: Literal["replace"] = "replace" + ) -> WorkflowVersion | WorkflowVersionList: + """ + `Create one or more workflow version(s). `_ + + Note this is an upsert endpoint, so workflow versions that already exist will be updated, and new ones will be created. + + Args: + version (WorkflowVersionUpsert | Sequence[WorkflowVersionUpsert]): The workflow version(s) to upsert. + mode (Literal['replace']): This is not an option for the API, but is included here to document that the upserts are always done in replace mode. + + Returns: + WorkflowVersion | WorkflowVersionList: The created workflow version(s). + + Examples: + + Create one workflow version with a single Function task: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import ( + ... WorkflowVersionUpsert, WorkflowDefinitionUpsert, + ... WorkflowTask, FunctionTaskParameters, + ... ) + >>> client = CogniteClient() + >>> function_task = WorkflowTask( + ... external_id="my_workflow-task1", + ... parameters=FunctionTaskParameters( + ... external_id="my_fn_xid", + ... data={"a": 1, "b": 2}, + ... 
), + ... ) + >>> new_version = WorkflowVersionUpsert( + ... workflow_external_id="my_workflow", + ... version="1", + ... workflow_definition=WorkflowDefinitionUpsert( + ... tasks=[function_task], + ... description="This workflow has one step", + ... ), + ... ) + >>> res = client.workflows.versions.upsert(new_version) + """ + return run_sync(self.__async_client.workflows.versions.upsert(version=version, mode=mode)) + + def delete( + self, + workflow_version_id: WorkflowVersionIdentifier + | MutableSequence[WorkflowVersionId] + | MutableSequence[tuple[str, str]], + ignore_unknown_ids: bool = False, + ) -> None: + """ + `Delete a workflow version(s). `_ + + Args: + workflow_version_id (WorkflowVersionIdentifier | MutableSequence[WorkflowVersionId] | MutableSequence[tuple[str, str]]): Workflow version id or list of workflow version ids to delete. + ignore_unknown_ids (bool): Ignore external ids that are not found rather than throw an exception. + + Examples: + + Delete workflow version "1" of workflow "my workflow" specified by using a tuple: + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> client.workflows.versions.delete(("my workflow", "1")) + + Delete workflow version "1" of workflow "my workflow" and workflow version "2" of workflow "my workflow 2" using the WorkflowVersionId class: + + >>> from cognite.client.data_classes import WorkflowVersionId + >>> client.workflows.versions.delete([WorkflowVersionId("my workflow", "1"), WorkflowVersionId("my workflow 2", "2")]) + """ + return run_sync( + self.__async_client.workflows.versions.delete( + workflow_version_id=workflow_version_id, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + @overload + def retrieve(self, workflow_external_id: WorkflowVersionIdentifier) -> WorkflowVersion | None: ... + + @overload + def retrieve( + self, workflow_external_id: Sequence[WorkflowVersionIdentifier] | WorkflowIds + ) -> WorkflowVersionList: ... + + def retrieve( + self, + workflow_external_id: WorkflowVersionIdentifier | Sequence[WorkflowVersionIdentifier] | WorkflowIds, + *, + ignore_unknown_ids: bool = False, + ) -> WorkflowVersion | WorkflowVersionList | None: + """ + `Retrieve a workflow version. `_ + + Args: + workflow_external_id (WorkflowVersionIdentifier | Sequence[WorkflowVersionIdentifier] | WorkflowIds): External id of the workflow. + ignore_unknown_ids (bool): When requesting multiple, whether to ignore external IDs that are not found rather than throwing an exception. + + Returns: + WorkflowVersion | WorkflowVersionList | None: If a single identifier is specified: the requested workflow version, or None if it does not exist. If several ids are specified: the requested workflow versions. + + Examples: + + Retrieve workflow version 'v1' of workflow "my_workflow": + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes import WorkflowVersionId + >>> client = CogniteClient() + >>> res = client.workflows.versions.retrieve(WorkflowVersionId("my_workflow", "v1")) + + Retrieve multiple workflow versions and ignore unknown: + + >>> res = client.workflows.versions.retrieve( + ... [WorkflowVersionId("my_workflow", "v1"), WorkflowVersionId("other", "v3.2")], + ... ignore_unknown_ids=True, + ... 
) + >>> # A sequence of tuples is also accepted: + >>> res = client.workflows.versions.retrieve([("my_workflow", "v1"), ("other", "v3.2")]) + """ + return run_sync( + self.__async_client.workflows.versions.retrieve( + workflow_external_id=workflow_external_id, ignore_unknown_ids=ignore_unknown_ids + ) + ) + + def list( + self, + workflow_version_ids: WorkflowIdentifier | MutableSequence[WorkflowIdentifier] | None = None, + limit: int | None = DEFAULT_LIMIT_READ, + ) -> WorkflowVersionList: + """ + `List workflow versions in the project `_ + + Args: + workflow_version_ids (WorkflowIdentifier | MutableSequence[WorkflowIdentifier] | None): Workflow version id or list of workflow version ids to filter on. + limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None + + Returns: + WorkflowVersionList: The requested workflow versions. + + Examples: + + Get all workflow version for workflows 'my_workflow' and 'my_workflow_2': + + >>> from cognite.client import CogniteClient, AsyncCogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.workflows.versions.list(["my_workflow", "my_workflow_2"]) + + Get all workflow versions for workflows 'my_workflow' and 'my_workflow_2' using the WorkflowVersionId class: + + >>> from cognite.client.data_classes import WorkflowVersionId + >>> res = client.workflows.versions.list( + ... [WorkflowVersionId("my_workflow"), WorkflowVersionId("my_workflow_2")]) + + Get all workflow versions for workflows 'my_workflow' version '1' and 'my_workflow_2' version '2' using tuples: + + >>> res = client.workflows.versions.list( + ... [("my_workflow", "1"), ("my_workflow_2", "2")]) + """ + return run_sync( + self.__async_client.workflows.versions.list(workflow_version_ids=workflow_version_ids, limit=limit) + ) diff --git a/cognite/client/_sync_api_client.py b/cognite/client/_sync_api_client.py new file mode 100644 index 0000000000..e8e0e8700a --- /dev/null +++ b/cognite/client/_sync_api_client.py @@ -0,0 +1,7 @@ +from abc import ABC + + +class SyncAPIClient(ABC): + """Base class for all synchronous API clients. No real use besides easy isinstance checks in e.g. testing.""" + + pass diff --git a/cognite/client/_sync_cognite_client.py b/cognite/client/_sync_cognite_client.py new file mode 100644 index 0000000000..4436a9c4df --- /dev/null +++ b/cognite/client/_sync_cognite_client.py @@ -0,0 +1,86 @@ +""" +=================================================== +This file is auto-generated - do not edit manually! 
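+Generated by scripts/sync_client_codegen/main.py: every public API attribute below is a thin
+synchronous wrapper that delegates to the corresponding AsyncCogniteClient API.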
+=================================================== +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from cognite.client import AsyncCogniteClient +from cognite.client._sync_api.agents.agents import SyncAgentsAPI +from cognite.client._sync_api.ai import SyncAIAPI +from cognite.client._sync_api.annotations import SyncAnnotationsAPI +from cognite.client._sync_api.assets import SyncAssetsAPI +from cognite.client._sync_api.data_modeling import SyncDataModelingAPI +from cognite.client._sync_api.data_sets import SyncDataSetsAPI +from cognite.client._sync_api.diagrams import SyncDiagramsAPI +from cognite.client._sync_api.documents import SyncDocumentsAPI +from cognite.client._sync_api.entity_matching import SyncEntityMatchingAPI +from cognite.client._sync_api.events import SyncEventsAPI +from cognite.client._sync_api.extractionpipelines import SyncExtractionPipelinesAPI +from cognite.client._sync_api.files import SyncFilesAPI +from cognite.client._sync_api.functions import SyncFunctionsAPI +from cognite.client._sync_api.geospatial import SyncGeospatialAPI +from cognite.client._sync_api.hosted_extractors import SyncHostedExtractorsAPI +from cognite.client._sync_api.iam import SyncIAMAPI +from cognite.client._sync_api.labels import SyncLabelsAPI +from cognite.client._sync_api.postgres_gateway import SyncPostgresGatewaysAPI +from cognite.client._sync_api.raw import SyncRawAPI +from cognite.client._sync_api.relationships import SyncRelationshipsAPI +from cognite.client._sync_api.sequences import SyncSequencesAPI +from cognite.client._sync_api.simulators import SyncSimulatorsAPI +from cognite.client._sync_api.three_d import Sync3DAPI +from cognite.client._sync_api.time_series import SyncTimeSeriesAPI +from cognite.client._sync_api.transformations import SyncTransformationsAPI +from cognite.client._sync_api.units import SyncUnitAPI +from cognite.client._sync_api.vision import SyncVisionAPI +from cognite.client._sync_api.workflows import SyncWorkflowAPI + +if TYPE_CHECKING: + from cognite.client import ClientConfig + + +class CogniteClient: + """Main entrypoint into the Cognite Python SDK. + + All Cognite Data Fusion APIs are accessible through this synchronous client. + For the asynchronous client, see :class:`~cognite.client._cognite_client.AsyncCogniteClient`. + + Args: + config (ClientConfig | None): The configuration for this client. + """ + + def __init__(self, config: ClientConfig | None = None) -> None: + self.__async_client = async_client = AsyncCogniteClient(config) + + # Initialize all sync. 
APIs: + self.ai = SyncAIAPI(async_client) + self.agents = SyncAgentsAPI(async_client) + self.annotations = SyncAnnotationsAPI(async_client) + self.assets = SyncAssetsAPI(async_client) + self.data_modeling = SyncDataModelingAPI(async_client) + self.data_sets = SyncDataSetsAPI(async_client) + self.diagrams = SyncDiagramsAPI(async_client) + self.documents = SyncDocumentsAPI(async_client) + self.entity_matching = SyncEntityMatchingAPI(async_client) + self.events = SyncEventsAPI(async_client) + self.extraction_pipelines = SyncExtractionPipelinesAPI(async_client) + self.files = SyncFilesAPI(async_client) + self.functions = SyncFunctionsAPI(async_client) + self.geospatial = SyncGeospatialAPI(async_client) + self.hosted_extractors = SyncHostedExtractorsAPI(async_client) + self.iam = SyncIAMAPI(async_client) + self.labels = SyncLabelsAPI(async_client) + self.postgres_gateway = SyncPostgresGatewaysAPI(async_client) + self.raw = SyncRawAPI(async_client) + self.relationships = SyncRelationshipsAPI(async_client) + self.sequences = SyncSequencesAPI(async_client) + self.simulators = SyncSimulatorsAPI(async_client) + self.three_d = Sync3DAPI(async_client) + self.time_series = SyncTimeSeriesAPI(async_client) + self.transformations = SyncTransformationsAPI(async_client) + self.units = SyncUnitAPI(async_client) + self.vision = SyncVisionAPI(async_client) + self.workflows = SyncWorkflowAPI(async_client) diff --git a/scripts/sync_client_codegen/main.py b/scripts/sync_client_codegen/main.py index e67e5fdd39..c97a15af21 100644 --- a/scripts/sync_client_codegen/main.py +++ b/scripts/sync_client_codegen/main.py @@ -30,13 +30,9 @@ from cognite.client.credentials import Token # noqa: E402 EIGHT_SPACES = " " * 8 -SKIP_API_NAMES = { - "PrincipalsAPI", -} KNOWN_FILES_SKIP_LIST = { Path("cognite/client/_api/datapoint_tasks.py"), Path("cognite/client/_api/functions/utils.py"), - Path("cognite/client/_api/org_apis/principals.py"), # TODO? } MAYBE_IMPORTS = ( "SortSpec: TypeAlias", @@ -66,15 +62,13 @@ from cognite.client._sync_api_client import SyncAPIClient from cognite.client.utils._async_helpers import SyncIterator, run_sync from cognite.client.utils._concurrency import ConcurrencySettings -from typing import Any, Iterator, TypeVar, TYPE_CHECKING, overload +from typing import Any, Iterator, TYPE_CHECKING, overload from collections.abc import Coroutine if TYPE_CHECKING: import pandas as pd {type_checking_imports} -_T = TypeVar("_T") - class Sync{class_name}(SyncAPIClient): """Auto-generated, do not modify manually.""" @@ -98,7 +92,7 @@ def get_api_class_by_attribute(cls_: object, parent_name: tuple[str, ...] 
= ()) def find_api_class_name(source_code: str, file: Path, raise_on_missing: bool = True) -> str | None: - match re.findall(r"class (\w+API)\(APIClient\):", source_code): + match re.findall(r"class (\w+API)\((?:Org)?APIClient\):", source_code): case []: return None case [cls_name]: @@ -164,12 +158,10 @@ def get_module_level_type_checking_imports(tree: ast.Module) -> str: continue match node.test: - # check for "if TYPE_CHECKING:" case ast.Name(id="TYPE_CHECKING"): - pass - # or "if typing.TYPE_CHECKING:" + pass # we found: `if TYPE_CHECKING:` case ast.Attribute(value=ast.Name(id="typing"), attr="TYPE_CHECKING"): - pass + pass # we found: `if typing.TYPE_CHECKING:` case _: continue @@ -232,7 +224,6 @@ def find_self_assignments(class_node: ast.ClassDef) -> tuple[list[str], list[str and t.value.id == "self" and isinstance(stmt.value, ast.Call) and stmt.value.func.id.endswith("API") - and stmt.value.func.id not in SKIP_API_NAMES ): names.append(cls_name := foolish_cls_name_rewrite(stmt.value.func.id)) nested_apis.append( @@ -374,7 +365,7 @@ def fix_imports_for_sync_apis(all_imports: str, lst_of_api_names: list[str]) -> if not lst_of_api_names: return all_imports - api_name_options = "|".join(map(re.escape, lst_of_api_names)) # escape is prob overkill + api_name_options = "|".join(map(inverse_foolish_cls_name_rewrite, lst_of_api_names)) pattern = re.compile(rf"^from cognite\.client\._api(\..*? import\s+)(.*?)({api_name_options})(.*)$", re.MULTILINE) def replacer(match: re.Match) -> str: @@ -391,7 +382,7 @@ def replacer(match: re.Match) -> str: "to:\n" "from cognite.client._sync_api.time_series import SyncDatapointsAPI\n" ) - return f"from cognite.client._sync_api{module_and_import}Sync{matched_api_name}" + return f"from cognite.client._sync_api{module_and_import}Sync{foolish_cls_name_rewrite(matched_api_name)}" # Perform the substitution in a single pass: return pattern.sub(replacer, all_imports) @@ -518,8 +509,9 @@ def create_sync_cognite_client( content = COGNITE_CLIENT_TEMPLATE.format( file_hash="TODO", all_api_imports="\n".join(all_imports), nested_apis_init=" ".join(all_apis) ) - SYNC_CLIENT_PATH.write_text(content) - print(f"- Generated sync CogniteClient in: '{SYNC_CLIENT_PATH}' ✅") + if content != SYNC_CLIENT_PATH.read_text(): + SYNC_CLIENT_PATH.write_text(content) + print(f"- Updated sync CogniteClient: '{SYNC_CLIENT_PATH}' ✅") def clean_up_files(all_expected_files: list[Path]) -> None: @@ -549,23 +541,26 @@ def setup_async_mock_client() -> AsyncCogniteClient: # Run convert on all AsyncSomethingAPIs: all_expected_files = [] - files_needing_lint = [] + files_needing_lint = [SYNC_CLIENT_PATH] + something_failed = False for read_file in filter(is_pyfile, list_apis()): try: write_file, was_modified = main(read_file, dot_path_lookup) if write_file is not None: all_expected_files.append(write_file) - if was_modified: - files_needing_lint.append(write_file) + if was_modified: + files_needing_lint.append(write_file) except Exception as e: print(f"- Failed to generate sync client code for: '{read_file}' ❌ {e}") + something_failed = True continue + # Gather all sync APIs into the CogniteClient class itself: + create_sync_cognite_client(dot_path_lookup, file_path_lookup) + # Invoke run via pre-commit (subprocess) as it doesn't have a python API interface: run_ruff(files_needing_lint) # Clean up files that are no longer needed: - clean_up_files(all_expected_files) - - # Finally, gather all sync APIs into the CogniteClient class itself: - create_sync_cognite_client(dot_path_lookup, 
file_path_lookup) + if not something_failed: + clean_up_files(all_expected_files) From 8d5b001375f3c1f28e5c3931a37f6caf6cc3d758 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5kon=20V=2E=20Treider?= Date: Wed, 15 Oct 2025 10:52:32 +0200 Subject: [PATCH 10/12] update API-conversion script to include sync methods + overloads --- scripts/sync_client_codegen/main.py | 87 +++++++++++++++++++---------- 1 file changed, 58 insertions(+), 29 deletions(-) diff --git a/scripts/sync_client_codegen/main.py b/scripts/sync_client_codegen/main.py index c97a15af21..6fa02bfe69 100644 --- a/scripts/sync_client_codegen/main.py +++ b/scripts/sync_client_codegen/main.py @@ -29,6 +29,7 @@ from cognite.client.config import ClientConfig, global_config # noqa: E402 from cognite.client.credentials import Token # noqa: E402 +FOUR_SPACES = " " * 4 EIGHT_SPACES = " " * 8 KNOWN_FILES_SKIP_LIST = { Path("cognite/client/_api/datapoint_tasks.py"), @@ -47,6 +48,20 @@ ASYNC_API_DIR = Path("cognite/client/_api") SYNC_API_DIR = Path("cognite/client/_sync_api") +# Why is dunder call in both "to keep" lists?! Story time: +# There's more to it than just an isinstance check: 'async def __call__' does not return +# a coroutine, but an async generator. This in turn means that mypy forces the overloads +# to NOT be 'async def' but just 'def'. Wait what?! I for sure had to Google it. So we need +# to treat e.g. __call__ as a special case in order to not lose all that typing goodies... +SYNC_METHODS_TO_KEEP = { + "compare_capabilities", + "__call__", +} +ASYNC_METHODS_TO_KEEP = { + "_unsafely_wipe_and_regenerate_dml", + "__call__", +} + # Template for the generated sync client code: # - we rely on other tools to clean up imports SYNC_API_TEMPLATE = '''\ @@ -73,7 +88,7 @@ class Sync{class_name}(SyncAPIClient): """Auto-generated, do not modify manually.""" - def __init__(self, async_client: AsyncCogniteClient): + def __init__(self, async_client: AsyncCogniteClient) -> None: self.__async_client = async_client {nested_apis_init} @@ -243,11 +258,13 @@ def inverse_foolish_cls_name_rewrite(class_name: str) -> str: def method_should_be_converted(node: ast.AST) -> bool: - # There's more to it than just an isinstance check: 'async def __call__' does not return - # a coroutine, but an async generator. This in turn means that mypy forces the overloads - # to NOT be 'async def' but just 'def'. Wait what?! I for sure had to Google it. So we need - # to treat __call__ as a special case in order to not lose all that typing goodies... 
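+    # Illustrative sketch of the asymmetry described above (the names are made up):
+    #
+    #     @overload
+    #     def __call__(self, chunk_size: None = None) -> AsyncIterator[Thing]: ...
+    #     @overload
+    #     def __call__(self, chunk_size: int) -> AsyncIterator[ThingList]: ...
+    #     async def __call__(self, chunk_size: int | None = None) -> AsyncIterator[Thing | ThingList]:
+    #         yield ...
+    #
+    # The implementation node is an ast.AsyncFunctionDef while its overloads are plain
+    # ast.FunctionDef nodes, so both node types must be matched below.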
- return isinstance(node, ast.AsyncFunctionDef) or getattr(node, "name", None) == "__call__" + match node: + case ast.AsyncFunctionDef(name=n) if not n.startswith("_") or n in ASYNC_METHODS_TO_KEEP: + return True + case ast.FunctionDef(name=n) if n in SYNC_METHODS_TO_KEEP: + return True + case _: + return False def generate_sync_client_code( @@ -278,58 +295,71 @@ def generate_sync_client_code( methods_by_name.setdefault(method_node.name, []).append(method_node) for name, method_nodes in methods_by_name.items(): - if name.startswith("_") and name != "__call__": - continue - - # The last definition is the implementation, the rest are overloads + # The last definition is the function implementation, the rest are overloads: overloads = method_nodes[:-1] - implementation = method_nodes[-1] + func = method_nodes[-1] for overload_node in overloads: - sync_def = "@overload\n def {name}({args}) -> {return_type}: ...".format( + sync_def = "{indent}@overload\n{indent}def {name}({args}) -> {return_type}: ...".format( + indent=FOUR_SPACES, name=name, args=ast.unparse(overload_node.args), return_type=ast.unparse(overload_node.returns).replace("AsyncIterator", "Iterator"), ) generated_methods.append(sync_def) - docstring = ast.get_docstring(implementation) + docstring = ast.get_docstring(func) # Create the list of arguments to pass to the async call call_parts = [] # 1. Handle positional-only arguments (e.g., func(a, /)) - call_parts.extend([arg.arg for arg in implementation.args.posonlyargs]) + call_parts.extend([arg.arg for arg in func.args.posonlyargs]) # 2. Handle regular arguments (can be pos or keyword) # We will pass these by keyword for safety. - regular_args = [f"{arg.arg}={arg.arg}" for arg in implementation.args.args if arg.arg != "self"] + regular_args = [f"{arg.arg}={arg.arg}" for arg in func.args.args if arg.arg != "self"] call_parts.extend(regular_args) # 3. Handle variadic positional arguments (*args) - if implementation.args.vararg: - call_parts.append(f"*{implementation.args.vararg.arg}") + if func.args.vararg: + call_parts.append(f"*{func.args.vararg.arg}") # 4. Handle keyword-only arguments (e.g., func(*, a)) - kw_only_args = [f"{arg.arg}={arg.arg}" for arg in implementation.args.kwonlyargs] + kw_only_args = [f"{arg.arg}={arg.arg}" for arg in func.args.kwonlyargs] call_parts.extend(kw_only_args) # 5. Handle variadic keyword arguments (**kwargs) - if implementation.args.kwarg: - call_parts.append(f"**{implementation.args.kwarg.arg}") + if func.args.kwarg: + call_parts.append(f"**{func.args.kwarg.arg}") # Check return type for AsyncIterator - return_type_str = ast.unparse(implementation.returns) + return_type_str = ast.unparse(func.returns) is_iterator = "AsyncIterator" in return_type_str + is_async_fn = isinstance(func, ast.AsyncFunctionDef) sync_return_type = return_type_str.replace("AsyncIterator", "Iterator") - method_body = "" + maybe_name = "" if name == "__call__" else f".{name}" + nested_client_call = f"self.__async_client.{dotted_path}{maybe_name}({', '.join(call_parts)})" if is_iterator: - # Skip name here (__call__): - method_body = f"yield from SyncIterator(self.__async_client.{dotted_path}({', '.join(call_parts)}))" + # We add type ignore because mypy fail at unions of coroutines... 
(pyright on the other hand) + method_body = f"yield from SyncIterator({nested_client_call}) # type: ignore [misc]" + elif is_async_fn: + method_body = f"return run_sync({nested_client_call})" else: - method_body = f"return run_sync(self.__async_client.{dotted_path}.{name}({', '.join(call_parts)}))" + method_body = f"return {nested_client_call}" + + # Decorators not typing.overload: + decorators = maybe_self = "" + if not is_async_fn: + for deco in func.decorator_list: + if deco.id == "staticmethod": + # Uhm.. what? Well, we delegate to self.__async_client... + maybe_self = "self, " + else: + decorators += f"{FOUR_SPACES}@{ast.unparse(deco)}\n" indented_docstring = "" if docstring: indented_docstring = f'{EIGHT_SPACES}"""\n{textwrap.indent(docstring, EIGHT_SPACES)}\n{EIGHT_SPACES}"""\n' - impl_def = ( - f"def {name}({ast.unparse(implementation.args)}) -> {sync_return_type}:\n" + + impl_def = decorators + ( + f"{FOUR_SPACES}def {name}({maybe_self}{ast.unparse(func.args)}) -> {sync_return_type}:\n" f"{indented_docstring}{EIGHT_SPACES}{method_body}" ) generated_methods.append(impl_def) @@ -350,8 +380,7 @@ def generate_sync_client_code( nested_apis_init="\n ".join(nested_apis), ) ) - + " " - + "\n\n ".join(generated_methods) + + "\n\n".join(generated_methods) + "\n" ) From f177f6a3c57ea90aabfc7f0c0bc9c484b2b39ec8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5kon=20V=2E=20Treider?= Date: Wed, 15 Oct 2025 10:53:25 +0200 Subject: [PATCH 11/12] run updated script to update sync APIs --- .../client/_sync_api/data_modeling/graphql.py | 15 ++++ .../data_modeling/space_statistics.py | 4 +- .../_sync_api/data_modeling/statistics.py | 4 +- cognite/client/_sync_api/datapoints.py | 2 +- cognite/client/_sync_api/entity_matching.py | 6 +- cognite/client/_sync_api/files.py | 4 +- cognite/client/_sync_api/geospatial.py | 2 +- cognite/client/_sync_api/iam/__init__.py | 70 +++++++++++++++++++ cognite/client/_sync_api/relationships.py | 4 +- .../_sync_api/simulators/models_revisions.py | 27 ++++++- .../client/_sync_api/simulators/routines.py | 3 + 11 files changed, 127 insertions(+), 14 deletions(-) diff --git a/cognite/client/_sync_api/data_modeling/graphql.py b/cognite/client/_sync_api/data_modeling/graphql.py index 69bd7a84e6..d1593ab2f4 100644 --- a/cognite/client/_sync_api/data_modeling/graphql.py +++ b/cognite/client/_sync_api/data_modeling/graphql.py @@ -22,6 +22,21 @@ class SyncDataModelingGraphQLAPI(SyncAPIClient): def __init__(self, async_client: AsyncCogniteClient): self.__async_client = async_client + def _unsafely_wipe_and_regenerate_dml(self, id: DataModelIdentifier) -> str: + """ + Wipe and regenerate the DML for a given data model. + + Note: + This removes all comments from the DML. + + Args: + id (DataModelIdentifier): The data model to apply DML to. + + Returns: + str: The new DML + """ + return run_sync(self.__async_client.data_modeling.graphql._unsafely_wipe_and_regenerate_dml(id=id)) + def apply_dml( self, id: DataModelIdentifier, diff --git a/cognite/client/_sync_api/data_modeling/space_statistics.py b/cognite/client/_sync_api/data_modeling/space_statistics.py index 569ad9a4b6..b6b802cfa2 100644 --- a/cognite/client/_sync_api/data_modeling/space_statistics.py +++ b/cognite/client/_sync_api/data_modeling/space_statistics.py @@ -1,6 +1,6 @@ """ =============================================================================== -773bd110da46d6c4ebcf2a0513906391 +b8446180971deded041820cbe43cb4ef This file is auto-generated from the Async API modules, - do not edit manually! 
=============================================================================== """ @@ -16,7 +16,7 @@ from cognite.client.utils.useful_types import SequenceNotStr if TYPE_CHECKING: - from cognite.client._cognite_client import AsyncCogniteClient + from cognite.client import AsyncCogniteClient class SyncSpaceStatisticsAPI(SyncAPIClient): diff --git a/cognite/client/_sync_api/data_modeling/statistics.py b/cognite/client/_sync_api/data_modeling/statistics.py index e385548578..16b2db9023 100644 --- a/cognite/client/_sync_api/data_modeling/statistics.py +++ b/cognite/client/_sync_api/data_modeling/statistics.py @@ -1,6 +1,6 @@ """ =============================================================================== -a4c510989006c674b4ae203ce2c6918d +22ce5358beca6d957d6ed92d895118ae This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -16,7 +16,7 @@ from cognite.client.utils._async_helpers import run_sync if TYPE_CHECKING: - from cognite.client._cognite_client import AsyncCogniteClient + from cognite.client import AsyncCogniteClient class SyncStatisticsAPI(SyncAPIClient): diff --git a/cognite/client/_sync_api/datapoints.py b/cognite/client/_sync_api/datapoints.py index 512d29ebcf..75ee920d12 100644 --- a/cognite/client/_sync_api/datapoints.py +++ b/cognite/client/_sync_api/datapoints.py @@ -146,7 +146,7 @@ def __call__( directly to an insert method. The only assumption below is that the time series have already been created in the target project. >>> from cognite.client.utils import MIN_TIMESTAMP_MS, MAX_TIMESTAMP_MS - >>> target_client = AsyncCogniteClient() + >>> target_client = CogniteClient() >>> ts_to_copy = client.time_series.list(data_set_external_ids="my-use-case") >>> queries = [ ... DatapointsQuery( diff --git a/cognite/client/_sync_api/entity_matching.py b/cognite/client/_sync_api/entity_matching.py index 73dbddaf1a..a57ac37db4 100644 --- a/cognite/client/_sync_api/entity_matching.py +++ b/cognite/client/_sync_api/entity_matching.py @@ -299,8 +299,8 @@ def refit( Args: true_matches (Sequence[dict | tuple[int | str, int | str]]): Updated known valid matches given as a list of dicts with keys 'fromId', 'fromExternalId', 'toId', 'toExternalId'). A tuple can be used instead of the dictionary for convenience, interpreted as id/externalId based on type. - id (int | None): ids of the model to use. - external_id (str | None): external ids of the model to use. + id (int | None): id of the model to use. + external_id (str | None): external id of the model to use. Returns: EntityMatchingModel: new model refitted to true_matches. 
@@ -311,7 +311,7 @@ def refit( >>> sources = [{'id': 101, 'name': 'ChildAsset1', 'description': 'Child of ParentAsset1'}] >>> targets = [{'id': 1, 'name': 'ParentAsset1', 'description': 'Parent to ChildAsset1'}] >>> true_matches = [(1, 101)] - >>> model = client.entity_matching.refit(true_matches = true_matches, description="AssetMatchingJob1", id=1) + >>> model = client.entity_matching.refit(true_matches=true_matches, id=1) """ return run_sync( self.__async_client.entity_matching.refit(true_matches=true_matches, id=id, external_id=external_id) diff --git a/cognite/client/_sync_api/files.py b/cognite/client/_sync_api/files.py index 99ad8305a9..9b4b267e11 100644 --- a/cognite/client/_sync_api/files.py +++ b/cognite/client/_sync_api/files.py @@ -1,6 +1,6 @@ """ =============================================================================== -8ff3a2a3fc25ed22905564ffee81a017 +6c1c42132e1b371d885eb22ff47f5467 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -237,7 +237,7 @@ def aggregate_count(self, filter: FileMetadataFilter | dict[str, Any] | None = N >>> from cognite.client import CogniteClient, AsyncCogniteClient >>> client = CogniteClient() >>> # async_client = AsyncCogniteClient() # another option - >>> aggregate_uploaded = client.files.aggregate(filter={"uploaded": True}) + >>> aggregate_uploaded = client.files.aggregate_count(filter={"uploaded": True}) """ return run_sync(self.__async_client.files.aggregate_count(filter=filter)) diff --git a/cognite/client/_sync_api/geospatial.py b/cognite/client/_sync_api/geospatial.py index 0078030a19..b37e327ccd 100644 --- a/cognite/client/_sync_api/geospatial.py +++ b/cognite/client/_sync_api/geospatial.py @@ -662,7 +662,7 @@ def stream_features( ... # do something with the features """ yield from SyncIterator( - self.__async_client.geospatial( + self.__async_client.geospatial.stream_features( feature_type_external_id=feature_type_external_id, filter=filter, properties=properties, diff --git a/cognite/client/_sync_api/iam/__init__.py b/cognite/client/_sync_api/iam/__init__.py index 8bfb27d66d..316affb07f 100644 --- a/cognite/client/_sync_api/iam/__init__.py +++ b/cognite/client/_sync_api/iam/__init__.py @@ -39,6 +39,76 @@ def __init__(self, async_client: AsyncCogniteClient): self.principals = SyncPrincipalsAPI(async_client) self.token = SyncTokenAPI(async_client) + def compare_capabilities( + self, + existing_capabilities: ComparableCapability, + desired_capabilities: ComparableCapability, + project: str | None = None, + ) -> list[Capability]: + """ + Helper method to compare capabilities across two groups (of capabilities) to find which are missing from the first. + + Note: + Capabilities that are no longer in use by the API will be ignored. These have names prefixed with `Legacy` and + all inherit from the base class `LegacyCapability`. If you want to check for these, you must do so manually. + + Tip: + If you just want to check against your existing capabilities, you may use the helper method + ``client.iam.verify_capabilities`` instead. + + Args: + existing_capabilities (ComparableCapability): List of existing capabilities. + desired_capabilities (ComparableCapability): List of wanted capabilities to check against existing. + project (str | None): If a ProjectCapability or ProjectCapabilityList is passed, we need to know which CDF project + to pull capabilities from (existing might be from several). 
If project is not passed, and ProjectCapabilityList + is used, it will be inferred from the AsyncCogniteClient used to call retrieve it via token/inspect. + + Returns: + list[Capability]: A flattened list of the missing capabilities, meaning they each have exactly 1 action, 1 scope, 1 id etc. + + Examples: + + Ensure that a user's groups grant access to read- and write for assets in all scope, + and events write, scoped to a specific dataset with id=123: + + >>> from cognite.client import CogniteClient + >>> from cognite.client.data_classes.capabilities import AssetsAcl, EventsAcl + >>> client = CogniteClient() + >>> my_groups = client.iam.groups.list(all=False) + >>> to_check = [ + ... AssetsAcl( + ... actions=[AssetsAcl.Action.Read, AssetsAcl.Action.Write], + ... scope=AssetsAcl.Scope.All()), + ... EventsAcl( + ... actions=[EventsAcl.Action.Write], + ... scope=EventsAcl.Scope.DataSet([123]), + ... )] + >>> missing = client.iam.compare_capabilities( + ... existing_capabilities=my_groups, + ... desired_capabilities=to_check) + >>> if missing: + ... pass # do something + + Capabilities can also be passed as dictionaries: + + >>> to_check = [ + ... {'assetsAcl': {'actions': ['READ', 'WRITE'], 'scope': {'all': {}}}}, + ... {'eventsAcl': {'actions': ['WRITE'], 'scope': {'datasetScope': {'ids': [123]}}}}, + ... ] + >>> missing = client.iam.compare_capabilities( + ... existing_capabilities=my_groups, + ... desired_capabilities=to_check) + + You may also load capabilities from a dict-representation directly into ACLs (access-control list) + by using ``Capability.load``. This will also ensure that the capabilities are valid. + + >>> from cognite.client.data_classes.capabilities import Capability + >>> acls = [Capability.load(cap) for cap in to_check] + """ + return self.__async_client.iam.compare_capabilities( + existing_capabilities=existing_capabilities, desired_capabilities=desired_capabilities, project=project + ) + def verify_capabilities(self, desired_capabilities: ComparableCapability) -> list[Capability]: """ Helper method to compare your current capabilities with a set of desired capabilities and return any missing. diff --git a/cognite/client/_sync_api/relationships.py b/cognite/client/_sync_api/relationships.py index 1ff354fb84..9d7df9f976 100644 --- a/cognite/client/_sync_api/relationships.py +++ b/cognite/client/_sync_api/relationships.py @@ -1,6 +1,6 @@ """ =============================================================================== -78502466323dd0f7badd4b11014e4c65 +c16ec03a20daca0713c052b395d984dc This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -382,7 +382,7 @@ def upsert( >>> from cognite.client import CogniteClient >>> from cognite.client.data_classes import RelationshipWrite >>> client = CogniteClient() - >>> existing_relationship = client.relationships.retrieve(id=1) + >>> existing_relationship = client.relationships.retrieve(external_id="foo") >>> existing_relationship.description = "New description" >>> new_relationship = RelationshipWrite( ... 
external_id="new_relationship", diff --git a/cognite/client/_sync_api/simulators/models_revisions.py b/cognite/client/_sync_api/simulators/models_revisions.py index 8ca08faa00..31de84dc90 100644 --- a/cognite/client/_sync_api/simulators/models_revisions.py +++ b/cognite/client/_sync_api/simulators/models_revisions.py @@ -1,6 +1,6 @@ """ =============================================================================== -7619ac448bf753adf6658c74b2748fd6 +9391dfeb7797dd05db353b555112b6d1 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -17,6 +17,7 @@ from cognite.client.data_classes.simulators.filters import PropertySort from cognite.client.data_classes.simulators.models import ( SimulatorModelRevision, + SimulatorModelRevisionDataList, SimulatorModelRevisionList, SimulatorModelRevisionWrite, ) @@ -229,3 +230,27 @@ def create( >>> res = client.simulators.models.revisions.create(revisions) """ return run_sync(self.__async_client.simulators.models.revisions.create(items=items)) + + def retrieve_data(self, model_revision_external_id: str) -> SimulatorModelRevisionDataList: + """ + `Filter simulator model revision data `_ + + Retrieves a list of simulator model revisions data that match the given criteria. + + Args: + model_revision_external_id (str): The external id of the simulator model revision to filter by. + Returns: + SimulatorModelRevisionDataList: List of simulator model revision data + + Examples: + List simulator model revision data: + >>> from cognite.client import CogniteClient + >>> client = CogniteClient() + >>> # async_client = AsyncCogniteClient() # another option + >>> res = client.simulators.models.revisions.retrieve_data("model_revision_1") + """ + return run_sync( + self.__async_client.simulators.models.revisions.retrieve_data( + model_revision_external_id=model_revision_external_id + ) + ) diff --git a/cognite/client/_sync_api/simulators/routines.py b/cognite/client/_sync_api/simulators/routines.py index b161a216ff..157340ab2c 100644 --- a/cognite/client/_sync_api/simulators/routines.py +++ b/cognite/client/_sync_api/simulators/routines.py @@ -46,6 +46,7 @@ def __call__( chunk_size: int | None = None, model_external_ids: Sequence[str] | None = None, simulator_integration_external_ids: Sequence[str] | None = None, + sort: PropertySort | None = None, limit: int | None = None, ) -> Iterator[SimulatorRoutine | SimulatorRoutineList]: """ @@ -57,6 +58,7 @@ def __call__( chunk_size (int | None): Number of simulator routines to return in each chunk. Defaults to yielding one simulator routine a time. model_external_ids (Sequence[str] | None): Filter on model external ids. simulator_integration_external_ids (Sequence[str] | None): Filter on simulator integration external ids. + sort (PropertySort | None): The criteria to sort by. limit (int | None): Maximum number of simulator routines to return. Defaults to return all items. 
Yields: @@ -67,6 +69,7 @@ def __call__( chunk_size=chunk_size, model_external_ids=model_external_ids, simulator_integration_external_ids=simulator_integration_external_ids, + sort=sort, limit=limit, ) ) From 422623dfbeee3824977662e4fc9acd7578454f08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5kon=20V=2E=20Treider?= Date: Wed, 15 Oct 2025 21:40:41 +0200 Subject: [PATCH 12/12] update conversion script to add basic HTTP methods++ to CogniteClient --- cognite/client/_sync_cognite_client.py | 181 ++++++++++++++++++++++- scripts/sync_client_codegen/main.py | 189 ++++++++++++++++++++++++- 2 files changed, 365 insertions(+), 5 deletions(-) diff --git a/cognite/client/_sync_cognite_client.py b/cognite/client/_sync_cognite_client.py index 4436a9c4df..3a7defed35 100644 --- a/cognite/client/_sync_cognite_client.py +++ b/cognite/client/_sync_cognite_client.py @@ -6,7 +6,9 @@ from __future__ import annotations -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any + +import httpx from cognite.client import AsyncCogniteClient from cognite.client._sync_api.agents.agents import SyncAgentsAPI @@ -37,6 +39,9 @@ from cognite.client._sync_api.units import SyncUnitAPI from cognite.client._sync_api.vision import SyncVisionAPI from cognite.client._sync_api.workflows import SyncWorkflowAPI +from cognite.client.credentials import CredentialProvider, OAuthClientCredentials, OAuthInteractive +from cognite.client.utils._async_helpers import run_sync +from cognite.client.utils._auxiliary import load_resource_to_dict if TYPE_CHECKING: from cognite.client import ClientConfig @@ -84,3 +89,177 @@ def __init__(self, config: ClientConfig | None = None) -> None: self.units = SyncUnitAPI(async_client) self.vision = SyncVisionAPI(async_client) self.workflows = SyncWorkflowAPI(async_client) + + def get( + self, url: str, params: dict[str, Any] | None = None, headers: dict[str, Any] | None = None + ) -> httpx.Response: + """Perform a GET request to an arbitrary path in the API.""" + return run_sync(self.__async_client.get(url, params=params, headers=headers)) + + def post( + self, + url: str, + json: dict[str, Any] | None = None, + params: dict[str, Any] | None = None, + headers: dict[str, Any] | None = None, + ) -> httpx.Response: + """Perform a POST request to an arbitrary path in the API.""" + return run_sync(self.__async_client.post(url, json=json, params=params, headers=headers)) + + def put( + self, + url: str, + json: dict[str, Any] | None = None, + params: dict[str, Any] | None = None, + headers: dict[str, Any] | None = None, + ) -> httpx.Response: + """Perform a PUT request to an arbitrary path in the API.""" + return run_sync(self.__async_client.put(url, json=json, params=params, headers=headers)) + + @property + def version(self) -> str: + """Returns the current SDK version. + + Returns: + str: The current SDK version + """ + from cognite.client import __version__ + + return __version__ + + @property + def config(self) -> ClientConfig: + """Returns a config object containing the configuration for the current client. + + Returns: + ClientConfig: The configuration object. + """ + return self.__async_client._config + + @classmethod + def default( + cls, + project: str, + cdf_cluster: str, + credentials: CredentialProvider, + client_name: str | None = None, + ) -> CogniteClient: + """ + Create an CogniteClient with default configuration. 
+ + The default configuration creates the URLs based on the project and cluster: + + * Base URL: "https://{cdf_cluster}.cognitedata.com/ + + Args: + project (str): The CDF project. + cdf_cluster (str): The CDF cluster where the CDF project is located. + credentials (CredentialProvider): Credentials. e.g. Token, ClientCredentials. + client_name (str | None): A user-defined name for the client. Used to identify the number of unique applications/scripts running on top of CDF. If this is not set, the getpass.getuser() is used instead, meaning the username you are logged in with is used. + + Returns: + CogniteClient: An CogniteClient instance with default configurations. + """ + from cognite.client import ClientConfig + + return cls(ClientConfig.default(project, cdf_cluster, credentials, client_name=client_name)) + + @classmethod + def default_oauth_client_credentials( + cls, + project: str, + cdf_cluster: str, + tenant_id: str, + client_id: str, + client_secret: str, + client_name: str | None = None, + ) -> CogniteClient: + """ + Create an CogniteClient with default configuration using a client credentials flow. + + The default configuration creates the URLs based on the project and cluster: + + * Base URL: "https://{cdf_cluster}.cognitedata.com/ + * Token URL: "https://login.microsoftonline.com/{tenant_id}/oauth2/v2.0/token" + * Scopes: [f"https://{cdf_cluster}.cognitedata.com/.default"] + + Args: + project (str): The CDF project. + cdf_cluster (str): The CDF cluster where the CDF project is located. + tenant_id (str): The Azure tenant ID. + client_id (str): The Azure client ID. + client_secret (str): The Azure client secret. + client_name (str | None): A user-defined name for the client. Used to identify the number of unique applications/scripts running on top of CDF. If this is not set, the getpass.getuser() is used instead, meaning the username you are logged in with is used. + + Returns: + CogniteClient: An CogniteClient instance with default configurations. + """ + credentials = OAuthClientCredentials.default_for_azure_ad(tenant_id, client_id, client_secret, cdf_cluster) + return cls.default(project, cdf_cluster, credentials, client_name) + + @classmethod + def default_oauth_interactive( + cls, + project: str, + cdf_cluster: str, + tenant_id: str, + client_id: str, + client_name: str | None = None, + ) -> CogniteClient: + """ + Create an CogniteClient with default configuration using the interactive flow. + + The default configuration creates the URLs based on the tenant_id and cluster: + + * Base URL: "https://{cdf_cluster}.cognitedata.com/ + * Authority URL: "https://login.microsoftonline.com/{tenant_id}" + * Scopes: [f"https://{cdf_cluster}.cognitedata.com/.default"] + + Args: + project (str): The CDF project. + cdf_cluster (str): The CDF cluster where the CDF project is located. + tenant_id (str): The Azure tenant ID. + client_id (str): The Azure client ID. + client_name (str | None): A user-defined name for the client. Used to identify the number of unique applications/scripts running on top of CDF. If this is not set, the getpass.getuser() is used instead, meaning the username you are logged in with is used. + + Returns: + CogniteClient: An CogniteClient instance with default configurations. 
+ """ + credentials = OAuthInteractive.default_for_azure_ad(tenant_id, client_id, cdf_cluster) + return cls.default(project, cdf_cluster, credentials, client_name) + + @classmethod + def load(cls, config: dict[str, Any] | str) -> CogniteClient: + """Load a cognite client object from a YAML/JSON string or dict. + + Args: + config (dict[str, Any] | str): A dictionary or YAML/JSON string containing configuration values defined in the CogniteClient class. + + Returns: + CogniteClient: A cognite client object. + + Examples: + + Create a cognite client object from a dictionary input: + + >>> from cognite.client import CogniteClient + >>> import os + >>> config = { + ... "client_name": "abcd", + ... "project": "cdf-project", + ... "base_url": "https://api.cognitedata.com/", + ... "credentials": { + ... "client_credentials": { + ... "client_id": "abcd", + ... "client_secret": os.environ["OAUTH_CLIENT_SECRET"], + ... "token_url": "https://login.microsoftonline.com/xyz/oauth2/v2.0/token", + ... "scopes": ["https://api.cognitedata.com/.default"], + ... }, + ... }, + ... } + >>> client = CogniteClient.load(config) + """ + from cognite.client import ClientConfig + + loaded = load_resource_to_dict(config) + return cls(config=ClientConfig.load(loaded)) diff --git a/scripts/sync_client_codegen/main.py b/scripts/sync_client_codegen/main.py index 6fa02bfe69..f7913ec0db 100644 --- a/scripts/sync_client_codegen/main.py +++ b/scripts/sync_client_codegen/main.py @@ -423,10 +423,10 @@ def run_ruff(file_paths: list[Path]) -> None: # We exit nonzero if ruff fixes anything, so we run with check=False to not raise: base = f"poetry run pre-commit run ruff-{{}} --files {shlex.join(map(str, file_paths))}" command = shlex.split(base.format("check")) - print(shlex.join(command)) + print("Now running command\n", shlex.join(command)) subprocess.run(command, check=False, capture_output=True) command = shlex.split(base.format("format")) - print(shlex.join(command)) + print("Now running command\n", shlex.join(command)) subprocess.run(command, check=False, capture_output=True) @@ -490,8 +490,13 @@ def main(read_file: Path, dot_path_lookup: dict[str, str]) -> tuple[Path | None, """ from __future__ import annotations -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any + +import httpx +from cognite.client.credentials import CredentialProvider, OAuthClientCredentials, OAuthInteractive +from cognite.client.utils._auxiliary import load_resource_to_dict from cognite.client import AsyncCogniteClient +from cognite.client.utils._async_helpers import run_sync {all_api_imports} if TYPE_CHECKING: @@ -513,6 +518,181 @@ def __init__(self, config: ClientConfig | None = None) -> None: # Initialize all sync. 
APIs: {nested_apis_init} + + def get( + self, url: str, params: dict[str, Any] | None = None, headers: dict[str, Any] | None = None + ) -> httpx.Response: + """Perform a GET request to an arbitrary path in the API.""" + return run_sync(self.__async_client.get(url, params=params, headers=headers)) + + def post( + self, + url: str, + json: dict[str, Any] | None = None, + params: dict[str, Any] | None = None, + headers: dict[str, Any] | None = None, + ) -> httpx.Response: + """Perform a POST request to an arbitrary path in the API.""" + return run_sync(self.__async_client.post(url, json=json, params=params, headers=headers)) + + def put( + self, + url: str, + json: dict[str, Any] | None = None, + params: dict[str, Any] | None = None, + headers: dict[str, Any] | None = None, + ) -> httpx.Response: + """Perform a PUT request to an arbitrary path in the API.""" + return run_sync(self.__async_client.put(url, json=json, params=params, headers=headers)) + + @property + def version(self) -> str: + """Returns the current SDK version. + + Returns: + str: The current SDK version + """ + from cognite.client import __version__ + + return __version__ + + @property + def config(self) -> ClientConfig: + """Returns a config object containing the configuration for the current client. + + Returns: + ClientConfig: The configuration object. + """ + return self.__async_client._config + + @classmethod + def default( + cls, + project: str, + cdf_cluster: str, + credentials: CredentialProvider, + client_name: str | None = None, + ) -> CogniteClient: + """ + Create an CogniteClient with default configuration. + + The default configuration creates the URLs based on the project and cluster: + + * Base URL: "https://{{cdf_cluster}}.cognitedata.com/ + + Args: + project (str): The CDF project. + cdf_cluster (str): The CDF cluster where the CDF project is located. + credentials (CredentialProvider): Credentials. e.g. Token, ClientCredentials. + client_name (str | None): A user-defined name for the client. Used to identify the number of unique applications/scripts running on top of CDF. If this is not set, the getpass.getuser() is used instead, meaning the username you are logged in with is used. + + Returns: + CogniteClient: An CogniteClient instance with default configurations. + """ + from cognite.client import ClientConfig + + return cls(ClientConfig.default(project, cdf_cluster, credentials, client_name=client_name)) + + @classmethod + def default_oauth_client_credentials( + cls, + project: str, + cdf_cluster: str, + tenant_id: str, + client_id: str, + client_secret: str, + client_name: str | None = None, + ) -> CogniteClient: + """ + Create an CogniteClient with default configuration using a client credentials flow. + + The default configuration creates the URLs based on the project and cluster: + + * Base URL: "https://{{cdf_cluster}}.cognitedata.com/ + * Token URL: "https://login.microsoftonline.com/{{tenant_id}}/oauth2/v2.0/token" + * Scopes: [f"https://{{cdf_cluster}}.cognitedata.com/.default"] + + Args: + project (str): The CDF project. + cdf_cluster (str): The CDF cluster where the CDF project is located. + tenant_id (str): The Azure tenant ID. + client_id (str): The Azure client ID. + client_secret (str): The Azure client secret. + client_name (str | None): A user-defined name for the client. Used to identify the number of unique applications/scripts running on top of CDF. If this is not set, the getpass.getuser() is used instead, meaning the username you are logged in with is used. 
+ + Returns: + CogniteClient: An CogniteClient instance with default configurations. + """ + credentials = OAuthClientCredentials.default_for_azure_ad(tenant_id, client_id, client_secret, cdf_cluster) + return cls.default(project, cdf_cluster, credentials, client_name) + + @classmethod + def default_oauth_interactive( + cls, + project: str, + cdf_cluster: str, + tenant_id: str, + client_id: str, + client_name: str | None = None, + ) -> CogniteClient: + """ + Create an CogniteClient with default configuration using the interactive flow. + + The default configuration creates the URLs based on the tenant_id and cluster: + + * Base URL: "https://{{cdf_cluster}}.cognitedata.com/ + * Authority URL: "https://login.microsoftonline.com/{{tenant_id}}" + * Scopes: [f"https://{{cdf_cluster}}.cognitedata.com/.default"] + + Args: + project (str): The CDF project. + cdf_cluster (str): The CDF cluster where the CDF project is located. + tenant_id (str): The Azure tenant ID. + client_id (str): The Azure client ID. + client_name (str | None): A user-defined name for the client. Used to identify the number of unique applications/scripts running on top of CDF. If this is not set, the getpass.getuser() is used instead, meaning the username you are logged in with is used. + + Returns: + CogniteClient: An CogniteClient instance with default configurations. + """ + credentials = OAuthInteractive.default_for_azure_ad(tenant_id, client_id, cdf_cluster) + return cls.default(project, cdf_cluster, credentials, client_name) + + @classmethod + def load(cls, config: dict[str, Any] | str) -> CogniteClient: + """Load a cognite client object from a YAML/JSON string or dict. + + Args: + config (dict[str, Any] | str): A dictionary or YAML/JSON string containing configuration values defined in the CogniteClient class. + + Returns: + CogniteClient: A cognite client object. + + Examples: + + Create a cognite client object from a dictionary input: + + >>> from cognite.client import CogniteClient + >>> import os + >>> config = {{ + ... "client_name": "abcd", + ... "project": "cdf-project", + ... "base_url": "https://api.cognitedata.com/", + ... "credentials": {{ + ... "client_credentials": {{ + ... "client_id": "abcd", + ... "client_secret": os.environ["OAUTH_CLIENT_SECRET"], + ... "token_url": "https://login.microsoftonline.com/xyz/oauth2/v2.0/token", + ... "scopes": ["https://api.cognitedata.com/.default"], + ... }}, + ... }}, + ... }} + >>> client = CogniteClient.load(config) + """ + from cognite.client import ClientConfig + + loaded = load_resource_to_dict(config) + return cls(config=ClientConfig.load(loaded)) + ''' @@ -536,7 +716,8 @@ def create_sync_cognite_client( all_imports.append(f"from {import_path} import Sync{override_api_name}") content = COGNITE_CLIENT_TEMPLATE.format( - file_hash="TODO", all_api_imports="\n".join(all_imports), nested_apis_init=" ".join(all_apis) + all_api_imports="\n".join(all_imports), + nested_apis_init=" ".join(all_apis).rstrip(), ) if content != SYNC_CLIENT_PATH.read_text(): SYNC_CLIENT_PATH.write_text(content)