diff --git a/cognite/client/_basic_api_client.py b/cognite/client/_basic_api_client.py
index 81f2ec0faa..102013045a 100644
--- a/cognite/client/_basic_api_client.py
+++ b/cognite/client/_basic_api_client.py
@@ -213,13 +213,31 @@ async def _request(
method: Literal["GET", "PUT", "HEAD"],
/,
full_url: str,
- content: str | bytes | Iterable[bytes] | None = None,
+ content: bytes | AsyncIterator[bytes] | None = None,
headers: dict[str, Any] | None = None,
timeout: float | None = None,
- api_subversion: str | None = None,
include_cdf_headers: bool = False,
+ api_subversion: str | None = None,
) -> httpx.Response:
- """Make a request to something that is outside Cognite Data Fusion"""
+ """
+ Make a request to something that is outside Cognite Data Fusion, with retry enabled.
+ Requires the caller to handle errors coming from non-2xx response status codes.
+
+ Args:
+ method (Literal['GET', 'PUT', 'HEAD']): HTTP method.
+ full_url (str): Full URL to make the request to.
+ content (bytes | AsyncIterator[bytes] | None): Optional body content to send along with the request.
+ headers (dict[str, Any] | None): Optional headers to include in the request.
+ timeout (float | None): Override the default timeout for this request.
+ include_cdf_headers (bool): Whether to include Cognite Data Fusion headers in the request. Defaults to False.
+ api_subversion (str | None): When include_cdf_headers=True, override the API subversion to use for the request. Has no effect otherwise.
+
+ Returns:
+ httpx.Response: The response from the server.
+
+ Raises:
+ httpx.HTTPStatusError: If the response status code is 4xx or 5xx.
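+
+ Example:
+
+ A minimal illustrative call (the URL and timeout value here are placeholders):
+
+ >>> res = await self._request("GET", "https://example.com/file.bin", timeout=30.0)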
+ """
client = self._select_async_http_client(method in {"GET", "PUT", "HEAD"})
if include_cdf_headers:
headers = self._configure_headers(additional_headers=headers, api_subversion=api_subversion)
@@ -227,11 +245,13 @@ async def _request(
res = await client(
method, full_url, content=content, headers=headers, timeout=timeout or self._config.timeout
)
- except httpx.HTTPStatusError as err:
- await self._handle_status_error(err)
+ self._log_successful_request(res)
+ return res
- self._log_successful_request(res)
- return res
+ except httpx.HTTPStatusError as err:
+ handler = await FailedRequestHandler.from_status_error(err, stream=False)
+ handler.log_failed_request()
+ raise
@asynccontextmanager
async def _stream(
@@ -241,14 +261,14 @@ async def _stream(
*,
url_path: str | None = None,
full_url: str | None = None,
- json: Any = None,
+ json: dict[str, Any] | None = None,
headers: dict[str, Any] | None = None,
full_headers: dict[str, Any] | None = None,
timeout: float | None = None,
api_subversion: str | None = None,
) -> AsyncIterator[httpx.Response]:
assert url_path or full_url, "Either url_path or full_url must be provided"
- full_url = full_url or resolve_url(self, "GET", cast(str, url_path))[1]
+ full_url = full_url or resolve_url(self, method, cast(str, url_path))[1]
if full_headers is None:
full_headers = self._configure_headers(headers, api_subversion)
@@ -262,7 +282,7 @@ async def _stream(
yield resp
except httpx.HTTPStatusError as err:
- await self._handle_status_error(err, stream=True)
+ await self._handle_status_error(err, payload=json, stream=True)
async def _get(
self,
@@ -319,7 +339,7 @@ async def _post(
semaphore=semaphore,
)
except httpx.HTTPStatusError as err:
- await self._handle_status_error(err)
+ await self._handle_status_error(err, payload=json)
self._log_successful_request(res, payload=json)
return res
@@ -327,7 +347,7 @@ async def _post(
async def _put(
self,
url_path: str,
- content: str | bytes | Iterable[bytes] | None = None,
+ content: str | bytes | AsyncIterator[bytes] | None = None,
json: dict[str, Any] | None = None,
params: dict[str, Any] | None = None,
headers: dict[str, Any] | None = None,
@@ -337,10 +357,10 @@ async def _put(
semaphore: asyncio.BoundedSemaphore | None = None,
) -> httpx.Response:
_, full_url = resolve_url(self, "PUT", url_path)
+
full_headers = self._configure_headers(additional_headers=headers, api_subversion=api_subversion)
if content is None:
content = self._handle_json_dump(json, full_headers)
-
try:
res = await self._http_client_with_retry(
"PUT",
@@ -353,7 +373,7 @@ async def _put(
semaphore=semaphore,
)
except httpx.HTTPStatusError as err:
- await self._handle_status_error(err)
+ await self._handle_status_error(err, payload=json)
self._log_successful_request(res, payload=json)
return res
@@ -381,7 +401,7 @@ def _refresh_auth_header(self, headers: MutableMapping[str, Any]) -> None:
headers[auth_header_name] = auth_header_value
async def _handle_status_error(
- self, error: httpx.HTTPStatusError, payload: dict | None = None, stream: bool = False
+ self, error: httpx.HTTPStatusError, payload: dict[str, Any] | None = None, stream: bool = False
) -> NoReturn:
"""The response had an HTTP status code of 4xx or 5xx"""
handler = await FailedRequestHandler.from_status_error(error, stream=stream)
diff --git a/cognite/client/_cognite_client.py b/cognite/client/_cognite_client.py
index c93ec938c2..b31f4c2c68 100644
--- a/cognite/client/_cognite_client.py
+++ b/cognite/client/_cognite_client.py
@@ -261,17 +261,3 @@ def load(cls, config: dict[str, Any] | str) -> AsyncCogniteClient:
"""
loaded = load_resource_to_dict(config)
return cls(config=ClientConfig.load(loaded))
-
-
-class CogniteClient:
- """Main entrypoint into the Cognite Python SDK.
-
- All Cognite Data Fusion APIs are accessible through this synchronous client.
- For the asynchronous client, see :class:`~cognite.client._cognite_client.AsyncCogniteClient`.
-
- Args:
- config (ClientConfig | None): The configuration for this client.
- """
-
- def __init__(self, config: ClientConfig | None = None) -> None:
- raise NotImplementedError
diff --git a/cognite/client/_sync_api/__init__.py b/cognite/client/_sync_api/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/cognite/client/_sync_api/agents/__init__.py b/cognite/client/_sync_api/agents/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/cognite/client/_sync_api/agents/agents.py b/cognite/client/_sync_api/agents/agents.py
new file mode 100644
index 0000000000..3b1772d852
--- /dev/null
+++ b/cognite/client/_sync_api/agents/agents.py
@@ -0,0 +1,327 @@
+"""
+===============================================================================
+0c2ad8063135aae7d3d1190f8c6c8939
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Sequence
+from typing import TYPE_CHECKING, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.agents import Agent, AgentList, AgentUpsert
+from cognite.client.data_classes.agents.chat import Action, ActionResult, AgentChatResponse, Message
+from cognite.client.utils._async_helpers import run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncAgentsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def upsert(self, agents: AgentUpsert) -> Agent: ...
+
+ @overload
+ def upsert(self, agents: Sequence[AgentUpsert]) -> AgentList: ...
+
+ def upsert(self, agents: AgentUpsert | Sequence[AgentUpsert]) -> Agent | AgentList:
+ """
+ `Create or update (upsert) one or more agents. `_
+
+ Args:
+ agents (AgentUpsert | Sequence[AgentUpsert]): Agent or list of agents to create or update.
+
+ Returns:
+ Agent | AgentList: The created or updated agent(s).
+
+ Examples:
+
+ Create a new agent with a query knowledge graph tool to find assets:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.agents import (
+ ... AgentUpsert,
+ ... QueryKnowledgeGraphAgentToolUpsert,
+ ... QueryKnowledgeGraphAgentToolConfiguration,
+ ... DataModelInfo
+ ... )
+ >>> client = CogniteClient()
+ ...
+ >>> find_assets_tool = QueryKnowledgeGraphAgentToolUpsert(
+ ... name="find assets",
+ ... description="Use this tool to find assets",
+ ... configuration=QueryKnowledgeGraphAgentToolConfiguration(
+ ... data_models=[
+ ... DataModelInfo(
+ ... space="cdf_idm",
+ ... external_id="CogniteProcessIndustries",
+ ... version="v1",
+ ... view_external_ids=["CogniteAsset"],
+ ... )
+ ... ]
+ ... )
+ ... )
+ >>> agent = AgentUpsert(
+ ... external_id="my_agent",
+ ... name="My Agent",
+ ... labels=["published"],
+ ... tools=[find_assets_tool]
+ ... )
+ >>> client.agents.upsert(agents=[agent])
+
+ Create an agent with multiple different tools:
+
+ >>> from cognite.client.data_classes.agents import (
+ ... AgentUpsert,
+ ... QueryKnowledgeGraphAgentToolUpsert,
+ ... QueryKnowledgeGraphAgentToolConfiguration,
+ ... DataModelInfo,
+ ... SummarizeDocumentAgentToolUpsert,
+ ... AskDocumentAgentToolUpsert,
+ ... QueryTimeSeriesDatapointsAgentToolUpsert
+ ... )
+ ...
+ >>> find_assets_tool = QueryKnowledgeGraphAgentToolUpsert(
+ ... name="find assets",
+ ... description="Use this tool to query the knowledge graph for assets",
+ ... configuration=QueryKnowledgeGraphAgentToolConfiguration(
+ ... data_models=[
+ ... DataModelInfo(
+ ... space="cdf_idm",
+ ... external_id="CogniteProcessIndustries",
+ ... version="v1",
+ ... view_external_ids=["CogniteAsset"],
+ ... )
+ ... ]
+ ... )
+ ... )
+ >>> find_files_tool = QueryKnowledgeGraphAgentToolUpsert(
+ ... name="find files",
+ ... description="Use this tool to query the knowledge graph for files",
+ ... configuration=QueryKnowledgeGraphAgentToolConfiguration(
+ ... data_models=[
+ ... DataModelInfo(
+ ... space="cdf_idm",
+ ... external_id="CogniteProcessIndustries",
+ ... version="v1",
+ ... view_external_ids=["CogniteFile"],
+ ... )
+ ... ]
+ ... )
+ ... )
+ >>> find_time_series_tool = QueryKnowledgeGraphAgentToolUpsert(
+ ... name="find time series",
+ ... description="Use this tool to query the knowledge graph for time series",
+ ... configuration=QueryKnowledgeGraphAgentToolConfiguration(
+ ... data_models=[
+ ... DataModelInfo(
+ ... space="cdf_idm",
+ ... external_id="CogniteProcessIndustries",
+ ... version="v1",
+ ... view_external_ids=["CogniteTimeSeries"],
+ ... )
+ ... ]
+ ... )
+ ... )
+ >>> summarize_tool = SummarizeDocumentAgentToolUpsert(
+ ... name="summarize document",
+ ... description="Use this tool to get a summary of a document"
+ ... )
+ >>> ask_doc_tool = AskDocumentAgentToolUpsert(
+ ... name="ask document",
+ ... description="Use this tool to ask questions about specific documents"
+ ... )
+ >>> ts_tool = QueryTimeSeriesDatapointsAgentToolUpsert(
+ ... name="query time series",
+ ... description="Use this tool to query time series data points"
+ ... )
+ >>> agent = AgentUpsert(
+ ... external_id="my_agent",
+ ... name="My agent",
+ ... description="An agent with many tools",
+ ... instructions="You are a helpful assistant that can query knowledge graphs, summarize documents, answer questions about documents, and query time series data points.",
+ ... labels=["published"],
+ ... tools=[find_assets_tool, find_files_tool, find_time_series_tool, summarize_tool, ask_doc_tool, ts_tool]
+ ... )
+ >>> client.agents.upsert(agents=[agent])
+ """
+ return run_sync(self.__async_client.agents.upsert(agents=agents))
+
+ @overload
+ def retrieve(self, external_ids: str, ignore_unknown_ids: bool = False) -> Agent | None: ...
+
+ @overload
+ def retrieve(self, external_ids: SequenceNotStr[str], ignore_unknown_ids: bool = False) -> AgentList: ...
+
+ def retrieve(
+ self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bool = False
+ ) -> Agent | AgentList | None:
+ """
+ `Retrieve one or more agents by external ID. `_
+
+ Args:
+ external_ids (str | SequenceNotStr[str]): The external id of the agent(s) to retrieve.
+ ignore_unknown_ids (bool): Whether to ignore unknown IDs. Defaults to False.
+
+ Returns:
+ Agent | AgentList | None: The requested agent or agent list. `None` is returned if `ignore_unknown_ids` is `True` and the external ID is not found.
+
+ Examples:
+
+ Retrieve an agent by external id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.agents.retrieve(external_ids="my_agent")
+
+ Retrieve multiple agents:
+
+ >>> res = client.agents.retrieve(external_ids=["my_agent_1", "my_agent_2"])
+ """
+ return run_sync(
+ self.__async_client.agents.retrieve(external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids)
+ )
+
+ def delete(self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bool = False) -> None:
+ """
+ `Delete one or more agents. `_
+
+ Args:
+ external_ids (str | SequenceNotStr[str]): External ID of the agent or a list of external ids.
+ ignore_unknown_ids (bool): If `True`, the call will ignore unknown external IDs. Defaults to False.
+
+ Examples:
+
+ Delete an agent by external id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.agents.delete(external_ids="my_agent")
+ """
+ return run_sync(
+ self.__async_client.agents.delete(external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids)
+ )
+
+ def list(self) -> AgentList:
+ """
+ `List agents. `_
+
+ Returns:
+ AgentList: The list of agents.
+
+ Examples:
+
+ List all agents:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> agent_list = client.agents.list()
+ """
+ return run_sync(self.__async_client.agents.list())
+
+ def chat(
+ self,
+ agent_external_id: str,
+ messages: Message | ActionResult | Sequence[Message | ActionResult],
+ cursor: str | None = None,
+ actions: Sequence[Action] | None = None,
+ ) -> AgentChatResponse:
+ """
+ `Chat with an agent. `_
+
+ Given a user query, the Atlas AI agent responds by reasoning and using the tools associated with it.
+ Users can ensure conversation continuity by including the cursor from the previous response in subsequent requests.
+
+ Args:
+ agent_external_id (str): External ID that uniquely identifies the agent.
+ messages (Message | ActionResult | Sequence[Message | ActionResult]): A list of one or many input messages to the agent. Can include regular messages and action results.
+ cursor (str | None): The cursor to use for continuation of a conversation. Use this to
+ create multi-turn conversations, as the cursor will keep track of the conversation state.
+ actions (Sequence[Action] | None): A list of client-side actions that can be called by the agent.
+
+ Returns:
+ AgentChatResponse: The response from the agent.
+
+ Examples:
+
+ Start a simple chat with an agent:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.agents import Message
+ >>> client = CogniteClient()
+ >>> response = client.agents.chat(
+ ... agent_external_id="my_agent",
+ ... messages=Message("What can you help me with?")
+ ... )
+ >>> print(response.text)
+
+ Continue a conversation using the cursor:
+
+ >>> follow_up = client.agents.chat(
+ ... agent_external_id="my_agent",
+ ... messages=Message("Tell me more about that"),
+ ... cursor=response.cursor
+ ... )
+
+ Send multiple messages at once:
+
+ >>> response = client.agents.chat(
+ ... agent_external_id="my_agent",
+ ... messages=[
+ ... Message("Help me find the 1st stage compressor."),
+ ... Message("Once you have found it, find related time series.")
+ ... ]
+ ... )
+
+ Chat with client-side actions:
+
+ >>> from cognite.client.data_classes.agents import ClientToolAction, ClientToolResult
+ >>> add_numbers_action = ClientToolAction(
+ ... name="add",
+ ... description="Add two numbers together",
+ ... parameters={
+ ... "type": "object",
+ ... "properties": {
+ ... "a": {"type": "number", "description": "First number"},
+ ... "b": {"type": "number", "description": "Second number"},
+ ... },
+ ... "required": ["a", "b"]
+ ... }
+ ... )
+ >>> response = client.agents.chat(
+ ... agent_external_id="my_agent",
+ ... messages=Message("What is 42 plus 58?"),
+ ... actions=[add_numbers_action]
+ ... )
+ >>> if response.action_calls:
+ ... for call in response.action_calls:
+ ... # Execute the action
+ ... result = call.arguments["a"] + call.arguments["b"]
+ ... # Send result back
+ ... response = client.agents.chat(
+ ... agent_external_id="my_agent",
+ ... messages=ClientToolResult(
+ ... action_id=call.action_id,
+ ... content=f"The result is {result}"
+ ... ),
+ ... cursor=response.cursor,
+ ... actions=[add_numbers_action]
+ ... )
+ """
+ return run_sync(
+ self.__async_client.agents.chat(
+ agent_external_id=agent_external_id, messages=messages, cursor=cursor, actions=actions
+ )
+ )
diff --git a/cognite/client/_sync_api/ai/__init__.py b/cognite/client/_sync_api/ai/__init__.py
new file mode 100644
index 0000000000..b6cd24f1f4
--- /dev/null
+++ b/cognite/client/_sync_api/ai/__init__.py
@@ -0,0 +1,25 @@
+"""
+===============================================================================
+9f3dd7689d375651560a7f9b91cb94e7
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._sync_api.ai.tools import SyncAIToolsAPI
+from cognite.client._sync_api_client import SyncAPIClient
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncAIAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+ self.tools = SyncAIToolsAPI(async_client)
diff --git a/cognite/client/_sync_api/ai/tools/__init__.py b/cognite/client/_sync_api/ai/tools/__init__.py
new file mode 100644
index 0000000000..0d5061765f
--- /dev/null
+++ b/cognite/client/_sync_api/ai/tools/__init__.py
@@ -0,0 +1,25 @@
+"""
+===============================================================================
+c07271fd270ca0c1a19ea1dd1cff15af
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._sync_api.ai.tools.documents import SyncAIDocumentsAPI
+from cognite.client._sync_api_client import SyncAPIClient
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncAIToolsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+ self.documents = SyncAIDocumentsAPI(async_client)
diff --git a/cognite/client/_sync_api/ai/tools/documents.py b/cognite/client/_sync_api/ai/tools/documents.py
new file mode 100644
index 0000000000..874a290cc2
--- /dev/null
+++ b/cognite/client/_sync_api/ai/tools/documents.py
@@ -0,0 +1,141 @@
+"""
+===============================================================================
+e53e36c6b07aeb024b15078c9e4ca307
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Sequence
+from typing import Literal
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.ai import Answer, AnswerLanguage, Summary
+from cognite.client.data_classes.data_modeling import NodeId
+from cognite.client.utils._async_helpers import run_sync
+
+
+class SyncAIDocumentsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ def summarize(
+ self, id: int | None = None, external_id: str | None = None, instance_id: NodeId | None = None
+ ) -> Summary:
+ """
+ `Summarize a document using a Large Language Model. `_
+
+ Note:
+ Currently only supports summarizing a single document at a time, but
+ this may be extended in the future.
+
+ Args:
+ id (int | None): The ID of the document
+ external_id (str | None): The external ID of the document
+ instance_id (NodeId | None): The instance ID of the document
+
+ Returns:
+ Summary: A summary of the document.
+
+ Examples:
+
+ Summarize a single document using ID:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.ai.tools.documents.summarize(id=123)
+
+ You can also use external ID or instance ID:
+
+ >>> from cognite.client.data_classes.data_modeling import NodeId
+ >>> client.ai.tools.documents.summarize(
+ ... instance_id=NodeId("my-space", "my-xid")
+ ... )
+ """
+ return run_sync(
+ self.__async_client.ai.tools.documents.summarize(id=id, external_id=external_id, instance_id=instance_id)
+ )
+
+ def ask_question(
+ self,
+ question: str,
+ *,
+ id: int | Sequence[int] | None = None,
+ external_id: str | Sequence[str] | None = None,
+ instance_id: NodeId | Sequence[NodeId] | None = None,
+ language: AnswerLanguage
+ | Literal[
+ "Chinese",
+ "Dutch",
+ "English",
+ "French",
+ "German",
+ "Italian",
+ "Japanese",
+ "Korean",
+ "Latvian",
+ "Norwegian",
+ "Portuguese",
+ "Spanish",
+ "Swedish",
+ ] = AnswerLanguage.English,
+ additional_context: str | None = None,
+ ignore_unknown_ids: bool = False,
+ ) -> Answer:
+ """
+ `Ask a question about one or more documents using a Large Language Model. `_
+
+ Supports up to 100 documents at a time.
+
+ Args:
+ question (str): The question.
+ id (int | Sequence[int] | None): The ID(s) of the document(s)
+ external_id (str | Sequence[str] | None): The external ID(s) of the document(s)
+ instance_id (NodeId | Sequence[NodeId] | None): The instance ID(s) of the document(s)
+ language (AnswerLanguage | Literal['Chinese', 'Dutch', 'English', 'French', 'German', 'Italian', 'Japanese', 'Korean', 'Latvian', 'Norwegian', 'Portuguese', 'Spanish', 'Swedish']): The desired language of the answer, defaults to English.
+ additional_context (str | None): Additional context that you want the LLM to take into account.
+ ignore_unknown_ids (bool): Whether to skip documents that do not exist or that are not fully processed, instead of throwing an error. If no valid documents are found, an error will always be raised.
+
+ Returns:
+ Answer: The answer to the question in the form of a list of multiple content objects, each consisting of a chunk of text along with a set of references.
+
+ Examples:
+
+ Ask a question about a single document with id=123 and get the answer in English (default):
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.ai.tools.documents.ask_question(
+ ... question="What model pump was used?",
+ ... id=123,
+ ... )
+
+ Ask a question about multiple documents referenced using external IDs, and instance ID
+ and get the answer in German:
+
+ >>> from cognite.client.data_classes.data_modeling import NodeId
+ >>> from cognite.client.data_classes.ai import AnswerLanguage
+ >>> client.ai.tools.documents.ask_question(
+ ... question="What other pumps are available?",
+ ... external_id=["foo", "bar"],
+ ... instance_id=NodeId("my-space", "my-xid"),
+ ... language=AnswerLanguage.German,
+ ... )
+ """
+ return run_sync(
+ self.__async_client.ai.tools.documents.ask_question(
+ question=question,
+ id=id,
+ external_id=external_id,
+ instance_id=instance_id,
+ language=language,
+ additional_context=additional_context,
+ ignore_unknown_ids=ignore_unknown_ids,
+ )
+ )
diff --git a/cognite/client/_sync_api/annotations.py b/cognite/client/_sync_api/annotations.py
new file mode 100644
index 0000000000..73b3a8dec6
--- /dev/null
+++ b/cognite/client/_sync_api/annotations.py
@@ -0,0 +1,185 @@
+"""
+===============================================================================
+c9ad6444c7e9b577c7eadb5b29053fc1
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Sequence
+from typing import TYPE_CHECKING, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import Annotation, AnnotationFilter, AnnotationList, AnnotationUpdate
+from cognite.client.data_classes.annotations import AnnotationReverseLookupFilter, AnnotationWrite
+from cognite.client.data_classes.contextualization import ResourceReferenceList
+from cognite.client.utils._async_helpers import run_sync
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncAnnotationsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def create(self, annotations: Annotation | AnnotationWrite) -> Annotation: ...
+
+ @overload
+ def create(self, annotations: Sequence[Annotation | AnnotationWrite]) -> AnnotationList: ...
+
+ def create(
+ self, annotations: Annotation | AnnotationWrite | Sequence[Annotation | AnnotationWrite]
+ ) -> Annotation | AnnotationList:
+ """
+ `Create annotations `_
+
+ Args:
+ annotations (Annotation | AnnotationWrite | Sequence[Annotation | AnnotationWrite]): Annotation(s) to create
+
+ Returns:
+ Annotation | AnnotationList: Created annotation(s)
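+
+ Examples:
+
+ Create an annotation (a minimal sketch; the annotation type, data and creating-app fields are illustrative values):
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import Annotation
+ >>> client = CogniteClient()
+ >>> annotation = Annotation(
+ ... annotation_type="images.ObjectDetection",
+ ... data={"label": "cat", "boundingBox": {"xMin": 0.1, "xMax": 0.2, "yMin": 0.1, "yMax": 0.2}},
+ ... status="suggested",
+ ... creating_app="my-app",
+ ... creating_app_version="0.1",
+ ... creating_user=None,
+ ... annotated_resource_type="file",
+ ... annotated_resource_id=1,
+ ... )
+ >>> res = client.annotations.create(annotation)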
+ """
+ return run_sync(self.__async_client.annotations.create(annotations=annotations))
+
+ @overload
+ def suggest(self, annotations: Annotation | AnnotationWrite) -> Annotation: ...
+
+ @overload
+ def suggest(self, annotations: Sequence[Annotation] | Sequence[AnnotationWrite]) -> AnnotationList: ...
+
+ def suggest(
+ self, annotations: Annotation | AnnotationWrite | Sequence[Annotation] | Sequence[AnnotationWrite]
+ ) -> Annotation | AnnotationList:
+ """
+ `Suggest annotations `_
+
+ Args:
+ annotations (Annotation | AnnotationWrite | Sequence[Annotation] | Sequence[AnnotationWrite]): annotation(s) to suggest. They must have status set to "suggested".
+
+ Returns:
+ Annotation | AnnotationList: suggested annotation(s)
+ """
+ return run_sync(self.__async_client.annotations.suggest(annotations=annotations))
+
+ @overload
+ def update(
+ self,
+ item: Annotation | AnnotationWrite | AnnotationUpdate,
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> Annotation: ...
+
+ @overload
+ def update(
+ self,
+ item: Sequence[Annotation | AnnotationWrite | AnnotationUpdate],
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> AnnotationList: ...
+
+ def update(
+ self,
+ item: Annotation
+ | AnnotationWrite
+ | AnnotationUpdate
+ | Sequence[Annotation | AnnotationWrite | AnnotationUpdate],
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> Annotation | AnnotationList:
+ """
+ `Update annotations `_
+
+ Args:
+ item (Annotation | AnnotationWrite | AnnotationUpdate | Sequence[Annotation | AnnotationWrite | AnnotationUpdate]): Annotation or list of annotations to update (or patch or list of patches to apply)
+ mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Annotation or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+
+ Returns:
+ Annotation | AnnotationList: Updated annotation(s)
+ """
+ return run_sync(self.__async_client.annotations.update(item=item, mode=mode))
+
+ def delete(self, id: int | Sequence[int]) -> None:
+ """
+ `Delete annotations `_
+
+ Args:
+ id (int | Sequence[int]): ID or list of IDs to be deleted
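+
+ Examples:
+
+ Delete annotations by id (the ids are illustrative):
+
+ >>> from cognite.client import CogniteClient
+ >>> client = CogniteClient()
+ >>> client.annotations.delete(id=[1, 2, 3])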
+ """
+ return run_sync(self.__async_client.annotations.delete(id=id))
+
+ def retrieve_multiple(self, ids: Sequence[int]) -> AnnotationList:
+ """
+ `Retrieve annotations by IDs `_
+
+ Args:
+ ids (Sequence[int]): list of IDs to be retrieved
+
+ Returns:
+ AnnotationList: list of annotations
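+
+ Examples:
+
+ Retrieve annotations by their ids (the ids are illustrative):
+
+ >>> from cognite.client import CogniteClient
+ >>> client = CogniteClient()
+ >>> res = client.annotations.retrieve_multiple(ids=[1, 2, 3])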
+ """
+ return run_sync(self.__async_client.annotations.retrieve_multiple(ids=ids))
+
+ def retrieve(self, id: int) -> Annotation | None:
+ """
+ `Retrieve an annotation by id `_
+
+ Args:
+ id (int): id of the annotation to be retrieved
+
+ Returns:
+ Annotation | None: annotation requested
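+
+ Examples:
+
+ Retrieve an annotation by id (the id is illustrative):
+
+ >>> from cognite.client import CogniteClient
+ >>> client = CogniteClient()
+ >>> res = client.annotations.retrieve(id=1)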
+ """
+ return run_sync(self.__async_client.annotations.retrieve(id=id))
+
+ def reverse_lookup(self, filter: AnnotationReverseLookupFilter, limit: int | None = None) -> ResourceReferenceList:
+ """
+ Reverse lookup annotated resources based on having annotations matching the filter.
+
+ Args:
+ filter (AnnotationReverseLookupFilter): Filter to apply
+ limit (int | None): Maximum number of results to return. Defaults to None (all).
+
+ Returns:
+ ResourceReferenceList: List of resource references
+
+ Examples:
+
+ Retrieve the first 100 ids of annotated resources matching the 'file' resource type:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import AnnotationReverseLookupFilter
+ >>> client = CogniteClient()
+ >>> flt = AnnotationReverseLookupFilter(annotated_resource_type="file")
+ >>> res = client.annotations.reverse_lookup(flt, limit=100)
+ """
+ return run_sync(self.__async_client.annotations.reverse_lookup(filter=filter, limit=limit))
+
+ def list(self, filter: AnnotationFilter | dict, limit: int | None = DEFAULT_LIMIT_READ) -> AnnotationList:
+ """
+ `List annotations. `_
+
+ Note:
+ A filter specifying both 'annotated_resource_type' and 'annotated_resource_ids' is always required.
+
+ Args:
+ filter (AnnotationFilter | dict): Return annotations with parameter values that match what is specified.
+ limit (int | None): Maximum number of annotations to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+
+ Returns:
+ AnnotationList: list of annotations
+
+ Example:
+
+ List all annotations for the file with id=123:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import AnnotationFilter
+ >>> client = CogniteClient()
+ >>> flt = AnnotationFilter(annotated_resource_type="file", annotated_resource_ids=[{"id": 123}])
+ >>> res = client.annotations.list(flt, limit=None)
+ """
+ return run_sync(self.__async_client.annotations.list(filter=filter, limit=limit))
diff --git a/cognite/client/_sync_api/assets.py b/cognite/client/_sync_api/assets.py
new file mode 100644
index 0000000000..9f1f902eef
--- /dev/null
+++ b/cognite/client/_sync_api/assets.py
@@ -0,0 +1,909 @@
+"""
+===============================================================================
+0d85e1a0727792cc34a2b348d9a25113
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import Any, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._api.assets import AggregateAssetProperty, SortSpec
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import (
+ Asset,
+ AssetFilter,
+ AssetHierarchy,
+ AssetList,
+ AssetUpdate,
+ GeoLocationFilter,
+ LabelFilter,
+ TimestampRange,
+)
+from cognite.client.data_classes.aggregations import AggregationFilter, UniqueResultList
+from cognite.client.data_classes.assets import (
+ AssetPropertyLike,
+ AssetWrite,
+)
+from cognite.client.data_classes.filters import Filter
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+
+class SyncAssetsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[Asset]: ...
+
+ @overload
+ def __call__(self, chunk_size: int) -> Iterator[AssetList]: ...
+
+ def __call__(
+ self,
+ chunk_size: int | None = None,
+ name: str | None = None,
+ parent_ids: Sequence[int] | None = None,
+ parent_external_ids: SequenceNotStr[str] | None = None,
+ asset_subtree_ids: int | Sequence[int] | None = None,
+ asset_subtree_external_ids: str | SequenceNotStr[str] | None = None,
+ metadata: dict[str, str] | None = None,
+ data_set_ids: int | Sequence[int] | None = None,
+ data_set_external_ids: str | SequenceNotStr[str] | None = None,
+ labels: LabelFilter | None = None,
+ geo_location: GeoLocationFilter | None = None,
+ source: str | None = None,
+ created_time: TimestampRange | dict[str, Any] | None = None,
+ last_updated_time: TimestampRange | dict[str, Any] | None = None,
+ root: bool | None = None,
+ external_id_prefix: str | None = None,
+ aggregated_properties: Sequence[AggregateAssetProperty] | None = None,
+ limit: int | None = None,
+ advanced_filter: Filter | dict[str, Any] | None = None,
+ sort: SortSpec | list[SortSpec] | None = None,
+ ) -> Iterator[Asset | AssetList]:
+ """
+ Iterate over assets
+
+ Fetches assets as they are iterated over, so you keep a limited number of assets in memory.
+
+ Args:
+ chunk_size (int | None): Number of assets to return in each chunk. Defaults to yielding one asset at a time.
+ name (str | None): Name of asset. Often referred to as tag.
+ parent_ids (Sequence[int] | None): Return only the direct descendants of the specified assets.
+ parent_external_ids (SequenceNotStr[str] | None): Return only the direct descendants of the specified assets.
+ asset_subtree_ids (int | Sequence[int] | None): Only include assets in subtrees rooted at any of the specified assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include assets in subtrees rooted at any of the specified assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value
+ data_set_ids (int | Sequence[int] | None): Return only assets in the specified data set(s) with this id / these ids.
+ data_set_external_ids (str | SequenceNotStr[str] | None): Return only assets in the specified data set(s) with this external id / these external ids.
+ labels (LabelFilter | None): Return only the assets matching the specified label.
+ geo_location (GeoLocationFilter | None): Only include files matching the specified geographic relation.
+ source (str | None): The source of this asset
+ created_time (TimestampRange | dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ last_updated_time (TimestampRange | dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ root (bool | None): filtered assets are root assets or not
+ external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID.
+ aggregated_properties (Sequence[AggregateAssetProperty] | None): Set of aggregated properties to include. Options are childCount, path, depth.
+ limit (int | None): Maximum number of assets to return. Defaults to return all items.
+ advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not.
+ sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
+
+ Yields:
+ Asset | AssetList: yields Asset one by one if chunk_size is not specified, else AssetList objects.
+ """
+ """
+ yield from SyncIterator(
+ self.__async_client.assets(
+ chunk_size=chunk_size,
+ name=name,
+ parent_ids=parent_ids,
+ parent_external_ids=parent_external_ids,
+ asset_subtree_ids=asset_subtree_ids,
+ asset_subtree_external_ids=asset_subtree_external_ids,
+ metadata=metadata,
+ data_set_ids=data_set_ids,
+ data_set_external_ids=data_set_external_ids,
+ labels=labels,
+ geo_location=geo_location,
+ source=source,
+ created_time=created_time,
+ last_updated_time=last_updated_time,
+ root=root,
+ external_id_prefix=external_id_prefix,
+ aggregated_properties=aggregated_properties,
+ limit=limit,
+ advanced_filter=advanced_filter,
+ sort=sort,
+ )
+ )
+
+ def retrieve(self, id: int | None = None, external_id: str | None = None) -> Asset | None:
+ """
+ `Retrieve a single asset by id. `_
+
+ Args:
+ id (int | None): ID
+ external_id (str | None): External ID
+
+ Returns:
+ Asset | None: Requested asset or None if it does not exist.
+
+ Examples:
+
+ Get asset by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.assets.retrieve(id=1)
+
+ Get asset by external id:
+
+ >>> res = client.assets.retrieve(external_id="1")
+ """
+ return run_sync(self.__async_client.assets.retrieve(id=id, external_id=external_id))
+
+ def retrieve_multiple(
+ self,
+ ids: Sequence[int] | None = None,
+ external_ids: SequenceNotStr[str] | None = None,
+ ignore_unknown_ids: bool = False,
+ ) -> AssetList:
+ """
+ `Retrieve multiple assets by id. `_
+
+ Args:
+ ids (Sequence[int] | None): IDs
+ external_ids (SequenceNotStr[str] | None): External IDs
+ ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+
+ Returns:
+ AssetList: The requested assets.
+
+ Examples:
+
+ Get assets by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.assets.retrieve_multiple(ids=[1, 2, 3])
+
+ Get assets by external id:
+
+ >>> res = client.assets.retrieve_multiple(external_ids=["abc", "def"], ignore_unknown_ids=True)
+ """
+ return run_sync(
+ self.__async_client.assets.retrieve_multiple(
+ ids=ids, external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids
+ )
+ )
+
+ def aggregate_count(
+ self,
+ property: AssetPropertyLike | None = None,
+ advanced_filter: Filter | dict[str, Any] | None = None,
+ filter: AssetFilter | dict[str, Any] | None = None,
+ ) -> int:
+ """
+ `Count of assets matching the specified filters. `_
+
+ Args:
+ property (AssetPropertyLike | None): If specified, get an approximate number of assets with a specific property (property is not null) and matching the filters.
+ advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down the assets to count.
+ filter (AssetFilter | dict[str, Any] | None): The filter to narrow down the assets to count (strict matching).
+
+ Returns:
+ int: The number of assets matching the specified filters.
+
+ Examples:
+
+ Count the number of assets in your CDF project:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> count = client.assets.aggregate_count()
+
+ Count the number of assets with the metadata key "timezone" in your CDF project:
+
+ >>> from cognite.client.data_classes.filters import ContainsAny
+ >>> from cognite.client.data_classes.assets import AssetProperty
+ >>> has_timezone = ContainsAny(AssetProperty.metadata, "timezone")
+ >>> asset_count = client.assets.aggregate_count(advanced_filter=has_timezone)
+ """
+ return run_sync(
+ self.__async_client.assets.aggregate_count(
+ property=property, advanced_filter=advanced_filter, filter=filter
+ )
+ )
+
+ def aggregate_cardinality_values(
+ self,
+ property: AssetPropertyLike,
+ advanced_filter: Filter | dict[str, Any] | None = None,
+ aggregate_filter: AggregationFilter | dict[str, Any] | None = None,
+ filter: AssetFilter | dict[str, Any] | None = None,
+ ) -> int:
+ """
+ `Find approximate property count for assets. `_
+
+ Args:
+ property (AssetPropertyLike): The property to count the cardinality of.
+ advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down assets.
+ aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
+ filter (AssetFilter | dict[str, Any] | None): The filter to narrow down assets (strict matching).
+ Returns:
+ int: The number of properties matching the specified filters and search.
+
+ Examples:
+
+ Count the number of labels used by assets in your CDF project:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.assets import AssetProperty
+ >>> client = CogniteClient()
+ >>> label_count = client.assets.aggregate_cardinality_values(AssetProperty.labels)
+
+ Count the number of timezones (metadata key) for assets with the word "critical" in the description in your CDF project:
+
+ >>> from cognite.client.data_classes.filters import Search
+ >>> from cognite.client.data_classes.assets import AssetProperty
+ >>> is_critical = Search(AssetProperty.description, "critical")
+ >>> critical_assets = client.assets.aggregate_cardinality_values(
+ ... AssetProperty.metadata_key("timezone"),
+ ... advanced_filter=is_critical)
+ """
+ return run_sync(
+ self.__async_client.assets.aggregate_cardinality_values(
+ property=property, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter
+ )
+ )
+
+ def aggregate_cardinality_properties(
+ self,
+ path: AssetPropertyLike,
+ advanced_filter: Filter | dict[str, Any] | None = None,
+ aggregate_filter: AggregationFilter | dict[str, Any] | None = None,
+ filter: AssetFilter | dict[str, Any] | None = None,
+ ) -> int:
+ """
+ `Find approximate paths count for assets. `_
+
+ Args:
+ path (AssetPropertyLike): The scope in every document to aggregate properties. The only value allowed now is ["metadata"].
+ It means to aggregate only metadata properties (aka keys).
+ advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down assets.
+ aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
+ filter (AssetFilter | dict[str, Any] | None): The filter to narrow down assets (strict matching).
+ Returns:
+ int: The number of properties matching the specified filters.
+
+ Examples:
+
+ Count the number of unique metadata keys used by assets in your CDF project:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.assets import AssetProperty
+ >>> client = CogniteClient()
+ >>> key_count = client.assets.aggregate_cardinality_properties(AssetProperty.metadata)
+ """
+ return run_sync(
+ self.__async_client.assets.aggregate_cardinality_properties(
+ path=path, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter
+ )
+ )
+
+ def aggregate_unique_values(
+ self,
+ property: AssetPropertyLike,
+ advanced_filter: Filter | dict[str, Any] | None = None,
+ aggregate_filter: AggregationFilter | dict[str, Any] | None = None,
+ filter: AssetFilter | dict[str, Any] | None = None,
+ ) -> UniqueResultList:
+ """
+ `Get unique properties with counts for assets. `_
+
+ Note:
+ In the case of text fields, the values are aggregated in a case-insensitive manner.
+
+ Args:
+ property (AssetPropertyLike): The property to group by.
+ advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down assets.
+ aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
+ filter (AssetFilter | dict[str, Any] | None): The filter to narrow down assets (strict matching).
+
+ Returns:
+ UniqueResultList: List of unique values of assets matching the specified filters and search.
+
+ Examples:
+
+ Get the timezones (metadata key) with count for your assets in your CDF project:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.assets import AssetProperty
+ >>> client = CogniteClient()
+ >>> result = client.assets.aggregate_unique_values(AssetProperty.metadata_key("timezone"))
+ >>> print(result.unique)
+
+ Get the different labels with count used for assets created after 2020-01-01 in your CDF project:
+
+ >>> from cognite.client.data_classes import filters
+ >>> from cognite.client.data_classes.assets import AssetProperty
+ >>> from cognite.client.utils import timestamp_to_ms
+ >>> from datetime import datetime
+ >>> created_after_2020 = filters.Range(AssetProperty.created_time, gte=timestamp_to_ms(datetime(2020, 1, 1)))
+ >>> result = client.assets.aggregate_unique_values(AssetProperty.labels, advanced_filter=created_after_2020)
+ >>> print(result.unique)
+
+ Get the different labels with count for assets updated after 2020-01-01 in your CDF project, but exclude all labels that
+ starts with "test":
+
+ >>> from cognite.client.data_classes.assets import AssetProperty
+ >>> from cognite.client.data_classes import aggregations
+ >>> from cognite.client.data_classes import filters
+ >>> not_test = aggregations.Not(aggregations.Prefix("test"))
+ >>> created_after_2020 = filters.Range(AssetProperty.last_updated_time, gte=timestamp_to_ms(datetime(2020, 1, 1)))
+ >>> result = client.assets.aggregate_unique_values(AssetProperty.labels, advanced_filter=created_after_2020, aggregate_filter=not_test)
+ >>> print(result.unique)
+ """
+ return run_sync(
+ self.__async_client.assets.aggregate_unique_values(
+ property=property, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter
+ )
+ )
+
+ def aggregate_unique_properties(
+ self,
+ path: AssetPropertyLike,
+ advanced_filter: Filter | dict[str, Any] | None = None,
+ aggregate_filter: AggregationFilter | dict[str, Any] | None = None,
+ filter: AssetFilter | dict[str, Any] | None = None,
+ ) -> UniqueResultList:
+ """
+ `Get unique paths with counts for assets. `_
+
+ Note:
+ In the case of text fields, the values are aggregated in a case-insensitive manner.
+
+ Args:
+ path (AssetPropertyLike): The scope in every document to aggregate properties. The only value allowed now is ["metadata"].
+ It means to aggregate only metadata properties (aka keys).
+ advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down assets.
+ aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
+ filter (AssetFilter | dict[str, Any] | None): The filter to narrow down assets (strict matching).
+
+ Returns:
+ UniqueResultList: List of unique values of assets matching the specified filters and search.
+
+ Examples:
+
+ Get the metadata keys with counts for your assets in your CDF project:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.assets import AssetProperty
+ >>> client = CogniteClient()
+ >>> result = client.assets.aggregate_unique_properties(AssetProperty.metadata)
+ """
+ return run_sync(
+ self.__async_client.assets.aggregate_unique_properties(
+ path=path, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter
+ )
+ )
+
+ @overload
+ def create(self, asset: Sequence[Asset] | Sequence[AssetWrite]) -> AssetList: ...
+
+ @overload
+ def create(self, asset: Asset | AssetWrite) -> Asset: ...
+
+ def create(self, asset: Asset | AssetWrite | Sequence[Asset] | Sequence[AssetWrite]) -> Asset | AssetList:
+ """
+ `Create one or more assets. `_
+
+ You can create an arbitrary number of assets, and the SDK will split the request into multiple requests.
+ When specifying a parent-child relation between assets using `parentExternalId`, the link will be resolved into an internal ID and stored as `parentId`.
+
+ Args:
+ asset (Asset | AssetWrite | Sequence[Asset] | Sequence[AssetWrite]): Asset or list of assets to create.
+
+ Returns:
+ Asset | AssetList: Created asset(s)
+
+ Examples:
+
+ Create new assets:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import AssetWrite
+ >>> client = CogniteClient()
+ >>> assets = [AssetWrite(name="asset1"), AssetWrite(name="asset2")]
+ >>> res = client.assets.create(assets)
+
+ Create asset with label:
+
+ >>> from cognite.client.data_classes import AssetWrite, Label
+ >>> asset = AssetWrite(name="my_pump", labels=[Label(external_id="PUMP")])
+ >>> res = client.assets.create(asset)
+ """
+ return run_sync(self.__async_client.assets.create(asset=asset))
+
+ def create_hierarchy(
+ self,
+ assets: Sequence[AssetWrite] | AssetHierarchy,
+ *,
+ upsert: bool = False,
+ upsert_mode: Literal["patch", "replace"] = "patch",
+ ) -> AssetList:
+ """
+ Create an asset hierarchy with validation.
+
+ This helper function makes it easy to insert large asset hierarchies. It solves the problem of topological
+ insertion order, i.e. a parent asset must exist before it can be referenced by any 'children' assets.
+ You may pass any number of partial- or full hierarchies: there are no requirements on the number of root
+ assets, so you may pass zero, one or many (same goes for the non-root assets).
+
+ Args:
+ assets (Sequence[AssetWrite] | AssetHierarchy): List of assets to create or an instance of AssetHierarchy.
+ upsert (bool): If used, already existing assets will be updated instead of an exception being raised. You may control how updates are applied with the 'upsert_mode' argument.
+ upsert_mode (Literal['patch', 'replace']): Only applicable with upsert. Pass 'patch' to only update fields with non-null values (default), or 'replace' to do full updates (unset fields become null or empty).
+
+ Returns:
+ AssetList: Created (and possibly updated) asset hierarchy
+
+ Prior to insertion, this function will run validation on the given assets and raise an error if any of
+ the following issues are found:
+
+ 1. Any assets are invalid (category: ``invalid``):
+
+ - Missing external ID.
+ - Missing a valid name.
+ - Has an ID set (note: you may not pass Asset, use AssetWrite)
+ 2. Any asset duplicates exist (category: ``duplicates``)
+ 3. Any assets have an ambiguous parent link (category: ``unsure_parents``)
+ 4. Any group of assets form a cycle, e.g. A->B->A (category: ``cycles``)
+
+ As part of validation there is a fifth category that is ignored when using this method (for backwards compatibility) and that
+ is orphan assets. These are assets linking a parent by an identifier that is not present among the given assets, and as such,
+ might contain links we are unable to vet ahead of insertion. These are thus assumed to be valid, but may fail.
+
+ Tip:
+ The different categories specified above correspond to the names of the attributes you might access on the raised error to
+ get the collection of 'bad' assets falling in that group, e.g. ``error.duplicates``.
+
+ Note:
+ Updating ``external_id`` via upsert is not supported (and will not be supported). Use ``AssetsAPI.update`` instead.
+
+ Warning:
+ The API does not natively support upsert, so the SDK has to simulate the behaviour at the cost of some insertion speed.
+
+ Be careful when moving assets to new parents via upsert: Please do so only by specifying ``parent_external_id``
+ (instead of ``parent_id``) to avoid race conditions in insertion order (temporary cycles might form since we
+ can only make changes to 1000 assets at a time).
+
+ Examples:
+
+ Create an asset hierarchy:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import AssetWrite
+ >>> client = CogniteClient()
+ >>> assets = [
+ ... AssetWrite(external_id="root", name="root"),
+ ... AssetWrite(external_id="child1", parent_external_id="root", name="child1"),
+ ... AssetWrite(external_id="child2", parent_external_id="root", name="child2")]
+ >>> res = client.assets.create_hierarchy(assets)
+
+ Create an asset hierarchy, but run update for existing assets:
+
+ >>> res = client.assets.create_hierarchy(assets, upsert=True, upsert_mode="patch")
+
+ Patch will only update the parameters you have defined on your assets. Note that specifically setting
+ something to ``None`` is the same as not setting it. For ``metadata``, this will extend your existing
+ data, only overwriting when keys overlap. For ``labels`` the behaviour is mostly the same, existing are
+ left untouched, and your new ones are simply added.
+
+ You may also pass ``upsert_mode="replace"`` to make sure the updated assets look identical to the ones
+ you passed to the method. For both ``metadata`` and ``labels`` this will clear out all existing,
+ before (potentially) adding the new ones.
+
+ If the hierarchy validation for some reason fails, you may inspect all the issues that were found by
+ catching :class:`~cognite.client.exceptions.CogniteAssetHierarchyError`:
+
+ >>> from cognite.client.exceptions import CogniteAssetHierarchyError
+ >>> try:
+ ... res = client.assets.create_hierarchy(assets)
+ ... except CogniteAssetHierarchyError as err:
+ ... if err.invalid:
+ ... ... # do something
+
+ In addition to ``invalid``, you may inspect ``duplicates``, ``unsure_parents``, ``orphans`` and ``cycles``.
+ Note that cycles are not available if any of the other basic issues exist, as the search for cyclical
+ references requires a clean asset hierarchy to begin with.
+
+ You may also wrap the ``create_hierarchy()`` call in a try-except to get information if any of the assets
+ fails to be created (assuming a valid hierarchy):
+
+ >>> from cognite.client.exceptions import CogniteAPIError
+ >>> try:
+ ... client.assets.create_hierarchy(assets)
+ ... except CogniteAPIError as err:
+ ... created = err.successful
+ ... maybe_created = err.unknown
+ ... not_created = err.failed
+
+ Here's a slightly longer explanation of the different groups:
+
+ - ``err.successful``: Which assets were created (request yielded a 201)
+ - ``err.unknown``: Which assets *may* have been created (request yielded 5xx)
+ - ``err.failed``: Which assets were *not* created (request yielded 4xx, or was a descendant of an asset with unknown status)
+
+ The preferred way to create an asset hierarchy is to run validation *prior to insertion*. You may do this by
+ using the :class:`~cognite.client.data_classes.assets.AssetHierarchy` class. It will by default consider orphan
+ assets to be problematic (but accepts the boolean parameter ``ignore_orphans``), contrary to how ``create_hierarchy``
+ works (which accepts them in order to be backwards-compatible). It also provides helpful methods to create reports
+ of any issues found, check out ``validate_and_report``:
+
+ >>> from cognite.client.data_classes import AssetHierarchy
+ >>> from pathlib import Path
+ >>> hierarchy = AssetHierarchy(assets)
+ >>> if hierarchy.is_valid():
+ ... res = client.assets.create_hierarchy(hierarchy)
+ ... else:
+ ... hierarchy.validate_and_report(output_file=Path("report.txt"))
+ """
+ return run_sync(
+ self.__async_client.assets.create_hierarchy(assets=assets, upsert=upsert, upsert_mode=upsert_mode)
+ )
+
+ def delete(
+ self,
+ id: int | Sequence[int] | None = None,
+ external_id: str | SequenceNotStr[str] | None = None,
+ recursive: bool = False,
+ ignore_unknown_ids: bool = False,
+ ) -> None:
+ """
+ `Delete one or more assets `_
+
+ Args:
+ id (int | Sequence[int] | None): Id or list of ids
+ external_id (str | SequenceNotStr[str] | None): External ID or list of external ids
+ recursive (bool): Recursively delete whole asset subtrees under given ids. Defaults to False.
+ ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+
+ Examples:
+
+ Delete assets by id or external id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.assets.delete(id=[1,2,3], external_id="3")
+ """
+ return run_sync(
+ self.__async_client.assets.delete(
+ id=id, external_id=external_id, recursive=recursive, ignore_unknown_ids=ignore_unknown_ids
+ )
+ )
+
+ @overload
+ def update(
+ self,
+ item: Sequence[Asset | AssetWrite | AssetUpdate],
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> AssetList: ...
+
+ @overload
+ def update(
+ self,
+ item: Asset | AssetWrite | AssetUpdate,
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> Asset: ...
+
+ def update(
+ self,
+ item: Asset | AssetWrite | AssetUpdate | Sequence[Asset | AssetWrite | AssetUpdate],
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> Asset | AssetList:
+ """
+ `Update one or more assets `_
+ Labels can be added, removed or replaced (set). Note that the set operation deletes all existing labels and adds the newly specified ones.
+
+ Args:
+ item (Asset | AssetWrite | AssetUpdate | Sequence[Asset | AssetWrite | AssetUpdate]): Asset(s) to update
+ mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Asset or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+ Returns:
+ Asset | AssetList: Updated asset(s)
+
+ Examples:
+ Perform a partial update on an asset, updating the description and adding a new field to metadata:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import AssetUpdate
+ >>> client = CogniteClient()
+ >>> my_update = AssetUpdate(id=1).description.set("New description").metadata.add({"key": "value"})
+ >>> res1 = client.assets.update(my_update)
+ >>> # Remove an already set field like so
+ >>> another_update = AssetUpdate(id=1).description.set(None)
+ >>> res2 = client.assets.update(another_update)
+
+ Remove the metadata on an asset:
+
+ >>> from cognite.client.data_classes import AssetUpdate
+ >>> # First add some metadata, so there is something to remove:
+ >>> my_update = AssetUpdate(id=1).metadata.add({"key": "value"})
+ >>> res1 = client.assets.update(my_update)
+ >>> another_update = AssetUpdate(id=1).metadata.set(None)
+ >>> # The same result can be achieved with:
+ >>> another_update2 = AssetUpdate(id=1).metadata.set({})
+ >>> res2 = client.assets.update(another_update)
+
+ Attach labels to an asset:
+
+ >>> from cognite.client.data_classes import AssetUpdate
+ >>> my_update = AssetUpdate(id=1).labels.add(["PUMP", "VERIFIED"])
+ >>> res = client.assets.update(my_update)
+
+ Detach a single label from an asset:
+
+ >>> from cognite.client.data_classes import AssetUpdate
+ >>> my_update = AssetUpdate(id=1).labels.remove("PUMP")
+ >>> res = client.assets.update(my_update)
+
+ Replace all labels for an asset:
+
+ >>> from cognite.client.data_classes import AssetUpdate
+ >>> my_update = AssetUpdate(id=1).labels.set("PUMP")
+ >>> res = client.assets.update(my_update)
+ """
+ return run_sync(self.__async_client.assets.update(item=item, mode=mode))
+
+ @overload
+ def upsert(self, item: Sequence[Asset | AssetWrite], mode: Literal["patch", "replace"] = "patch") -> AssetList: ...
+
+ @overload
+ def upsert(self, item: Asset | AssetWrite, mode: Literal["patch", "replace"] = "patch") -> Asset: ...
+
+ def upsert(
+ self, item: Asset | AssetWrite | Sequence[Asset | AssetWrite], mode: Literal["patch", "replace"] = "patch"
+ ) -> Asset | AssetList:
+ """
+ Upsert assets, i.e., update if it exists, and create if it does not exist.
+ Note this is a convenience method that handles the upserting for you by first calling update on all items,
+ and if any of them fail because they do not exist, it will create them instead.
+
+ For more details, see :ref:`appendix-upsert`.
+
+ Args:
+ item (Asset | AssetWrite | Sequence[Asset | AssetWrite]): Asset or list of assets to upsert.
+ mode (Literal['patch', 'replace']): Whether to patch or replace in the case the assets are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified.
+
+ Returns:
+ Asset | AssetList: The upserted asset(s).
+
+ Examples:
+
+ Upsert for assets:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import AssetWrite
+ >>> client = CogniteClient()
+ >>> existing_asset = client.assets.retrieve(id=1)
+ >>> existing_asset.description = "New description"
+ >>> new_asset = AssetWrite(external_id="new_asset", name="my asset", description="New asset")
+ >>> res = client.assets.upsert([existing_asset, new_asset], mode="replace")
+ """
+ return run_sync(self.__async_client.assets.upsert(item=item, mode=mode))
+
+ def search(
+ self,
+ name: str | None = None,
+ description: str | None = None,
+ query: str | None = None,
+ filter: AssetFilter | dict[str, Any] | None = None,
+ limit: int = DEFAULT_LIMIT_READ,
+ ) -> AssetList:
+ """
+ `Search for assets `_
+ Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required.
+
+ Args:
+ name (str | None): Fuzzy match on name.
+ description (str | None): Fuzzy match on description.
+ query (str | None): Whitespace-separated terms to search for in assets. Does a best-effort fuzzy search in relevant fields (currently name and description) for variations of any of the search terms, and orders results by relevance.
+ filter (AssetFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields.
+ limit (int): Maximum number of results to return.
+
+ Returns:
+ AssetList: List of requested assets
+
+ Examples:
+
+ Search for assets by fuzzy search on name:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.assets.search(name="some name")
+
+ Search for assets by exact search on name:
+
+ >>> res = client.assets.search(filter={"name": "some name"})
+
+ Search for assets by improved multi-field fuzzy search:
+
+ >>> res = client.assets.search(query="TAG 30 XV")
+
+ Search for assets using multiple filters, finding all assets with name similar to `xyz` with parent asset `123` or `456` with source `some source`:
+
+ >>> res = client.assets.search(name="xyz",filter={"parent_ids": [123,456],"source": "some source"})
+
+ Search for an asset with an attached label:
+
+ >>> from cognite.client.data_classes import AssetFilter, LabelFilter
+ >>> my_label_filter = LabelFilter(contains_all=["PUMP"])
+ >>> res = client.assets.search(name="xyz",filter=AssetFilter(labels=my_label_filter))
+ """
+ return run_sync(
+ self.__async_client.assets.search(
+ name=name, description=description, query=query, filter=filter, limit=limit
+ )
+ )
+
+ def retrieve_subtree(
+ self, id: int | None = None, external_id: str | None = None, depth: int | None = None
+ ) -> AssetList:
+ """
+ Retrieve the subtree for this asset up to a specified depth.
+
+ Args:
+ id (int | None): Id of the root asset in the subtree.
+ external_id (str | None): External id of the root asset in the subtree.
+ depth (int | None): Retrieve assets up to this depth below the root asset in the subtree. Omit to get the entire subtree.
+
+ Returns:
+ AssetList: The requested assets, or an empty AssetList if the asset does not exist.
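+
+ Examples:
+
+ Retrieve a subtree down to a depth of 2 (a minimal sketch; assumes an asset with id=1 exists):
+
+ >>> from cognite.client import CogniteClient
+ >>> client = CogniteClient()
+ >>> subtree = client.assets.retrieve_subtree(id=1, depth=2)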
+ """
+ return run_sync(self.__async_client.assets.retrieve_subtree(id=id, external_id=external_id, depth=depth))
+
+ def list(
+ self,
+ name: str | None = None,
+ parent_ids: Sequence[int] | None = None,
+ parent_external_ids: SequenceNotStr[str] | None = None,
+ asset_subtree_ids: int | Sequence[int] | None = None,
+ asset_subtree_external_ids: str | SequenceNotStr[str] | None = None,
+ data_set_ids: int | Sequence[int] | None = None,
+ data_set_external_ids: str | SequenceNotStr[str] | None = None,
+ labels: LabelFilter | None = None,
+ geo_location: GeoLocationFilter | None = None,
+ metadata: dict[str, str] | None = None,
+ source: str | None = None,
+ created_time: dict[str, Any] | TimestampRange | None = None,
+ last_updated_time: dict[str, Any] | TimestampRange | None = None,
+ root: bool | None = None,
+ external_id_prefix: str | None = None,
+ aggregated_properties: Sequence[AggregateAssetProperty] | None = None,
+ partitions: int | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ advanced_filter: Filter | dict[str, Any] | None = None,
+ sort: SortSpec | list[SortSpec] | None = None,
+ ) -> AssetList:
+ """
+ `List assets `_
+
+ Args:
+ name (str | None): Name of asset. Often referred to as tag.
+ parent_ids (Sequence[int] | None): Return only the direct descendants of the specified assets.
+ parent_external_ids (SequenceNotStr[str] | None): Return only the direct descendants of the specified assets.
+ asset_subtree_ids (int | Sequence[int] | None): Only include assets in subtrees rooted at any of the specified assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include assets in subtrees rooted at any of the specified assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ data_set_ids (int | Sequence[int] | None): Return only assets in the specified data set(s) with this id / these ids.
+ data_set_external_ids (str | SequenceNotStr[str] | None): Return only assets in the specified data set(s) with this external id / these external ids.
+ labels (LabelFilter | None): Return only the assets matching the specified label filter.
+ geo_location (GeoLocationFilter | None): Only include assets matching the specified geographic relation.
+ metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value.
+ source (str | None): The source of this asset.
+ created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ root (bool | None): Whether the filtered assets are root assets or not.
+ external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID.
+ aggregated_properties (Sequence[AggregateAssetProperty] | None): Set of aggregated properties to include. Options are childCount, path, depth.
+ partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`).
+ limit (int | None): Maximum number of assets to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage.
+ sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
+
+ Returns:
+ AssetList: List of requested assets
+
+ .. note::
+ When using `partitions`, there are a few considerations to keep in mind:
+ * `limit` has to be set to `None` (or `-1`).
+ * The API may reject requests if you specify more than 10 partitions. When Cognite enforces this behavior, the requests result in a 400 Bad Request status.
+ * Partitions are done independently of sorting: there's no guarantee of the sort order between elements from different partitions. For this reason providing a `sort` parameter when using `partitions` is not allowed.
+
+ Examples:
+
+ List assets:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> asset_list = client.assets.list(limit=5)
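+
+ List all assets using parallel partitions (remember: `limit` must be set to `None` when using `partitions`):
+
+ >>> all_assets = client.assets.list(partitions=10, limit=None)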
+
+ Iterate over assets, one-by-one:
+
+ >>> for asset in client.assets():
+ ... asset # do something with the asset
+
+ Iterate over chunks of assets to reduce memory load:
+
+ >>> for asset_list in client.assets(chunk_size=2500):
+ ... asset_list # do something with the assets
+
+ Filter assets based on labels:
+
+ >>> from cognite.client.data_classes import LabelFilter
+ >>> my_label_filter = LabelFilter(contains_all=["PUMP", "VERIFIED"])
+ >>> asset_list = client.assets.list(labels=my_label_filter)
+
+ Using advanced filter, find all assets that have a metadata key 'timezone' starting with 'Europe',
+ and sort by external id ascending:
+
+ >>> from cognite.client.data_classes import filters
+ >>> in_timezone = filters.Prefix(["metadata", "timezone"], "Europe")
+ >>> res = client.assets.list(advanced_filter=in_timezone, sort=("external_id", "asc"))
+
+ Note that you can check the API documentation above to see which properties you can filter on
+ with which filters.
+
+ To make it easier to avoid spelling mistakes and easier to look up available properties
+ for filtering and sorting, you can also use the `AssetProperty` and `SortableAssetProperty` Enums.
+
+ >>> from cognite.client.data_classes import filters
+ >>> from cognite.client.data_classes.assets import AssetProperty, SortableAssetProperty
+ >>> in_timezone = filters.Prefix(AssetProperty.metadata_key("timezone"), "Europe")
+ >>> res = client.assets.list(
+ ... advanced_filter=in_timezone,
+ ... sort=(SortableAssetProperty.external_id, "asc"))
+
+ Combine filter and advanced filter:
+
+ >>> from cognite.client.data_classes import filters
+ >>> not_instrument_lvl5 = filters.And(
+ ... filters.ContainsAny("labels", ["Level5"]),
+ ... filters.Not(filters.ContainsAny("labels", ["Instrument"]))
+ ... )
+ >>> res = client.assets.list(asset_subtree_ids=[123456], advanced_filter=not_instrument_lvl5)
+ """
+ return run_sync(
+ self.__async_client.assets.list(
+ name=name,
+ parent_ids=parent_ids,
+ parent_external_ids=parent_external_ids,
+ asset_subtree_ids=asset_subtree_ids,
+ asset_subtree_external_ids=asset_subtree_external_ids,
+ data_set_ids=data_set_ids,
+ data_set_external_ids=data_set_external_ids,
+ labels=labels,
+ geo_location=geo_location,
+ metadata=metadata,
+ source=source,
+ created_time=created_time,
+ last_updated_time=last_updated_time,
+ root=root,
+ external_id_prefix=external_id_prefix,
+ aggregated_properties=aggregated_properties,
+ partitions=partitions,
+ limit=limit,
+ advanced_filter=advanced_filter,
+ sort=sort,
+ )
+ )
diff --git a/cognite/client/_sync_api/data_modeling/__init__.py b/cognite/client/_sync_api/data_modeling/__init__.py
new file mode 100644
index 0000000000..41eb939a72
--- /dev/null
+++ b/cognite/client/_sync_api/data_modeling/__init__.py
@@ -0,0 +1,37 @@
+"""
+===============================================================================
+1fe95c1878f11bc0bee617a86e1dc4a4
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._sync_api.data_modeling.containers import SyncContainersAPI
+from cognite.client._sync_api.data_modeling.data_models import SyncDataModelsAPI
+from cognite.client._sync_api.data_modeling.graphql import SyncDataModelingGraphQLAPI
+from cognite.client._sync_api.data_modeling.instances import SyncInstancesAPI
+from cognite.client._sync_api.data_modeling.spaces import SyncSpacesAPI
+from cognite.client._sync_api.data_modeling.statistics import SyncStatisticsAPI
+from cognite.client._sync_api.data_modeling.views import SyncViewsAPI
+from cognite.client._sync_api_client import SyncAPIClient
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncDataModelingAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+ self.containers = SyncContainersAPI(async_client)
+ self.data_models = SyncDataModelsAPI(async_client)
+ self.spaces = SyncSpacesAPI(async_client)
+ self.views = SyncViewsAPI(async_client)
+ self.instances = SyncInstancesAPI(async_client)
+ self.graphql = SyncDataModelingGraphQLAPI(async_client)
+ self.statistics = SyncStatisticsAPI(async_client)
diff --git a/cognite/client/_sync_api/data_modeling/containers.py b/cognite/client/_sync_api/data_modeling/containers.py
new file mode 100644
index 0000000000..7ca9098574
--- /dev/null
+++ b/cognite/client/_sync_api/data_modeling/containers.py
@@ -0,0 +1,328 @@
+"""
+===============================================================================
+8a40bb13bf895e182628f40657186557
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import TYPE_CHECKING, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DATA_MODELING_DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.data_modeling.containers import (
+ Container,
+ ContainerApply,
+ ContainerList,
+)
+from cognite.client.data_classes.data_modeling.ids import (
+ ConstraintIdentifier,
+ ContainerId,
+ ContainerIdentifier,
+ IndexIdentifier,
+)
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncContainersAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[Container]: ...
+
+ @overload
+ def __call__(self, chunk_size: int) -> Iterator[ContainerList]: ...
+
+ def __call__(
+ self,
+ chunk_size: int | None = None,
+ space: str | None = None,
+ include_global: bool = False,
+ limit: int | None = None,
+ ) -> Iterator[Container | ContainerList]:
+ """
+ Iterate over containers
+
+ Fetches containers as they are iterated over, so you keep a limited number of containers in memory.
+
+ Args:
+ chunk_size (int | None): Number of containers to return in each chunk. Defaults to yielding one container at a time.
+ space (str | None): The space to query.
+ include_global (bool): Whether the global containers should be returned.
+ limit (int | None): Maximum number of containers to return. Defaults to returning all items.
+
+ Yields:
+ Container | ContainerList: yields Container one by one if chunk_size is not specified, else ContainerList objects.
+ """
+ yield from SyncIterator(
+ self.__async_client.data_modeling.containers(
+ chunk_size=chunk_size, space=space, include_global=include_global, limit=limit
+ )
+ )
+
+ @overload
+ def retrieve(self, ids: ContainerIdentifier) -> Container | None: ...
+
+ @overload
+ def retrieve(self, ids: Sequence[ContainerIdentifier]) -> ContainerList: ...
+
+ def retrieve(self, ids: ContainerIdentifier | Sequence[ContainerIdentifier]) -> Container | ContainerList | None:
+ """
+ `Retrieve one or more containers by id(s). `_
+
+ Args:
+ ids (ContainerIdentifier | Sequence[ContainerIdentifier]): Identifier for container(s).
+
+ Returns:
+ Container | ContainerList | None: Requested container or None if it does not exist.
+
+ Examples:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.data_modeling.containers.retrieve(('mySpace', 'myContainer'))
+
+ Fetch using the ContainerId:
+
+ >>> from cognite.client.data_classes.data_modeling import ContainerId
+ >>> res = client.data_modeling.containers.retrieve(
+ ... ContainerId(space='mySpace', external_id='myContainer'))
+ """
+ return run_sync(self.__async_client.data_modeling.containers.retrieve(ids=ids))
+
+ def delete(self, ids: ContainerIdentifier | Sequence[ContainerIdentifier]) -> list[ContainerId]:
+ """
+ `Delete one or more containers `_
+
+ Args:
+ ids (ContainerIdentifier | Sequence[ContainerIdentifier]): The container identifier(s).
+ Returns:
+ list[ContainerId]: The container(s) which have been deleted. Empty list if nothing was deleted.
+ Examples:
+
+ Delete containers by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.data_modeling.containers.delete(("mySpace", "myContainer"))
+ """
+ return run_sync(self.__async_client.data_modeling.containers.delete(ids=ids))
+
+ def delete_constraints(self, ids: Sequence[ConstraintIdentifier]) -> list[ConstraintIdentifier]:
+ """
+ `Delete one or more constraints `_
+
+ Args:
+ ids (Sequence[ConstraintIdentifier]): The constraint identifier(s).
+ Returns:
+ list[ConstraintIdentifier]: The constraint(s) which have been deleted.
+ Examples:
+
+ Delete constraints by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> from cognite.client.data_classes.data_modeling import ContainerId
+ >>> client.data_modeling.containers.delete_constraints(
+ ... [(ContainerId("mySpace", "myContainer"), "myConstraint")]
+ ... )
+ """
+ return run_sync(self.__async_client.data_modeling.containers.delete_constraints(ids=ids))
+
+ def delete_indexes(self, ids: Sequence[IndexIdentifier]) -> list[IndexIdentifier]:
+ """
+ `Delete one or more indexes `_
+
+ Args:
+ ids (Sequence[IndexIdentifier]): The index identifier(s).
+ Returns:
+ list[IndexIdentifier]: The index(es) which have been deleted.
+ Examples:
+
+ Delete indexes by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> from cognite.client.data_classes.data_modeling import ContainerId
+ >>> client.data_modeling.containers.delete_indexes(
+ ... [(ContainerId("mySpace", "myContainer"), "myIndex")]
+ ... )
+ """
+ return run_sync(self.__async_client.data_modeling.containers.delete_indexes(ids=ids))
+
+ def list(
+ self,
+ space: str | None = None,
+ limit: int | None = DATA_MODELING_DEFAULT_LIMIT_READ,
+ include_global: bool = False,
+ ) -> ContainerList:
+ """
+ `List containers `_
+
+ Args:
+ space (str | None): The space to query
+ limit (int | None): Maximum number of containers to return. Defaults to 10. Set to -1, float("inf") or None to return all items.
+ include_global (bool): Whether the global containers should be returned.
+
+ Returns:
+ ContainerList: List of requested containers
+
+ Examples:
+
+ List containers and limit to 5:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> container_list = client.data_modeling.containers.list(limit=5)
+
+ Iterate over containers, one-by-one:
+
+ >>> for container in client.data_modeling.containers():
+ ... container # do something with the container
+
+ Iterate over chunks of containers to reduce memory load:
+
+ >>> for container_list in client.data_modeling.containers(chunk_size=10):
+ ... container_list # do something with the containers
+ """
+ return run_sync(
+ self.__async_client.data_modeling.containers.list(space=space, limit=limit, include_global=include_global)
+ )
+
+ @overload
+ def apply(self, container: Sequence[ContainerApply]) -> ContainerList: ...
+
+ @overload
+ def apply(self, container: ContainerApply) -> Container: ...
+
+ def apply(self, container: ContainerApply | Sequence[ContainerApply]) -> Container | ContainerList:
+ """
+ `Add or update (upsert) containers. `_
+
+ Args:
+ container (ContainerApply | Sequence[ContainerApply]): Container(s) to create or update.
+
+ Returns:
+ Container | ContainerList: Created container(s)
+
+ Examples:
+
+ Create a new container:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.data_modeling import (
+ ... ContainerApply, ContainerProperty, Text, Float64)
+ >>> client = CogniteClient()
+ >>> container = ContainerApply(
+ ... space="mySpace",
+ ... external_id="myContainer",
+ ... properties={
+ ... "name": ContainerProperty(type=Text, name="name"),
+ ... "numbers": ContainerProperty(
+ ... type=Float64(is_list=True, max_list_size=200),
+ ... description="very important numbers",
+ ... ),
+ ... },
+ ... )
+ >>> res = client.data_modeling.containers.apply(container)
+
+ Create new container with unit-aware properties:
+
+ >>> from cognite.client.data_classes.data_modeling import Float64
+ >>> from cognite.client.data_classes.data_modeling.data_types import UnitReference
+ >>> container = ContainerApply(
+ ... space="mySpace",
+ ... external_id="myContainer",
+ ... properties={
+ ... "maxPressure": ContainerProperty(
+ ... nullable=True,
+ ... description="Maximum Pump Pressure",
+ ... name="maxPressure",
+ ... type=Float64(
+ ... unit=UnitReference(
+ ... external_id="pressure:bar",
+ ... source_unit="BAR"
+ ... )
+ ... )
+ ... ),
+ ... "rotationConfigurations": ContainerProperty(
+ ... nullable=True,
+ ... description="Rotation Configurations",
+ ... name="rotationConfigurations",
+ ... type=Float64(
+ ... is_list=True,
+ ... unit=UnitReference(
+ ... external_id="angular_velocity:rev-per-min"
+ ... )
+ ... )
+ ... )
+ ... }
+ ... )
+ >>> res = client.data_modeling.containers.apply(container)
+
+ Example container with all available properties (for illustration purposes). Note that
+ ``ContainerProperty`` has several options not shown here, like ``name``, ``description``,
+ ``nullable``, ``auto_increment``, ``default_value`` and ``immutable`` that may be specified,
+ depending on the choice of property type (e.g. ``auto_increment`` only works with integer types).
+
+ >>> from cognite.client.data_classes.data_modeling.data_types import UnitReference, EnumValue
+ >>> from cognite.client.data_classes.data_modeling.data_types import (
+ ... Boolean, Date, DirectRelation, Enum, FileReference, Float32, Float64,
+ ... Int32, Int64, Json, SequenceReference, Text, TimeSeriesReference, Timestamp
+ ... )
+ >>> container_properties = {
+ ... "prop01": ContainerProperty(Boolean),
+ ... "prop02": ContainerProperty(Boolean(is_list=True)),
+ ... "prop03": ContainerProperty(Date),
+ ... "prop04": ContainerProperty(Date(is_list=True)),
+ ... "prop05": ContainerProperty(Timestamp),
+ ... "prop06": ContainerProperty(Timestamp(is_list=True)),
+ ... "prop07": ContainerProperty(Text),
+ ... "prop08": ContainerProperty(Text(is_list=True)),
+ ... # Note: DirectRelation (and its list variant) supports `container`: the (optional) required type for the node
+ ... # the direct relation points to.
+ ... "prop09": ContainerProperty(DirectRelation),
+ ... "prop10": ContainerProperty(DirectRelation(is_list=True)),
+ ... # Note: Enum also supports `unknown_value`: the value to use when the enum value is unknown.
+ ... "prop11": ContainerProperty(
+ ... Enum({"Closed": EnumValue("Valve is closed"),
+ ... "Opened": EnumValue("Valve is opened")})),
+ ... # Note: Floats support unit references, e.g. `unit=UnitReference("pressure:bar")`:
+ ... "prop12": ContainerProperty(Float32),
+ ... "prop13": ContainerProperty(Float32(is_list=True)),
+ ... "prop14": ContainerProperty(Float64),
+ ... "prop15": ContainerProperty(Float64(is_list=True)),
+ ... "prop16": ContainerProperty(Int32),
+ ... "prop17": ContainerProperty(Int32(is_list=True)),
+ ... "prop18": ContainerProperty(Int64),
+ ... "prop19": ContainerProperty(Int64(is_list=True)),
+ ... "prop20": ContainerProperty(Json),
+ ... "prop21": ContainerProperty(Json(is_list=True)),
+ ... "prop22": ContainerProperty(SequenceReference),
+ ... "prop23": ContainerProperty(SequenceReference(is_list=True)),
+ ... # Note: It is advised to represent files and time series directly as nodes
+ ... # instead of referencing existing:
+ ... "prop24": ContainerProperty(FileReference),
+ ... "prop25": ContainerProperty(FileReference(is_list=True)),
+ ... "prop26": ContainerProperty(TimeSeriesReference),
+ ... "prop27": ContainerProperty(TimeSeriesReference(is_list=True)),
+ ... }
+ >>> container = ContainerApply(
+ ... space="my-space",
+ ... external_id="my-everything-container",
+ ... properties=container_properties,
+ ... )
+ """
+ return run_sync(self.__async_client.data_modeling.containers.apply(container=container))
diff --git a/cognite/client/_sync_api/data_modeling/data_models.py b/cognite/client/_sync_api/data_modeling/data_models.py
new file mode 100644
index 0000000000..c0c649f5c3
--- /dev/null
+++ b/cognite/client/_sync_api/data_modeling/data_models.py
@@ -0,0 +1,226 @@
+"""
+===============================================================================
+d6b0080645719a3270447e9f773c8ff2
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import TYPE_CHECKING, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DATA_MODELING_DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.data_modeling.data_models import (
+ DataModel,
+ DataModelApply,
+ DataModelList,
+)
+from cognite.client.data_classes.data_modeling.ids import DataModelId, DataModelIdentifier, ViewId
+from cognite.client.data_classes.data_modeling.views import View
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncDataModelsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[DataModel]: ...
+
+ @overload
+ def __call__(self, chunk_size: int) -> Iterator[DataModelList]: ...
+
+ def __call__(
+ self,
+ chunk_size: int | None = None,
+ limit: int | None = None,
+ space: str | None = None,
+ inline_views: bool = False,
+ all_versions: bool = False,
+ include_global: bool = False,
+ ) -> Iterator[DataModel | DataModelList]:
+ """
+ Iterate over data models
+
+ Fetches data models as they are iterated over, so you keep a limited number of data models in memory.
+
+ Args:
+ chunk_size (int | None): Number of data models to return in each chunk. Defaults to yielding one data model at a time.
+ limit (int | None): Maximum number of data models to return. Defaults to returning all items.
+ space (str | None): The space to query.
+ inline_views (bool): Whether to expand the referenced views inline in the returned result.
+ all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field.
+ include_global (bool): Whether to include global data models.
+
+ Yields:
+ DataModel | DataModelList: yields DataModel one by one if chunk_size is not specified, else DataModelList objects.
+ """
+ yield from SyncIterator(
+ self.__async_client.data_modeling.data_models(
+ chunk_size=chunk_size,
+ limit=limit,
+ space=space,
+ inline_views=inline_views,
+ all_versions=all_versions,
+ include_global=include_global,
+ )
+ )
+
+ @overload
+ def retrieve(
+ self, ids: DataModelIdentifier | Sequence[DataModelIdentifier], inline_views: Literal[True]
+ ) -> DataModelList[View]: ...
+
+ @overload
+ def retrieve(
+ self, ids: DataModelIdentifier | Sequence[DataModelIdentifier], inline_views: Literal[False] = False
+ ) -> DataModelList[ViewId]: ...
+
+ def retrieve(
+ self, ids: DataModelIdentifier | Sequence[DataModelIdentifier], inline_views: bool = False
+ ) -> DataModelList[ViewId] | DataModelList[View]:
+ """
+ `Retrieve data model(s) by id(s). `_
+
+ Args:
+ ids (DataModelIdentifier | Sequence[DataModelIdentifier]): Data Model identifier(s).
+ inline_views (bool): Whether to expand the referenced views inline in the returned result.
+
+ Returns:
+ DataModelList[ViewId] | DataModelList[View]: Requested data model(s) or empty if none exist.
+
+ Examples:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.data_modeling.data_models.retrieve(("mySpace", "myDataModel", "v1"))
+ """
+ return run_sync(self.__async_client.data_modeling.data_models.retrieve(ids=ids, inline_views=inline_views))
+
+ def delete(self, ids: DataModelIdentifier | Sequence[DataModelIdentifier]) -> list[DataModelId]:
+ """
+ `Delete one or more data models `_
+
+ Args:
+ ids (DataModelIdentifier | Sequence[DataModelIdentifier]): Data Model identifier(s).
+ Returns:
+ list[DataModelId]: The data model(s) which have been deleted. Empty list if nothing was deleted.
+ Examples:
+
+ Delete data model by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.data_modeling.data_models.delete(("mySpace", "myDataModel", "v1"))
+ """
+ return run_sync(self.__async_client.data_modeling.data_models.delete(ids=ids))
+
+ @overload
+ def list(
+ self,
+ inline_views: Literal[True],
+ limit: int | None = DATA_MODELING_DEFAULT_LIMIT_READ,
+ space: str | None = None,
+ all_versions: bool = False,
+ include_global: bool = False,
+ ) -> DataModelList[View]: ...
+
+ @overload
+ def list(
+ self,
+ inline_views: Literal[False] = False,
+ limit: int | None = DATA_MODELING_DEFAULT_LIMIT_READ,
+ space: str | None = None,
+ all_versions: bool = False,
+ include_global: bool = False,
+ ) -> DataModelList[ViewId]: ...
+
+ def list(
+ self,
+ inline_views: bool = False,
+ limit: int | None = DATA_MODELING_DEFAULT_LIMIT_READ,
+ space: str | None = None,
+ all_versions: bool = False,
+ include_global: bool = False,
+ ) -> DataModelList[View] | DataModelList[ViewId]:
+ """
+ `List data models `_
+
+ Args:
+ inline_views (bool): Whether to expand the referenced views inline in the returned result.
+ limit (int | None): Maximum number of data models to return. Defaults to 10. Set to -1, float("inf") or None to return all items.
+ space (str | None): The space to query.
+ all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field.
+ include_global (bool): Whether to include global data models.
+
+ Returns:
+ DataModelList[View] | DataModelList[ViewId]: List of requested data models
+
+ Examples:
+
+ List 5 data models:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> data_model_list = client.data_modeling.data_models.list(limit=5)
+
+ Iterate over data models, one-by-one:
+
+ >>> for data_model in client.data_modeling.data_models():
+ ... data_model # do something with the data model
+
+ Iterate over chunks of data models to reduce memory load:
+
+ >>> for data_model_list in client.data_modeling.data_models(chunk_size=10):
+ ... data_model_list # do something with the data models
+ """
+ return run_sync(
+ self.__async_client.data_modeling.data_models.list(
+ inline_views=inline_views,
+ limit=limit,
+ space=space,
+ all_versions=all_versions,
+ include_global=include_global,
+ )
+ )
+
+ @overload
+ def apply(self, data_model: Sequence[DataModelApply]) -> DataModelList: ...
+
+ @overload
+ def apply(self, data_model: DataModelApply) -> DataModel: ...
+
+ def apply(self, data_model: DataModelApply | Sequence[DataModelApply]) -> DataModel | DataModelList:
+ """
+ `Create or update one or more data models. `_
+
+ Args:
+ data_model (DataModelApply | Sequence[DataModelApply]): Data model(s) to create or update (upsert).
+
+ Returns:
+ DataModel | DataModelList: Created data model(s)
+
+ Examples:
+
+ Create new data model:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.data_modeling import DataModelApply, ViewId
+ >>> client = CogniteClient()
+ >>> data_models = [
+ ... DataModelApply(space="mySpace",external_id="myDataModel",version="v1",views=[ViewId("mySpace","myView","v1")]),
+ ... DataModelApply(space="mySpace",external_id="myOtherDataModel",version="v1",views=[ViewId("mySpace","myView","v1")])]
+ >>> res = client.data_modeling.data_models.apply(data_models)
+ """
+ return run_sync(self.__async_client.data_modeling.data_models.apply(data_model=data_model))
diff --git a/cognite/client/_sync_api/data_modeling/graphql.py b/cognite/client/_sync_api/data_modeling/graphql.py
new file mode 100644
index 0000000000..d1593ab2f4
--- /dev/null
+++ b/cognite/client/_sync_api/data_modeling/graphql.py
@@ -0,0 +1,105 @@
+"""
+===============================================================================
+f825b382430ead59cfa24b671298f05b
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from typing import Any
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.data_modeling import DataModelIdentifier
+from cognite.client.data_classes.data_modeling.graphql import DMLApplyResult
+from cognite.client.utils._async_helpers import run_sync
+
+
+class SyncDataModelingGraphQLAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ def _unsafely_wipe_and_regenerate_dml(self, id: DataModelIdentifier) -> str:
+ """
+ Wipe and regenerate the DML for a given data model.
+
+ Note:
+ This removes all comments from the DML.
+
+ Args:
+ id (DataModelIdentifier): The data model to apply DML to.
+
+ Returns:
+ str: The new DML
+ """
+ return run_sync(self.__async_client.data_modeling.graphql._unsafely_wipe_and_regenerate_dml(id=id))
+
+ def apply_dml(
+ self,
+ id: DataModelIdentifier,
+ dml: str,
+ name: str | None = None,
+ description: str | None = None,
+ previous_version: str | None = None,
+ ) -> DMLApplyResult:
+ """
+ Apply the DML for a given data model.
+
+ Args:
+ id (DataModelIdentifier): The data model to apply DML to.
+ dml (str): The DML to apply.
+ name (str | None): The name of the data model.
+ description (str | None): The description of the data model.
+ previous_version (str | None): The previous version of the data model. Specify to reuse view versions from previous data model version.
+
+ Returns:
+ DMLApplyResult: The id of the updated data model.
+
+ Examples:
+
+ Apply DML:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.data_modeling.graphql.apply_dml(
+ ... id=("mySpaceExternalId", "myModelExternalId", "1"),
+ ... dml="type MyType { id: String! }",
+ ... name="My model name",
+ ... description="My model description"
+ ... )
+ """
+ return run_sync(
+ self.__async_client.data_modeling.graphql.apply_dml(
+ id=id, dml=dml, name=name, description=description, previous_version=previous_version
+ )
+ )
+
+ def query(self, id: DataModelIdentifier, query: str, variables: dict[str, Any] | None = None) -> dict[str, Any]:
+ """
+ Execute a GraphQL query against a given data model.
+
+ Args:
+ id (DataModelIdentifier): The data model to query.
+ query (str): The query to issue.
+ variables (dict[str, Any] | None): An optional dict of variables to pass to the query.
+
+ Returns:
+ dict[str, Any]: The query result
+
+ Examples:
+
+ Execute a graphql query against a given data model:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.data_modeling.graphql.query(
+ ... id=("mySpace", "myDataModel", "v1"),
+ ... query="listThings { items { thingProperty } }",
+ ... )
+ """
+ return run_sync(self.__async_client.data_modeling.graphql.query(id=id, query=query, variables=variables))
diff --git a/cognite/client/_sync_api/data_modeling/instances.py b/cognite/client/_sync_api/data_modeling/instances.py
new file mode 100644
index 0000000000..674e73b999
--- /dev/null
+++ b/cognite/client/_sync_api/data_modeling/instances.py
@@ -0,0 +1,1250 @@
+"""
+===============================================================================
+60db155d3f4c97368b986049d3a1b66f
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Awaitable, Callable, Iterator, Sequence
+from typing import TYPE_CHECKING, Any, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._api.data_modeling.instances import Source
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.aggregations import (
+ AggregatedNumberedValue,
+ Histogram,
+ HistogramValue,
+ MetricAggregation,
+)
+from cognite.client.data_classes.data_modeling.ids import EdgeId, NodeId, ViewId
+from cognite.client.data_classes.data_modeling.instances import (
+ Edge,
+ EdgeApply,
+ EdgeList,
+ InstanceAggregationResultList,
+ InstanceInspectResults,
+ InstancesApplyResult,
+ InstancesDeleteResult,
+ InstanceSort,
+ InstancesResult,
+ InvolvedContainers,
+ InvolvedViews,
+ Node,
+ NodeApply,
+ NodeList,
+ T_Edge,
+ T_Node,
+ TargetUnit,
+)
+from cognite.client.data_classes.data_modeling.query import (
+ Query,
+ QueryResult,
+)
+from cognite.client.data_classes.data_modeling.sync import SubscriptionContext
+from cognite.client.data_classes.filters import Filter
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+from cognite.client.data_classes.data_modeling.debug import DebugParameters
+
+
+class SyncInstancesAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, chunk_size: None = None, instance_type: Literal["node"] = "node") -> Iterator[Node]: ...
+
+ @overload
+ def __call__(self, chunk_size: None, instance_type: Literal["edge"]) -> Iterator[Edge]: ...
+
+ @overload
+ def __call__(self, chunk_size: int, instance_type: Literal["node"] = "node") -> Iterator[NodeList]: ...
+
+ @overload
+ def __call__(self, chunk_size: int, instance_type: Literal["edge"]) -> Iterator[EdgeList]: ...
+
+ def __call__(
+ self,
+ chunk_size: int | None = None,
+ instance_type: Literal["node", "edge"] = "node",
+ limit: int | None = None,
+ include_typing: bool = False,
+ sources: Source | Sequence[Source] | None = None,
+ space: str | SequenceNotStr[str] | None = None,
+ sort: list[InstanceSort | dict] | InstanceSort | dict | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ debug: DebugParameters | None = None,
+ ) -> Iterator[Edge | EdgeList | Node | NodeList]:
+ """
+ Iterate over nodes or edges.
+ Fetches instances as they are iterated over, so you keep a limited number of instances in memory.
+
+ Args:
+ chunk_size (int | None): Number of instances to return in each chunk. Defaults to yielding one instance at a time.
+ instance_type (Literal['node', 'edge']): Whether to query for nodes or edges.
+ limit (int | None): Maximum number of instances to return. Defaults to returning all items.
+ include_typing (bool): Whether to return property type information as part of the result.
+ sources (Source | Sequence[Source] | None): Views to retrieve properties from.
+ space (str | SequenceNotStr[str] | None): Only return instances in the given space (or list of spaces).
+ sort (list[InstanceSort | dict] | InstanceSort | dict | None): Sort(s) to apply to the returned instances. For nontrivial amounts of data, you need to have a backing, cursorable index.
+ filter (Filter | dict[str, Any] | None): Advanced filtering of instances.
+ debug (DebugParameters | None): Debug settings for profiling and troubleshooting.
+
+ Yields:
+ Edge | EdgeList | Node | NodeList: yields Instance one by one if chunk_size is not specified, else NodeList/EdgeList objects.
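+
+ Examples:
+
+ Iterate over all nodes in a given space, reading properties through a specific view (a minimal sketch; the space and view identifiers are placeholders):
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.data_modeling import ViewId
+ >>> client = CogniteClient()
+ >>> for node in client.data_modeling.instances(
+ ...     instance_type="node", space="mySpace", sources=ViewId("mySpace", "myView", "v1")):
+ ...     node # do something with the node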
+ """
+ yield from SyncIterator(
+ self.__async_client.data_modeling.instances(
+ chunk_size=chunk_size,
+ instance_type=instance_type,
+ limit=limit,
+ include_typing=include_typing,
+ sources=sources,
+ space=space,
+ sort=sort,
+ filter=filter,
+ debug=debug,
+ )
+ )
+
+ @overload
+ def retrieve_edges(self, edges: EdgeId | tuple[str, str], *, edge_cls: type[T_Edge]) -> T_Edge | None: ...
+
+ @overload
+ def retrieve_edges(
+ self,
+ edges: EdgeId | tuple[str, str],
+ *,
+ sources: Source | Sequence[Source] | None = None,
+ include_typing: bool = False,
+ ) -> Edge | None: ...
+
+ @overload
+ def retrieve_edges(
+ self, edges: Sequence[EdgeId] | Sequence[tuple[str, str]], *, edge_cls: type[T_Edge]
+ ) -> EdgeList[T_Edge]: ...
+
+ @overload
+ def retrieve_edges(
+ self,
+ edges: Sequence[EdgeId] | Sequence[tuple[str, str]],
+ *,
+ sources: Source | Sequence[Source] | None = None,
+ include_typing: bool = False,
+ ) -> EdgeList[Edge]: ...
+
+ def retrieve_edges(
+ self,
+ edges: EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]],
+ edge_cls: type[T_Edge] = Edge,
+ sources: Source | Sequence[Source] | None = None,
+ include_typing: bool = False,
+ ) -> EdgeList[T_Edge] | T_Edge | Edge | None:
+ """
+ `Retrieve one or more edges by id(s). `_
+
+ Note:
+ This method should be used for retrieving edges with a custom edge class. You can use it
+ without providing a custom edge class, but in that case, the retrieved edges will be of the
+ built-in Edge class.
+
+
+ Args:
+ edges (EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]]): Edge id(s) to retrieve.
+ edge_cls (type[T_Edge]): The custom edge class to use, the retrieved edges will automatically be serialized into this class.
+ sources (Source | Sequence[Source] | None): Retrieve properties from the listed - by reference - views. This only applies if you do not provide a custom edge class.
+ include_typing (bool): Whether to include typing information
+
+ Returns:
+ EdgeList[T_Edge] | T_Edge | Edge | None: The requested edges.
+
+ Examples:
+
+ Retrieve edges using a custom typed class "Flow". Any property that you want to look up by a different attribute name,
+ e.g. you want `my_edge.flow_rate` to return the data for property `flowRate`, must use the PropertyOptions as shown below.
+ We strongly suggest you use snake_cased attribute names, as is done here:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.data_modeling import EdgeId, TypedEdge, PropertyOptions, DirectRelationReference, ViewId
+ >>> class Flow(TypedEdge):
+ ... flow_rate = PropertyOptions(identifier="flowRate")
+ ...
+ ... def __init__(
+ ... self,
+ ... space: str,
+ ... external_id: str,
+ ... version: int,
+ ... type: DirectRelationReference,
+ ... last_updated_time: int,
+ ... created_time: int,
+ ... flow_rate: float,
+ ... start_node: DirectRelationReference,
+ ... end_node: DirectRelationReference,
+ ... deleted_time: int | None = None,
+ ... ) -> None:
+ ... super().__init__(
+ ... space, external_id, version, type, last_updated_time, created_time, start_node, end_node, deleted_time
+ ... )
+ ... self.flow_rate = flow_rate
+ ...
+ ... @classmethod
+ ... def get_source(cls) -> ViewId:
+ ... return ViewId("sp_model_space", "flow", "1")
+ ...
+ >>> client = CogniteClient()
+ >>> res = client.data_modeling.instances.retrieve_edges(
+ ... EdgeId("mySpace", "theFlow"), edge_cls=Flow
+ ... )
+ >>> isinstance(res, Flow)
+ """
+ return run_sync(
+ self.__async_client.data_modeling.instances.retrieve_edges(
+ edges=edges, edge_cls=edge_cls, sources=sources, include_typing=include_typing
+ )
+ )
+
+ @overload
+ def retrieve_nodes(self, nodes: NodeId | tuple[str, str], *, node_cls: type[T_Node]) -> T_Node | None: ...
+
+ @overload
+ def retrieve_nodes(
+ self,
+ nodes: NodeId | tuple[str, str],
+ *,
+ sources: Source | Sequence[Source] | None = None,
+ include_typing: bool = False,
+ ) -> Node | None: ...
+
+ @overload
+ def retrieve_nodes(
+ self, nodes: Sequence[NodeId] | Sequence[tuple[str, str]], *, node_cls: type[T_Node]
+ ) -> NodeList[T_Node]: ...
+
+ @overload
+ def retrieve_nodes(
+ self,
+ nodes: Sequence[NodeId] | Sequence[tuple[str, str]],
+ *,
+ sources: Source | Sequence[Source] | None = None,
+ include_typing: bool = False,
+ ) -> NodeList[Node]: ...
+
+ def retrieve_nodes(
+ self,
+ nodes: NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]],
+ node_cls: type[T_Node] = Node,
+ sources: Source | Sequence[Source] | None = None,
+ include_typing: bool = False,
+ ) -> NodeList[T_Node] | T_Node | Node | None:
+ """
+ `Retrieve one or more nodes by id(s). `_
+
+ Note:
+ This method should be used for retrieving nodes with a custom node class. You can use it
+ without providing a custom node class, but in that case, the retrieved nodes will be of the
+ built-in Node class.
+
+ Args:
+ nodes (NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]]): Node id(s) to retrieve.
+ node_cls (type[T_Node]): The custom node class to use, the retrieved nodes will automatically be serialized to this class.
+ sources (Source | Sequence[Source] | None): Retrieve properties from the listed - by reference - views. This only applies if you do not provide a custom node class.
+ include_typing (bool): Whether to include typing information
+
+ Returns:
+ NodeList[T_Node] | T_Node | Node | None: The requested nodes.
+
+ Examples:
+
+ Retrieve nodes using a custom typed node class "Person". Any property that you want to look up by a different attribute name,
+ e.g. you want `my_node.birth_year` to return the data for property `birthYear`, must use the PropertyOptions as shown below.
+ We strongly suggest you use snake_cased attribute names, as is done here:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.data_modeling import NodeId, TypedNode, PropertyOptions, DirectRelationReference, ViewId
+ >>> class Person(TypedNode):
+ ... birth_year = PropertyOptions(identifier="birthYear")
+ ...
+ ... def __init__(
+ ... self,
+ ... space: str,
+ ... external_id: str,
+ ... version: int,
+ ... last_updated_time: int,
+ ... created_time: int,
+ ... name: str,
+ ... birth_year: int | None = None,
+ ... type: DirectRelationReference | None = None,
+ ... deleted_time: int | None = None,
+ ... ):
+ ... super().__init__(
+ ... space=space,
+ ... external_id=external_id,
+ ... version=version,
+ ... last_updated_time=last_updated_time,
+ ... created_time=created_time,
+ ... type=type,
+ ... deleted_time=deleted_time
+ ... )
+ ... self.name = name
+ ... self.birth_year = birth_year
+ ...
+ ... @classmethod
+ ... def get_source(cls) -> ViewId:
+ ... return ViewId("myModelSpace", "Person", "1")
+ ...
+ >>> client = CogniteClient()
+ >>> res = client.data_modeling.instances.retrieve_nodes(
+ ... NodeId("myDataSpace", "myPerson"), node_cls=Person
+ ... )
+ >>> isinstance(res, Person)
+ """
+ return run_sync(
+ self.__async_client.data_modeling.instances.retrieve_nodes(
+ nodes=nodes, node_cls=node_cls, sources=sources, include_typing=include_typing
+ )
+ )
+
+ def retrieve(
+ self,
+ nodes: NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]] | None = None,
+ edges: EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]] | None = None,
+ sources: Source | Sequence[Source] | None = None,
+ include_typing: bool = False,
+ ) -> InstancesResult[Node, Edge]:
+ """
+ `Retrieve one or more instance by id(s). `_
+
+ Args:
+ nodes (NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Node ids
+ edges (EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Edge ids
+ sources (Source | Sequence[Source] | None): Retrieve properties from the listed - by reference - views.
+ include_typing (bool): Whether to return property type information as part of the result.
+
+ Returns:
+ InstancesResult[Node, Edge]: Requested instances.
+
+ Examples:
+
+ Retrieve instances by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.data_modeling.instances.retrieve(
+ ... nodes=("mySpace", "myNodeExternalId"),
+ ... edges=("mySpace", "myEdgeExternalId"),
+ ... sources=("mySpace", "myViewExternalId", "myViewVersion"))
+
+ Retrieve nodes and edges using the built-in data classes:
+
+ >>> from cognite.client.data_classes.data_modeling import NodeId, EdgeId, ViewId
+ >>> res = client.data_modeling.instances.retrieve(
+ ... NodeId("mySpace", "myNode"),
+ ... EdgeId("mySpace", "myEdge"),
+ ... ViewId("mySpace", "myViewExternalId", "myViewVersion"))
+
+ Retrieve nodes and edges using the view object as source:
+
+ >>> from cognite.client.data_classes.data_modeling import NodeId, EdgeId
+ >>> res = client.data_modeling.instances.retrieve(
+ ... NodeId("mySpace", "myNode"),
+ ... EdgeId("mySpace", "myEdge"),
+ ... sources=("myspace", "myView"))
+ """
+ return run_sync(
+ self.__async_client.data_modeling.instances.retrieve(
+ nodes=nodes, edges=edges, sources=sources, include_typing=include_typing
+ )
+ )
+
+ def delete(
+ self,
+ nodes: NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]] | None = None,
+ edges: EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]] | None = None,
+ ) -> InstancesDeleteResult:
+ """
+ `Delete one or more instances `_
+
+ Args:
+ nodes (NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Node ids
+ edges (EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Edge ids
+
+ Returns:
+ InstancesDeleteResult: The instance ID(s) that were deleted. Empty list if nothing was deleted.
+
+ Examples:
+
+ Delete instances by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.data_modeling.instances.delete(nodes=("mySpace", "myNode"))
+
+ Delete nodes and edges using the built-in data classes:
+
+ >>> from cognite.client.data_classes.data_modeling import NodeId, EdgeId
+ >>> client.data_modeling.instances.delete(NodeId('mySpace', 'myNode'), EdgeId('mySpace', 'myEdge'))
+
+ Delete all nodes from a NodeList
+
+ >>> from cognite.client.data_classes.data_modeling import NodeId, EdgeId
+ >>> my_view = client.data_modeling.views.retrieve(('mySpace', 'myView'))
+ >>> my_nodes = client.data_modeling.instances.list(instance_type='node', sources=my_view, limit=None)
+ >>> client.data_modeling.instances.delete(nodes=my_nodes.as_ids())
+ """
+ return run_sync(self.__async_client.data_modeling.instances.delete(nodes=nodes, edges=edges))
+
+ def inspect(
+ self,
+ nodes: NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]] | None = None,
+ edges: EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]] | None = None,
+ *,
+ involved_views: InvolvedViews | None = None,
+ involved_containers: InvolvedContainers | None = None,
+ ) -> InstanceInspectResults:
+ """
+ `Reverse lookup for instances. `_
+
+ This method will return the involved views and containers for the given nodes and edges.
+
+ Args:
+ nodes (NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Node IDs.
+ edges (EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Edge IDs.
+ involved_views (InvolvedViews | None): Whether to include involved views. Must pass at least one of involved_views or involved_containers.
+ involved_containers (InvolvedContainers | None): Whether to include involved containers. Must pass at least one of involved_views or involved_containers.
+
+ Returns:
+ InstanceInspectResults: List of instance inspection results.
+
+ Examples:
+
+ Look up the involved views for a given node and edge:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.data_modeling import NodeId, EdgeId, InvolvedViews
+ >>> client = CogniteClient()
+ >>> res = client.data_modeling.instances.inspect(
+ ... nodes=NodeId("my-space", "foo1"),
+ ... edges=EdgeId("my-space", "bar2"),
+ ... involved_views=InvolvedViews(all_versions=False),
+ ... )
+
+ Look up the involved containers:
+
+ >>> from cognite.client.data_classes.data_modeling import InvolvedContainers
+ >>> res = client.data_modeling.instances.inspect(
+ ... nodes=[("my-space", "foo1"), ("my-space", "foo2")],
+ ... involved_containers=InvolvedContainers(),
+ ... )
+ """
+ return run_sync(
+ self.__async_client.data_modeling.instances.inspect(
+ nodes=nodes, edges=edges, involved_views=involved_views, involved_containers=involved_containers
+ )
+ )
+
+ def subscribe(
+ self,
+ query: Query,
+ callback: Callable[[QueryResult], None | Awaitable[None]],
+ poll_delay_seconds: float = 30,
+ throttle_seconds: float = 1,
+ ) -> SubscriptionContext:
+ """
+ Subscribe to a query and get updates when the result set changes. This runs the sync() method in a background task.
+ We do not support chaining result sets when subscribing to a query.
+
+ Tip:
+ For a practical guide on using this method to create a live local replica of your data,
+ see :ref:`this example of syncing instances to a local SQLite database `.
+
+ Args:
+ query (Query): The query to subscribe to.
+ callback (Callable[[QueryResult], None | Awaitable[None]]): The callback function to call when the result set changes. Can be a regular or async function.
+ poll_delay_seconds (float): The time to wait between polls when no data is present. Defaults to 30 seconds.
+ throttle_seconds (float): The time to wait between polls despite data being present.
+
+ Returns:
+ SubscriptionContext: An object that can be used to inspect and cancel the subscription.
+
+ Examples:
+
+ Subscribe to a given query and process the results in your own callback function
+ (here we just print the result for illustration):
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.data_modeling.query import (
+ ... Query, QueryResult, NodeResultSetExpression, Select, SourceSelector)
+ >>> from cognite.client.data_classes.data_modeling import ViewId
+ >>> from cognite.client.data_classes.filters import Equals
+ >>>
+ >>> client = CogniteClient()
+ >>> def just_print_the_result(result: QueryResult) -> None:
+ ...     print(result)
+ >>>
+ >>> view_id = ViewId("someSpace", "someView", "v1")
+ >>> filter = Equals(view_id.as_property_ref("myAsset"), "Il-Tempo-Gigante")
+ >>> query = Query(
+ ...     with_={"work_orders": NodeResultSetExpression(filter=filter)},
+ ...     select={"work_orders": Select([SourceSelector(view_id, ["*"])])}
+ ... )
+ >>> subscription_context = client.data_modeling.instances.subscribe(
+ ... query, callback=just_print_the_result
+ ... )
+ >>> # Use the returned subscription_context to manage the subscription, e.g. to cancel it:
+ >>> subscription_context.cancel()
+ """
+ return run_sync(
+ self.__async_client.data_modeling.instances.subscribe(
+ query=query, callback=callback, poll_delay_seconds=poll_delay_seconds, throttle_seconds=throttle_seconds
+ )
+ )
+
+ def apply(
+ self,
+ nodes: NodeApply | Sequence[NodeApply] | None = None,
+ edges: EdgeApply | Sequence[EdgeApply] | None = None,
+ auto_create_start_nodes: bool = False,
+ auto_create_end_nodes: bool = False,
+ auto_create_direct_relations: bool = True,
+ skip_on_version_conflict: bool = False,
+ replace: bool = False,
+ ) -> InstancesApplyResult:
+ """
+ `Add or update (upsert) instances. `_
+
+ Args:
+ nodes (NodeApply | Sequence[NodeApply] | None): Nodes to apply
+ edges (EdgeApply | Sequence[EdgeApply] | None): Edges to apply
+ auto_create_start_nodes (bool): Whether to create missing start nodes for edges when ingesting. By default, the start node of an edge must exist before it can be ingested.
+ auto_create_end_nodes (bool): Whether to create missing end nodes for edges when ingesting. By default, the end node of an edge must exist before it can be ingested.
+ auto_create_direct_relations (bool): Whether to create missing direct relation targets when ingesting.
+ skip_on_version_conflict (bool): If existingVersion is specified on any of the nodes/edges in the input, the default behaviour is that the entire ingestion will fail when version conflicts occur. If skipOnVersionConflict is set to true, items with version conflicts will be skipped instead. If no version is specified for nodes/edges, the write is performed without a version check.
+ replace (bool): How do we behave when a property value exists? Do we replace all matching and existing values with the supplied values (true)? Or should we merge in new values for properties together with the existing values (false)? Note: This setting applies for all nodes or edges specified in the ingestion call.
+ Returns:
+ InstancesApplyResult: Created instance(s)
+
+ Examples:
+
+ Create a new node without data:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.data_modeling import EdgeApply, NodeOrEdgeData, NodeApply
+ >>> client = CogniteClient()
+ >>> node = NodeApply("mySpace", "myNodeId")
+ >>> res = client.data_modeling.instances.apply(node)
+
+ Create two nodes with data, connected by a one-to-many edge
+
+ >>> from cognite.client.data_classes.data_modeling import ContainerId, EdgeApply, NodeOrEdgeData, NodeApply, ViewId
+ >>> work_order = NodeApply(
+ ... space="industrial",
+ ... external_id="work_order:123",
+ ... sources=[
+ ... # Insert data through a view
+ ... NodeOrEdgeData(
+ ... ViewId("mySpace", "WorkOrderView", "v1"),
+ ... {"title": "Repair pump", "createdYear": 2023}
+ ... )
+ ... ]
+ ... )
+ >>> pump = NodeApply(
+ ... space="industrial",
+ ... external_id="pump:456",
+ ... sources=[
+ ... # Insert data directly to the container
+ ... NodeOrEdgeData(
+ ... ContainerId("mySpace", "PumpContainer"),
+ ... {"name": "Pump 456", "location": "Subsea"}
+ ... )
+ ... ]
+ ... )
+ >>> # This is a one-to-many edge, in this case from a work order to a pump
+ >>> work_order_to_pump = EdgeApply(
+ ... space="industrial",
+ ... external_id="relation:work_order:123:pump:456",
+ ... type=("industrial", "relates-to"),
+ ... start_node=("industrial", "work_order:123"),
+ ... end_node=("industrial", "pump:456"),
+ ... )
+ >>> res = client.data_modeling.instances.apply([work_order, pump], [work_order_to_pump])
+
+ Create a new edge and automatically create its start and end nodes.
+
+ >>> from cognite.client.data_classes.data_modeling import EdgeApply
+ >>> work_order_to_pump = EdgeApply(
+ ... space="industrial",
+ ... external_id="relation:work_order:123:pump:456",
+ ... type=("industrial", "relates-to"),
+ ... start_node=("industrial", "work_order:123"),
+ ... end_node=("industrial", "pump:456"),
+ ... )
+ >>> res = client.data_modeling.instances.apply(
+ ... edges=work_order_to_pump,
+ ... auto_create_start_nodes=True,
+ ... auto_create_end_nodes=True
+ ... )
+
+ Using a helper function to create a valid GraphQL timestamp for a datetime object:
+
+ >>> from cognite.client.utils import datetime_to_ms_iso_timestamp
+ >>> from datetime import datetime, timezone
+ >>> my_date = datetime(2020, 3, 14, 15, 9, 26, 535000, tzinfo=timezone.utc)
+ >>> data_model_timestamp = datetime_to_ms_iso_timestamp(my_date) # "2020-03-14T15:09:26.535+00:00"
+
+ Create a typed node apply. Any property that you want to access through a different attribute name, e.g. you want
+ `my_node.birth_year` to return the data for the property `birthYear`, must be declared with PropertyOptions as shown below.
+ We strongly suggest you use snake_cased attribute names, as is done here:
+
+ >>> from cognite.client.data_classes.data_modeling import TypedNodeApply, PropertyOptions, ViewId
+ >>> class PersonApply(TypedNodeApply):
+ ... birth_year = PropertyOptions(identifier="birthYear")
+ ...
+ ... def __init__(self, space: str, external_id: str, name: str, birth_year: int):
+ ... super().__init__(space, external_id, type=("sp_model_space", "Person"))
+ ... self.name = name
+ ... self.birth_year = birth_year
+ ... def get_source(self):
+ ... return ViewId("sp_model_space", "Person", "v1")
+ ...
+ >>> person = PersonApply("sp_date_space", "my_person", "John Doe", 1980)
+ >>> res = client.data_modeling.instances.apply(nodes=person)
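+
+ Skip items with a version conflict instead of failing the whole request. A minimal sketch; the
+ external id and version below are placeholders and assume the node already exists:
+
+ >>> node = NodeApply("mySpace", "myNodeId", existing_version=1)
+ >>> res = client.data_modeling.instances.apply(
+ ...     nodes=node,
+ ...     skip_on_version_conflict=True
+ ... )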
+ """
+ return run_sync(
+ self.__async_client.data_modeling.instances.apply(
+ nodes=nodes,
+ edges=edges,
+ auto_create_start_nodes=auto_create_start_nodes,
+ auto_create_end_nodes=auto_create_end_nodes,
+ auto_create_direct_relations=auto_create_direct_relations,
+ skip_on_version_conflict=skip_on_version_conflict,
+ replace=replace,
+ )
+ )
+
+ @overload
+ def search(
+ self,
+ view: ViewId,
+ query: str | None = None,
+ *,
+ instance_type: Literal["node"] = "node",
+ properties: list[str] | None = None,
+ target_units: list[TargetUnit] | None = None,
+ space: str | SequenceNotStr[str] | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ include_typing: bool = False,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ sort: Sequence[InstanceSort | dict] | InstanceSort | dict | None = None,
+ operator: Literal["AND", "OR"] = "OR",
+ ) -> NodeList[Node]: ...
+
+ @overload
+ def search(
+ self,
+ view: ViewId,
+ query: str | None = None,
+ *,
+ instance_type: Literal["edge"],
+ properties: list[str] | None = None,
+ target_units: list[TargetUnit] | None = None,
+ space: str | SequenceNotStr[str] | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ include_typing: bool = False,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ sort: Sequence[InstanceSort | dict] | InstanceSort | dict | None = None,
+ operator: Literal["AND", "OR"] = "OR",
+ ) -> EdgeList[Edge]: ...
+
+ @overload
+ def search(
+ self,
+ view: ViewId,
+ query: str | None = None,
+ *,
+ instance_type: type[T_Node],
+ properties: list[str] | None = None,
+ target_units: list[TargetUnit] | None = None,
+ space: str | SequenceNotStr[str] | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ include_typing: bool = False,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ sort: Sequence[InstanceSort | dict] | InstanceSort | dict | None = None,
+ operator: Literal["AND", "OR"] = "OR",
+ ) -> NodeList[T_Node]: ...
+
+ @overload
+ def search(
+ self,
+ view: ViewId,
+ query: str | None = None,
+ *,
+ instance_type: type[T_Edge],
+ properties: list[str] | None = None,
+ target_units: list[TargetUnit] | None = None,
+ space: str | SequenceNotStr[str] | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ include_typing: bool = False,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ sort: Sequence[InstanceSort | dict] | InstanceSort | dict | None = None,
+ operator: Literal["AND", "OR"] = "OR",
+ ) -> EdgeList[T_Edge]: ...
+
+ def search(
+ self,
+ view: ViewId,
+ query: str | None = None,
+ instance_type: Literal["node", "edge"] | type[T_Node] | type[T_Edge] = "node",
+ properties: list[str] | None = None,
+ target_units: list[TargetUnit] | None = None,
+ space: str | SequenceNotStr[str] | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ include_typing: bool = False,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ sort: Sequence[InstanceSort | dict] | InstanceSort | dict | None = None,
+ operator: Literal["AND", "OR"] = "OR",
+ ) -> NodeList[T_Node] | EdgeList[T_Edge]:
+ """
+ `Search instances `_
+
+ Args:
+ view (ViewId): View to search in.
+ query (str | None): Query string that will be parsed and used for search.
+ instance_type (Literal['node', 'edge'] | type[T_Node] | type[T_Edge]): Whether to search for nodes or edges. You can also pass a custom typed node (or edge class) inheriting from TypedNode (or TypedEdge). See apply, retrieve_nodes or retrieve_edges for an example.
+ properties (list[str] | None): Optional array of properties you want to search through. If you do not specify one or more properties, the service will search all text fields within the view.
+ target_units (list[TargetUnit] | None): Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried.
+ space (str | SequenceNotStr[str] | None): Restrict instance search to the given space (or list of spaces).
+ filter (Filter | dict[str, Any] | None): Advanced filtering of instances.
+ include_typing (bool): Whether to include typing information.
+ limit (int | None): Maximum number of instances to return. Defaults to 25. Will return the maximum number
+ of results (1000) if set to None, -1, or math.inf.
+ sort (Sequence[InstanceSort | dict] | InstanceSort | dict | None): How you want the listed instances information ordered.
+ operator (Literal['AND', 'OR']): Controls how multiple search terms are combined when matching documents. OR (default): A document matches if it contains any of the query terms in the searchable fields. This typically returns more results but with lower precision. AND: A document matches only if it contains all of the query terms across the searchable fields. This typically returns fewer results but with higher relevance.
+
+ Returns:
+ NodeList[T_Node] | EdgeList[T_Edge]: Search result with matching nodes or edges.
+
+ Examples:
+
+ Search for Arnold in the person view in the name property:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.data_modeling import ViewId
+ >>> client = CogniteClient()
+ >>> res = client.data_modeling.instances.search(
+ ... ViewId("mySpace", "PersonView", "v1"),
+ ... query="Arnold",
+ ... properties=["name"],
+ ... )
+
+ Search for Tarantino, Ritchie or Scorsese in the person view in the name property, but only born after 1942:
+
+ >>> from cognite.client.data_classes.data_modeling import ViewId
+ >>> from cognite.client.data_classes import filters
+ >>> born_after_1942 = filters.Range(["mySpace", "PersonView/v1", "birthYear"], gt=1942)
+ >>> res = client.data_modeling.instances.search(
+ ... ViewId("mySpace", "PersonView", "v1"),
+ ... query="Tarantino Ritchie Scorsese",
+ ... properties=["name"],
+ ... filter=born_after_1942,
+ ... operator="OR"
+ ... )
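+
+ To require every search term to match, combine them with AND instead. A minimal sketch reusing
+ the person view from the example above:
+
+ >>> res = client.data_modeling.instances.search(
+ ...     ViewId("mySpace", "PersonView", "v1"),
+ ...     query="Quentin Tarantino",
+ ...     properties=["name"],
+ ...     operator="AND"
+ ... )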
+ """
+ return run_sync(
+ self.__async_client.data_modeling.instances.search(
+ view=view,
+ query=query,
+ instance_type=instance_type,
+ properties=properties,
+ target_units=target_units,
+ space=space,
+ filter=filter,
+ include_typing=include_typing,
+ limit=limit,
+ sort=sort,
+ operator=operator,
+ )
+ )
+
+ @overload
+ def aggregate(
+ self,
+ view: ViewId,
+ aggregates: MetricAggregation | dict,
+ group_by: None = None,
+ instance_type: Literal["node", "edge"] = "node",
+ query: str | None = None,
+ properties: str | SequenceNotStr[str] | None = None,
+ target_units: list[TargetUnit] | None = None,
+ space: str | SequenceNotStr[str] | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ ) -> AggregatedNumberedValue: ...
+
+ @overload
+ def aggregate(
+ self,
+ view: ViewId,
+ aggregates: Sequence[MetricAggregation | dict],
+ group_by: None = None,
+ instance_type: Literal["node", "edge"] = "node",
+ query: str | None = None,
+ properties: str | SequenceNotStr[str] | None = None,
+ target_units: list[TargetUnit] | None = None,
+ space: str | SequenceNotStr[str] | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ ) -> list[AggregatedNumberedValue]: ...
+
+ @overload
+ def aggregate(
+ self,
+ view: ViewId,
+ aggregates: MetricAggregation | dict | Sequence[MetricAggregation | dict],
+ group_by: str | SequenceNotStr[str],
+ instance_type: Literal["node", "edge"] = "node",
+ query: str | None = None,
+ properties: str | SequenceNotStr[str] | None = None,
+ target_units: list[TargetUnit] | None = None,
+ space: str | SequenceNotStr[str] | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ ) -> InstanceAggregationResultList: ...
+
+ def aggregate(
+ self,
+ view: ViewId,
+ aggregates: MetricAggregation | dict | Sequence[MetricAggregation | dict],
+ group_by: str | SequenceNotStr[str] | None = None,
+ instance_type: Literal["node", "edge"] = "node",
+ query: str | None = None,
+ properties: str | SequenceNotStr[str] | None = None,
+ target_units: list[TargetUnit] | None = None,
+ space: str | SequenceNotStr[str] | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ ) -> AggregatedNumberedValue | list[AggregatedNumberedValue] | InstanceAggregationResultList:
+ """
+ `Aggregate data across nodes/edges `_
+
+ Args:
+ view (ViewId): View to aggregate over.
+ aggregates (MetricAggregation | dict | Sequence[MetricAggregation | dict]): The properties to aggregate over.
+ group_by (str | SequenceNotStr[str] | None): The selection of fields to group the results by when doing aggregations. You can specify up to 5 items to group by.
+ instance_type (Literal['node', 'edge']): The type of instance.
+ query (str | None): Optional query string. The API will parse the query string, and use it to match the text properties on elements to use for the aggregate(s).
+ properties (str | SequenceNotStr[str] | None): Optional list of properties you want to apply the query to. If you do not list any properties, you search through text fields by default.
+ target_units (list[TargetUnit] | None): Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried.
+ space (str | SequenceNotStr[str] | None): Restrict instance aggregate query to the given space (or list of spaces).
+ filter (Filter | dict[str, Any] | None): Advanced filtering of instances.
+ limit (int | None): Maximum number of instances to return. Defaults to 25. Will return the maximum number
+ of results (1000) if set to None, -1, or math.inf.
+
+ Returns:
+ AggregatedNumberedValue | list[AggregatedNumberedValue] | InstanceAggregationResultList: Node or edge aggregation results.
+
+ Examples:
+
+ Get the average run time in minutes for pumps grouped by release year:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.data_modeling import ViewId, aggregations as aggs
+ >>> client = CogniteClient()
+ >>> avg_run_time = aggs.Avg("runTimeMinutes")
+ >>> view_id = ViewId("mySpace", "PumpView", "v1")
+ >>> res = client.data_modeling.instances.aggregate(view_id, avg_run_time, group_by="releaseYear")
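+
+ Without group_by, a single aggregation returns a single value. A minimal sketch counting
+ pump nodes by external id:
+
+ >>> pump_count = client.data_modeling.instances.aggregate(
+ ...     view_id, aggs.Count("externalId"), instance_type="node"
+ ... )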
+ """
+ return run_sync(
+ self.__async_client.data_modeling.instances.aggregate(
+ view=view,
+ aggregates=aggregates,
+ group_by=group_by,
+ instance_type=instance_type,
+ query=query,
+ properties=properties,
+ target_units=target_units,
+ space=space,
+ filter=filter,
+ limit=limit,
+ )
+ )
+
+ @overload
+ def histogram(
+ self,
+ view: ViewId,
+ histograms: Histogram,
+ instance_type: Literal["node", "edge"] = "node",
+ query: str | None = None,
+ properties: SequenceNotStr[str] | None = None,
+ target_units: list[TargetUnit] | None = None,
+ space: str | SequenceNotStr[str] | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ limit: int = DEFAULT_LIMIT_READ,
+ ) -> HistogramValue: ...
+
+ @overload
+ def histogram(
+ self,
+ view: ViewId,
+ histograms: Sequence[Histogram],
+ instance_type: Literal["node", "edge"] = "node",
+ query: str | None = None,
+ properties: SequenceNotStr[str] | None = None,
+ target_units: list[TargetUnit] | None = None,
+ space: str | SequenceNotStr[str] | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ limit: int = DEFAULT_LIMIT_READ,
+ ) -> list[HistogramValue]: ...
+
+ def histogram(
+ self,
+ view: ViewId,
+ histograms: Histogram | Sequence[Histogram],
+ instance_type: Literal["node", "edge"] = "node",
+ query: str | None = None,
+ properties: SequenceNotStr[str] | None = None,
+ target_units: list[TargetUnit] | None = None,
+ space: str | SequenceNotStr[str] | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ limit: int = DEFAULT_LIMIT_READ,
+ ) -> HistogramValue | list[HistogramValue]:
+ """
+ `Produces histograms for nodes/edges `_
+
+ Args:
+ view (ViewId): View to aggregate over.
+ histograms (Histogram | Sequence[Histogram]): The properties to aggregate over.
+ instance_type (Literal['node', 'edge']): Whether to search for nodes or edges.
+ query (str | None): Query string that will be parsed and used for search.
+ properties (SequenceNotStr[str] | None): Optional array of properties you want to search through. If you do not specify one or more properties, the service will search all text fields within the view.
+ target_units (list[TargetUnit] | None): Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried.
+ space (str | SequenceNotStr[str] | None): Restrict histogram query to instances in the given space (or list of spaces).
+ filter (Filter | dict[str, Any] | None): Advanced filtering of instances.
+ limit (int): Maximum number of instances to return. Defaults to 25.
+
+ Returns:
+ HistogramValue | list[HistogramValue]: Node or edge aggregation results.
+
+ Examples:
+
+ Find the number of people born per decade:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.data_modeling import aggregations as aggs, ViewId
+ >>> client = CogniteClient()
+ >>> birth_by_decade = aggs.Histogram("birthYear", interval=10.0)
+ >>> view_id = ViewId("mySpace", "PersonView", "v1")
+ >>> res = client.data_modeling.instances.histogram(view_id, birth_by_decade)
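+
+ Passing a sequence of histograms returns a list of results, one per histogram. A minimal
+ sketch; the "heightCm" property is a placeholder:
+
+ >>> by_decade_and_height = [
+ ...     aggs.Histogram("birthYear", interval=10.0),
+ ...     aggs.Histogram("heightCm", interval=5.0),
+ ... ]
+ >>> res = client.data_modeling.instances.histogram(view_id, by_decade_and_height)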
+ """
+ return run_sync(
+ self.__async_client.data_modeling.instances.histogram(
+ view=view,
+ histograms=histograms,
+ instance_type=instance_type,
+ query=query,
+ properties=properties,
+ target_units=target_units,
+ space=space,
+ filter=filter,
+ limit=limit,
+ )
+ )
+
+ def query(self, query: Query, include_typing: bool = False, debug: DebugParameters | None = None) -> QueryResult:
+ """
+ `Advanced query interface for nodes/edges. `_
+
+ The Data Modelling API exposes an advanced query interface. The query interface supports parameterization,
+ recursive edge traversal, chaining of result sets, and granular property selection.
+
+ Args:
+ query (Query): Query.
+ include_typing (bool): Should we return property type information as part of the result?
+ debug (DebugParameters | None): Debug settings for profiling and troubleshooting.
+
+ Returns:
+ QueryResult: The resulting nodes and/or edges from the query.
+
+ Examples:
+
+ Find pumps connected to work orders created before 2023, sorted by pump name:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.data_modeling.query import Query, Select, NodeResultSetExpression, EdgeResultSetExpression, SourceSelector
+ >>> from cognite.client.data_classes.data_modeling.instances import InstanceSort
+ >>> from cognite.client.data_classes.filters import Range, Equals
+ >>> from cognite.client.data_classes.data_modeling.ids import ViewId
+ >>> client = CogniteClient()
+ >>> work_order_id = ViewId("mySpace", "WorkOrderView", "v1")
+ >>> pump_id = ViewId("mySpace", "PumpView", "v1")
+ >>> query = Query(
+ ... with_ = {
+ ... "work_orders": NodeResultSetExpression(filter=Range(work_order_id.as_property_ref("createdYear"), lt=2023)),
+ ... "work_orders_to_pumps": EdgeResultSetExpression(from_="work_orders", filter=Equals(["edge", "type"], {"space": work_order_id.space, "externalId": "WorkOrder.asset"})),
+ ... "pumps": NodeResultSetExpression(from_="work_orders_to_pumps"),
+ ... },
+ ... select = {
+ ... "pumps": Select(
+ ... [SourceSelector(pump_id, ["name"])], sort=[InstanceSort(pump_id.as_property_ref("name"))]),
+ ... },
+ ... )
+ >>> res = client.data_modeling.instances.query(query)
+
+ To convert units, specify your target units in the SourceSelector. You can use either
+ a UnitReference or a UnitSystemReference. Note that for a property to be converted, it
+ needs to have a unit defined in the underlying container.
+
+ >>> from cognite.client.data_classes.data_modeling.data_types import UnitReference, UnitSystemReference
+ >>> selected_source = SourceSelector(
+ ... source=ViewId("my-space", "my-xid", "v1"),
+ ... properties=["f32_prop1", "f32_prop2", "f64_prop1", "f64_prop2"],
+ ... target_units=[
+ ... TargetUnit("f32_prop1", UnitReference("pressure:kilopa")),
+ ... TargetUnit("f32_prop2", UnitReference("pressure:barg")),
+ ... TargetUnit("f64_prop1", UnitSystemReference("SI")),
+ ... TargetUnit("f64_prop2", UnitSystemReference("Imperial")),
+ ... ],
+ ... )
+
+ To select all properties, pass ["*"] as the properties in your SourceSelector:
+
+ >>> SourceSelector(source=ViewId("my-space", "my-xid", "v1"), properties=["*"])
+
+ To debug and/or profile your query, you can use the debug parameter:
+
+ >>> from cognite.client.data_classes.data_modeling.debug import DebugParameters
+ >>> debug_params = DebugParameters(
+ ... emit_results=False,
+ ... include_plan=True, # Include the postgres execution plan
+ ... include_translated_query=True, # Include the internal representation of the query.
+ ... profile=True,
+ ... )
+ >>> res = client.data_modeling.instances.query(query, debug=debug_params)
+ >>> print(res.debug)
+ """
+ return run_sync(
+ self.__async_client.data_modeling.instances.query(query=query, include_typing=include_typing, debug=debug)
+ )
+
+ def sync(self, query: Query, include_typing: bool = False, debug: DebugParameters | None = None) -> QueryResult:
+ """
+ `Subscription to changes for nodes/edges. `_
+
+ Subscribe to changes for nodes and edges in a project, matching a supplied filter.
+
+ Args:
+ query (Query): Query.
+ include_typing (bool): Should we return property type information as part of the result?
+ debug (DebugParameters | None): Debug settings for profiling and troubleshooting.
+
+ Returns:
+ QueryResult: The resulting nodes and/or edges from the query.
+
+ Examples:
+
+ Find pumps connected to work orders created before 2023, sorted by pump name:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.data_modeling.instances import InstanceSort
+ >>> from cognite.client.data_classes.data_modeling.query import Query, Select, NodeResultSetExpression, EdgeResultSetExpression, SourceSelector
+ >>> from cognite.client.data_classes.filters import Range, Equals
+ >>> from cognite.client.data_classes.data_modeling.ids import ViewId
+ >>> client = CogniteClient()
+ >>> work_order_id = ViewId("mySpace", "WorkOrderView", "v1")
+ >>> pump_id = ViewId("mySpace", "PumpView", "v1")
+ >>> query = Query(
+ ... with_ = {
+ ... "work_orders": NodeResultSetExpression(filter=Range(work_order_id.as_property_ref("createdYear"), lt=2023)),
+ ... "work_orders_to_pumps": EdgeResultSetExpression(from_="work_orders", filter=Equals(["edge", "type"], {"space": work_order_id.space, "externalId": "WorkOrder.asset"})),
+ ... "pumps": NodeResultSetExpression(from_="work_orders_to_pumps"),
+ ... },
+ ... select = {
+ ... "pumps": Select(
+ ... [SourceSelector(pump_id, ["name"])], sort=[InstanceSort(pump_id.as_property_ref("name"))]),
+ ... },
+ ... )
+ >>> res = client.data_modeling.instances.sync(query)
+ >>> # Added a new work order with pumps created before 2023
+ >>> query.cursors = res.cursors
+ >>> res_new = client.data_modeling.instances.sync(query)
+
+ In the last example, res_new will only contain the pumps that were added with the new work order.
+
+ To debug and/or profile your query, you can use the debug parameter:
+
+ >>> from cognite.client.data_classes.data_modeling.debug import DebugParameters
+ >>> debug_params = DebugParameters(
+ ... emit_results=False,
+ ... include_plan=True, # Include the postgres execution plan
+ ... include_translated_query=True, # Include the internal representation of the query.
+ ... profile=True,
+ ... )
+ >>> res = client.data_modeling.instances.sync(query, debug=debug_params)
+ >>> print(res.debug)
+ """
+ return run_sync(
+ self.__async_client.data_modeling.instances.sync(query=query, include_typing=include_typing, debug=debug)
+ )
+
+ @overload
+ def list(
+ self,
+ instance_type: Literal["node"] = "node",
+ include_typing: bool = False,
+ sources: Source | Sequence[Source] | None = None,
+ space: str | SequenceNotStr[str] | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ sort: Sequence[InstanceSort | dict] | InstanceSort | dict | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ debug: DebugParameters | None = None,
+ ) -> NodeList[Node]: ...
+
+ @overload
+ def list(
+ self,
+ instance_type: Literal["edge"],
+ include_typing: bool = False,
+ sources: Source | Sequence[Source] | None = None,
+ space: str | SequenceNotStr[str] | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ sort: Sequence[InstanceSort | dict] | InstanceSort | dict | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ debug: DebugParameters | None = None,
+ ) -> EdgeList[Edge]: ...
+
+ @overload
+ def list(
+ self,
+ instance_type: type[T_Node],
+ *,
+ space: str | SequenceNotStr[str] | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ sort: Sequence[InstanceSort | dict] | InstanceSort | dict | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ debug: DebugParameters | None = None,
+ ) -> NodeList[T_Node]: ...
+
+ @overload
+ def list(
+ self,
+ instance_type: type[T_Edge],
+ *,
+ space: str | SequenceNotStr[str] | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ sort: Sequence[InstanceSort | dict] | InstanceSort | dict | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ debug: DebugParameters | None = None,
+ ) -> EdgeList[T_Edge]: ...
+
+ def list(
+ self,
+ instance_type: Literal["node", "edge"] | type[T_Node] | type[T_Edge] = "node",
+ include_typing: bool = False,
+ sources: Source | Sequence[Source] | None = None,
+ space: str | SequenceNotStr[str] | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ sort: Sequence[InstanceSort | dict] | InstanceSort | dict | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ debug: DebugParameters | None = None,
+ ) -> NodeList[T_Node] | EdgeList[T_Edge]:
+ """
+ `List instances `_
+
+ Args:
+ instance_type (Literal['node', 'edge'] | type[T_Node] | type[T_Edge]): Whether to query for nodes or edges. You can also pass a custom typed node (or edge class) inheriting from TypedNode (or TypedEdge). See apply, retrieve_nodes or retrieve_edges for an example.
+ include_typing (bool): Whether to return property type information as part of the result.
+ sources (Source | Sequence[Source] | None): Views to retrieve properties from.
+ space (str | SequenceNotStr[str] | None): Only return instances in the given space (or list of spaces).
+ limit (int | None): Maximum number of instances to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ sort (Sequence[InstanceSort | dict] | InstanceSort | dict | None): How you want the listed instances information ordered.
+ filter (Filter | dict[str, Any] | None): Advanced filtering of instances.
+ debug (DebugParameters | None): Debug settings for profiling and troubleshooting.
+
+ Returns:
+ NodeList[T_Node] | EdgeList[T_Edge]: List of requested instances
+
+ Examples:
+
+ List instances and limit to 5:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> instance_list = client.data_modeling.instances.list(limit=5)
+
+ List some instances in the space 'my-space':
+
+ >>> instance_list = client.data_modeling.instances.list(space="my-space")
+
+ List instances and sort by some property:
+
+ >>> from cognite.client.data_classes.data_modeling import InstanceSort
+ >>> property_sort = InstanceSort(
+ ... property=('space', 'view_xid/view_version', 'some_property'),
+ ... direction="descending",
+ ... nulls_first=True)
+ >>> instance_list = client.data_modeling.instances.list(sort=property_sort)
+
+ Iterate over instances (nodes by default), one-by-one:
+
+ >>> for node in client.data_modeling.instances():
+ ... node # do something with the node
+ >>> for edge in client.data_modeling.instances(instance_type="edge"):
+ ... edge # do something with the edge
+
+ Iterate over chunks of instances to reduce memory load:
+
+ >>> for instance_list in client.data_modeling.instances(chunk_size=100):
+ ... instance_list # do something with the instances
+
+ List instances with a view as source:
+
+ >>> from cognite.client.data_classes.data_modeling import ViewId
+ >>> my_view = ViewId("mySpace", "myView", "v1")
+ >>> instance_list = client.data_modeling.instances.list(sources=my_view)
+
+ Convert instances to pandas DataFrame with expanded properties (``expand_properties=True``).
+ This will add the properties directly as dataframe columns. Specifying ``camel_case=True``
+ will convert the basic columns to camel case (e.g. externalId), but leave the property names as-is.
+
+ >>> df = instance_list.to_pandas(
+ ... expand_properties=True,
+ ... camel_case=True,
+ ... )
+
+ To debug and/or profile your query, you can use the debug parameter:
+
+ >>> from cognite.client.data_classes.data_modeling.debug import DebugParameters
+ >>> debug_params = DebugParameters(
+ ... emit_results=False,
+ ... include_plan=True, # Include the postgres execution plan
+ ... include_translated_query=True, # Include the internal representation of the query.
+ ... profile=True,
+ ... )
+ >>> res = client.data_modeling.instances.list(
+ ... debug=debug_params, sources=my_view
+ ... )
+ >>> print(res.debug)
+ """
+ return run_sync(
+ self.__async_client.data_modeling.instances.list(
+ instance_type=instance_type,
+ include_typing=include_typing,
+ sources=sources,
+ space=space,
+ limit=limit,
+ sort=sort,
+ filter=filter,
+ debug=debug,
+ )
+ )
diff --git a/cognite/client/_sync_api/data_modeling/space_statistics.py b/cognite/client/_sync_api/data_modeling/space_statistics.py
new file mode 100644
index 0000000000..b6b802cfa2
--- /dev/null
+++ b/cognite/client/_sync_api/data_modeling/space_statistics.py
@@ -0,0 +1,80 @@
+"""
+===============================================================================
+b8446180971deded041820cbe43cb4ef
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.data_modeling.statistics import SpaceStatistics, SpaceStatisticsList
+from cognite.client.utils._async_helpers import run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncSpaceStatisticsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def retrieve(self, space: str) -> SpaceStatistics | None: ...
+
+ @overload
+ def retrieve(self, space: SequenceNotStr[str]) -> SpaceStatisticsList: ...
+
+ def retrieve(self, space: str | SequenceNotStr[str]) -> SpaceStatistics | SpaceStatisticsList | None:
+ """
+ `Retrieve usage data and limits per space `_
+
+ Args:
+ space (str | SequenceNotStr[str]): The space or spaces to retrieve statistics for.
+
+ Returns:
+ SpaceStatistics | SpaceStatisticsList | None: The requested statistics and limits for the specified space(s).
+
+ Examples:
+
+ Fetch statistics for a single space:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> result = client.data_modeling.statistics.spaces.retrieve("my-space")
+
+ Fetch statistics for multiple spaces:
+
+ >>> res = client.data_modeling.statistics.spaces.retrieve(
+ ... ["my-space1", "my-space2"]
+ ... )
+ """
+ return run_sync(self.__async_client.data_modeling.statistics.spaces.retrieve(space=space))
+
+ def list(self) -> SpaceStatisticsList:
+ """
+ `Retrieve usage for all spaces `_
+
+ Returns statistics for data modeling resources grouped by each space in the project.
+
+ Returns:
+ SpaceStatisticsList: The requested statistics and limits for all spaces in the project.
+
+ Examples:
+
+ Fetch statistics for all spaces in the project:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> stats = client.data_modeling.statistics.spaces.list()
+ >>> for space_stats in stats:
+ ... print(f"Space: {space_stats.space}, Nodes: {space_stats.nodes}")
+ """
+ return run_sync(self.__async_client.data_modeling.statistics.spaces.list())
diff --git a/cognite/client/_sync_api/data_modeling/spaces.py b/cognite/client/_sync_api/data_modeling/spaces.py
new file mode 100644
index 0000000000..9f6c03e13b
--- /dev/null
+++ b/cognite/client/_sync_api/data_modeling/spaces.py
@@ -0,0 +1,158 @@
+"""
+===============================================================================
+ad14ab3f96c33bdeba7358c9567da303
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import TYPE_CHECKING, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.data_modeling.spaces import Space, SpaceApply, SpaceList
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncSpacesAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[Space]: ...
+
+ @overload
+ def __call__(self, chunk_size: int) -> Iterator[SpaceList]: ...
+
+ def __call__(self, chunk_size: int | None = None, limit: int | None = None) -> Iterator[Space | SpaceList]:
+ """
+ Iterate over spaces
+
+ Fetches spaces as they are iterated over, so you keep a limited number of spaces in memory.
+
+ Args:
+ chunk_size (int | None): Number of spaces to return in each chunk. Defaults to yielding one space at a time.
+ limit (int | None): Maximum number of spaces to return. Defaults to returning all items.
+
+ Yields:
+ Space | SpaceList: yields Space one by one if chunk_size is not specified, else SpaceList objects.
+ """
+ yield from SyncIterator(self.__async_client.data_modeling.spaces(chunk_size=chunk_size, limit=limit))
+
+ @overload
+ def retrieve(self, spaces: str) -> Space | None: ...
+
+ @overload
+ def retrieve(self, spaces: SequenceNotStr[str]) -> SpaceList: ...
+
+ def retrieve(self, spaces: str | SequenceNotStr[str]) -> Space | SpaceList | None:
+ """
+ `Retrieve one or more spaces. `_
+
+ Args:
+ spaces (str | SequenceNotStr[str]): Space ID or list of space IDs.
+
+ Returns:
+ Space | SpaceList | None: The requested space(s), or None if a single requested space does not exist.
+
+ Examples:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.data_modeling.spaces.retrieve(spaces='mySpace')
+
+ Get multiple spaces by id:
+
+ >>> res = client.data_modeling.spaces.retrieve(spaces=["MySpace", "MyAwesomeSpace", "MyOtherSpace"])
+ """
+ return run_sync(self.__async_client.data_modeling.spaces.retrieve(spaces=spaces))
+
+ def delete(self, spaces: str | SequenceNotStr[str]) -> list[str]:
+ """
+ `Delete one or more spaces `_
+
+ Args:
+ spaces (str | SequenceNotStr[str]): Space ID or list of space IDs.
+ Returns:
+ list[str]: The space(s) which have been deleted.
+ Examples:
+
+ Delete spaces by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.data_modeling.spaces.delete(spaces=["mySpace", "myOtherSpace"])
+ """
+ return run_sync(self.__async_client.data_modeling.spaces.delete(spaces=spaces))
+
+ def list(self, limit: int | None = DEFAULT_LIMIT_READ, include_global: bool = False) -> SpaceList:
+ """
+ `List spaces `_
+
+ Args:
+ limit (int | None): Maximum number of spaces to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ include_global (bool): Whether to include global spaces. Defaults to False.
+
+ Returns:
+ SpaceList: List of requested spaces
+
+ Examples:
+
+ List up to 5 spaces:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> space_list = client.data_modeling.spaces.list(limit=5)
+
+ Iterate over spaces, one-by-one:
+
+ >>> for space in client.data_modeling.spaces():
+ ... space # do something with the space
+
+ Iterate over chunks of spaces to reduce memory load:
+
+ >>> for space_list in client.data_modeling.spaces(chunk_size=2500):
+ ... space_list # do something with the spaces
+ """
+ return run_sync(self.__async_client.data_modeling.spaces.list(limit=limit, include_global=include_global))
+
+ @overload
+ def apply(self, spaces: Sequence[SpaceApply]) -> SpaceList: ...
+
+ @overload
+ def apply(self, spaces: SpaceApply) -> Space: ...
+
+ def apply(self, spaces: SpaceApply | Sequence[SpaceApply]) -> Space | SpaceList:
+ """
+ `Create or patch one or more spaces. `_
+
+ Args:
+ spaces (SpaceApply | Sequence[SpaceApply]): Space or spaces to create or update.
+
+ Returns:
+ Space | SpaceList: Created space(s)
+
+ Examples:
+
+ Create new spaces:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.data_modeling import SpaceApply
+ >>> client = CogniteClient()
+ >>> spaces = [SpaceApply(space="mySpace", description="My first space", name="My Space"),
+ ... SpaceApply(space="myOtherSpace", description="My second space", name="My Other Space")]
+ >>> res = client.data_modeling.spaces.apply(spaces)
+ """
+ return run_sync(self.__async_client.data_modeling.spaces.apply(spaces=spaces))
diff --git a/cognite/client/_sync_api/data_modeling/statistics.py b/cognite/client/_sync_api/data_modeling/statistics.py
new file mode 100644
index 0000000000..16b2db9023
--- /dev/null
+++ b/cognite/client/_sync_api/data_modeling/statistics.py
@@ -0,0 +1,50 @@
+"""
+===============================================================================
+22ce5358beca6d957d6ed92d895118ae
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._sync_api.data_modeling.space_statistics import SyncSpaceStatisticsAPI
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.data_modeling.statistics import ProjectStatistics
+from cognite.client.utils._async_helpers import run_sync
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncStatisticsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+ self.spaces = SyncSpaceStatisticsAPI(async_client)
+
+ def project(self) -> ProjectStatistics:
+ """
+ `Retrieve project-wide usage data and limits `_
+
+ Returns usage data and limits for the project's data modelling, including data model schemas and graph instances.
+
+ Returns:
+ ProjectStatistics: The requested statistics and limits
+
+ Examples:
+
+ Fetch project statistics (and limits) and check the current number of data models
+ and how many more can be created:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> stats = client.data_modeling.statistics.project()
+ >>> data_model_count = stats.data_models.count
+ >>> available_count = stats.data_models.limit - data_model_count
+ """
+ return run_sync(self.__async_client.data_modeling.statistics.project())
diff --git a/cognite/client/_sync_api/data_modeling/views.py b/cognite/client/_sync_api/data_modeling/views.py
new file mode 100644
index 0000000000..a70550d800
--- /dev/null
+++ b/cognite/client/_sync_api/data_modeling/views.py
@@ -0,0 +1,263 @@
+"""
+===============================================================================
+286155d9ffa73428bab0192dd2914c82
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import TYPE_CHECKING, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DATA_MODELING_DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.data_modeling.ids import ViewId, ViewIdentifier
+from cognite.client.data_classes.data_modeling.views import View, ViewApply, ViewList
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncViewsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[View]: ...
+
+ @overload
+ def __call__(self, chunk_size: int) -> Iterator[ViewList]: ...
+
+ def __call__(
+ self,
+ chunk_size: int | None = None,
+ limit: int | None = None,
+ space: str | None = None,
+ include_inherited_properties: bool = True,
+ all_versions: bool = False,
+ include_global: bool = False,
+ ) -> Iterator[View | ViewList]:
+ """
+ Iterate over views
+
+ Fetches views as they are iterated over, so you keep a limited number of views in memory.
+
+ Args:
+ chunk_size (int | None): Number of views to return in each chunk. Defaults to yielding one view at a time.
+ limit (int | None): Maximum number of views to return. Defaults to returning all items.
+ space (str | None): The space to query.
+ include_inherited_properties (bool): Whether to include properties inherited from views this view implements.
+ all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field.
+ include_global (bool): Whether to include global views.
+
+ Yields:
+ View | ViewList: yields View one by one if chunk_size is not specified, else ViewList objects.
+ """
+ yield from SyncIterator(
+ self.__async_client.data_modeling.views(
+ chunk_size=chunk_size,
+ limit=limit,
+ space=space,
+ include_inherited_properties=include_inherited_properties,
+ all_versions=all_versions,
+ include_global=include_global,
+ )
+ )
+
+ def retrieve(
+ self,
+ ids: ViewIdentifier | Sequence[ViewIdentifier],
+ include_inherited_properties: bool = True,
+ all_versions: bool = True,
+ ) -> ViewList:
+ """
+ `Retrieve one or more views by id. `_
+
+ Args:
+ ids (ViewIdentifier | Sequence[ViewIdentifier]): The view identifier(s). This can be given as a tuple of
+ strings or a ViewId object. For example, ("my_space", "my_view"), ("my_space", "my_view", "my_version"),
+ or ViewId("my_space", "my_view", "my_version"). Note that version is optional, if not provided, all versions
+ will be returned.
+ include_inherited_properties (bool): Whether to include properties inherited from views this view implements.
+ all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field.
+
+ Returns:
+ ViewList: The requested view(s).
+
+ Examples:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.data_modeling.views.retrieve(('mySpace', 'myView', 'v1'))
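+
+ If you omit the version, all versions of the view are returned (illustrative identifiers):
+
+ >>> res = client.data_modeling.views.retrieve(('mySpace', 'myView'))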
+ """
+ return run_sync(
+ self.__async_client.data_modeling.views.retrieve(
+ ids=ids, include_inherited_properties=include_inherited_properties, all_versions=all_versions
+ )
+ )
+
+ def delete(self, ids: ViewIdentifier | Sequence[ViewIdentifier]) -> list[ViewId]:
+ """
+ `Delete one or more views `_
+
+ Args:
+ ids (ViewIdentifier | Sequence[ViewIdentifier]): View identifier(s)
+ Returns:
+ list[ViewId]: The identifier for the view(s) which has been deleted. Empty list if nothing was deleted.
+ Examples:
+
+ Delete views by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.data_modeling.views.delete(('mySpace', 'myView', 'v1'))
+ """
+ return run_sync(self.__async_client.data_modeling.views.delete(ids=ids))
+
+ def list(
+ self,
+ limit: int | None = DATA_MODELING_DEFAULT_LIMIT_READ,
+ space: str | None = None,
+ include_inherited_properties: bool = True,
+ all_versions: bool = False,
+ include_global: bool = False,
+ ) -> ViewList:
+ """
+ `List views `_
+
+ Args:
+ limit (int | None): Maximum number of views to return. Defaults to 10. Set to -1, float("inf") or None to return all items.
+ space (str | None): The space to query.
+ include_inherited_properties (bool): Whether to include properties inherited from views this view implements.
+ all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field.
+ include_global (bool): Whether to include global views.
+
+ Returns:
+ ViewList: List of requested views
+
+ Examples:
+
+ List 5 views:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> view_list = client.data_modeling.views.list(limit=5)
+
+ Iterate over views, one-by-one:
+
+ >>> for view in client.data_modeling.views():
+ ... view # do something with the view
+
+ Iterate over chunks of views to reduce memory load:
+
+ >>> for view_list in client.data_modeling.views(chunk_size=10):
+ ... view_list # do something with the views
+ """
+ return run_sync(
+ self.__async_client.data_modeling.views.list(
+ limit=limit,
+ space=space,
+ include_inherited_properties=include_inherited_properties,
+ all_versions=all_versions,
+ include_global=include_global,
+ )
+ )
+
+ @overload
+ def apply(self, view: Sequence[ViewApply]) -> ViewList: ...
+
+ @overload
+ def apply(self, view: ViewApply) -> View: ...
+
+ def apply(self, view: ViewApply | Sequence[ViewApply]) -> View | ViewList:
+ """
+ `Create or update (upsert) one or more views. `_
+
+ Args:
+ view (ViewApply | Sequence[ViewApply]): View(s) to create or update.
+
+ Returns:
+ View | ViewList: Created view(s)
+
+ Examples:
+
+ Create new views:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.data_modeling import ViewApply, MappedPropertyApply, ContainerId
+ >>> client = CogniteClient()
+ >>> views = [
+ ... ViewApply(
+ ... space="mySpace",
+ ... external_id="myView",
+ ... version="v1",
+ ... properties={
+ ... "someAlias": MappedPropertyApply(
+ ... container=ContainerId("mySpace", "myContainer"),
+ ... container_property_identifier="someProperty",
+ ... ),
+ ... }
+ ... )
+ ... ]
+ >>> res = client.data_modeling.views.apply(views)
+
+
+ Create views with edge relations:
+
+ >>> from cognite.client.data_classes.data_modeling import (
+ ... ContainerId,
+ ... DirectRelationReference,
+ ... MappedPropertyApply,
+ ... MultiEdgeConnectionApply,
+ ... ViewApply,
+ ... ViewId
+ ... )
+ >>> work_order_for_asset = DirectRelationReference(space="mySpace", external_id="work_order_for_asset")
+ >>> work_order_view = ViewApply(
+ ... space="mySpace",
+ ... external_id="WorkOrder",
+ ... version="v1",
+ ... name="WorkOrder",
+ ... properties={
+ ... "title": MappedPropertyApply(
+ ... container=ContainerId(space="mySpace", external_id="WorkOrder"),
+ ... container_property_identifier="title",
+ ... ),
+ ... "asset": MultiEdgeConnectionApply(
+ ... type=work_order_for_asset,
+ ... direction="outwards",
+ ... source=ViewId("mySpace", "Asset", "v1"),
+ ... name="asset",
+ ... ),
+ ... }
+ ... )
+ >>> asset_view = ViewApply(
+ ... space="mySpace",
+ ... external_id="Asset",
+ ... version="v1",
+ ... name="Asset",
+ ... properties={
+ ... "name": MappedPropertyApply(
+ ... container=ContainerId("mySpace", "Asset"),
+ ... name="name",
+ ... container_property_identifier="name",
+ ... ),
+ ... "work_orders": MultiEdgeConnectionApply(
+ ... type=work_order_for_asset,
+ ... direction="inwards",
+ ... source=ViewId("mySpace", "WorkOrder", "v1"),
+ ... name="work_orders",
+ ... ),
+ ... }
+ ... )
+ >>> res = client.data_modeling.views.apply([work_order_view, asset_view])
+ """
+ return run_sync(self.__async_client.data_modeling.views.apply(view=view))
diff --git a/cognite/client/_sync_api/data_sets.py b/cognite/client/_sync_api/data_sets.py
new file mode 100644
index 0000000000..202e866130
--- /dev/null
+++ b/cognite/client/_sync_api/data_sets.py
@@ -0,0 +1,289 @@
+"""
+===============================================================================
+98e4f0c8b9c49ed283cc7f11792b2999
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import TYPE_CHECKING, Any, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import DataSet, DataSetFilter, DataSetList, DataSetUpdate, DataSetWrite, TimestampRange
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncDataSetsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[DataSet]: ...
+
+ @overload
+ def __call__(self, chunk_size: int) -> Iterator[DataSetList]: ...
+
+ def __call__(
+ self,
+ chunk_size: int | None = None,
+ metadata: dict[str, str] | None = None,
+ created_time: dict[str, Any] | TimestampRange | None = None,
+ last_updated_time: dict[str, Any] | TimestampRange | None = None,
+ external_id_prefix: str | None = None,
+ write_protected: bool | None = None,
+ limit: int | None = None,
+ ) -> Iterator[DataSet | DataSetList]:
+ """
+ Iterate over data sets
+
+ Fetches data sets as they are iterated over, so you keep a limited number of data sets in memory.
+
+ Args:
+ chunk_size (int | None): Number of data sets to return in each chunk. Defaults to yielding one data set at a time.
+ metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value.
+ created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps.
+ last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps.
+ external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID.
+ write_protected (bool | None): Specify whether the filtered data sets are write-protected, or not. Set to True to only list write-protected data sets.
+ limit (int | None): Maximum number of data sets to return. Defaults to returning all items.
+
+ Yields:
+ DataSet | DataSetList: yields DataSet one by one if chunk_size is not specified, else DataSetList objects.
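+
+ Examples:
+
+ Iterate over data sets one-by-one, or in chunks (a minimal illustration assuming a default client configuration):
+
+ >>> from cognite.client import CogniteClient
+ >>> client = CogniteClient()
+ >>> for data_set in client.data_sets():
+ ...     data_set # do something with the data set
+ >>> for data_set_list in client.data_sets(chunk_size=100):
+ ...     data_set_list # do something with the chunk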
+ """
+ yield from SyncIterator(
+ self.__async_client.data_sets(
+ chunk_size=chunk_size,
+ metadata=metadata,
+ created_time=created_time,
+ last_updated_time=last_updated_time,
+ external_id_prefix=external_id_prefix,
+ write_protected=write_protected,
+ limit=limit,
+ )
+ )
+
+ @overload
+ def create(self, data_set: Sequence[DataSet] | Sequence[DataSetWrite]) -> DataSetList: ...
+
+ @overload
+ def create(self, data_set: DataSet | DataSetWrite) -> DataSet: ...
+
+ def create(
+ self, data_set: DataSet | DataSetWrite | Sequence[DataSet] | Sequence[DataSetWrite]
+ ) -> DataSet | DataSetList:
+ """
+ `Create one or more data sets. `_
+
+ Args:
+            data_set (DataSet | DataSetWrite | Sequence[DataSet] | Sequence[DataSetWrite]): Data set or list of data sets to create.
+
+ Returns:
+ DataSet | DataSetList: Created data set(s)
+
+ Examples:
+
+ Create new data sets:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import DataSetWrite
+ >>> client = CogniteClient()
+ >>> data_sets = [DataSetWrite(name="1st level"), DataSetWrite(name="2nd level")]
+ >>> res = client.data_sets.create(data_sets)
+ """
+ return run_sync(self.__async_client.data_sets.create(data_set=data_set))
+
+ def retrieve(self, id: int | None = None, external_id: str | None = None) -> DataSet | None:
+ """
+ `Retrieve a single data set by id. `_
+
+ Args:
+ id (int | None): ID
+ external_id (str | None): External ID
+
+ Returns:
+ DataSet | None: Requested data set or None if it does not exist.
+
+ Examples:
+
+ Get data set by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.data_sets.retrieve(id=1)
+
+ Get data set by external id:
+
+ >>> res = client.data_sets.retrieve(external_id="1")
+ """
+ return run_sync(self.__async_client.data_sets.retrieve(id=id, external_id=external_id))
+
+ def retrieve_multiple(
+ self,
+ ids: Sequence[int] | None = None,
+ external_ids: SequenceNotStr[str] | None = None,
+ ignore_unknown_ids: bool = False,
+ ) -> DataSetList:
+ """
+ `Retrieve multiple data sets by id. `_
+
+ Args:
+ ids (Sequence[int] | None): IDs
+ external_ids (SequenceNotStr[str] | None): External IDs
+ ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+
+ Returns:
+ DataSetList: The requested data sets.
+
+ Examples:
+
+ Get data sets by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.data_sets.retrieve_multiple(ids=[1, 2, 3])
+
+ Get data sets by external id:
+
+ >>> res = client.data_sets.retrieve_multiple(external_ids=["abc", "def"], ignore_unknown_ids=True)
+ """
+ return run_sync(
+ self.__async_client.data_sets.retrieve_multiple(
+ ids=ids, external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids
+ )
+ )
+
+ def aggregate_count(self, filter: DataSetFilter | dict[str, Any] | None = None) -> int:
+ """
+ `Aggregate data sets `_
+
+ Args:
+ filter (DataSetFilter | dict[str, Any] | None): Filter on data set filter with exact match
+
+ Returns:
+ int: Count of data sets matching the filter.
+
+ Examples:
+
+ Get the number of write-protected data sets:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> aggregate_protected = client.data_sets.aggregate_count(
+ ... filter={"write_protected": True}
+ ... )
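+
+            The same count using a typed filter (assuming ``DataSetFilter`` mirrors the keyword filter fields shown above):
+
+            >>> from cognite.client.data_classes import DataSetFilter
+            >>> aggregate_protected = client.data_sets.aggregate_count(
+            ...     filter=DataSetFilter(write_protected=True)
+            ... )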
+ """
+ return run_sync(self.__async_client.data_sets.aggregate_count(filter=filter))
+
+ @overload
+ def update(
+ self,
+ item: DataSet | DataSetWrite | DataSetUpdate,
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> DataSet: ...
+
+ @overload
+ def update(
+ self,
+ item: Sequence[DataSet | DataSetWrite | DataSetUpdate],
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> DataSetList: ...
+
+ def update(
+ self,
+ item: DataSet | DataSetWrite | DataSetUpdate | Sequence[DataSet | DataSetWrite | DataSetUpdate],
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> DataSet | DataSetList:
+ """
+ `Update one or more data sets `_
+
+ Args:
+ item (DataSet | DataSetWrite | DataSetUpdate | Sequence[DataSet | DataSetWrite | DataSetUpdate]): Data set(s) to update
+            mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (DataSet or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace the existing values (default). Using 'replace' will additionally clear all the fields that are not specified by you. The last option, 'patch', will update only the fields you have set, and for container-like fields such as metadata or labels, the given values are added to the existing ones. For more details, see :ref:`appendix-update`.
+
+ Returns:
+ DataSet | DataSetList: Updated data set(s)
+
+ Examples:
+
+ Update a data set that you have fetched. This will perform a full update of the data set:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> data_set = client.data_sets.retrieve(id=1)
+ >>> data_set.description = "New description"
+ >>> res = client.data_sets.update(data_set)
+
+ Perform a partial update on a data set, updating the description and removing a field from metadata:
+
+ >>> from cognite.client.data_classes import DataSetUpdate
+ >>> my_update = DataSetUpdate(id=1).description.set("New description").metadata.remove(["key"])
+ >>> res = client.data_sets.update(my_update)
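+
+            To fully replace a data set, clearing every field you do not specify, pass ``mode="replace"``:
+
+            >>> res = client.data_sets.update(data_set, mode="replace")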
+ """
+ return run_sync(self.__async_client.data_sets.update(item=item, mode=mode))
+
+ def list(
+ self,
+ metadata: dict[str, str] | None = None,
+ created_time: dict[str, Any] | TimestampRange | None = None,
+ last_updated_time: dict[str, Any] | TimestampRange | None = None,
+ external_id_prefix: str | None = None,
+ write_protected: bool | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ ) -> DataSetList:
+ """
+ `List data sets `_
+
+ Args:
+ metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value.
+ created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps.
+ last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps.
+ external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID.
+ write_protected (bool | None): Specify whether the filtered data sets are write-protected, or not. Set to True to only list write-protected data sets.
+ limit (int | None): Maximum number of data sets to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+
+ Returns:
+ DataSetList: List of requested data sets
+
+ Examples:
+
+ List data sets and filter on write_protected:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> data_sets_list = client.data_sets.list(limit=5, write_protected=False)
+
+ Iterate over data sets, one-by-one:
+
+ >>> for data_set in client.data_sets():
+ ... data_set # do something with the data set
+
+ Iterate over chunks of data sets to reduce memory load:
+
+ >>> for data_set_list in client.data_sets(chunk_size=2500):
+ ... data_set_list # do something with the list
+ """
+ return run_sync(
+ self.__async_client.data_sets.list(
+ metadata=metadata,
+ created_time=created_time,
+ last_updated_time=last_updated_time,
+ external_id_prefix=external_id_prefix,
+ write_protected=write_protected,
+ limit=limit,
+ )
+ )
diff --git a/cognite/client/_sync_api/datapoints.py b/cognite/client/_sync_api/datapoints.py
new file mode 100644
index 0000000000..75ee920d12
--- /dev/null
+++ b/cognite/client/_sync_api/datapoints.py
@@ -0,0 +1,1559 @@
+"""
+===============================================================================
+bb21beeeb7ec0d88e0a4320fead5b347
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+import datetime
+from collections.abc import Iterator, Sequence
+from typing import TYPE_CHECKING, Any, Literal, overload
+from zoneinfo import ZoneInfo
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_DATAPOINTS_CHUNK_SIZE
+from cognite.client._sync_api.synthetic_time_series import SyncSyntheticDatapointsAPI
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import (
+ Datapoints,
+ DatapointsArray,
+ DatapointsArrayList,
+ DatapointsList,
+ DatapointsQuery,
+ LatestDatapointQuery,
+)
+from cognite.client.data_classes.data_modeling.ids import NodeId
+from cognite.client.data_classes.datapoint_aggregates import Aggregate
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+ import pandas as pd
+
+
+class SyncDatapointsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+ self.synthetic = SyncSyntheticDatapointsAPI(async_client)
+
+ @overload
+ def __call__(
+ self,
+ queries: DatapointsQuery,
+ *,
+ return_arrays: Literal[True] = True,
+ chunk_size_datapoints: int = DEFAULT_DATAPOINTS_CHUNK_SIZE,
+ chunk_size_time_series: int | None = None,
+ ) -> Iterator[DatapointsArray]: ...
+
+ @overload
+ def __call__(
+ self,
+ queries: Sequence[DatapointsQuery],
+ *,
+ return_arrays: Literal[True] = True,
+ chunk_size_datapoints: int = DEFAULT_DATAPOINTS_CHUNK_SIZE,
+ chunk_size_time_series: int | None = None,
+ ) -> Iterator[DatapointsArrayList]: ...
+
+ @overload
+ def __call__(
+ self,
+ queries: DatapointsQuery,
+ *,
+ return_arrays: Literal[False],
+ chunk_size_datapoints: int = DEFAULT_DATAPOINTS_CHUNK_SIZE,
+ chunk_size_time_series: int | None = None,
+ ) -> Iterator[Datapoints]: ...
+
+ @overload
+ def __call__(
+ self,
+ queries: Sequence[DatapointsQuery],
+ *,
+ return_arrays: Literal[False],
+ chunk_size_datapoints: int = DEFAULT_DATAPOINTS_CHUNK_SIZE,
+ chunk_size_time_series: int | None = None,
+ ) -> Iterator[DatapointsList]: ...
+
+ def __call__(
+ self,
+ queries: DatapointsQuery | Sequence[DatapointsQuery],
+ *,
+ chunk_size_datapoints: int = DEFAULT_DATAPOINTS_CHUNK_SIZE,
+ chunk_size_time_series: int | None = None,
+ return_arrays: bool = True,
+ ) -> Iterator[DatapointsArray | DatapointsArrayList | Datapoints | DatapointsList]:
+ """
+ `Iterate through datapoints in chunks, for one or more time series. `_
+
+ Note:
+            Control memory usage by specifying ``chunk_size_time_series`` (how many time series to iterate simultaneously) and
+            ``chunk_size_datapoints`` (how many datapoints to yield per iteration, per individual time series). See the full example
+            in the Examples section below. Note that in order to make efficient use of the API request limits, this method never holds
+            fewer than 100k datapoints in memory at a time, per time series.
+
+ If you run with memory constraints, use ``return_arrays=True`` (the default).
+
+ No empty chunk is ever returned.
+
+ Args:
+ queries (DatapointsQuery | Sequence[DatapointsQuery]): Query, or queries, using id, external_id or instance_id for the time series to fetch data for, with individual settings specified. The options 'limit' and 'include_outside_points' are not supported when iterating.
+ chunk_size_datapoints (int): The number of datapoints per time series to yield per iteration. Must evenly divide 100k OR be an integer multiple of 100k. Default: 100_000.
+ chunk_size_time_series (int | None): The max number of time series to yield per iteration (varies as time series get exhausted, but is never empty). Default: None (all given queries are iterated at the same time).
+ return_arrays (bool): Whether to return the datapoints as numpy arrays. Default: True.
+
+ Yields:
+ DatapointsArray | DatapointsArrayList | Datapoints | DatapointsList: If return_arrays=True, a ``DatapointsArray`` object containing the datapoints chunk, or a ``DatapointsArrayList`` if multiple time series were asked for. When False, a ``Datapoints`` object containing the datapoints chunk, or a ``DatapointsList`` if multiple time series were asked for.
+
+ Examples:
+
+ Iterate through the datapoints of a single time series with external_id="foo", in chunks of 25k:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import DatapointsQuery
+ >>> client = CogniteClient()
+ >>> query = DatapointsQuery(external_id="foo", start="2w-ago")
+ >>> for chunk in client.time_series.data(query, chunk_size_datapoints=25_000):
+ ... pass # do something with the datapoints chunk
+
+ Iterate through datapoints from multiple time series, and do not return them as memory-efficient numpy arrays.
+ As one or more time series get exhausted (no more data), they are no longer part of the returned "chunk list".
+            Note that the order of the remaining time series is still preserved.
+
+ If you run with ``chunk_size_time_series=None``, an easy way to check when a time series is exhausted is to
+ use the ``.get`` method, as illustrated below:
+
+ >>> from cognite.client.data_classes.data_modeling import NodeId
+ >>> queries = [
+ ... DatapointsQuery(id=123),
+ ... DatapointsQuery(external_id="foo"),
+ ... DatapointsQuery(instance_id=NodeId("my-space", "my-ts-xid"))
+ ... ]
+            >>> for chunk_lst in client.time_series.data(queries, return_arrays=False):
+ ... if chunk_lst.get(id=123) is None:
+ ... print("Time series with id=123 has no more datapoints!")
+
+ A likely use case for iterating datapoints is to clone data from one project to another, while keeping a low memory
+ footprint and without having to write very custom logic involving count aggregates (which won't work for string data)
+ or do time-domain splitting yourself.
+
+            Here's an example of how to do so efficiently, while including bad and uncertain data (``ignore_bad_datapoints=False``) and
+ copying status codes (``include_status=True``). This is automatically taken care of when the Datapoints(-Array) objects are passed
+ directly to an insert method. The only assumption below is that the time series have already been created in the target project.
+
+ >>> from cognite.client.utils import MIN_TIMESTAMP_MS, MAX_TIMESTAMP_MS
+ >>> target_client = CogniteClient()
+ >>> ts_to_copy = client.time_series.list(data_set_external_ids="my-use-case")
+ >>> queries = [
+ ... DatapointsQuery(
+ ... external_id=ts.external_id,
+ ... include_status=True,
+ ... ignore_bad_datapoints=False,
+ ... start=MIN_TIMESTAMP_MS,
+ ... end=MAX_TIMESTAMP_MS + 1, # end is exclusive
+ ... )
+ ... for ts in ts_to_copy
+ ... ]
+ >>> for dps_chunk in client.time_series.data(
+ ... queries, # may be several thousand time series...
+ ... chunk_size_time_series=20, # control memory usage by specifying how many to iterate at a time
+ ... chunk_size_datapoints=100_000,
+ ... ):
+ ... target_client.time_series.data.insert_multiple(
+ ... [{"external_id": dps.external_id, "datapoints": dps} for dps in dps_chunk]
+ ... )
+ """
+ yield from SyncIterator(
+ self.__async_client.time_series.data(
+ queries=queries,
+ chunk_size_datapoints=chunk_size_datapoints,
+ chunk_size_time_series=chunk_size_time_series,
+ return_arrays=return_arrays,
+ )
+ )
+
+ @overload
+ def retrieve(
+ self,
+ *,
+ id: int | DatapointsQuery,
+ start: int | str | datetime.datetime | None = None,
+ end: int | str | datetime.datetime | None = None,
+ aggregates: Aggregate | str | list[Aggregate | str] | None = None,
+ granularity: str | None = None,
+ timezone: str | datetime.timezone | ZoneInfo | None = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ limit: int | None = None,
+ include_outside_points: bool = False,
+ ignore_unknown_ids: bool = False,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ) -> Datapoints | None: ...
+
+ @overload
+ def retrieve(
+ self,
+ *,
+ id: Sequence[int | DatapointsQuery],
+ start: int | str | datetime.datetime | None = None,
+ end: int | str | datetime.datetime | None = None,
+ aggregates: Aggregate | str | list[Aggregate | str] | None = None,
+ granularity: str | None = None,
+ timezone: str | datetime.timezone | ZoneInfo | None = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ limit: int | None = None,
+ include_outside_points: bool = False,
+ ignore_unknown_ids: bool = False,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ) -> DatapointsList: ...
+
+ @overload
+ def retrieve(
+ self,
+ *,
+ external_id: str | DatapointsQuery,
+ start: int | str | datetime.datetime | None = None,
+ end: int | str | datetime.datetime | None = None,
+ aggregates: Aggregate | str | list[Aggregate | str] | None = None,
+ granularity: str | None = None,
+ timezone: str | datetime.timezone | ZoneInfo | None = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ limit: int | None = None,
+ include_outside_points: bool = False,
+ ignore_unknown_ids: bool = False,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ) -> Datapoints | None: ...
+
+ @overload
+ def retrieve(
+ self,
+ *,
+ external_id: SequenceNotStr[str | DatapointsQuery],
+ start: int | str | datetime.datetime | None = None,
+ end: int | str | datetime.datetime | None = None,
+ aggregates: Aggregate | str | list[Aggregate | str] | None = None,
+ granularity: str | None = None,
+ timezone: str | datetime.timezone | ZoneInfo | None = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ limit: int | None = None,
+ include_outside_points: bool = False,
+ ignore_unknown_ids: bool = False,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ) -> DatapointsList: ...
+
+ @overload
+ def retrieve(
+ self,
+ *,
+ instance_id: NodeId | DatapointsQuery,
+ start: int | str | datetime.datetime | None = None,
+ end: int | str | datetime.datetime | None = None,
+ aggregates: Aggregate | str | list[Aggregate | str] | None = None,
+ granularity: str | None = None,
+ timezone: str | datetime.timezone | ZoneInfo | None = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ limit: int | None = None,
+ include_outside_points: bool = False,
+ ignore_unknown_ids: bool = False,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ) -> Datapoints | None: ...
+
+ @overload
+ def retrieve(
+ self,
+ *,
+ instance_id: Sequence[NodeId | DatapointsQuery],
+ start: int | str | datetime.datetime | None = None,
+ end: int | str | datetime.datetime | None = None,
+ aggregates: Aggregate | str | list[Aggregate | str] | None = None,
+ granularity: str | None = None,
+ timezone: str | datetime.timezone | ZoneInfo | None = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ limit: int | None = None,
+ include_outside_points: bool = False,
+ ignore_unknown_ids: bool = False,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ) -> DatapointsList: ...
+
+ @overload
+ def retrieve(
+ self,
+ *,
+ id: None | int | DatapointsQuery | Sequence[int | DatapointsQuery],
+ external_id: None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery],
+ start: int | str | datetime.datetime | None = None,
+ end: int | str | datetime.datetime | None = None,
+ aggregates: Aggregate | str | list[Aggregate | str] | None = None,
+ granularity: str | None = None,
+ timezone: str | datetime.timezone | ZoneInfo | None = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ limit: int | None = None,
+ include_outside_points: bool = False,
+ ignore_unknown_ids: bool = False,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ) -> DatapointsList: ...
+
+ @overload
+ def retrieve(
+ self,
+ *,
+ id: None | int | DatapointsQuery | Sequence[int | DatapointsQuery],
+ instance_id: None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery],
+ start: int | str | datetime.datetime | None = None,
+ end: int | str | datetime.datetime | None = None,
+ aggregates: Aggregate | str | list[Aggregate | str] | None = None,
+ granularity: str | None = None,
+ timezone: str | datetime.timezone | ZoneInfo | None = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ limit: int | None = None,
+ include_outside_points: bool = False,
+ ignore_unknown_ids: bool = False,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ) -> DatapointsList: ...
+
+ @overload
+ def retrieve(
+ self,
+ *,
+ external_id: None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery],
+ instance_id: None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery],
+ start: int | str | datetime.datetime | None = None,
+ end: int | str | datetime.datetime | None = None,
+ aggregates: Aggregate | str | list[Aggregate | str] | None = None,
+ granularity: str | None = None,
+ timezone: str | datetime.timezone | ZoneInfo | None = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ limit: int | None = None,
+ include_outside_points: bool = False,
+ ignore_unknown_ids: bool = False,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ) -> DatapointsList: ...
+
+ @overload
+ def retrieve(
+ self,
+ *,
+ id: None | int | DatapointsQuery | Sequence[int | DatapointsQuery],
+ external_id: None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery],
+ instance_id: None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery],
+ start: int | str | datetime.datetime | None = None,
+ end: int | str | datetime.datetime | None = None,
+ aggregates: Aggregate | str | list[Aggregate | str] | None = None,
+ granularity: str | None = None,
+ timezone: str | datetime.timezone | ZoneInfo | None = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ limit: int | None = None,
+ include_outside_points: bool = False,
+ ignore_unknown_ids: bool = False,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ) -> DatapointsList: ...
+
+ def retrieve(
+ self,
+ *,
+ id: None | int | DatapointsQuery | Sequence[int | DatapointsQuery] = None,
+ external_id: None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery] = None,
+ instance_id: None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery] = None,
+ start: int | str | datetime.datetime | None = None,
+ end: int | str | datetime.datetime | None = None,
+ aggregates: Aggregate | str | list[Aggregate | str] | None = None,
+ granularity: str | None = None,
+ timezone: str | datetime.timezone | ZoneInfo | None = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ limit: int | None = None,
+ include_outside_points: bool = False,
+ ignore_unknown_ids: bool = False,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ) -> Datapoints | DatapointsList | None:
+ """
+ `Retrieve datapoints for one or more time series. `_
+
+ **Performance guide**:
+ In order to retrieve millions of datapoints as efficiently as possible, here are a few guidelines:
+
+            1. Make *one* call to retrieve and fetch all time series in one go, rather than making multiple calls (if your memory allows it). The SDK will optimize the retrieval strategy for you!
+ 2. For best speed, and significantly lower memory usage, consider using ``retrieve_arrays(...)`` which uses ``numpy.ndarrays`` for data storage.
+ 3. Unlimited queries (``limit=None``) are most performant as they are always fetched in parallel, for any number of requested time series, even one.
+            4. Limited queries (e.g. ``limit=500_000``) are much less performant, at least for large limits, as each individual time series is fetched serially (we can't predict where on the timeline the datapoints are). Thus parallelisation is only used when asking for multiple "limited" time series.
+ 5. Try to avoid specifying `start` and `end` to be very far from the actual data: If you have data from 2000 to 2015, don't use start=0 (1970).
+ 6. Using ``timezone`` and/or calendar granularities like month/quarter/year in aggregate queries comes at a penalty as they are expensive for the API to compute.
+
+ Warning:
+ When using the AsyncCogniteClient, always ``await`` the result of this method and never run multiple calls concurrently (e.g. using asyncio.gather).
+ You can pass as many queries as you like to a single call, and the SDK will optimize the retrieval strategy for you intelligently.
+
+ Tip:
+ To read datapoints efficiently, while keeping a low memory footprint e.g. to copy from one project to another, check out :py:meth:`~DatapointsAPI.__call__`.
+ It allows you to iterate through datapoints in chunks, and also control how many time series to iterate at the same time.
+
+ Time series support status codes like Good, Uncertain and Bad. You can read more in the Cognite Data Fusion developer documentation on
+ `status codes. `_
+
+ Args:
+ id (None | int | DatapointsQuery | Sequence[int | DatapointsQuery]): Id, dict (with id) or (mixed) sequence of these. See examples below.
+ external_id (None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery]): External id, dict (with external id) or (mixed) sequence of these. See examples below.
+ instance_id (None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery]): Instance id or sequence of instance ids.
+ start (int | str | datetime.datetime | None): Inclusive start. Default: 1970-01-01 UTC.
+ end (int | str | datetime.datetime | None): Exclusive end. Default: "now"
+ aggregates (Aggregate | str | list[Aggregate | str] | None): Single aggregate or list of aggregates to retrieve. Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned)
+ granularity (str | None): The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None.
+ timezone (str | datetime.timezone | ZoneInfo | None): For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, day or month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute.
+ target_unit (str | None): The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system.
+ target_unit_system (str | None): The unit system of the datapoints returned. Cannot be used with target_unit.
+ limit (int | None): Maximum number of datapoints to return for each time series. Default: None (no limit)
+ include_outside_points (bool): Whether to include outside points. Not allowed when fetching aggregates. Default: False
+ ignore_unknown_ids (bool): Whether to ignore missing time series rather than raising an exception. Default: False
+ include_status (bool): Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``.
+ ignore_bad_datapoints (bool): Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True.
+ treat_uncertain_as_bad (bool): Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True.
+
+ Returns:
+            Datapoints | DatapointsList | None: A ``Datapoints`` object containing the requested data, or a ``DatapointsList`` if multiple time series were asked for (the ordering is ids first, then external_ids). If `ignore_unknown_ids` is `True` and a single requested time series is not found, the function will return `None`.
+
+ Examples:
+
+ You can specify the identifiers of the datapoints you wish to retrieve in a number of ways. In this example
+ we are using the time-ago format, ``"2w-ago"`` to get raw data for the time series with id=42 from 2 weeks ago up until now.
+ You can also use the time-ahead format, like ``"3d-ahead"``, to specify a relative time in the future.
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> dps = client.time_series.data.retrieve(id=42, start="2w-ago")
+ >>> # You can also use instance_id:
+ >>> from cognite.client.data_classes.data_modeling import NodeId
+ >>> dps = client.time_series.data.retrieve(instance_id=NodeId("ts-space", "foo"))
+
+ Although raw datapoints are returned by default, you can also get aggregated values, such as `max` or `average`. You may also fetch more than one time series simultaneously. Here we are
+ getting daily averages and maximum values for all of 2018, for two different time series, where we're specifying `start` and `end` as integers
+ (milliseconds after epoch). In the below example, we fetch them using their external ids:
+
+ >>> dps_lst = client.time_series.data.retrieve(
+ ... external_id=["foo", "bar"],
+ ... start=1514764800000,
+ ... end=1546300800000,
+ ... aggregates=["max", "average"],
+ ... granularity="1d")
+
+ In the two code examples above, we have a `dps` object (an instance of ``Datapoints``), and a `dps_lst` object (an instance of ``DatapointsList``).
+ On `dps`, which in this case contains raw datapoints, you may access the underlying data directly by using the `.value` attribute. This works for
+ both numeric and string (raw) datapoints, but not aggregates - they must be accessed by their respective names, because you're allowed to fetch
+ all available aggregates simultaneously, and they are stored on the same object:
+
+ >>> raw_data = dps.value
+ >>> first_dps = dps_lst[0] # optionally: `dps_lst.get(external_id="foo")`
+ >>> avg_data = first_dps.average
+ >>> max_data = first_dps.max
+
+            You may also slice a ``Datapoints`` object (you get ``Datapoints`` back), or ask for "a row of data" at a single index in the same way you would with a
+ built-in `list` (you get a `Datapoint` object back, note the singular name). You'll also get `Datapoint` objects when iterating through a ``Datapoints``
+ object, but this should generally be avoided (consider this a performance warning):
+
+ >>> dps_slice = dps[-10:] # Last ten values
+ >>> dp = dps[3] # The third value
+ >>> for dp in dps_slice:
+ ... pass # do something!
+
+            All parameters can be individually set if you pass ``DatapointsQuery`` objects (even ``ignore_unknown_ids``, contrary to the API).
+            If you also pass top-level parameters, the individual parameters take precedence where both are given (so think of the top-level ones as defaults).
+ You are free to mix any kind of ids and external ids: Single identifiers, single DatapointsQuery objects and (mixed) lists of these.
+
+ Let's say you want different aggregates and end-times for a few time series (when only fetching a single aggregate, you may pass
+ the string directly for convenience):
+
+ >>> from cognite.client.data_classes import DatapointsQuery
+ >>> dps_lst = client.time_series.data.retrieve(
+ ... id=[
+ ... DatapointsQuery(id=42, end="1d-ago", aggregates="average"),
+ ... DatapointsQuery(id=69, end="2d-ahead", aggregates=["average"]),
+ ... DatapointsQuery(id=96, end="3d-ago", aggregates=["min", "max", "count"]),
+ ... ],
+ ... external_id=DatapointsQuery(external_id="foo", aggregates="max"),
+ ... start="5d-ago",
+ ... granularity="1h")
+
+ Certain aggregates are very useful when they follow the calendar, for example electricity consumption per day, week, month
+ or year. You may request such calendar-based aggregates in a specific timezone to make them even more useful: daylight savings (DST)
+ will be taken care of automatically and the datapoints will be aligned to the timezone. Note: Calendar granularities and timezone
+ can be used independently. To get monthly local aggregates in Oslo, Norway you can do:
+
+ >>> dps = client.time_series.data.retrieve(
+ ... id=123,
+ ... aggregates="sum",
+ ... granularity="1month",
+ ... timezone="Europe/Oslo")
+
+ When requesting multiple time series, an easy way to get the datapoints of a specific one is to use the `.get` method
+ on the returned ``DatapointsList`` object, then specify if you want `id` or `external_id`. Note: If you fetch a time series
+ by using `id`, you can still access it with its `external_id` (and the opposite way around), if you know it:
+
+ >>> from datetime import datetime, timezone
+ >>> utc = timezone.utc
+ >>> dps_lst = client.time_series.data.retrieve(
+ ... start=datetime(1907, 10, 14, tzinfo=utc),
+ ... end=datetime(1907, 11, 6, tzinfo=utc),
+ ... id=[42, 43, 44, ..., 499, 500],
+ ... )
+ >>> ts_350 = dps_lst.get(id=350) # ``Datapoints`` object
+
+ ...but what happens if you request some duplicate ids or external_ids? In this example we will show how to get data from
+ multiple disconnected periods. Let's say you're tasked to train a machine learning model to recognize a specific failure mode
+ of a system, and you want the training data to only be from certain periods (when an alarm was on/high). Assuming these alarms
+ are stored as events in CDF, with both start- and end times, we can use these directly in the query.
+
+            After fetching, the `.get` method will return a list of ``Datapoints`` instead (assuming we have more than one event), in the
+ same order, similar to how slicing works with non-unique indices on Pandas DataFrames:
+
+ >>> periods = client.events.list(type="alarm", subtype="pressure")
+ >>> sensor_xid = "foo-pressure-bar"
+ >>> dps_lst = client.time_series.data.retrieve(
+ ... id=[42, 43, 44],
+ ... external_id=[
+ ... DatapointsQuery(external_id=sensor_xid, start=ev.start_time, end=ev.end_time)
+ ... for ev in periods
+ ... ])
+ >>> ts_44 = dps_lst.get(id=44) # Single ``Datapoints`` object
+ >>> ts_lst = dps_lst.get(external_id=sensor_xid) # List of ``len(periods)`` ``Datapoints`` objects
+
+ The API has an endpoint to :py:meth:`~DatapointsAPI.retrieve_latest`, i.e. "before", but not "after". Luckily, we can emulate that behaviour easily.
+ Let's say we have a very dense time series and do not want to fetch all of the available raw data (or fetch less precise
+ aggregate data), just to get the very first datapoint of every month (from e.g. the year 2000 through 2010):
+
+ >>> import itertools
+ >>> month_starts = [
+ ... datetime(year, month, 1, tzinfo=utc)
+ ... for year, month in itertools.product(range(2000, 2011), range(1, 13))]
+ >>> dps_lst = client.time_series.data.retrieve(
+ ... external_id=[DatapointsQuery(external_id="foo", start=start) for start in month_starts],
+ ... limit=1)
+
+ To get *all* historic and future datapoints for a time series, e.g. to do a backup, you may want to import the two integer
+ constants: ``MIN_TIMESTAMP_MS`` and ``MAX_TIMESTAMP_MS``, to make sure you do not miss any. **Performance warning**: This pattern of
+ fetching datapoints from the entire valid time domain is slower and shouldn't be used for regular "day-to-day" queries:
+
+ >>> from cognite.client.utils import MIN_TIMESTAMP_MS, MAX_TIMESTAMP_MS
+ >>> dps_backup = client.time_series.data.retrieve(
+ ... id=123,
+ ... start=MIN_TIMESTAMP_MS,
+ ... end=MAX_TIMESTAMP_MS + 1) # end is exclusive
+
+ If you have a time series with 'unit_external_id' set, you can use the 'target_unit' parameter to convert the datapoints
+ to the desired unit. In the example below, we are converting temperature readings from a sensor measured and stored in Celsius,
+ to Fahrenheit (we're assuming that the time series has e.g. ``unit_external_id="temperature:deg_c"`` ):
+
+ >>> client.time_series.data.retrieve(
+ ... id=42, start="2w-ago", target_unit="temperature:deg_f")
+
+ Or alternatively, you can use the 'target_unit_system' parameter to convert the datapoints to the desired unit system:
+
+ >>> client.time_series.data.retrieve(
+ ... id=42, start="2w-ago", target_unit_system="Imperial")
+
+ To retrieve status codes for a time series, pass ``include_status=True``. This is only possible for raw datapoint queries.
+ You would typically also pass ``ignore_bad_datapoints=False`` to not hide all the datapoints that are marked as uncertain or bad,
+ which is the API's default behaviour. You may also use ``treat_uncertain_as_bad`` to control how uncertain values are interpreted.
+
+ >>> dps = client.time_series.data.retrieve(
+ ... id=42, include_status=True, ignore_bad_datapoints=False)
+ >>> dps.status_code # list of integer codes, e.g.: [0, 1073741824, 2147483648]
+ >>> dps.status_symbol # list of symbolic representations, e.g. [Good, Uncertain, Bad]
+
+ There are six aggregates directly related to status codes, three for count: 'count_good', 'count_uncertain' and 'count_bad', and
+ three for duration: 'duration_good', 'duration_uncertain' and 'duration_bad'. These may be fetched as any other aggregate.
+ It is important to note that status codes may influence how other aggregates are computed: Aggregates will in general omit the
+ time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good
+            datapoint will be considered constant. Put simply, what 'average' may return depends on your setting for 'ignore_bad_datapoints'
+ and 'treat_uncertain_as_bad' (in the presence of uncertain/bad datapoints).
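+
+            For example, to fetch the daily number of good datapoints together with the time spent in a bad state, using
+            two of the aggregate names listed above:
+
+            >>> dps = client.time_series.data.retrieve(
+            ...     id=42,
+            ...     aggregates=["count_good", "duration_bad"],
+            ...     granularity="1d")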
+ """
+ return run_sync(
+ self.__async_client.time_series.data.retrieve(
+ id=id,
+ external_id=external_id,
+ instance_id=instance_id,
+ start=start,
+ end=end,
+ aggregates=aggregates,
+ granularity=granularity,
+ timezone=timezone,
+ target_unit=target_unit,
+ target_unit_system=target_unit_system,
+ limit=limit,
+ include_outside_points=include_outside_points,
+ ignore_unknown_ids=ignore_unknown_ids,
+ include_status=include_status,
+ ignore_bad_datapoints=ignore_bad_datapoints,
+ treat_uncertain_as_bad=treat_uncertain_as_bad,
+ )
+ )
+
+ @overload
+ def retrieve_arrays(
+ self,
+ *,
+ id: int | DatapointsQuery,
+ start: int | str | datetime.datetime | None = None,
+ end: int | str | datetime.datetime | None = None,
+ aggregates: Aggregate | str | list[Aggregate | str] | None = None,
+ granularity: str | None = None,
+ timezone: str | datetime.timezone | ZoneInfo | None = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ limit: int | None = None,
+ include_outside_points: bool = False,
+ ignore_unknown_ids: bool = False,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ) -> DatapointsArray | None: ...
+
+ @overload
+ def retrieve_arrays(
+ self,
+ *,
+ id: Sequence[int | DatapointsQuery],
+ start: int | str | datetime.datetime | None = None,
+ end: int | str | datetime.datetime | None = None,
+ aggregates: Aggregate | str | list[Aggregate | str] | None = None,
+ granularity: str | None = None,
+ timezone: str | datetime.timezone | ZoneInfo | None = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ limit: int | None = None,
+ include_outside_points: bool = False,
+ ignore_unknown_ids: bool = False,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ) -> DatapointsArrayList: ...
+
+ @overload
+ def retrieve_arrays(
+ self,
+ *,
+ external_id: str | DatapointsQuery,
+ start: int | str | datetime.datetime | None = None,
+ end: int | str | datetime.datetime | None = None,
+ aggregates: Aggregate | str | list[Aggregate | str] | None = None,
+ granularity: str | None = None,
+ timezone: str | datetime.timezone | ZoneInfo | None = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ limit: int | None = None,
+ include_outside_points: bool = False,
+ ignore_unknown_ids: bool = False,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ) -> DatapointsArray | None: ...
+
+ @overload
+ def retrieve_arrays(
+ self,
+ *,
+ external_id: SequenceNotStr[str | DatapointsQuery],
+ start: int | str | datetime.datetime | None = None,
+ end: int | str | datetime.datetime | None = None,
+ aggregates: Aggregate | str | list[Aggregate | str] | None = None,
+ granularity: str | None = None,
+ timezone: str | datetime.timezone | ZoneInfo | None = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ limit: int | None = None,
+ include_outside_points: bool = False,
+ ignore_unknown_ids: bool = False,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ) -> DatapointsArrayList: ...
+
+ @overload
+ def retrieve_arrays(
+ self,
+ *,
+ instance_id: NodeId | DatapointsQuery,
+ start: int | str | datetime.datetime | None = None,
+ end: int | str | datetime.datetime | None = None,
+ aggregates: Aggregate | str | list[Aggregate | str] | None = None,
+ granularity: str | None = None,
+ timezone: str | datetime.timezone | ZoneInfo | None = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ limit: int | None = None,
+ include_outside_points: bool = False,
+ ignore_unknown_ids: bool = False,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ) -> DatapointsArray | None: ...
+
+ @overload
+ def retrieve_arrays(
+ self,
+ *,
+ instance_id: Sequence[NodeId | DatapointsQuery],
+ start: int | str | datetime.datetime | None = None,
+ end: int | str | datetime.datetime | None = None,
+ aggregates: Aggregate | str | list[Aggregate | str] | None = None,
+ granularity: str | None = None,
+ timezone: str | datetime.timezone | ZoneInfo | None = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ limit: int | None = None,
+ include_outside_points: bool = False,
+ ignore_unknown_ids: bool = False,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ) -> DatapointsArrayList: ...
+
+ def retrieve_arrays(
+ self,
+ *,
+ id: None | int | DatapointsQuery | Sequence[int | DatapointsQuery] = None,
+ external_id: None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery] = None,
+ instance_id: None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery] = None,
+ start: int | str | datetime.datetime | None = None,
+ end: int | str | datetime.datetime | None = None,
+ aggregates: Aggregate | str | list[Aggregate | str] | None = None,
+ granularity: str | None = None,
+ timezone: str | datetime.timezone | ZoneInfo | None = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ limit: int | None = None,
+ include_outside_points: bool = False,
+ ignore_unknown_ids: bool = False,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ) -> DatapointsArray | DatapointsArrayList | None:
+ """
+ `Retrieve datapoints for one or more time series. `_
+
+ Note:
+ This method requires ``numpy`` to be installed.
+
+ Time series support status codes like Good, Uncertain and Bad. You can read more in the Cognite Data Fusion developer documentation on
+ `status codes. `_
+
+ Args:
+ id (None | int | DatapointsQuery | Sequence[int | DatapointsQuery]): Id, dict (with id) or (mixed) sequence of these. See examples below.
+ external_id (None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery]): External id, dict (with external id) or (mixed) sequence of these. See examples below.
+ instance_id (None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery]): Instance id or sequence of instance ids.
+ start (int | str | datetime.datetime | None): Inclusive start. Default: 1970-01-01 UTC.
+ end (int | str | datetime.datetime | None): Exclusive end. Default: "now"
+ aggregates (Aggregate | str | list[Aggregate | str] | None): Single aggregate or list of aggregates to retrieve. Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned)
+ granularity (str | None): The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None.
+ timezone (str | datetime.timezone | ZoneInfo | None): For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, day or month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute.
+ target_unit (str | None): The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system.
+ target_unit_system (str | None): The unit system of the datapoints returned. Cannot be used with target_unit.
+ limit (int | None): Maximum number of datapoints to return for each time series. Default: None (no limit)
+ include_outside_points (bool): Whether to include outside points. Not allowed when fetching aggregates. Default: False
+ ignore_unknown_ids (bool): Whether to ignore missing time series rather than raising an exception. Default: False
+ include_status (bool): Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``.
+ ignore_bad_datapoints (bool): Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True.
+ treat_uncertain_as_bad (bool): Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True.
+
+ Returns:
+            DatapointsArray | DatapointsArrayList | None: A ``DatapointsArray`` object containing the requested data, or a ``DatapointsArrayList`` if multiple time series were asked for (the ordering is ids first, then external_ids). If `ignore_unknown_ids` is `True` and a single requested time series is not found, the function will return `None`.
+
+ Note:
+ For many more usage examples, check out the :py:meth:`~DatapointsAPI.retrieve` method which accepts exactly the same arguments.
+
+            When retrieving raw datapoints with ``ignore_bad_datapoints=False``, bad datapoints with the value NaN cannot be distinguished from those
+ missing a value (due to being stored in a numpy array). To solve this, all missing values have their timestamp recorded in a set you may access:
+            ``dps.null_timestamps``. If you choose to pass a ``DatapointsArray`` to an insert method, this will be inspected automatically to replicate correctly
+ (inserting status codes will soon be supported).
+
+ Examples:
+
+ Get weekly ``min`` and ``max`` aggregates for a time series with id=42 since the year 2000, then compute the range of values:
+
+ >>> from cognite.client import CogniteClient
+ >>> from datetime import datetime, timezone
+ >>> client = CogniteClient()
+ >>> dps = client.time_series.data.retrieve_arrays(
+ ... id=42,
+ ... start=datetime(2020, 1, 1, tzinfo=timezone.utc),
+ ... aggregates=["min", "max"],
+ ... granularity="7d")
+ >>> weekly_range = dps.max - dps.min
+
+            Get up to 2 million raw datapoints for the last 48 hours for a noisy time series with external_id="ts-noisy",
+ then use a small and wide moving average filter to smooth it out:
+
+ >>> import numpy as np
+ >>> dps = client.time_series.data.retrieve_arrays(
+ ... external_id="ts-noisy",
+ ... start="2d-ago",
+ ... limit=2_000_000)
+ >>> smooth = np.convolve(dps.value, np.ones(5) / 5) # doctest: +SKIP
+ >>> smoother = np.convolve(dps.value, np.ones(20) / 20) # doctest: +SKIP
+
+            Get raw datapoints for multiple time series, which may or may not exist, from the last 2 hours, then find the
+ largest gap between two consecutive values for all time series, also taking the previous value into account (outside point).
+
+ >>> id_lst = [42, 43, 44]
+ >>> dps_lst = client.time_series.data.retrieve_arrays(
+ ... id=id_lst,
+ ... start="2h-ago",
+ ... include_outside_points=True,
+ ... ignore_unknown_ids=True)
+ >>> largest_gaps = [np.max(np.diff(dps.timestamp)) for dps in dps_lst]
+
+ Get raw datapoints for a time series with external_id="bar" from the last 10 weeks, then convert to a ``pandas.Series``
+ (you can of course also use the ``to_pandas()`` convenience method if you want a ``pandas.DataFrame``):
+
+ >>> import pandas as pd
+ >>> dps = client.time_series.data.retrieve_arrays(external_id="bar", start="10w-ago")
+ >>> series = pd.Series(dps.value, index=dps.timestamp)
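+
+            As mentioned in the note above, when fetching raw datapoints with ``ignore_bad_datapoints=False``, the timestamps
+            of datapoints that lack a value are recorded in a set you may inspect:
+
+            >>> dps = client.time_series.data.retrieve_arrays(
+            ...     id=42, include_status=True, ignore_bad_datapoints=False)
+            >>> missing_value_timestamps = dps.null_timestamps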
+ """
+ return run_sync(
+ self.__async_client.time_series.data.retrieve_arrays(
+ id=id,
+ external_id=external_id,
+ instance_id=instance_id,
+ start=start,
+ end=end,
+ aggregates=aggregates,
+ granularity=granularity,
+ timezone=timezone,
+ target_unit=target_unit,
+ target_unit_system=target_unit_system,
+ limit=limit,
+ include_outside_points=include_outside_points,
+ ignore_unknown_ids=ignore_unknown_ids,
+ include_status=include_status,
+ ignore_bad_datapoints=ignore_bad_datapoints,
+ treat_uncertain_as_bad=treat_uncertain_as_bad,
+ )
+ )
+
+ def retrieve_dataframe(
+ self,
+ *,
+ id: None | int | DatapointsQuery | Sequence[int | DatapointsQuery] = None,
+ external_id: None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery] = None,
+ instance_id: None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery] = None,
+ start: int | str | datetime.datetime | None = None,
+ end: int | str | datetime.datetime | None = None,
+ aggregates: Aggregate | str | list[Aggregate | str] | None = None,
+ granularity: str | None = None,
+ timezone: str | datetime.timezone | ZoneInfo | None = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ limit: int | None = None,
+ include_outside_points: bool = False,
+ ignore_unknown_ids: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ uniform_index: bool = False,
+ include_status: bool = False,
+ include_unit: bool = True,
+ include_aggregate_name: bool = True,
+ include_granularity_name: bool = False,
+ ) -> pd.DataFrame:
+ """
+ Get datapoints directly in a pandas dataframe.
+
+ Time series support status codes like Good, Uncertain and Bad. You can read more in the Cognite Data Fusion developer documentation on
+ `status codes. `_
+
+ Note:
+ For many more usage examples, check out the :py:meth:`~DatapointsAPI.retrieve` method which accepts exactly the same arguments.
+
+ Args:
+ id (None | int | DatapointsQuery | Sequence[int | DatapointsQuery]): Id, DatapointsQuery or (mixed) sequence of these. See examples.
+ external_id (None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery]): External id, DatapointsQuery or (mixed) sequence of these. See examples.
+ instance_id (None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery]): Instance id, DatapointsQuery or (mixed) sequence of these. See examples.
+ start (int | str | datetime.datetime | None): Inclusive start. Default: 1970-01-01 UTC.
+ end (int | str | datetime.datetime | None): Exclusive end. Default: "now"
+ aggregates (Aggregate | str | list[Aggregate | str] | None): Single aggregate or list of aggregates to retrieve. Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned)
+ granularity (str | None): The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None.
+            timezone (str | datetime.timezone | ZoneInfo | None): For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, day or month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute.
+ target_unit (str | None): The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system.
+ target_unit_system (str | None): The unit system of the datapoints returned. Cannot be used with target_unit.
+ limit (int | None): Maximum number of datapoints to return for each time series. Default: None (no limit)
+ include_outside_points (bool): Whether to include outside points. Not allowed when fetching aggregates. Default: False
+ ignore_unknown_ids (bool): Whether to ignore missing time series rather than raising an exception. Default: False
+ ignore_bad_datapoints (bool): Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True.
+ treat_uncertain_as_bad (bool): Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True.
+ uniform_index (bool): If only querying aggregates AND a single granularity is used (that's NOT a calendar granularity like month/quarter/year) AND no limit is used AND no timezone is used, specifying `uniform_index=True` will return a dataframe with an equidistant datetime index from the earliest `start` to the latest `end` (missing values will be NaNs). If these requirements are not met, a ValueError is raised. Default: False
+ include_status (bool): Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``. Also adds the status info as a separate level in the columns (MultiIndex).
+ include_unit (bool): Include the unit_external_id in the dataframe columns, if present (separate MultiIndex level)
+ include_aggregate_name (bool): Include aggregate in the dataframe columns, if present (separate MultiIndex level)
+ include_granularity_name (bool): Include granularity in the dataframe columns, if present (separate MultiIndex level)
+
+ Returns:
+ pd.DataFrame: A pandas DataFrame containing the requested time series. The ordering of columns is ids first, then external_ids, and lastly instance_ids. For time series with multiple aggregates, they will be sorted in alphabetical order ("average" before "max").
+
+ Tip:
+ Pandas DataFrames have one shared index, so when you fetch datapoints from multiple time series, the final index will be
+ the union of all the timestamps. Thus, unless all time series have the exact same timestamps, the various columns will contain
+ NaNs to fill the "missing" values. For lower memory usage on unaligned data, use the :py:meth:`~DatapointsAPI.retrieve_arrays` method.
+
+ Warning:
+ If you have duplicated time series in your query, the dataframe columns will also contain duplicates.
+
+ When retrieving raw datapoints with ``ignore_bad_datapoints=False``, bad datapoints with the value NaN cannot be distinguished from those
+ missing a value (due to being stored in a numpy array); all will become NaNs in the dataframe.
+
+ Examples:
+
+ Get a pandas dataframe using a single id, with no more than 100 datapoints:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> df = client.time_series.data.retrieve_dataframe(
+ ... id=12345,
+ ... start="2w-ago",
+ ... end="now",
+ ... limit=100)
+
+ Get the pandas dataframe with a uniform index (fixed spacing between points) of 1 day, for two time series with
+ individually specified aggregates, from 1990 through 2020:
+
+ >>> from datetime import datetime, timezone
+ >>> from cognite.client.data_classes import DatapointsQuery
+ >>> df = client.time_series.data.retrieve_dataframe(
+ ... external_id=[
+ ... DatapointsQuery(external_id="foo", aggregates="discrete_variance"),
+ ... DatapointsQuery(external_id="bar", aggregates=["total_variation", "continuous_variance"]),
+ ... ],
+ ... granularity="1d",
+ ... start=datetime(1990, 1, 1, tzinfo=timezone.utc),
+ ... end=datetime(2020, 12, 31, tzinfo=timezone.utc),
+ ... uniform_index=True)
+
+ Get a pandas dataframe containing the 'average' aggregate for two time series using a monthly granularity,
+ starting Jan 1, 1970 all the way up to present, without having the aggregate name in the columns:
+
+ >>> df = client.time_series.data.retrieve_dataframe(
+ ... external_id=["foo", "bar"],
+ ... aggregates="average",
+ ... granularity="1mo",
+ ... include_aggregate_name=False)
+
+ You may also use ``pandas.Timestamp`` to define start and end. Here we fetch using instance_id:
+
+ >>> import pandas as pd
+ >>> from cognite.client.data_classes.data_modeling import NodeId
+ >>> df = client.time_series.data.retrieve_dataframe(
+ ... instance_id=NodeId("my-space", "my-ts-xid"),
+ ... start=pd.Timestamp("2023-01-01"),
+ ... end=pd.Timestamp("2023-02-01"))
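+
+ Aggregates can also be aligned to a timezone. As a minimal sketch (assuming a time series with external id "foo" exists), fetch daily averages aligned to Oslo time:
+
+ >>> df = client.time_series.data.retrieve_dataframe(
+ ... external_id="foo",
+ ... aggregates="average",
+ ... granularity="1d",
+ ... timezone="Europe/Oslo")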
+ """
+ return run_sync(
+ self.__async_client.time_series.data.retrieve_dataframe(
+ id=id,
+ external_id=external_id,
+ instance_id=instance_id,
+ start=start,
+ end=end,
+ aggregates=aggregates,
+ granularity=granularity,
+ timezone=timezone,
+ target_unit=target_unit,
+ target_unit_system=target_unit_system,
+ limit=limit,
+ include_outside_points=include_outside_points,
+ ignore_unknown_ids=ignore_unknown_ids,
+ ignore_bad_datapoints=ignore_bad_datapoints,
+ treat_uncertain_as_bad=treat_uncertain_as_bad,
+ uniform_index=uniform_index,
+ include_status=include_status,
+ include_unit=include_unit,
+ include_aggregate_name=include_aggregate_name,
+ include_granularity_name=include_granularity_name,
+ )
+ )
+
+ @overload
+ def retrieve_latest(
+ self,
+ id: int | LatestDatapointQuery,
+ *,
+ before: None | int | str | datetime.datetime = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ignore_unknown_ids: bool = False,
+ ) -> Datapoints | None: ...
+
+ @overload
+ def retrieve_latest(
+ self,
+ id: Sequence[int | LatestDatapointQuery],
+ *,
+ before: None | int | str | datetime.datetime = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ignore_unknown_ids: bool = False,
+ ) -> DatapointsList: ...
+
+ @overload
+ def retrieve_latest(
+ self,
+ *,
+ id: int | LatestDatapointQuery,
+ before: None | int | str | datetime.datetime = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ignore_unknown_ids: bool = False,
+ ) -> Datapoints | None: ...
+
+ @overload
+ def retrieve_latest(
+ self,
+ *,
+ id: Sequence[int | LatestDatapointQuery],
+ before: None | int | str | datetime.datetime = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ignore_unknown_ids: bool = False,
+ ) -> DatapointsList: ...
+
+ @overload
+ def retrieve_latest(
+ self,
+ *,
+ external_id: str | LatestDatapointQuery,
+ before: None | int | str | datetime.datetime = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ignore_unknown_ids: bool = False,
+ ) -> Datapoints | None: ...
+
+ @overload
+ def retrieve_latest(
+ self,
+ *,
+ external_id: SequenceNotStr[str | LatestDatapointQuery],
+ before: None | int | str | datetime.datetime = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ignore_unknown_ids: bool = False,
+ ) -> DatapointsList: ...
+
+ @overload
+ def retrieve_latest(
+ self,
+ *,
+ instance_id: NodeId | LatestDatapointQuery,
+ before: None | int | str | datetime.datetime = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ignore_unknown_ids: bool = False,
+ ) -> Datapoints | None: ...
+
+ @overload
+ def retrieve_latest(
+ self,
+ *,
+ instance_id: Sequence[NodeId | LatestDatapointQuery],
+ external_id: None = None,
+ before: None | int | str | datetime.datetime = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ignore_unknown_ids: bool = False,
+ ) -> DatapointsList: ...
+
+ @overload
+ def retrieve_latest(
+ self,
+ *,
+ id: int | LatestDatapointQuery | Sequence[int | LatestDatapointQuery] | None,
+ external_id: str | LatestDatapointQuery | SequenceNotStr[str | LatestDatapointQuery] | None,
+ instance_id: NodeId | LatestDatapointQuery | Sequence[NodeId | LatestDatapointQuery] | None,
+ before: None | int | str | datetime.datetime = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ignore_unknown_ids: bool = False,
+ ) -> DatapointsList: ...
+
+ @overload
+ def retrieve_latest(
+ self,
+ *,
+ id: int | LatestDatapointQuery | Sequence[int | LatestDatapointQuery] | None,
+ external_id: str | LatestDatapointQuery | SequenceNotStr[str | LatestDatapointQuery] | None,
+ before: None | int | str | datetime.datetime = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ignore_unknown_ids: bool = False,
+ ) -> DatapointsList: ...
+
+ @overload
+ def retrieve_latest(
+ self,
+ *,
+ id: int | LatestDatapointQuery | Sequence[int | LatestDatapointQuery] | None,
+ instance_id: NodeId | LatestDatapointQuery | Sequence[NodeId | LatestDatapointQuery] | None,
+ before: None | int | str | datetime.datetime = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ignore_unknown_ids: bool = False,
+ ) -> DatapointsList: ...
+
+ @overload
+ def retrieve_latest(
+ self,
+ *,
+ external_id: str | LatestDatapointQuery | SequenceNotStr[str | LatestDatapointQuery] | None,
+ instance_id: NodeId | LatestDatapointQuery | Sequence[NodeId | LatestDatapointQuery] | None,
+ before: None | int | str | datetime.datetime = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ignore_unknown_ids: bool = False,
+ ) -> DatapointsList: ...
+
+ def retrieve_latest(
+ self,
+ id: int | LatestDatapointQuery | Sequence[int | LatestDatapointQuery] | None = None,
+ external_id: str | LatestDatapointQuery | SequenceNotStr[str | LatestDatapointQuery] | None = None,
+ instance_id: NodeId | LatestDatapointQuery | Sequence[NodeId | LatestDatapointQuery] | None = None,
+ before: None | int | str | datetime.datetime = None,
+ target_unit: str | None = None,
+ target_unit_system: str | None = None,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ignore_unknown_ids: bool = False,
+ ) -> Datapoints | DatapointsList | None:
+ """
+ `Get the latest datapoint for one or more time series `_
+
+ Time series support status codes like Good, Uncertain and Bad. You can read more in the Cognite Data Fusion developer documentation on
+ `status codes. `_
+
+ Args:
+ id (int | LatestDatapointQuery | Sequence[int | LatestDatapointQuery] | None): Id or list of ids.
+ external_id (str | LatestDatapointQuery | SequenceNotStr[str | LatestDatapointQuery] | None): External id or list of external ids.
+ instance_id (NodeId | LatestDatapointQuery | Sequence[NodeId | LatestDatapointQuery] | None): Instance id or list of instance ids.
+ before (None | int | str | datetime.datetime): Get latest datapoint before this time. Not used when passing 'LatestDatapointQuery'.
+ target_unit (str | None): The unit_external_id of the datapoint returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system.
+ target_unit_system (str | None): The unit system of the datapoint returned. Cannot be used with target_unit.
+ include_status (bool): Also return the status code, an integer, for each datapoint in the response.
+ ignore_bad_datapoints (bool): Prevent datapoints with a bad status code from being returned. Default: True.
+ treat_uncertain_as_bad (bool): Treat uncertain status codes as bad. If false, treat uncertain as good. Default: True.
+ ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+
+ Returns:
+ Datapoints | DatapointsList | None: A Datapoints object containing the requested data, or a DatapointsList if multiple were requested. If `ignore_unknown_ids` is `True` and a single time series is requested but not found, the function will return `None`.
+
+ Examples:
+
+ Getting the latest datapoint in a time series. This method returns a Datapoints object, so the datapoint
+ (if it exists) will be the first element:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.time_series.data.retrieve_latest(id=1)[0]
+
+ You can also use external_id or instance_id; single identifier or list of identifiers:
+
+ >>> from cognite.client.data_classes.data_modeling import NodeId
+ >>> res = client.time_series.data.retrieve_latest(
+ ... external_id=["foo", "bar"],
+ ... instance_id=NodeId("my-space", "my-ts-xid"))
+
+ You can also get the first datapoint before a specific time:
+
+ >>> res = client.time_series.data.retrieve_latest(id=1, before="2d-ago")[0]
+
+ You can also get the first datapoint before a specific time in the future, e.g. for forecast data:
+
+ >>> res = client.time_series.data.retrieve_latest(id=1, before="2d-ahead")[0]
+
+ You can also retrieve the datapoint in a different unit or unit system:
+
+ >>> res = client.time_series.data.retrieve_latest(id=1, target_unit="temperature:deg_f")[0]
+ >>> res = client.time_series.data.retrieve_latest(id=1, target_unit_system="Imperial")[0]
+
+ You may also pass an instance of LatestDatapointQuery:
+
+ >>> from cognite.client.data_classes import LatestDatapointQuery
+ >>> res = client.time_series.data.retrieve_latest(id=LatestDatapointQuery(id=1, before=60_000))[0]
+
+ If you need the latest datapoint for multiple time series, simply give a list of ids. Note that we are
+ using external ids here, but either will work:
+
+ >>> res = client.time_series.data.retrieve_latest(external_id=["abc", "def"])
+ >>> latest_abc = res[0][0]
+ >>> latest_def = res[1][0]
+
+ If, for example, you need to specify a different value of 'before' for each time series, you may pass several
+ LatestDatapointQuery objects. These will override any parameter passed directly to the function and also allow
+ for individual customisation of 'target_unit', 'target_unit_system', 'include_status', 'ignore_bad_datapoints'
+ and 'treat_uncertain_as_bad'.
+
+ >>> from datetime import datetime, timezone
+ >>> id_queries = [
+ ... 123,
+ ... LatestDatapointQuery(id=456, before="1w-ago"),
+ ... LatestDatapointQuery(id=789, before=datetime(2018,1,1, tzinfo=timezone.utc)),
+ ... LatestDatapointQuery(id=987, target_unit="temperature:deg_f")]
+ >>> ext_id_queries = [
+ ... "foo",
+ ... LatestDatapointQuery(external_id="abc", before="3h-ago", target_unit_system="Imperial"),
+ ... LatestDatapointQuery(external_id="def", include_status=True),
+ ... LatestDatapointQuery(external_id="ghi", treat_uncertain_as_bad=False),
+ ... LatestDatapointQuery(external_id="jkl", include_status=True, ignore_bad_datapoints=False)]
+ >>> res = client.time_series.data.retrieve_latest(
+ ... id=id_queries, external_id=ext_id_queries)
+ """
+ return run_sync(
+ self.__async_client.time_series.data.retrieve_latest(
+ id=id,
+ external_id=external_id,
+ instance_id=instance_id,
+ before=before,
+ target_unit=target_unit,
+ target_unit_system=target_unit_system,
+ include_status=include_status,
+ ignore_bad_datapoints=ignore_bad_datapoints,
+ treat_uncertain_as_bad=treat_uncertain_as_bad,
+ ignore_unknown_ids=ignore_unknown_ids,
+ )
+ )
+
+ def insert(
+ self,
+ datapoints: Datapoints
+ | DatapointsArray
+ | Sequence[dict[str, int | float | str | datetime.datetime]]
+ | Sequence[
+ tuple[int | float | datetime.datetime, int | float | str]
+ | tuple[int | float | datetime.datetime, int | float | str, int]
+ ],
+ id: int | None = None,
+ external_id: str | None = None,
+ instance_id: NodeId | None = None,
+ ) -> None:
+ """
+ Insert datapoints into a time series
+
+ Timestamps can be represented as milliseconds since epoch or datetime objects. Note that naive datetimes
+ are interpreted to be in the local timezone (not UTC), adhering to Python conventions for datetime handling.
+
+ Time series support status codes like Good, Uncertain and Bad. You can read more in the Cognite Data Fusion developer documentation on
+ `status codes. `_
+
+ Args:
+ datapoints (Datapoints | DatapointsArray | Sequence[dict[str, int | float | str | datetime.datetime]] | Sequence[tuple[int | float | datetime.datetime, int | float | str] | tuple[int | float | datetime.datetime, int | float | str, int]]): The datapoints you wish to insert. Can either be a list of tuples, a list of dictionaries, a Datapoints object or a DatapointsArray object. See examples below.
+ id (int | None): Id of time series to insert datapoints into.
+ external_id (str | None): External id of time series to insert datapoint into.
+ instance_id (NodeId | None): Instance ID of time series to insert datapoints into.
+
+ Note:
+ All datapoints inserted without a status code (or symbol) are assumed to be good (code 0). To mark a value with a status, pass
+ either the status code (int) or status symbol (str). Only one of code and symbol is required. If both are given,
+ they must match or an API error will be raised.
+
+ Datapoints marked bad can take on any of the following values: None (missing), NaN, and +/- Infinity. They are also not
+ restricted to the normal numeric range [-1e100, 1e100] (i.e. they can be any valid float64).
+
+ Examples:
+
+ Your datapoints can be a list of tuples where the first element is the timestamp and the second element is the value.
+ The third element is optional and may contain the status code for the datapoint. To pass by symbol, a dictionary must be used.
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import StatusCode
+ >>> from datetime import datetime, timezone
+ >>> client = CogniteClient()
+ >>> datapoints = [
+ ... (datetime(2018,1,1, tzinfo=timezone.utc), 1000),
+ ... (datetime(2018,1,2, tzinfo=timezone.utc), 2000, StatusCode.Good),
+ ... (datetime(2018,1,3, tzinfo=timezone.utc), 3000, StatusCode.Uncertain),
+ ... (datetime(2018,1,4, tzinfo=timezone.utc), None, StatusCode.Bad),
+ ... ]
+ >>> client.time_series.data.insert(datapoints, id=1)
+
+ The timestamp can be given by datetime as above, or in milliseconds since epoch. Status codes can also be
+ passed as normal integers; this is necessary if a subcategory or modifier flag is needed, e.g. 3145728: 'GoodClamped':
+
+ >>> from cognite.client.data_classes.data_modeling import NodeId
+ >>> datapoints = [
+ ... (150000000000, 1000),
+ ... (160000000000, 2000, 3145728),
+ ... (170000000000, 2000, 2147483648), # Same as StatusCode.Bad
+ ... ]
+ >>> client.time_series.data.insert(datapoints, instance_id=NodeId("my-space", "my-ts-xid"))
+
+ Or they can be a list of dictionaries:
+
+ >>> import math
+ >>> datapoints = [
+ ... {"timestamp": 150000000000, "value": 1000},
+ ... {"timestamp": 160000000000, "value": 2000},
+ ... {"timestamp": 170000000000, "value": 3000, "status": {"code": 0}},
+ ... {"timestamp": 180000000000, "value": 4000, "status": {"symbol": "Uncertain"}},
+ ... {"timestamp": 190000000000, "value": math.nan, "status": {"code": StatusCode.Bad, "symbol": "Bad"}},
+ ... ]
+ >>> client.time_series.data.insert(datapoints, external_id="abcd")
+
+ Or they can be a Datapoints or DatapointsArray object (with raw datapoints only). Note that the id or external_id
+ set on these objects is not inspected/used (as it belongs to the "from-time-series", and not the "to-time-series"),
+ and so you must explicitly pass the identifier of the time series you want to insert into, which in this example is
+ `external_id="foo"`.
+
+ If the Datapoints or DatapointsArray are fetched with status codes, these will be automatically used in the insert:
+
+ >>> data = client.time_series.data.retrieve(
+ ... external_id="abc",
+ ... start="1w-ago",
+ ... end="now",
+ ... include_status=True,
+ ... ignore_bad_datapoints=False,
+ ... )
+ >>> client.time_series.data.insert(data, external_id="foo")
+ """
+ return run_sync(
+ self.__async_client.time_series.data.insert(
+ datapoints=datapoints, id=id, external_id=external_id, instance_id=instance_id
+ )
+ )
+
+ def insert_multiple(
+ self, datapoints: list[dict[str, str | int | list | Datapoints | DatapointsArray | NodeId]]
+ ) -> None:
+ """
+ `Insert datapoints into multiple time series `_
+
+ Timestamps can be represented as milliseconds since epoch or datetime objects. Note that naive datetimes
+ are interpreted to be in the local timezone (not UTC), adhering to Python conventions for datetime handling.
+
+ Time series support status codes like Good, Uncertain and Bad. You can read more in the Cognite Data Fusion developer documentation on
+ `status codes. `_
+
+ Args:
+ datapoints (list[dict[str, str | int | list | Datapoints | DatapointsArray | NodeId]]): The datapoints you wish to insert along with the ids of the time series. See examples below.
+
+ Note:
+ All datapoints inserted without a status code (or symbol) are assumed to be good (code 0). To mark a value with a status, pass
+ either the status code (int) or status symbol (str). Only one of code and symbol is required. If both are given,
+ they must match or an API error will be raised.
+
+ Datapoints marked bad can take on any of the following values: None (missing), NaN, and +/- Infinity. They are also not
+ restricted to the normal numeric range [-1e100, 1e100] (i.e. they can be any valid float64).
+
+ Examples:
+
+ Your datapoints can be a list of dictionaries, each containing datapoints for a (presumably) different time series. These dictionaries
+ must have the key "datapoints" (containing the data) specified as a ``Datapoints`` object, a ``DatapointsArray`` object, or a list of either
+ tuples `(timestamp, value)` or dictionaries, `{"timestamp": ts, "value": value}`.
+
+ When passing tuples, the third element is optional and may contain the status code for the datapoint. To pass by symbol, a dictionary must be used.
+
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.data_modeling import NodeId
+ >>> from cognite.client.data_classes import StatusCode
+ >>> from datetime import datetime, timezone
+ >>> client = CogniteClient()
+ >>> to_insert = [
+ ... {"id": 1, "datapoints": [
+ ... (datetime(2018,1,1, tzinfo=timezone.utc), 1000),
+ ... (datetime(2018,1,2, tzinfo=timezone.utc), 2000, StatusCode.Good)],
+ ... },
+ ... {"external_id": "foo", "datapoints": [
+ ... (datetime(2018,1,3, tzinfo=timezone.utc), 3000),
+ ... (datetime(2018,1,4, tzinfo=timezone.utc), 4000, StatusCode.Uncertain)],
+ ... },
+ ... {"instance_id": NodeId("my-space", "my-ts-xid"), "datapoints": [
+ ... (datetime(2018,1,5, tzinfo=timezone.utc), 5000),
+ ... (datetime(2018,1,6, tzinfo=timezone.utc), None, StatusCode.Bad)],
+ ... }
+ ... ]
+
+ Passing datapoints using the dictionary format with timestamp given in milliseconds since epoch:
+
+ >>> import math
+ >>> to_insert.append(
+ ... {"external_id": "bar", "datapoints": [
+ ... {"timestamp": 170000000, "value": 7000},
+ ... {"timestamp": 180000000, "value": 8000, "status": {"symbol": "Uncertain"}},
+ ... {"timestamp": 190000000, "value": None, "status": {"code": StatusCode.Bad}},
+ ... {"timestamp": 200000000, "value": math.inf, "status": {"code": StatusCode.Bad, "symbol": "Bad"}},
+ ... ]})
+
+ If the Datapoints or DatapointsArray are fetched with status codes, these will be automatically used in the insert:
+
+ >>> data_to_clone = client.time_series.data.retrieve(
+ ... external_id="bar", include_status=True, ignore_bad_datapoints=False)
+ >>> to_insert.append({"external_id": "bar-clone", "datapoints": data_to_clone})
+ >>> client.time_series.data.insert_multiple(to_insert)
+ """
+ return run_sync(self.__async_client.time_series.data.insert_multiple(datapoints=datapoints))
+
+ def delete_range(
+ self,
+ start: int | str | datetime.datetime,
+ end: int | str | datetime.datetime,
+ id: int | None = None,
+ external_id: str | None = None,
+ instance_id: NodeId | None = None,
+ ) -> None:
+ """
+ Delete a range of datapoints from a time series.
+
+ Args:
+ start (int | str | datetime.datetime): Inclusive start of delete range
+ end (int | str | datetime.datetime): Exclusive end of delete range
+ id (int | None): Id of time series to delete data from
+ external_id (str | None): External id of time series to delete data from
+ instance_id (NodeId | None): Instance ID of time series to delete data from
+
+ Examples:
+
+ Deleting the last week of data from a time series:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.time_series.data.delete_range(start="1w-ago", end="now", id=1)
+
+ Deleting the data from now until 2 days in the future from a time series containing e.g. forecasted data:
+
+ >>> client.time_series.data.delete_range(start="now", end="2d-ahead", id=1)
+ """
+ return run_sync(
+ self.__async_client.time_series.data.delete_range(
+ start=start, end=end, id=id, external_id=external_id, instance_id=instance_id
+ )
+ )
+
+ def delete_ranges(self, ranges: list[dict[str, Any]]) -> None:
+ """
+ `Delete a range of datapoints from multiple time series. `_
+
+ Args:
+ ranges (list[dict[str, Any]]): The list of datapoint ids along with time range to delete. See examples below.
+
+ Examples:
+
+ Each element in the list ``ranges`` must specify either id or external_id, and a time range:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> ranges = [{"id": 1, "start": "2d-ago", "end": "now"},
+ ... {"external_id": "abc", "start": "2d-ago", "end": "2d-ahead"}]
+ >>> client.time_series.data.delete_ranges(ranges)
+ """
+ return run_sync(self.__async_client.time_series.data.delete_ranges(ranges=ranges))
+
+ def insert_dataframe(self, df: pd.DataFrame, dropna: bool = True) -> None:
+ """
+ Insert a dataframe containing datapoints to one or more time series.
+
+ The index of the dataframe must contain the timestamps (pd.DatetimeIndex). The column identifiers
+ must contain the IDs (``int``), external IDs (``str``) or instance IDs (``NodeId`` or 2-tuple (space, ext. ID))
+ of the already existing time series to which the datapoints from that particular column will be written.
+
+ Note:
+ The column identifiers must be unique.
+
+ Args:
+ df (pd.DataFrame): Pandas DataFrame object containing the time series.
+ dropna (bool): Set to True to ignore NaNs in the given DataFrame, applied per column. Default: True.
+
+ Warning:
+ You cannot insert datapoints with status codes using this method (``insert_dataframe``); you'll need
+ to use the :py:meth:`~DatapointsAPI.insert` method instead (or :py:meth:`~DatapointsAPI.insert_multiple`)!
+
+ Examples:
+ Post a dataframe with white noise to three time series, one using ID, one using external id
+ and one using instance id:
+
+ >>> import numpy as np
+ >>> import pandas as pd
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> from cognite.client.data_classes.data_modeling import NodeId
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> node_id = NodeId("my-space", "my-ts-xid")
+ >>> df = pd.DataFrame(
+ ... {
+ ... 123: np.random.normal(0, 1, 100),
+ ... "foo": np.random.normal(0, 1, 100),
+ ... node_id: np.random.normal(0, 1, 100),
+ ... },
+ ... index=pd.date_range(start="2018-01-01", periods=100, freq="1d")
+ ... )
+ >>> client.time_series.data.insert_dataframe(df)
+ """
+ return run_sync(self.__async_client.time_series.data.insert_dataframe(df=df, dropna=dropna))
diff --git a/cognite/client/_sync_api/datapoints_subscriptions.py b/cognite/client/_sync_api/datapoints_subscriptions.py
new file mode 100644
index 0000000000..b7f33333f3
--- /dev/null
+++ b/cognite/client/_sync_api/datapoints_subscriptions.py
@@ -0,0 +1,314 @@
+"""
+===============================================================================
+78f40e7a27949090a0e6c70356c9f300
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator
+from typing import TYPE_CHECKING, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.datapoints_subscriptions import (
+ DatapointSubscription,
+ DatapointSubscriptionBatch,
+ DatapointSubscriptionList,
+ DataPointSubscriptionUpdate,
+ DataPointSubscriptionWrite,
+ TimeSeriesIDList,
+)
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncDatapointsSubscriptionAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[DatapointSubscription]: ...
+
+ @overload
+ def __call__(self, chunk_size: int) -> Iterator[DatapointSubscriptionList]: ...
+
+ def __call__(
+ self, chunk_size: int | None = None, limit: int | None = None
+ ) -> Iterator[DatapointSubscription | DatapointSubscriptionList]:
+ """
+ Iterate over all datapoint subscriptions.
+
+ Args:
+ chunk_size (int | None): The number of datapoint subscriptions to fetch per request. Defaults to yielding one datapoint subscription at a time.
+ limit (int | None): Maximum number of items to return. Defaults to returning all datapoint subscriptions.
+
+ Yields:
+ DatapointSubscription | DatapointSubscriptionList: Yields datapoint subscriptions one by one if chunk_size is not specified, otherwise yields lists of datapoint subscriptions.
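+
+ Examples:
+
+ A minimal usage sketch; iterate over all datapoint subscriptions one by one:
+
+ >>> from cognite.client import CogniteClient
+ >>> client = CogniteClient()
+ >>> for subscription in client.time_series.subscriptions():
+ ... print(subscription.external_id)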
+ """
+ yield from SyncIterator(self.__async_client.time_series.subscriptions(chunk_size=chunk_size, limit=limit))
+
+ def create(self, subscription: DataPointSubscriptionWrite) -> DatapointSubscription:
+ """
+ `Create a subscription `_
+
+ Create a subscription that can be used to listen for changes in data points for a set of time series.
+
+ Args:
+ subscription (DataPointSubscriptionWrite): Subscription to create.
+
+ Returns:
+ DatapointSubscription: Created subscription
+
+ Examples:
+
+ Create a subscription with explicit time series IDs:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import DataPointSubscriptionWrite
+ >>> client = CogniteClient()
+ >>> sub = DataPointSubscriptionWrite(
+ ... external_id="my_subscription",
+ ... name="My subscription",
+ ... partition_count=1,
+ ... time_series_ids=["myFistTimeSeries", "mySecondTimeSeries"])
+ >>> created = client.time_series.subscriptions.create(sub)
+
+ Create a subscription with explicit time series IDs given as Node IDs
+ either from CogniteTimeSeries or an extension of CogniteTimeSeries:
+
+ >>> from cognite.client.data_classes import DataPointSubscriptionWrite
+ >>> from cognite.client.data_classes.data_modeling import NodeId
+ >>> sub = DataPointSubscriptionWrite(
+ ... external_id="my_subscription",
+ ... name="My subscription with Data Model Ids",
+ ... partition_count=1,
+ ... instance_ids=[NodeId("my_space", "myFistTimeSeries"), NodeId("my_space", "mySecondTimeSeries")])
+ >>> created = client.time_series.subscriptions.create(sub)
+
+ Create a filter defined subscription for all numeric time series that are stepwise:
+
+ >>> from cognite.client.data_classes import DataPointSubscriptionWrite
+ >>> from cognite.client.data_classes import filters as flt
+ >>> from cognite.client.data_classes.datapoints_subscriptions import DatapointSubscriptionProperty
+ >>> is_numeric_stepwise = flt.And(
+ ... flt.Equals(DatapointSubscriptionProperty.is_string, False),
+ ... flt.Equals(DatapointSubscriptionProperty.is_step, True))
+ >>> sub = DataPointSubscriptionWrite(
+ ... external_id="my_subscription",
+ ... name="My subscription for numeric, stepwise time series",
+ ... partition_count=1,
+ ... filter=is_numeric_stepwise)
+ >>> created = client.time_series.subscriptions.create(sub)
+ """
+ return run_sync(self.__async_client.time_series.subscriptions.create(subscription=subscription))
+
+ def delete(self, external_id: str | SequenceNotStr[str], ignore_unknown_ids: bool = False) -> None:
+ """
+ `Delete subscription(s). This operation cannot be undone. `_
+
+ Args:
+ external_id (str | SequenceNotStr[str]): External ID or list of external IDs of subscriptions to delete.
+ ignore_unknown_ids (bool): Whether to ignore IDs and external IDs that are not found rather than throw an exception.
+
+ Examples:
+
+ Delete a subscription by external ID:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.time_series.subscriptions.delete("my_subscription")
+ """
+ return run_sync(
+ self.__async_client.time_series.subscriptions.delete(
+ external_id=external_id, ignore_unknown_ids=ignore_unknown_ids
+ )
+ )
+
+ def retrieve(self, external_id: str) -> DatapointSubscription | None:
+ """
+ `Retrieve one subscription by external ID. `_
+
+ Args:
+ external_id (str): External ID of the subscription to retrieve.
+
+ Returns:
+ DatapointSubscription | None: The requested subscription.
+
+ Examples:
+
+ Retrieve a subscription by external ID:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.time_series.subscriptions.retrieve("my_subscription")
+ """
+ return run_sync(self.__async_client.time_series.subscriptions.retrieve(external_id=external_id))
+
+ def list_member_time_series(self, external_id: str, limit: int | None = DEFAULT_LIMIT_READ) -> TimeSeriesIDList:
+ """
+ `List time series in a subscription `_
+
+ Retrieve a list of time series (IDs) that the subscription is currently retrieving updates from
+
+ Args:
+ external_id (str): External ID of the subscription to retrieve members of.
+ limit (int | None): Maximum number of time series to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+
+ Returns:
+ TimeSeriesIDList: List of time series in the subscription.
+
+ Examples:
+
+ List time series in a subscription:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import DataPointSubscriptionUpdate
+ >>> client = CogniteClient()
+ >>> members = client.time_series.subscriptions.list_member_time_series("my_subscription")
+ >>> timeseries_external_ids = members.as_external_ids()
+ """
+ return run_sync(
+ self.__async_client.time_series.subscriptions.list_member_time_series(external_id=external_id, limit=limit)
+ )
+
+ def update(
+ self,
+ update: DataPointSubscriptionUpdate | DataPointSubscriptionWrite,
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> DatapointSubscription:
+ """
+ `Update a subscription `_
+
+ Update a subscription. Note that fields that are not included in the request are not changed.
+ Furthermore, the subscription partition cannot be changed.
+
+ Args:
+ update (DataPointSubscriptionUpdate | DataPointSubscriptionWrite): The subscription update.
+ mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (DataPointSubscriptionWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing.
+
+ Returns:
+ DatapointSubscription: Updated subscription.
+
+ Examples:
+
+ Change the name of a preexisting subscription:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import DataPointSubscriptionUpdate
+ >>> client = CogniteClient()
+ >>> update = DataPointSubscriptionUpdate("my_subscription").name.set("My New Name")
+ >>> updated = client.time_series.subscriptions.update(update)
+
+
+ Add a time series to a preexisting subscription:
+
+ >>> from cognite.client.data_classes import DataPointSubscriptionUpdate
+ >>> update = DataPointSubscriptionUpdate("my_subscription").time_series_ids.add(["MyNewTimeSeriesExternalId"])
+ >>> updated = client.time_series.subscriptions.update(update)
+ """
+ return run_sync(self.__async_client.time_series.subscriptions.update(update=update, mode=mode))
+
+ def iterate_data(
+ self,
+ external_id: str,
+ start: str | None = None,
+ limit: int = DEFAULT_LIMIT_READ,
+ partition: int = 0,
+ poll_timeout: int = 5,
+ cursor: str | None = None,
+ include_status: bool = False,
+ ignore_bad_datapoints: bool = True,
+ treat_uncertain_as_bad: bool = True,
+ ) -> Iterator[DatapointSubscriptionBatch]:
+ """
+ `Iterate over data from a given subscription. `_
+
+ The data can be both ingested datapoints and time ranges where data has been deleted. This endpoint will also return changes to
+ the subscription itself, that is, when time series are added to or removed from the subscription.
+
+ Warning:
+ This endpoint will store updates from when the subscription was created, but updates
+ older than 7 days may be discarded.
+
+ Args:
+ external_id (str): The external ID of the subscription.
+ start (str | None): When to start the iteration. If set to None, the iteration will start from the beginning. The format is "N[timeunit]-ago", where timeunit is w,d,h,m (week, day, hour, minute). For example, "12h-ago" will start the iteration from 12 hours ago. You can also set it to "now" to jump straight to the end. Defaults to None.
+ limit (int): Approximate number of results to return across all partitions.
+ partition (int): The partition to iterate over. Defaults to 0.
+ poll_timeout (int): How many seconds to wait for new data, until an empty response is sent. Defaults to 5.
+ cursor (str | None): Optional cursor to start iterating from.
+ include_status (bool): Also return the status code, an integer, for each datapoint in the response.
+ ignore_bad_datapoints (bool): Do not return bad datapoints. Default: True.
+ treat_uncertain_as_bad (bool): Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Default: True.
+
+ Yields:
+ DatapointSubscriptionBatch: Changes to the subscription and data in the subscribed time series.
+
+ Examples:
+
+ Iterate over changes to subscription timeseries since the beginning until there is no more data:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> for batch in client.time_series.subscriptions.iterate_data("my_subscription"):
+ ... # Changes to the subscription itself:
+ ... print(f"Added {len(batch.subscription_changes.added)} timeseries")
+ ... print(f"Removed {len(batch.subscription_changes.removed)} timeseries")
+ ... print(f"Changed timeseries data in {len(batch.updates)} updates")
+ ... # Changes to datapoints for time series in the subscription:
+ ... for update in batch.updates:
+ ... update.time_series # The time series the update belongs to
+ ... update.upserts # The upserted datapoints, if any
+ ... update.deletes # Ranges of deleted periods, if any
+ ... if not batch.has_next:
+ ... break
+
+ Iterate continuously over all changes to the subscription newer than 3 days:
+
+ >>> for batch in client.time_series.subscriptions.iterate_data("my_subscription", "3d-ago"):
+ ... pass # do something
+ """
+ yield from SyncIterator(
+ self.__async_client.time_series.subscriptions.iterate_data(
+ external_id=external_id,
+ start=start,
+ limit=limit,
+ partition=partition,
+ poll_timeout=poll_timeout,
+ cursor=cursor,
+ include_status=include_status,
+ ignore_bad_datapoints=ignore_bad_datapoints,
+ treat_uncertain_as_bad=treat_uncertain_as_bad,
+ )
+ )
+
+ def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> DatapointSubscriptionList:
+ """
+ `List data point subscriptions `_
+
+ Args:
+ limit (int | None): Maximum number of subscriptions to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+
+ Returns:
+ DatapointSubscriptionList: List of requested datapoint subscriptions
+
+ Examples:
+
+ List 5 subscriptions:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> subscriptions = client.time_series.subscriptions.list(limit=5)
+ """
+ return run_sync(self.__async_client.time_series.subscriptions.list(limit=limit))
diff --git a/cognite/client/_sync_api/diagrams.py b/cognite/client/_sync_api/diagrams.py
new file mode 100644
index 0000000000..ffba1034c1
--- /dev/null
+++ b/cognite/client/_sync_api/diagrams.py
@@ -0,0 +1,217 @@
+"""
+===============================================================================
+0abf6d100ed5bb2cc4a7c6be6ecff503
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Sequence
+from typing import TYPE_CHECKING, Any, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes._base import CogniteResource
+from cognite.client.data_classes.contextualization import (
+ DetectJobBundle,
+ DiagramConvertResults,
+ DiagramDetectConfig,
+ DiagramDetectResults,
+ FileReference,
+)
+from cognite.client.data_classes.data_modeling import NodeId
+from cognite.client.utils._async_helpers import run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncDiagramsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def detect(
+ self,
+ entities: Sequence[dict | CogniteResource],
+ search_field: str = "name",
+ partial_match: bool = False,
+ min_tokens: int = 2,
+ file_ids: int | Sequence[int] | None = None,
+ file_external_ids: str | SequenceNotStr[str] | None = None,
+ file_instance_ids: NodeId | Sequence[NodeId] | None = None,
+ file_references: list[FileReference] | FileReference | None = None,
+ pattern_mode: bool = False,
+ configuration: DiagramDetectConfig | None = None,
+ *,
+ multiple_jobs: Literal[False] = False,
+ ) -> DiagramDetectResults: ...
+
+ @overload
+ def detect(
+ self,
+ entities: Sequence[dict | CogniteResource],
+ search_field: str = "name",
+ partial_match: bool = False,
+ min_tokens: int = 2,
+ file_ids: int | Sequence[int] | None = None,
+ file_external_ids: str | SequenceNotStr[str] | None = None,
+ file_instance_ids: NodeId | Sequence[NodeId] | None = None,
+ file_references: list[FileReference] | FileReference | None = None,
+ pattern_mode: bool = False,
+ configuration: DiagramDetectConfig | None = None,
+ *,
+ multiple_jobs: Literal[True],
+ ) -> tuple[DetectJobBundle, list[dict[str, Any]]]: ...
+
+ def detect(
+ self,
+ entities: Sequence[dict | CogniteResource],
+ search_field: str = "name",
+ partial_match: bool = False,
+ min_tokens: int = 2,
+ file_ids: int | Sequence[int] | None = None,
+ file_external_ids: str | SequenceNotStr[str] | None = None,
+ file_instance_ids: NodeId | Sequence[NodeId] | None = None,
+ file_references: list[FileReference] | FileReference | None = None,
+ pattern_mode: bool | None = None,
+ configuration: DiagramDetectConfig | None = None,
+ *,
+ multiple_jobs: bool = False,
+ ) -> DiagramDetectResults | tuple[DetectJobBundle, list[dict[str, Any]]]:
+ """
+ `Detect annotations in engineering diagrams `_
+
+ Note:
+ All users on this CDF subscription with assets read-all and files read-all capabilities in the project
+ are able to access the data sent to this endpoint.
+
+ Args:
+ entities (Sequence[dict | CogniteResource]): List of entities to detect
+ search_field (str): If entities is a list of dictionaries, this is the key to the values to detect in the PnId
+ partial_match (bool): Allow for a partial match (e.g. missing prefix).
+ min_tokens (int): Minimal number of tokens a match must be based on
+ file_ids (int | Sequence[int] | None): ID of the files, should already be uploaded in the same tenant.
+ file_external_ids (str | SequenceNotStr[str] | None): File external ids, alternative to file_ids and file_references.
+ file_instance_ids (NodeId | Sequence[NodeId] | None): Files to detect in, specified by instance id.
+ file_references (list[FileReference] | FileReference | None): File references (id, external_id or instance_id), and first_page and last_page to specify page ranges per file. Each reference can specify up to 50 pages. Providing a page range will also make the page count of the document a part of the response.
+ pattern_mode (bool | None): If True, entities must be provided with a sample field. This enables detecting tags that are similar to the sample, but not necessarily identical. Defaults to None.
+ configuration (DiagramDetectConfig | None): Additional configuration for the detect algorithm. See `DiagramDetectConfig` class documentation and `beta API docs `_.
+ multiple_jobs (bool): Enables you to publish multiple jobs. If True the method returns a tuple of DetectJobBundle and list of potentially unposted files. If False it will return a single DiagramDetectResults. Defaults to False.
+
+ Returns:
+ DiagramDetectResults | tuple[DetectJobBundle, list[dict[str, Any]]]: Resulting queued job or a bundle of jobs and a list of unposted files. Note that the .result property of the job or job bundle will block waiting for results.
+
+ Note:
+ The results are not written to CDF, to create annotations based on detected entities use `AnnotationsAPI`.
+
+ Examples:
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.contextualization import FileReference
+ >>> client = CogniteClient()
+ >>> detect_job = client.diagrams.detect(
+ ... entities=[
+ ... {"userDefinedField": "21PT1017","ignoredField": "AA11"},
+ ... {"userDefinedField": "21PT1018"}],
+ ... search_field="userDefinedField",
+ ... partial_match=True,
+ ... min_tokens=2,
+ ... file_ids=[101],
+ ... file_external_ids=["Test1"],
+ ... file_references=[
+ ... FileReference(id=20, first_page=1, last_page=10),
+ ... FileReference(external_id="ext_20", first_page=11, last_page=20)
+ ... ])
+ >>> result = detect_job.get_result()
+ >>> print(result)
+
+ {
+ 'items': [
+ {'fileId': 101, 'annotations': []},
+ {'fileExternalId': 'Test1', 'fileId': 1, 'annotations': []},
+ {'fileId': 20, 'fileExternalId': 'ext_20', 'annotations': [], 'pageCount': 17},
+ {
+ 'fileId': 20,
+ 'fileExternalId': 'ext_20',
+ 'annotations': [
+ {
+ 'text': '21PT1017',
+ 'entities': [{"userDefinedField": "21PT1017","ignoredField": "AA11"}],
+ 'region': {
+ 'page': 12,
+ 'shape': 'rectangle',
+ 'vertices': [
+ {'x': 0.01, 'y': 0.01},
+ {'x': 0.01, 'y': 0.02},
+ {'x': 0.02, 'y': 0.02},
+ {'x': 0.02, 'y': 0.01}
+ ]
+ }
+ }
+ ],
+ 'pageCount': 17
+ }
+ ]
+ }
+
+
+ To use beta configuration options you can use a dictionary or `DiagramDetectConfig` object for convenience:
+
+ >>> from cognite.client.data_classes.contextualization import ConnectionFlags, DiagramDetectConfig
+ >>> config = DiagramDetectConfig(
+ ... remove_leading_zeros=True,
+ ... connection_flags=ConnectionFlags(
+ ... no_text_inbetween=True,
+ ... natural_reading_order=True,
+ ... )
+ ... )
+ >>> job = client.diagrams.detect(entities=[{"name": "A1"}], file_id=123, config=config)
+
+ Check the documentation for `DiagramDetectConfig` for more information on the available options.
+ """
+ return run_sync(
+ self.__async_client.diagrams.detect(
+ entities=entities,
+ search_field=search_field,
+ partial_match=partial_match,
+ min_tokens=min_tokens,
+ file_ids=file_ids,
+ file_external_ids=file_external_ids,
+ file_instance_ids=file_instance_ids,
+ file_references=file_references,
+ pattern_mode=pattern_mode,
+ configuration=configuration,
+ multiple_jobs=multiple_jobs,
+ )
+ )
+
+ def get_detect_jobs(self, job_ids: list[int]) -> list[DiagramDetectResults]:
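+ """Retrieve existing engineering diagram detect jobs by their job ids.
+
+ Args:
+ job_ids (list[int]): Ids of the detect jobs to retrieve.
+
+ Returns:
+ list[DiagramDetectResults]: One result object per requested job.
+ """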
+ return run_sync(self.__async_client.diagrams.get_detect_jobs(job_ids=job_ids))
+
+ def convert(self, detect_job: DiagramDetectResults) -> DiagramConvertResults:
+ """
+ Convert a P&ID to interactive SVGs where the provided annotations are highlighted.
+
+ Note:
+ Will automatically wait for the detect job to complete before starting the conversion.
+
+ Args:
+ detect_job (DiagramDetectResults): detect job
+
+ Returns:
+ DiagramConvertResults: Resulting queued job.
+
+ Examples:
+
+ Run a detection job, then convert the results:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> detect_job = client.diagrams.detect(...)
+ >>> client.diagrams.convert(detect_job=detect_job)
+ """
+ return run_sync(self.__async_client.diagrams.convert(detect_job=detect_job))
diff --git a/cognite/client/_sync_api/document_preview.py b/cognite/client/_sync_api/document_preview.py
new file mode 100644
index 0000000000..5f691428d4
--- /dev/null
+++ b/cognite/client/_sync_api/document_preview.py
@@ -0,0 +1,148 @@
+"""
+===============================================================================
+a613b724c1f2c9708b74ff1e9f99b3aa
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+from typing import IO
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.documents import TemporaryLink
+from cognite.client.utils._async_helpers import run_sync
+
+
+class SyncDocumentPreviewAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ def download_page_as_png_bytes(self, id: int, page_number: int = 1) -> bytes:
+ """
+ `Downloads an image preview for a specific page of the specified document. `_
+
+ Args:
+ id (int): The server-generated ID for the document you want to retrieve the preview of.
+ page_number (int): Page number to preview. Starting at 1 for first page.
+
+ Returns:
+ bytes: The png preview of the document.
+
+ Examples:
+
+ Download image preview of page 5 of file with id 123:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> content = client.documents.previews.download_page_as_png_bytes(id=123, page_number=5)
+
+ Download an image preview and display using IPython.display.Image (for example in a Jupyter Notebook):
+
+ >>> from IPython.display import Image
+ >>> binary_png = client.documents.previews.download_page_as_png_bytes(id=123, page_number=5)
+ >>> Image(binary_png)
+ """
+ return run_sync(
+ self.__async_client.documents.previews.download_page_as_png_bytes(id=id, page_number=page_number)
+ )
+
+ def download_page_as_png(
+ self, path: Path | str | IO, id: int, page_number: int = 1, overwrite: bool = False
+ ) -> None:
+ """
+ `Downloads an image preview for a specific page of the specified document. `_
+
+ Args:
+ path (Path | str | IO): The path to save the png preview of the document. If the path is a directory, the file name will be '[id]_page[page_number].png'.
+ id (int): The server-generated ID for the document you want to retrieve the preview of.
+ page_number (int): Page number to preview. Starting at 1 for first page.
+ overwrite (bool): Whether to overwrite existing file at the given path. Defaults to False.
+
+ Examples:
+
+ Download Image preview of page 5 of file with id 123 to folder "previews":
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.documents.previews.download_page_as_png("previews", id=123, page_number=5)
+ """
+ return run_sync(
+ self.__async_client.documents.previews.download_page_as_png(
+ path=path, id=id, page_number=page_number, overwrite=overwrite
+ )
+ )
+
+ def download_document_as_pdf_bytes(self, id: int) -> bytes:
+ """
+ `Downloads a pdf preview of the specified document. `_
+
+ Previews will be rendered if necessary during the request. Be prepared for the request to take a few seconds to complete.
+
+ Args:
+ id (int): The server-generated ID for the document you want to retrieve the preview of.
+
+ Returns:
+ bytes: The pdf preview of the document.
+
+ Examples:
+
+ Download PDF preview of file with id 123:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> content = client.documents.previews.download_document_as_pdf_bytes(id=123)
+ """
+ return run_sync(self.__async_client.documents.previews.download_document_as_pdf_bytes(id=id))
+
+ def download_document_as_pdf(self, path: Path | str | IO, id: int, overwrite: bool = False) -> None:
+ """
+ `Downloads a pdf preview of the specified document. `_
+
+ Previews will be rendered if necessary during the request. Be prepared for the request to take a few seconds to complete.
+
+ Args:
+ path (Path | str | IO): The path to save the pdf preview of the document. If the path is a directory, the file name will be '[id].pdf'.
+ id (int): The server-generated ID for the document you want to retrieve the preview of.
+ overwrite (bool): Whether to overwrite existing file at the given path. Defaults to False.
+
+ Examples:
+
+ Download PDF preview of file with id 123 to folder "previews":
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.documents.previews.download_document_as_pdf("previews", id=123)
+ """
+ return run_sync(
+ self.__async_client.documents.previews.download_document_as_pdf(path=path, id=id, overwrite=overwrite)
+ )
+
+ def retrieve_pdf_link(self, id: int) -> TemporaryLink:
+ """
+ `Retrieve a Temporary link to download pdf preview `_
+
+ Args:
+ id (int): The server-generated ID for the document you want to retrieve the preview of.
+
+ Returns:
+ TemporaryLink: A temporary link to download the pdf preview.
+
+ Examples:
+
+ Retrieve the PDF preview download link for document with id 123:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> link = client.documents.previews.retrieve_pdf_link(id=123)
+ """
+ return run_sync(self.__async_client.documents.previews.retrieve_pdf_link(id=id))
diff --git a/cognite/client/_sync_api/documents.py b/cognite/client/_sync_api/documents.py
new file mode 100644
index 0000000000..3624dacb56
--- /dev/null
+++ b/cognite/client/_sync_api/documents.py
@@ -0,0 +1,466 @@
+"""
+===============================================================================
+a09b74dddecf74bb06f572649b34a8bc
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator
+from typing import TYPE_CHECKING, Any, BinaryIO, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api.document_preview import SyncDocumentPreviewAPI
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.aggregations import AggregationFilter, UniqueResultList
+from cognite.client.data_classes.documents import (
+ Document,
+ DocumentHighlightList,
+ DocumentList,
+ DocumentProperty,
+ DocumentSort,
+ SortableProperty,
+ SourceFileProperty,
+)
+from cognite.client.data_classes.filters import Filter
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncDocumentsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+ self.previews = SyncDocumentPreviewAPI(async_client)
+
+ @overload
+ def __call__(self, chunk_size: int) -> Iterator[DocumentList]: ...
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[Document]: ...
+
+ def __call__(
+ self,
+ chunk_size: int | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ sort: DocumentSort | SortableProperty | tuple[SortableProperty, Literal["asc", "desc"]] | None = None,
+ limit: int | None = None,
+ ) -> Iterator[Document | DocumentList]:
+ """
+ Iterate over documents
+
+ Fetches documents as they are iterated over, so you keep a limited number of documents in memory.
+
+ Args:
+ chunk_size (int | None): Number of documents to return in each chunk. Defaults to yielding one document at a time.
+ filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to return.
+ sort (DocumentSort | SortableProperty | tuple[SortableProperty, Literal['asc', 'desc']] | None): The property to sort by. The default order is ascending.
+ limit (int | None): Maximum number of documents to return. Default to return all items.
+
+ Yields:
+ Document | DocumentList: yields Documents one by one if chunk_size is not specified, else DocumentList objects.
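+
+        Examples:
+
+            Iterate over documents, one-by-one:
+
+            >>> from cognite.client import CogniteClient
+            >>> client = CogniteClient()
+            >>> for document in client.documents():
+            ...     document  # do something with the document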
+ """
+ yield from SyncIterator(
+ self.__async_client.documents(chunk_size=chunk_size, filter=filter, sort=sort, limit=limit)
+ )
+
+ def aggregate_count(self, query: str | None = None, filter: Filter | dict[str, Any] | None = None) -> int:
+ """
+ `Count of documents matching the specified filters and search. `_
+
+ Args:
+ query (str | None): The free text search query, for details see the documentation referenced above.
+ filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count.
+
+ Returns:
+ int: The number of documents matching the specified filters and search.
+
+ Examples:
+
+ Count the number of documents in your CDF project:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> count = client.documents.aggregate_count()
+
+ Count the number of PDF documents in your CDF project:
+
+ >>> from cognite.client.data_classes import filters
+ >>> from cognite.client.data_classes.documents import DocumentProperty
+ >>> is_pdf = filters.Equals(DocumentProperty.mime_type, "application/pdf")
+ >>> pdf_count = client.documents.aggregate_count(filter=is_pdf)
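+
+            Count the number of documents matching the free text search query "pump":
+
+            >>> pump_count = client.documents.aggregate_count(query="pump")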
+
+ Count the number of documents with a related asset in a subtree rooted at any of
+ the specified external IDs, e.g. 'Plant_1' and 'Plant_2':
+
+ >>> client.documents.aggregate_count(
+ ... filter=filters.InAssetSubtree(
+ ... property=DocumentProperty.asset_external_ids,
+ ... values=['Plant_1', 'Plant_2'],
+ ... )
+ ... )
+ """
+ return run_sync(self.__async_client.documents.aggregate_count(query=query, filter=filter))
+
+ def aggregate_cardinality_values(
+ self,
+ property: DocumentProperty | SourceFileProperty | list[str] | str,
+ query: str | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ aggregate_filter: AggregationFilter | dict[str, Any] | None = None,
+ ) -> int:
+ """
+ `Find approximate property count for documents. `_
+
+ Args:
+ property (DocumentProperty | SourceFileProperty | list[str] | str): The property to count the cardinality of.
+ query (str | None): The free text search query, for details see the documentation referenced above.
+ filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count cardinality.
+ aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
+
+ Returns:
+            int: The approximate number of unique values of the given property among the documents matching the specified filters and search.
+
+ Examples:
+
+ Count the number of types of documents in your CDF project:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.documents import DocumentProperty
+ >>> client = CogniteClient()
+ >>> count = client.documents.aggregate_cardinality_values(DocumentProperty.type)
+
+ Count the number of authors of plain/text documents in your CDF project:
+
+ >>> from cognite.client.data_classes import filters
+ >>> from cognite.client.data_classes.documents import DocumentProperty
+ >>> is_plain_text = filters.Equals(DocumentProperty.mime_type, "text/plain")
+ >>> plain_text_author_count = client.documents.aggregate_cardinality_values(DocumentProperty.author, filter=is_plain_text)
+
+            Count the number of types of documents in your CDF project, but exclude types that start with "text":
+
+ >>> from cognite.client.data_classes.documents import DocumentProperty
+ >>> from cognite.client.data_classes import aggregations
+ >>> agg = aggregations
+ >>> is_not_text = agg.Not(agg.Prefix("text"))
+ >>> type_count_excluded_text = client.documents.aggregate_cardinality_values(DocumentProperty.type, aggregate_filter=is_not_text)
+ """
+ return run_sync(
+ self.__async_client.documents.aggregate_cardinality_values(
+ property=property, query=query, filter=filter, aggregate_filter=aggregate_filter
+ )
+ )
+
+ def aggregate_cardinality_properties(
+ self,
+ path: SourceFileProperty | list[str] = SourceFileProperty.metadata,
+ query: str | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ aggregate_filter: AggregationFilter | dict[str, Any] | None = None,
+ ) -> int:
+ """
+ `Find approximate paths count for documents. `_
+
+ Args:
+ path (SourceFileProperty | list[str]): The scope in every document to aggregate properties. The only value allowed now is ["sourceFile", "metadata"]. It means to aggregate only metadata properties (aka keys).
+ query (str | None): The free text search query, for details see the documentation referenced above.
+ filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count cardinality.
+ aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
+
+ Returns:
+            int: The approximate number of unique metadata keys among the documents matching the specified filters and search.
+
+ Examples:
+
+            Count the number of metadata keys for documents in your CDF project:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> count = client.documents.aggregate_cardinality_properties()
+ """
+ return run_sync(
+ self.__async_client.documents.aggregate_cardinality_properties(
+ path=path, query=query, filter=filter, aggregate_filter=aggregate_filter
+ )
+ )
+
+ def aggregate_unique_values(
+ self,
+ property: DocumentProperty | SourceFileProperty | list[str] | str,
+ query: str | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ aggregate_filter: AggregationFilter | dict[str, Any] | None = None,
+ limit: int = DEFAULT_LIMIT_READ,
+ ) -> UniqueResultList:
+ """
+ `Get unique properties with counts for documents. `_
+
+ Args:
+ property (DocumentProperty | SourceFileProperty | list[str] | str): The property to group by.
+ query (str | None): The free text search query, for details see the documentation referenced above.
+ filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count cardinality.
+ aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
+ limit (int): Maximum number of items. Defaults to 25.
+
+ Returns:
+ UniqueResultList: List of unique values of documents matching the specified filters and search.
+
+ Examples:
+
+ Get the unique types with count of documents in your CDF project:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.documents import DocumentProperty
+ >>> client = CogniteClient()
+ >>> result = client.documents.aggregate_unique_values(DocumentProperty.mime_type)
+ >>> unique_types = result.unique
+
+ Get the different languages with count for documents with external id prefix "abc":
+
+ >>> from cognite.client.data_classes import filters
+ >>> from cognite.client.data_classes.documents import DocumentProperty
+ >>> is_abc = filters.Prefix(DocumentProperty.external_id, "abc")
+ >>> result = client.documents.aggregate_unique_values(DocumentProperty.language, filter=is_abc)
+ >>> unique_languages = result.unique
+
+ Get the unique mime types with count of documents, but exclude mime types that start with text:
+
+ >>> from cognite.client.data_classes.documents import DocumentProperty
+ >>> from cognite.client.data_classes import aggregations
+ >>> agg = aggregations
+ >>> is_not_text = agg.Not(agg.Prefix("text"))
+ >>> result = client.documents.aggregate_unique_values(DocumentProperty.mime_type, aggregate_filter=is_not_text)
+ >>> unique_mime_types = result.unique
+ """
+ return run_sync(
+ self.__async_client.documents.aggregate_unique_values(
+ property=property, query=query, filter=filter, aggregate_filter=aggregate_filter, limit=limit
+ )
+ )
+
+ def aggregate_unique_properties(
+ self,
+ path: DocumentProperty | SourceFileProperty | list[str] | str,
+ query: str | None = None,
+ filter: Filter | dict[str, Any] | None = None,
+ aggregate_filter: AggregationFilter | dict[str, Any] | None = None,
+ limit: int = DEFAULT_LIMIT_READ,
+ ) -> UniqueResultList:
+ """
+ `Get unique paths with counts for documents. `_
+
+ Args:
+ path (DocumentProperty | SourceFileProperty | list[str] | str): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys).
+ query (str | None): The free text search query, for details see the documentation referenced above.
+ filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count cardinality.
+ aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
+ limit (int): Maximum number of items. Defaults to 25.
+
+ Returns:
+ UniqueResultList: List of unique values of documents matching the specified filters and search.
+
+ Examples:
+
+ Get the unique metadata keys with count of documents in your CDF project:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.documents import SourceFileProperty
+ >>> client = CogniteClient()
+            >>> result = client.documents.aggregate_unique_properties(SourceFileProperty.metadata)
+ """
+ return run_sync(
+ self.__async_client.documents.aggregate_unique_properties(
+ path=path, query=query, filter=filter, aggregate_filter=aggregate_filter, limit=limit
+ )
+ )
+
+ def retrieve_content(self, id: int) -> bytes:
+ """
+ `Retrieve document content `_
+
+ Returns extracted textual information for the given document.
+
+ The document pipeline extracts up to 1MiB of textual information from each processed document.
+ The search and list endpoints truncate the textual content of each document,
+ in order to reduce the size of the returned payload. If you want the whole text for a document,
+ you can use this endpoint.
+
+ Args:
+ id (int): The server-generated ID for the document you want to retrieve the content of.
+
+ Returns:
+ bytes: The content of the document.
+
+ Examples:
+
+ Retrieve the content of a document with id 123:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> content = client.documents.retrieve_content(id=123)
+ """
+ return run_sync(self.__async_client.documents.retrieve_content(id=id))
+
+ def retrieve_content_buffer(self, id: int, buffer: BinaryIO) -> None:
+ """
+ `Retrieve document content into buffer `_
+
+ Returns extracted textual information for the given document.
+
+ The document pipeline extracts up to 1MiB of textual information from each processed document.
+ The search and list endpoints truncate the textual content of each document,
+ in order to reduce the size of the returned payload. If you want the whole text for a document,
+ you can use this endpoint.
+
+ Args:
+ id (int): The server-generated ID for the document you want to retrieve the content of.
+ buffer (BinaryIO): The document content is streamed directly into the buffer. This is useful for retrieving large documents.
+
+ Examples:
+
+            Retrieve the content of a document with id 123 into local file "my_file.txt":
+
+ >>> from cognite.client import CogniteClient
+ >>> from pathlib import Path
+ >>> client = CogniteClient()
+ >>> with Path("my_file.txt").open("wb") as buffer:
+ ... client.documents.retrieve_content_buffer(id=123, buffer=buffer)
+ """
+ return run_sync(self.__async_client.documents.retrieve_content_buffer(id=id, buffer=buffer))
+
+ @overload
+ def search(
+ self,
+ query: str,
+ highlight: Literal[False] = False,
+ filter: Filter | dict[str, Any] | None = None,
+ sort: DocumentSort | SortableProperty | tuple[SortableProperty, Literal["asc", "desc"]] | None = None,
+ limit: int = DEFAULT_LIMIT_READ,
+ ) -> DocumentList: ...
+
+ @overload
+ def search(
+ self,
+ query: str,
+ highlight: Literal[True],
+ filter: Filter | dict[str, Any] | None = None,
+ sort: DocumentSort | SortableProperty | tuple[SortableProperty, Literal["asc", "desc"]] | None = None,
+ limit: int = DEFAULT_LIMIT_READ,
+ ) -> DocumentHighlightList: ...
+
+ def search(
+ self,
+ query: str,
+ highlight: bool = False,
+ filter: Filter | dict[str, Any] | None = None,
+ sort: DocumentSort | SortableProperty | tuple[SortableProperty, Literal["asc", "desc"]] | None = None,
+ limit: int = DEFAULT_LIMIT_READ,
+ ) -> DocumentList | DocumentHighlightList:
+ """
+ `Search documents `_
+
+ This endpoint lets you search for documents by using advanced filters and free text queries.
+ Free text queries are matched against the documents' filenames and contents. For more information, see
+ endpoint documentation referenced above.
+
+ Args:
+ query (str): The free text search query.
+ highlight (bool): Whether or not matches in search results should be highlighted.
+ filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to search.
+ sort (DocumentSort | SortableProperty | tuple[SortableProperty, Literal['asc', 'desc']] | None): The property to sort by. The default order is ascending.
+ limit (int): Maximum number of items to return. When using highlights, the maximum value is reduced to 20. Defaults to 25.
+
+ Returns:
+ DocumentList | DocumentHighlightList: List of search results. If highlight is True, a DocumentHighlightList is returned, otherwise a DocumentList is returned.
+
+ Examples:
+
+ Search for text "pump 123" in PDF documents in your CDF project:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import filters
+ >>> from cognite.client.data_classes.documents import DocumentProperty
+ >>> client = CogniteClient()
+ >>> is_pdf = filters.Equals(DocumentProperty.mime_type, "application/pdf")
+ >>> documents = client.documents.search("pump 123", filter=is_pdf)
+
+ Find all documents with exact text 'CPLEX Error 1217: No Solution exists.'
+            in plain text files created within the last week in your CDF project and highlight the matches:
+
+ >>> from datetime import datetime, timedelta
+ >>> from cognite.client.data_classes import filters
+ >>> from cognite.client.data_classes.documents import DocumentProperty
+ >>> from cognite.client.utils import timestamp_to_ms
+ >>> is_plain_text = filters.Equals(DocumentProperty.mime_type, "text/plain")
+ >>> last_week = filters.Range(DocumentProperty.created_time,
+ ... gt=timestamp_to_ms(datetime.now() - timedelta(days=7)))
+ >>> documents = client.documents.search('"CPLEX Error 1217: No Solution exists."',
+ ... highlight=True,
+ ... filter=filters.And(is_plain_text, last_week))
+ """
+ return run_sync(
+ self.__async_client.documents.search(
+ query=query, highlight=highlight, filter=filter, sort=sort, limit=limit
+ )
+ )
+
+ def list(
+ self,
+ filter: Filter | dict[str, Any] | None = None,
+ sort: DocumentSort | SortableProperty | tuple[SortableProperty, Literal["asc", "desc"]] | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ ) -> DocumentList:
+ """
+ `List documents `_
+
+ You can use filters to narrow down the list. Unlike the search method, list does not restrict the number
+ of documents to return, meaning that setting the limit to -1 will return all the documents in your
+ project.
+
+ Args:
+            filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to return.
+ sort (DocumentSort | SortableProperty | tuple[SortableProperty, Literal['asc', 'desc']] | None): The property to sort by. The default order is ascending.
+ limit (int | None): Maximum number of documents to return. Defaults to 25. Set to None or -1 to return all documents.
+
+ Returns:
+ DocumentList: List of documents
+
+ Examples:
+
+ List all PDF documents in your CDF project:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import filters
+ >>> from cognite.client.data_classes.documents import DocumentProperty
+ >>> client = CogniteClient()
+ >>> is_pdf = filters.Equals(DocumentProperty.mime_type, "application/pdf")
+ >>> pdf_documents = client.documents.list(filter=is_pdf)
+
+ List documents in your CDF project:
+
+ >>> documents = client.documents.list(limit=100)
+
+ Iterate over documents, one-by-one:
+
+ >>> for document in client.documents():
+ ... document # do something with the document
+
+ Iterate over chunks of documents to reduce memory load:
+
+ >>> for document_list in client.documents(chunk_size=250):
+            ...     document_list # do something with the documents
+
+ List all documents in your CDF project sorted by mime/type in descending order:
+
+ >>> from cognite.client.data_classes.documents import SortableDocumentProperty
+ >>> documents = client.documents.list(sort=(SortableDocumentProperty.mime_type, "desc"))
+ """
+ return run_sync(self.__async_client.documents.list(filter=filter, sort=sort, limit=limit))
diff --git a/cognite/client/_sync_api/entity_matching.py b/cognite/client/_sync_api/entity_matching.py
new file mode 100644
index 0000000000..a57ac37db4
--- /dev/null
+++ b/cognite/client/_sync_api/entity_matching.py
@@ -0,0 +1,318 @@
+"""
+===============================================================================
+547f2afb342b36199f74baa8a4115a2f
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Sequence
+from typing import Literal
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes._base import CogniteResource
+from cognite.client.data_classes.contextualization import (
+ ContextualizationJobList,
+ EntityMatchingModel,
+ EntityMatchingModelList,
+ EntityMatchingModelUpdate,
+ EntityMatchingPredictionResult,
+)
+from cognite.client.utils._async_helpers import run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+
+class SyncEntityMatchingAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ def retrieve(self, id: int | None = None, external_id: str | None = None) -> EntityMatchingModel | None:
+ """
+ `Retrieve model `_
+
+ Args:
+ id (int | None): id of the model to retrieve.
+ external_id (str | None): external id of the model to retrieve.
+
+ Returns:
+ EntityMatchingModel | None: Model requested.
+
+ Examples:
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> retrieved_model = client.entity_matching.retrieve(id=1)
+ """
+ return run_sync(self.__async_client.entity_matching.retrieve(id=id, external_id=external_id))
+
+ def retrieve_multiple(
+ self, ids: Sequence[int] | None = None, external_ids: SequenceNotStr[str] | None = None
+ ) -> EntityMatchingModelList:
+ """
+ `Retrieve models `_
+
+ Args:
+ ids (Sequence[int] | None): ids of the model to retrieve.
+ external_ids (SequenceNotStr[str] | None): external ids of the model to retrieve.
+
+ Returns:
+ EntityMatchingModelList: Models requested.
+
+ Examples:
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> retrieved_models = client.entity_matching.retrieve_multiple([1,2,3])
+ """
+ return run_sync(self.__async_client.entity_matching.retrieve_multiple(ids=ids, external_ids=external_ids))
+
+ def update(
+ self,
+ item: EntityMatchingModel
+ | EntityMatchingModelUpdate
+ | Sequence[EntityMatchingModel | EntityMatchingModelUpdate],
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> EntityMatchingModelList | EntityMatchingModel:
+ """
+ `Update model `_
+
+ Args:
+ item (EntityMatchingModel | EntityMatchingModelUpdate | Sequence[EntityMatchingModel | EntityMatchingModelUpdate]): Model(s) to update
+ mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (EntityMatchingModel). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+
+ Returns:
+            EntityMatchingModelList | EntityMatchingModel: The updated model(s).
+
+ Examples:
+ >>> from cognite.client.data_classes.contextualization import EntityMatchingModelUpdate
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.entity_matching.update(EntityMatchingModelUpdate(id=1).name.set("New name"))
+ """
+ return run_sync(self.__async_client.entity_matching.update(item=item, mode=mode))
+
+ def list(
+ self,
+ name: str | None = None,
+ description: str | None = None,
+ original_id: int | None = None,
+ feature_type: str | None = None,
+ classifier: str | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ ) -> EntityMatchingModelList:
+ """
+ `List models `_
+
+ Args:
+ name (str | None): Optional user-defined name of model.
+ description (str | None): Optional user-defined description of model.
+ original_id (int | None): id of the original model for models that were created with refit.
+ feature_type (str | None): feature type that defines the combination of features used.
+ classifier (str | None): classifier used in training.
+ limit (int | None): Maximum number of items to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+
+ Returns:
+ EntityMatchingModelList: List of models.
+
+ Examples:
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.entity_matching.list(limit=1, name="test")
+ """
+ return run_sync(
+ self.__async_client.entity_matching.list(
+ name=name,
+ description=description,
+ original_id=original_id,
+ feature_type=feature_type,
+ classifier=classifier,
+ limit=limit,
+ )
+ )
+
+ def list_jobs(self) -> ContextualizationJobList:
+ """
+        List jobs, typically model fit and predict runs.
+
+        Returns:
+ ContextualizationJobList: List of jobs.
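+
+        Examples:
+
+            List all entity matching jobs in your CDF project:
+
+            >>> from cognite.client import CogniteClient
+            >>> client = CogniteClient()
+            >>> jobs = client.entity_matching.list_jobs()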
+ """
+ return run_sync(self.__async_client.entity_matching.list_jobs())
+
+ def delete(
+ self, id: int | Sequence[int] | None = None, external_id: str | SequenceNotStr[str] | None = None
+ ) -> None:
+ """
+        `Delete models <https://api-docs.cognite.com/20230101/tag/Entity-matching/operation/entityMatchingDelete>`_
+
+ Args:
+ id (int | Sequence[int] | None): Id or list of ids
+            external_id (str | SequenceNotStr[str] | None): External ID or list of external ids
+
+        Examples:
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.entity_matching.delete(id=1)
+ """
+ return run_sync(self.__async_client.entity_matching.delete(id=id, external_id=external_id))
+
+ def fit(
+ self,
+ sources: Sequence[dict | CogniteResource],
+ targets: Sequence[dict | CogniteResource],
+ true_matches: Sequence[dict | tuple[int | str, int | str]] | None = None,
+ match_fields: dict | Sequence[tuple[str, str]] | None = None,
+ feature_type: str | None = None,
+ classifier: str | None = None,
+ ignore_missing_fields: bool = False,
+ name: str | None = None,
+ description: str | None = None,
+ external_id: str | None = None,
+ ) -> EntityMatchingModel:
+ """
+ Fit entity matching model.
+
+ Note:
+ All users on this CDF subscription with assets read-all and entitymatching read-all and write-all
+            capabilities in the project are able to access the data sent to this endpoint.
+
+ Args:
+ sources (Sequence[dict | CogniteResource]): entities to match from, should have an 'id' field. Tolerant to passing more than is needed or used (e.g. json dump of time series list). Metadata fields are automatically flattened to "metadata.key" entries, such that they can be used in match_fields.
+ targets (Sequence[dict | CogniteResource]): entities to match to, should have an 'id' field. Tolerant to passing more than is needed or used.
+ true_matches (Sequence[dict | tuple[int | str, int | str]] | None): Known valid matches given as a list of dicts with keys 'sourceId', 'sourceExternalId', 'targetId', 'targetExternalId'). If omitted, uses an unsupervised model. A tuple can be used instead of the dictionary for convenience, interpreted as id/externalId based on type.
+ match_fields (dict | Sequence[tuple[str, str]] | None): List of (from,to) keys to use in matching. Default in the API is [('name','name')]. Also accepts {"source": .., "target": ..}.
+ feature_type (str | None): feature type that defines the combination of features used, see API docs for details.
+ classifier (str | None): classifier used in training.
+ ignore_missing_fields (bool): whether missing data in match_fields should return error or be filled in with an empty string.
+ name (str | None): Optional user-defined name of model.
+ description (str | None): Optional user-defined description of model.
+            external_id (str | None): Optional external id. Must be unique within the project.
+
+        Returns:
+ EntityMatchingModel: Resulting queued model.
+
+ Example:
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> sources = [{'id': 101, 'name': 'ChildAsset1', 'description': 'Child of ParentAsset1'}]
+ >>> targets = [{'id': 1, 'name': 'ParentAsset1', 'description': 'Parent to ChildAsset1'}]
+ >>> true_matches = [(1, 101)]
+ >>> model = client.entity_matching.fit(
+ ... sources=sources,
+ ... targets=targets,
+ ... true_matches=true_matches,
+ ... description="AssetMatchingJob1"
+ ... )
+ """
+ return run_sync(
+ self.__async_client.entity_matching.fit(
+ sources=sources,
+ targets=targets,
+ true_matches=true_matches,
+ match_fields=match_fields,
+ feature_type=feature_type,
+ classifier=classifier,
+ ignore_missing_fields=ignore_missing_fields,
+ name=name,
+ description=description,
+ external_id=external_id,
+ )
+ )
+
+ def predict(
+ self,
+ sources: Sequence[dict] | None = None,
+ targets: Sequence[dict] | None = None,
+ num_matches: int = 1,
+ score_threshold: float | None = None,
+ id: int | None = None,
+ external_id: str | None = None,
+ ) -> EntityMatchingPredictionResult:
+ """
+ `Predict entity matching. `_
+
+ Warning:
+ Blocks and waits for the model to be ready if it has been recently created.
+
+ Note:
+ All users on this CDF subscription with assets read-all and entitymatching read-all and write-all
+            capabilities in the project are able to access the data sent to this endpoint.
+
+ Args:
+ sources (Sequence[dict] | None): entities to match from, does not need an 'id' field. Tolerant to passing more than is needed or used (e.g. json dump of time series list). If omitted, will use data from fit.
+ targets (Sequence[dict] | None): entities to match to, does not need an 'id' field. Tolerant to passing more than is needed or used. If omitted, will use data from fit.
+ num_matches (int): number of matches to return for each item.
+ score_threshold (float | None): only return matches with a score above this threshold
+ id (int | None): ids of the model to use.
+ external_id (str | None): external ids of the model to use.
+
+ Returns:
+ EntityMatchingPredictionResult: object which can be used to wait for and retrieve results.
+
+ Examples:
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+            >>> sources = [{'id': 101, 'name': 'ChildAsset1', 'description': 'Child of ParentAsset1'}]
+            >>> targets = [{'id': 1, 'name': 'ParentAsset1', 'description': 'Parent to ChildAsset1'}]
+            >>> job = client.entity_matching.predict(
+            ...     sources=sources,
+            ...     targets=targets,
+            ...     num_matches=1,
+            ...     score_threshold=0.6,
+            ...     id=1
+            ... )
+ """
+ return run_sync(
+ self.__async_client.entity_matching.predict(
+ sources=sources,
+ targets=targets,
+ num_matches=num_matches,
+ score_threshold=score_threshold,
+ id=id,
+ external_id=external_id,
+ )
+ )
+
+ def refit(
+ self,
+ true_matches: Sequence[dict | tuple[int | str, int | str]],
+ id: int | None = None,
+ external_id: str | None = None,
+ ) -> EntityMatchingModel:
+ """
+ `Re-fits an entity matching model, using the combination of the old and new true matches. `_
+
+ Note:
+ All users on this CDF subscription with assets read-all and entitymatching read-all and write-all
+            capabilities in the project are able to access the data sent to this endpoint.
+
+ Args:
+ true_matches (Sequence[dict | tuple[int | str, int | str]]): Updated known valid matches given as a list of dicts with keys 'fromId', 'fromExternalId', 'toId', 'toExternalId'). A tuple can be used instead of the dictionary for convenience, interpreted as id/externalId based on type.
+ id (int | None): id of the model to use.
+            external_id (str | None): external id of the model to use.
+
+        Returns:
+ EntityMatchingModel: new model refitted to true_matches.
+
+ Examples:
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> sources = [{'id': 101, 'name': 'ChildAsset1', 'description': 'Child of ParentAsset1'}]
+ >>> targets = [{'id': 1, 'name': 'ParentAsset1', 'description': 'Parent to ChildAsset1'}]
+ >>> true_matches = [(1, 101)]
+ >>> model = client.entity_matching.refit(true_matches=true_matches, id=1)
+ """
+ return run_sync(
+ self.__async_client.entity_matching.refit(true_matches=true_matches, id=id, external_id=external_id)
+ )
diff --git a/cognite/client/_sync_api/events.py b/cognite/client/_sync_api/events.py
new file mode 100644
index 0000000000..28d85a7481
--- /dev/null
+++ b/cognite/client/_sync_api/events.py
@@ -0,0 +1,688 @@
+"""
+===============================================================================
+bfc59b87972253161926e6c07a36e18e
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import Any, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._api.events import SortSpec
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import (
+ EndTimeFilter,
+ Event,
+ EventFilter,
+ EventList,
+ EventUpdate,
+ TimestampRange,
+)
+from cognite.client.data_classes.aggregations import AggregationFilter, UniqueResultList
+from cognite.client.data_classes.events import EventPropertyLike, EventWrite
+from cognite.client.data_classes.filters import Filter
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+
+class SyncEventsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[Event]: ...
+
+ @overload
+ def __call__(self, chunk_size: int) -> Iterator[EventList]: ...
+
+ def __call__(
+ self,
+ chunk_size: int | None = None,
+ start_time: dict[str, Any] | TimestampRange | None = None,
+ end_time: dict[str, Any] | EndTimeFilter | None = None,
+ active_at_time: dict[str, Any] | TimestampRange | None = None,
+ type: str | None = None,
+ subtype: str | None = None,
+ metadata: dict[str, str] | None = None,
+ asset_ids: Sequence[int] | None = None,
+ asset_external_ids: SequenceNotStr[str] | None = None,
+ asset_subtree_ids: int | Sequence[int] | None = None,
+ asset_subtree_external_ids: str | SequenceNotStr[str] | None = None,
+ data_set_ids: int | Sequence[int] | None = None,
+ data_set_external_ids: str | SequenceNotStr[str] | None = None,
+ source: str | None = None,
+ created_time: dict[str, Any] | TimestampRange | None = None,
+ last_updated_time: dict[str, Any] | TimestampRange | None = None,
+ external_id_prefix: str | None = None,
+ sort: SortSpec | list[SortSpec] | None = None,
+ limit: int | None = None,
+ advanced_filter: Filter | dict[str, Any] | None = None,
+ ) -> Iterator[Event | EventList]:
+ """
+ Iterate over events
+
+ Fetches events as they are iterated over, so you keep a limited number of events in memory.
+
+ Args:
+            chunk_size (int | None): Number of events to return in each chunk. Defaults to yielding one event at a time.
+ start_time (dict[str, Any] | TimestampRange | None): Range between two timestamps
+ end_time (dict[str, Any] | EndTimeFilter | None): Range between two timestamps
+ active_at_time (dict[str, Any] | TimestampRange | None): Event is considered active from its startTime to endTime inclusive. If startTime is null, event is never active. If endTime is null, event is active from startTime onwards. activeAtTime filter will match all events that are active at some point from min to max, from min, or to max, depending on which of min and max parameters are specified.
+ type (str | None): Type of the event, e.g 'failure'.
+ subtype (str | None): Subtype of the event, e.g 'electrical'.
+ metadata (dict[str, str] | None): Customizable extra data about the event. String key -> String value.
+            asset_ids (Sequence[int] | None): Asset IDs of related equipment that this event relates to.
+ asset_external_ids (SequenceNotStr[str] | None): Asset External IDs of related equipment that this event relates to.
+ asset_subtree_ids (int | Sequence[int] | None): Only include events that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include events that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ data_set_ids (int | Sequence[int] | None): Return only events in the specified data set(s) with this id / these ids.
+ data_set_external_ids (str | SequenceNotStr[str] | None): Return only events in the specified data set(s) with this external id / these external ids.
+ source (str | None): The source of this event.
+ created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ external_id_prefix (str | None): External Id provided by client. Should be unique within the project
+ sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
+ limit (int | None): Maximum number of events to return. Defaults to return all items.
+ advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not.
+
+ Yields:
+ Event | EventList: yields Event one by one if chunk_size is not specified, else EventList objects.
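+
+        Examples:
+
+            Iterate over events, one-by-one:
+
+            >>> from cognite.client import CogniteClient
+            >>> client = CogniteClient()
+            >>> for event in client.events():
+            ...     event  # do something with the event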
+ """
+ yield from SyncIterator(
+ self.__async_client.events(
+ chunk_size=chunk_size,
+ start_time=start_time,
+ end_time=end_time,
+ active_at_time=active_at_time,
+ type=type,
+ subtype=subtype,
+ metadata=metadata,
+ asset_ids=asset_ids,
+ asset_external_ids=asset_external_ids,
+ asset_subtree_ids=asset_subtree_ids,
+ asset_subtree_external_ids=asset_subtree_external_ids,
+ data_set_ids=data_set_ids,
+ data_set_external_ids=data_set_external_ids,
+ source=source,
+ created_time=created_time,
+ last_updated_time=last_updated_time,
+ external_id_prefix=external_id_prefix,
+ sort=sort,
+ limit=limit,
+ advanced_filter=advanced_filter,
+ )
+ )
+
+ def retrieve(self, id: int | None = None, external_id: str | None = None) -> Event | None:
+ """
+ `Retrieve a single event by id. `_
+
+ Args:
+ id (int | None): ID
+ external_id (str | None): External ID
+
+ Returns:
+ Event | None: Requested event or None if it does not exist.
+
+ Examples:
+
+ Get event by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.events.retrieve(id=1)
+
+ Get event by external id:
+
+ >>> res = client.events.retrieve(external_id="1")
+ """
+ return run_sync(self.__async_client.events.retrieve(id=id, external_id=external_id))
+
+ def retrieve_multiple(
+ self,
+ ids: Sequence[int] | None = None,
+ external_ids: SequenceNotStr[str] | None = None,
+ ignore_unknown_ids: bool = False,
+ ) -> EventList:
+ """
+ `Retrieve multiple events by id. `_
+
+ Args:
+ ids (Sequence[int] | None): IDs
+ external_ids (SequenceNotStr[str] | None): External IDs
+ ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+
+ Returns:
+ EventList: The requested events.
+
+ Examples:
+
+ Get events by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.events.retrieve_multiple(ids=[1, 2, 3])
+
+ Get events by external id:
+
+ >>> res = client.events.retrieve_multiple(external_ids=["abc", "def"])
+ """
+ return run_sync(
+ self.__async_client.events.retrieve_multiple(
+ ids=ids, external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids
+ )
+ )
+
+ def aggregate_unique_values(
+ self,
+ filter: EventFilter | dict[str, Any] | None = None,
+ property: EventPropertyLike | None = None,
+ advanced_filter: Filter | dict[str, Any] | None = None,
+ aggregate_filter: AggregationFilter | dict[str, Any] | None = None,
+ ) -> UniqueResultList:
+ """
+ `Get unique properties with counts for events. `_
+
+ Args:
+ filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match.
+ property (EventPropertyLike | None): The property name(s) to apply the aggregation on.
+ advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to consider.
+ aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
+
+ Returns:
+ UniqueResultList: List of unique values of events matching the specified filters and search.
+
+ Examples:
+
+ Get the unique types with count of events in your CDF project:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.events import EventProperty
+ >>> client = CogniteClient()
+ >>> result = client.events.aggregate_unique_values(property=EventProperty.type)
+ >>> print(result.unique)
+
+ Get the unique types of events after 2020-01-01 in your CDF project:
+
+ >>> from cognite.client.data_classes import filters
+ >>> from cognite.client.data_classes.events import EventProperty
+ >>> from cognite.client.utils import timestamp_to_ms
+ >>> from datetime import datetime
+ >>> is_after_2020 = filters.Range(EventProperty.start_time, gte=timestamp_to_ms(datetime(2020, 1, 1)))
+ >>> result = client.events.aggregate_unique_values(EventProperty.type, advanced_filter=is_after_2020)
+ >>> print(result.unique)
+
+ Get the unique types of events after 2020-01-01 in your CDF project, but exclude all types that start with
+ "planned":
+
+ >>> from cognite.client.data_classes.events import EventProperty
+ >>> from cognite.client.data_classes import aggregations
+ >>> agg = aggregations
+ >>> not_planned = agg.Not(agg.Prefix("planned"))
+ >>> is_after_2020 = filters.Range(EventProperty.start_time, gte=timestamp_to_ms(datetime(2020, 1, 1)))
+ >>> result = client.events.aggregate_unique_values(EventProperty.type, advanced_filter=is_after_2020, aggregate_filter=not_planned)
+ >>> print(result.unique)
+ """
+ return run_sync(
+ self.__async_client.events.aggregate_unique_values(
+ filter=filter, property=property, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter
+ )
+ )
+
+ def aggregate_count(
+ self,
+ property: EventPropertyLike | None = None,
+ advanced_filter: Filter | dict[str, Any] | None = None,
+ filter: EventFilter | dict[str, Any] | None = None,
+ ) -> int:
+ """
+ `Count of event matching the specified filters. `_
+
+ Args:
+            property (EventPropertyLike | None): If specified, get an approximate number of events with a specific property
+ (property is not null) and matching the filters.
+ advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to count.
+ filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match.
+
+ Returns:
+ int: The number of events matching the specified filters and search.
+
+ Examples:
+
+ Count the number of events in your CDF project:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> count = client.events.aggregate_count()
+
+ Count the number of workorder events in your CDF project:
+
+ >>> from cognite.client.data_classes import filters
+ >>> from cognite.client.data_classes.events import EventProperty
+ >>> is_workorder = filters.Equals(EventProperty.type, "workorder")
+ >>> workorder_count = client.events.aggregate_count(advanced_filter=is_workorder)
+ """
+ return run_sync(
+ self.__async_client.events.aggregate_count(
+ property=property, advanced_filter=advanced_filter, filter=filter
+ )
+ )
+
+ def aggregate_cardinality_values(
+ self,
+ property: EventPropertyLike,
+ advanced_filter: Filter | dict[str, Any] | None = None,
+ aggregate_filter: AggregationFilter | dict[str, Any] | None = None,
+ filter: EventFilter | dict[str, Any] | None = None,
+ ) -> int:
+ """
+ `Find approximate property count for events. `_
+
+ Args:
+ property (EventPropertyLike): The property to count the cardinality of.
+ advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to count cardinality.
+ aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
+            filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match.
+
+        Returns:
+ int: The number of properties matching the specified filter.
+
+ Examples:
+
+ Count the number of types of events in your CDF project:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.events import EventProperty
+ >>> client = CogniteClient()
+ >>> type_count = client.events.aggregate_cardinality_values(EventProperty.type)
+
+ Count the number of types of events linked to asset 123 in your CDF project:
+
+ >>> from cognite.client.data_classes import filters
+ >>> from cognite.client.data_classes.events import EventProperty
+ >>> is_asset = filters.ContainsAny(EventProperty.asset_ids, 123)
+            >>> type_count = client.events.aggregate_cardinality_values(EventProperty.type, advanced_filter=is_asset)
+ """
+ return run_sync(
+ self.__async_client.events.aggregate_cardinality_values(
+ property=property, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter
+ )
+ )
+
+ def aggregate_cardinality_properties(
+ self,
+ path: EventPropertyLike,
+ advanced_filter: Filter | dict[str, Any] | None = None,
+ aggregate_filter: AggregationFilter | dict[str, Any] | None = None,
+ filter: EventFilter | dict[str, Any] | None = None,
+ ) -> int:
+ """
+ `Find approximate paths count for events. `_
+
+ Args:
+ path (EventPropertyLike): The scope in every document to aggregate properties. The only value allowed now is ["metadata"].
+ It means to aggregate only metadata properties (aka keys).
+ advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to count cardinality.
+ aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
+            filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match.
+
+        Returns:
+ int: The number of properties matching the specified filters and search.
+
+ Examples:
+
+ Count the number of metadata keys for events in your CDF project:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.events import EventProperty
+ >>> client = CogniteClient()
+ >>> type_count = client.events.aggregate_cardinality_properties(EventProperty.metadata)
+ """
+ return run_sync(
+ self.__async_client.events.aggregate_cardinality_properties(
+ path=path, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter
+ )
+ )
+
+ def aggregate_unique_properties(
+ self,
+ path: EventPropertyLike,
+ advanced_filter: Filter | dict[str, Any] | None = None,
+ aggregate_filter: AggregationFilter | dict[str, Any] | None = None,
+ filter: EventFilter | dict[str, Any] | None = None,
+ ) -> UniqueResultList:
+ """
+ `Get unique paths with counts for events. `_
+
+ Args:
+ path (EventPropertyLike): The scope in every document to aggregate properties. The only value allowed now is ["metadata"].
+ It means to aggregate only metadata properties (aka keys).
+ advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to count cardinality.
+ aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
+ filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match.
+
+ Returns:
+ UniqueResultList: List of unique values of events matching the specified filters and search.
+
+ Examples:
+
+ Get the unique metadata keys with count of events in your CDF project:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.events import EventProperty
+ >>> client = CogniteClient()
+ >>> result = client.events.aggregate_unique_properties(EventProperty.metadata)
+ >>> print(result.unique)
+ """
+ return run_sync(
+ self.__async_client.events.aggregate_unique_properties(
+ path=path, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter
+ )
+ )
+
+ @overload
+ def create(self, event: Sequence[Event] | Sequence[EventWrite]) -> EventList: ...
+
+ @overload
+ def create(self, event: Event | EventWrite) -> Event: ...
+
+ def create(self, event: Event | EventWrite | Sequence[Event] | Sequence[EventWrite]) -> Event | EventList:
+ """
+ `Create one or more events. `_
+
+ Args:
+ event (Event | EventWrite | Sequence[Event] | Sequence[EventWrite]): Event or list of events to create.
+
+ Returns:
+ Event | EventList: Created event(s)
+
+ Examples:
+
+ Create new events:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import EventWrite
+ >>> client = CogniteClient()
+ >>> events = [EventWrite(start_time=0, end_time=1), EventWrite(start_time=2, end_time=3)]
+ >>> res = client.events.create(events)
+ """
+ return run_sync(self.__async_client.events.create(event=event))
+
+ def delete(
+ self,
+ id: int | Sequence[int] | None = None,
+ external_id: str | SequenceNotStr[str] | None = None,
+ ignore_unknown_ids: bool = False,
+ ) -> None:
+ """
+ `Delete one or more events `_
+
+ Args:
+ id (int | Sequence[int] | None): Id or list of ids
+ external_id (str | SequenceNotStr[str] | None): External ID or list of external ids
+ ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+
+ Examples:
+
+ Delete events by id or external id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.events.delete(id=[1,2,3], external_id="3")
+ """
+ return run_sync(
+ self.__async_client.events.delete(id=id, external_id=external_id, ignore_unknown_ids=ignore_unknown_ids)
+ )
+
+ @overload
+ def update(
+ self,
+ item: Sequence[Event | EventWrite | EventUpdate],
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> EventList: ...
+
+ @overload
+ def update(
+ self,
+ item: Event | EventWrite | EventUpdate,
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> Event: ...
+
+ def update(
+ self,
+ item: Event | EventWrite | EventUpdate | Sequence[Event | EventWrite | EventUpdate],
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> Event | EventList:
+ """
+ `Update one or more events `_
+
+ Args:
+ item (Event | EventWrite | EventUpdate | Sequence[Event | EventWrite | EventUpdate]): Event(s) to update
+ mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Event or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+
+ Returns:
+ Event | EventList: Updated event(s)
+
+ Examples:
+
+ Update an event that you have fetched. This will perform a full update of the event:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> event = client.events.retrieve(id=1)
+ >>> event.description = "New description"
+ >>> res = client.events.update(event)
+
+            Perform a partial update on an event, updating the description and adding a new field to metadata:
+
+ >>> from cognite.client.data_classes import EventUpdate
+ >>> my_update = EventUpdate(id=1).description.set("New description").metadata.add({"key": "value"})
+ >>> res = client.events.update(my_update)
+ """
+ return run_sync(self.__async_client.events.update(item=item, mode=mode))
+
+ def search(
+ self,
+ description: str | None = None,
+ filter: EventFilter | dict[str, Any] | None = None,
+ limit: int = DEFAULT_LIMIT_READ,
+ ) -> EventList:
+ """
+ `Search for events `_
+
+        Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required.
+
+ Args:
+ description (str | None): Fuzzy match on description.
+ filter (EventFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields.
+ limit (int): Maximum number of results to return.
+
+ Returns:
+ EventList: List of requested events
+
+ Examples:
+
+ Search for events:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.events.search(description="some description")
+ """
+ return run_sync(self.__async_client.events.search(description=description, filter=filter, limit=limit))
+
+ @overload
+ def upsert(self, item: Sequence[Event | EventWrite], mode: Literal["patch", "replace"] = "patch") -> EventList: ...
+
+ @overload
+ def upsert(self, item: Event | EventWrite, mode: Literal["patch", "replace"] = "patch") -> Event: ...
+
+ def upsert(
+ self, item: Event | EventWrite | Sequence[Event | EventWrite], mode: Literal["patch", "replace"] = "patch"
+ ) -> Event | EventList:
+ """
+ Upsert events, i.e., update if it exists, and create if it does not exist.
+ Note this is a convenience method that handles the upserting for you by first calling update on all items,
+ and if any of them fail because they do not exist, it will create them instead.
+
+ For more details, see :ref:`appendix-upsert`.
+
+ Args:
+ item (Event | EventWrite | Sequence[Event | EventWrite]): Event or list of events to upsert.
+ mode (Literal['patch', 'replace']): Whether to patch or replace in the case the events are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified.
+
+ Returns:
+ Event | EventList: The upserted event(s).
+
+ Examples:
+
+ Upsert for events:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import EventWrite
+ >>> client = CogniteClient()
+ >>> existing_event = client.events.retrieve(id=1)
+ >>> existing_event.description = "New description"
+ >>> new_event = EventWrite(external_id="new_event", description="New event")
+ >>> res = client.events.upsert([existing_event, new_event], mode="replace")
+ """
+ return run_sync(self.__async_client.events.upsert(item=item, mode=mode))
+
+ def list(
+ self,
+ start_time: dict[str, Any] | TimestampRange | None = None,
+ end_time: dict[str, Any] | EndTimeFilter | None = None,
+ active_at_time: dict[str, Any] | TimestampRange | None = None,
+ type: str | None = None,
+ subtype: str | None = None,
+ metadata: dict[str, str] | None = None,
+ asset_ids: Sequence[int] | None = None,
+ asset_external_ids: SequenceNotStr[str] | None = None,
+ asset_subtree_ids: int | Sequence[int] | None = None,
+ asset_subtree_external_ids: str | SequenceNotStr[str] | None = None,
+ data_set_ids: int | Sequence[int] | None = None,
+ data_set_external_ids: str | SequenceNotStr[str] | None = None,
+ source: str | None = None,
+ created_time: dict[str, Any] | TimestampRange | None = None,
+ last_updated_time: dict[str, Any] | TimestampRange | None = None,
+ external_id_prefix: str | None = None,
+ sort: SortSpec | list[SortSpec] | None = None,
+ partitions: int | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ advanced_filter: Filter | dict[str, Any] | None = None,
+ ) -> EventList:
+ """
+ `List events `_
+
+ Args:
+ start_time (dict[str, Any] | TimestampRange | None): Range between two timestamps.
+ end_time (dict[str, Any] | EndTimeFilter | None): Range between two timestamps.
+ active_at_time (dict[str, Any] | TimestampRange | None): Event is considered active from its startTime to endTime inclusive. If startTime is null, event is never active. If endTime is null, event is active from startTime onwards. activeAtTime filter will match all events that are active at some point from min to max, from min, or to max, depending on which of min and max parameters are specified.
+ type (str | None): Type of the event, e.g. 'failure'.
+ subtype (str | None): Subtype of the event, e.g. 'electrical'.
+ metadata (dict[str, str] | None): Customizable extra data about the event. String key -> String value.
+ asset_ids (Sequence[int] | None): Asset IDs of related equipment that this event relates to.
+ asset_external_ids (SequenceNotStr[str] | None): Asset External IDs of related equipment that this event relates to.
+ asset_subtree_ids (int | Sequence[int] | None): Only include events that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include events that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ data_set_ids (int | Sequence[int] | None): Return only events in the specified data set(s) with this id / these ids.
+ data_set_external_ids (str | SequenceNotStr[str] | None): Return only events in the specified data set(s) with this external id / these external ids.
+ source (str | None): The source of this event.
+ created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ external_id_prefix (str | None): External Id provided by client. Should be unique within the project.
+ sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
+ partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`).
+ limit (int | None): Maximum number of events to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage.
+
+ Returns:
+ EventList: List of requested events
+
+ .. note::
+ When using `partitions`, there are a few considerations to keep in mind:
+ * `limit` has to be set to `None` (or `-1`).
+ * The API may reject requests if you specify more than 10 partitions. When Cognite enforces this behavior, the requests result in a 400 Bad Request status.
+ * Partitions are done independently of sorting: there's no guarantee of the sort order between elements from different partitions. For this reason providing a `sort` parameter when using `partitions` is not allowed.
+
+
+ Examples:
+
+ List events and filter on max start time:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> event_list = client.events.list(limit=5, start_time={"max": 1500000000})
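+
+ Fetch all events in parallel using partitions (a sketch; as noted above, limit must be None and sort cannot be combined with partitions):
+
+ >>> all_events = client.events.list(partitions=10, limit=None)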
+
+ Iterate over events, one-by-one:
+
+ >>> for event in client.events():
+ ... event # do something with the event
+
+ Iterate over chunks of events to reduce memory load:
+
+ >>> for event_list in client.events(chunk_size=2500):
+ ... event_list # do something with the events
+
+ Using advanced filter, find all events that have a metadata key 'timezone' starting with 'Europe',
+ and sort by external id ascending:
+
+ >>> from cognite.client.data_classes import filters
+ >>> in_timezone = filters.Prefix(["metadata", "timezone"], "Europe")
+ >>> res = client.events.list(advanced_filter=in_timezone, sort=("external_id", "asc"))
+
+ Note that you can check the API documentation above to see which properties you can filter on
+ with which filters.
+
+ To make it easier to avoid spelling mistakes and easier to look up available properties
+ for filtering and sorting, you can also use the `EventProperty` and `SortableEventProperty` Enums.
+
+ >>> from cognite.client.data_classes import filters
+ >>> from cognite.client.data_classes.events import EventProperty, SortableEventProperty
+ >>> in_timezone = filters.Prefix(EventProperty.metadata_key("timezone"), "Europe")
+ >>> res = client.events.list(
+ ... advanced_filter=in_timezone,
+ ... sort=(SortableEventProperty.external_id, "asc"))
+
+ Combine filter and advanced filter:
+
+ >>> from cognite.client.data_classes import filters
+ >>> not_instrument_lvl5 = filters.And(
+ ... filters.ContainsAny("labels", ["Level5"]),
+ ... filters.Not(filters.ContainsAny("labels", ["Instrument"]))
+ ... )
+ >>> res = client.events.list(asset_subtree_ids=[123456], advanced_filter=not_instrument_lvl5)
+ """
+ return run_sync(
+ self.__async_client.events.list(
+ start_time=start_time,
+ end_time=end_time,
+ active_at_time=active_at_time,
+ type=type,
+ subtype=subtype,
+ metadata=metadata,
+ asset_ids=asset_ids,
+ asset_external_ids=asset_external_ids,
+ asset_subtree_ids=asset_subtree_ids,
+ asset_subtree_external_ids=asset_subtree_external_ids,
+ data_set_ids=data_set_ids,
+ data_set_external_ids=data_set_external_ids,
+ source=source,
+ created_time=created_time,
+ last_updated_time=last_updated_time,
+ external_id_prefix=external_id_prefix,
+ sort=sort,
+ partitions=partitions,
+ limit=limit,
+ advanced_filter=advanced_filter,
+ )
+ )
diff --git a/cognite/client/_sync_api/extractionpipelines/__init__.py b/cognite/client/_sync_api/extractionpipelines/__init__.py
new file mode 100644
index 0000000000..01e329ddcb
--- /dev/null
+++ b/cognite/client/_sync_api/extractionpipelines/__init__.py
@@ -0,0 +1,237 @@
+"""
+===============================================================================
+48dfd1c74433af19a2a33d145cae12bb
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import TYPE_CHECKING, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api.extractionpipelines.configs import SyncExtractionPipelineConfigsAPI
+from cognite.client._sync_api.extractionpipelines.runs import SyncExtractionPipelineRunsAPI
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import ExtractionPipeline, ExtractionPipelineList, ExtractionPipelineUpdate
+from cognite.client.data_classes.extractionpipelines import ExtractionPipelineWrite
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncExtractionPipelinesAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+ self.runs = SyncExtractionPipelineRunsAPI(async_client)
+ self.config = SyncExtractionPipelineConfigsAPI(async_client)
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[ExtractionPipeline]: ...
+
+ @overload
+ def __call__(self, chunk_size: int) -> Iterator[ExtractionPipelineList]: ...
+
+ def __call__(
+ self, chunk_size: int | None = None, limit: int | None = None
+ ) -> Iterator[ExtractionPipeline | ExtractionPipelineList]:
+ """
+ Iterate over extraction pipelines
+
+ Args:
+ chunk_size (int | None): Number of extraction pipelines to yield per chunk. Defaults to yielding extraction pipelines one by one.
+ limit (int | None): Limits the number of results to be returned. Defaults to yielding all extraction pipelines.
+
+ Yields:
+ ExtractionPipeline | ExtractionPipelineList: Yields extraction pipelines one by one or in chunks up to the chunk size.
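+
+ Examples:
+
+ Iterate over extraction pipelines one by one (an illustrative sketch mirroring the other iterator APIs):
+
+ >>> from cognite.client import CogniteClient
+ >>> client = CogniteClient()
+ >>> for pipeline in client.extraction_pipelines():
+ ... pipeline # do something with the extraction pipeline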
+ """
+ yield from SyncIterator(self.__async_client.extraction_pipelines(chunk_size=chunk_size, limit=limit))
+
+ def retrieve(self, id: int | None = None, external_id: str | None = None) -> ExtractionPipeline | None:
+ """
+ `Retrieve a single extraction pipeline by id. `_
+
+ Args:
+ id (int | None): ID
+ external_id (str | None): External ID
+
+ Returns:
+ ExtractionPipeline | None: Requested extraction pipeline or None if it does not exist.
+
+ Examples:
+
+ Get extraction pipeline by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.extraction_pipelines.retrieve(id=1)
+
+ Get extraction pipeline by external id:
+
+ >>> res = client.extraction_pipelines.retrieve(external_id="1")
+ """
+ return run_sync(self.__async_client.extraction_pipelines.retrieve(id=id, external_id=external_id))
+
+ def retrieve_multiple(
+ self,
+ ids: Sequence[int] | None = None,
+ external_ids: SequenceNotStr[str] | None = None,
+ ignore_unknown_ids: bool = False,
+ ) -> ExtractionPipelineList:
+ """
+ `Retrieve multiple extraction pipelines by ids and external ids. `_
+
+ Args:
+ ids (Sequence[int] | None): IDs
+ external_ids (SequenceNotStr[str] | None): External IDs
+ ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+
+ Returns:
+ ExtractionPipelineList: The requested ExtractionPipelines.
+
+ Examples:
+
+ Get ExtractionPipelines by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.extraction_pipelines.retrieve_multiple(ids=[1, 2, 3])
+
+ Get extraction pipelines by external id:
+
+ >>> res = client.extraction_pipelines.retrieve_multiple(external_ids=["abc", "def"], ignore_unknown_ids=True)
+ """
+ return run_sync(
+ self.__async_client.extraction_pipelines.retrieve_multiple(
+ ids=ids, external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids
+ )
+ )
+
+ def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> ExtractionPipelineList:
+ """
+ `List extraction pipelines `_
+
+ Args:
+ limit (int | None): Maximum number of ExtractionPipelines to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+
+ Returns:
+ ExtractionPipelineList: List of requested ExtractionPipelines
+
+ Examples:
+
+ List ExtractionPipelines:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> ep_list = client.extraction_pipelines.list(limit=5)
+ """
+ return run_sync(self.__async_client.extraction_pipelines.list(limit=limit))
+
+ @overload
+ def create(self, extraction_pipeline: ExtractionPipeline | ExtractionPipelineWrite) -> ExtractionPipeline: ...
+
+ @overload
+ def create(
+ self, extraction_pipeline: Sequence[ExtractionPipeline] | Sequence[ExtractionPipelineWrite]
+ ) -> ExtractionPipelineList: ...
+
+ def create(
+ self,
+ extraction_pipeline: ExtractionPipeline
+ | ExtractionPipelineWrite
+ | Sequence[ExtractionPipeline]
+ | Sequence[ExtractionPipelineWrite],
+ ) -> ExtractionPipeline | ExtractionPipelineList:
+ """
+ `Create one or more extraction pipelines. `_
+
+ You can create an arbitrary number of extraction pipelines, and the SDK will split the request into multiple requests if necessary.
+
+ Args:
+ extraction_pipeline (ExtractionPipeline | ExtractionPipelineWrite | Sequence[ExtractionPipeline] | Sequence[ExtractionPipelineWrite]): Extraction pipeline or list of extraction pipelines to create.
+
+ Returns:
+ ExtractionPipeline | ExtractionPipelineList: Created extraction pipeline(s)
+
+ Examples:
+
+ Create new extraction pipeline:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import ExtractionPipelineWrite
+ >>> client = CogniteClient()
+ >>> extpipes = [ExtractionPipelineWrite(name="extPipe1",...), ExtractionPipelineWrite(name="extPipe2",...)]
+ >>> res = client.extraction_pipelines.create(extpipes)
+ """
+ return run_sync(self.__async_client.extraction_pipelines.create(extraction_pipeline=extraction_pipeline))
+
+ def delete(
+ self, id: int | Sequence[int] | None = None, external_id: str | SequenceNotStr[str] | None = None
+ ) -> None:
+ """
+ `Delete one or more extraction pipelines `_
+
+ Args:
+ id (int | Sequence[int] | None): Id or list of ids
+ external_id (str | SequenceNotStr[str] | None): External ID or list of external ids
+
+ Examples:
+
+ Delete extraction pipelines by id or external id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.extraction_pipelines.delete(id=[1,2,3], external_id="3")
+ """
+ return run_sync(self.__async_client.extraction_pipelines.delete(id=id, external_id=external_id))
+
+ @overload
+ def update(
+ self, item: ExtractionPipeline | ExtractionPipelineWrite | ExtractionPipelineUpdate
+ ) -> ExtractionPipeline: ...
+
+ @overload
+ def update(
+ self, item: Sequence[ExtractionPipeline | ExtractionPipelineWrite | ExtractionPipelineUpdate]
+ ) -> ExtractionPipelineList: ...
+
+ def update(
+ self,
+ item: ExtractionPipeline
+ | ExtractionPipelineWrite
+ | ExtractionPipelineUpdate
+ | Sequence[ExtractionPipeline | ExtractionPipelineWrite | ExtractionPipelineUpdate],
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> ExtractionPipeline | ExtractionPipelineList:
+ """
+ `Update one or more extraction pipelines `_
+
+ Args:
+ item (ExtractionPipeline | ExtractionPipelineWrite | ExtractionPipelineUpdate | Sequence[ExtractionPipeline | ExtractionPipelineWrite | ExtractionPipelineUpdate]): Extraction pipeline(s) to update
+ mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (ExtractionPipeline or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+
+ Returns:
+ ExtractionPipeline | ExtractionPipelineList: Updated extraction pipeline(s)
+
+ Examples:
+
+ Update an extraction pipeline that you have fetched. This will perform a full update of the extraction pipeline:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import ExtractionPipelineUpdate
+ >>> client = CogniteClient()
+ >>> update = ExtractionPipelineUpdate(id=1)
+ >>> update.description.set("Another new extpipe")
+ >>> res = client.extraction_pipelines.update(update)
+ """
+ return run_sync(self.__async_client.extraction_pipelines.update(item=item, mode=mode))
diff --git a/cognite/client/_sync_api/extractionpipelines/configs.py b/cognite/client/_sync_api/extractionpipelines/configs.py
new file mode 100644
index 0000000000..2eef92cf3f
--- /dev/null
+++ b/cognite/client/_sync_api/extractionpipelines/configs.py
@@ -0,0 +1,118 @@
+"""
+===============================================================================
+28c18023b9534cbfe75f43d798262161
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import ExtractionPipelineConfig, ExtractionPipelineConfigRevisionList
+from cognite.client.data_classes.extractionpipelines import ExtractionPipelineConfigWrite
+from cognite.client.utils._async_helpers import run_sync
+
+
+class SyncExtractionPipelineConfigsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ def retrieve(
+ self, external_id: str, revision: int | None = None, active_at_time: int | None = None
+ ) -> ExtractionPipelineConfig:
+ """
+ `Retrieve a specific configuration revision, or the latest by default `
+
+ By default the latest configuration revision is retrieved, or you can specify a timestamp or a revision number.
+
+ Args:
+ external_id (str): External id of the extraction pipeline to retrieve config from.
+ revision (int | None): Optionally specify a revision number to retrieve.
+ active_at_time (int | None): Optionally specify a timestamp the configuration revision should be active.
+
+ Returns:
+ ExtractionPipelineConfig: Retrieved extraction pipeline configuration revision
+
+ Examples:
+
+ Retrieve latest config revision:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.extraction_pipelines.config.retrieve("extId")
+ """
+ return run_sync(
+ self.__async_client.extraction_pipelines.config.retrieve(
+ external_id=external_id, revision=revision, active_at_time=active_at_time
+ )
+ )
+
+ def list(self, external_id: str) -> ExtractionPipelineConfigRevisionList:
+ """
+ `Retrieve all configuration revisions from an extraction pipeline `
+
+ Args:
+ external_id (str): External id of the extraction pipeline to retrieve config from.
+
+ Returns:
+ ExtractionPipelineConfigRevisionList: Retrieved extraction pipeline configuration revisions
+
+ Examples:
+
+ Retrieve a list of config revisions:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.extraction_pipelines.config.list("extId")
+ """
+ return run_sync(self.__async_client.extraction_pipelines.config.list(external_id=external_id))
+
+ def create(self, config: ExtractionPipelineConfig | ExtractionPipelineConfigWrite) -> ExtractionPipelineConfig:
+ """
+ `Create a new configuration revision `
+
+ Args:
+ config (ExtractionPipelineConfig | ExtractionPipelineConfigWrite): Configuration revision to create.
+
+ Returns:
+ ExtractionPipelineConfig: Created extraction pipeline configuration revision
+
+ Examples:
+
+ Create a config revision:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import ExtractionPipelineConfigWrite
+ >>> client = CogniteClient()
+ >>> res = client.extraction_pipelines.config.create(ExtractionPipelineConfigWrite(external_id="extId", config="my config contents"))
+ """
+ return run_sync(self.__async_client.extraction_pipelines.config.create(config=config))
+
+ def revert(self, external_id: str, revision: int) -> ExtractionPipelineConfig:
+ """
+ `Revert to a previous configuration revision `
+
+ Args:
+ external_id (str): External id of the extraction pipeline to revert revision for.
+ revision (int): Revision to revert to.
+
+ Returns:
+ ExtractionPipelineConfig: New latest extraction pipeline configuration revision.
+
+ Examples:
+
+ Revert a config revision:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.extraction_pipelines.config.revert("extId", 5)
+ """
+ return run_sync(
+ self.__async_client.extraction_pipelines.config.revert(external_id=external_id, revision=revision)
+ )
diff --git a/cognite/client/_sync_api/extractionpipelines/runs.py b/cognite/client/_sync_api/extractionpipelines/runs.py
new file mode 100644
index 0000000000..74e5062d84
--- /dev/null
+++ b/cognite/client/_sync_api/extractionpipelines/runs.py
@@ -0,0 +1,126 @@
+"""
+===============================================================================
+c7e0250a7afdf41370a375942043efcf
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Sequence
+from typing import Any, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._api.extractionpipelines import RunStatus
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import (
+ ExtractionPipelineRun,
+ ExtractionPipelineRunList,
+ TimestampRange,
+)
+from cognite.client.data_classes.extractionpipelines import (
+ ExtractionPipelineRunWrite,
+)
+from cognite.client.utils._async_helpers import run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+
+class SyncExtractionPipelineRunsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ def list(
+ self,
+ external_id: str,
+ statuses: RunStatus | Sequence[RunStatus] | SequenceNotStr[str] | None = None,
+ message_substring: str | None = None,
+ created_time: dict[str, Any] | TimestampRange | str | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ ) -> ExtractionPipelineRunList:
+ """
+ `List runs for an extraction pipeline with given external_id `_
+
+ Args:
+ external_id (str): Extraction pipeline external Id.
+ statuses (RunStatus | Sequence[RunStatus] | SequenceNotStr[str] | None): One or more among "success" / "failure" / "seen".
+ message_substring (str | None): Failure message part.
+ created_time (dict[str, Any] | TimestampRange | str | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as timestamps in ms.
+ If a string is passed, it is assumed to be the minimum value.
+ limit (int | None): Maximum number of ExtractionPipelines to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+
+ Returns:
+ ExtractionPipelineRunList: List of requested extraction pipeline runs
+
+ Tip:
+ The ``created_time`` parameter can also be passed as a string, to support the most typical usage pattern
+ of fetching the most recent runs, meaning it is implicitly assumed to be the minimum created time. The
+ format is "N[timeunit]-ago", where timeunit is w,d,h,m (week, day, hour, minute), e.g. "12d-ago".
+
+ Examples:
+
+ List extraction pipeline runs:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> runs_list = client.extraction_pipelines.runs.list(external_id="test ext id", limit=5)
+
+ Filter extraction pipeline runs on a given status:
+
+ >>> runs_list = client.extraction_pipelines.runs.list(external_id="test ext id", statuses=["seen"], limit=5)
+
+ Get all failed pipeline runs in the last 24 hours for pipeline 'extId':
+
+ >>> from cognite.client.data_classes import ExtractionPipelineRun
+ >>> res = client.extraction_pipelines.runs.list(external_id="extId", statuses="failure", created_time="24h-ago")
+ """
+ return run_sync(
+ self.__async_client.extraction_pipelines.runs.list(
+ external_id=external_id,
+ statuses=statuses,
+ message_substring=message_substring,
+ created_time=created_time,
+ limit=limit,
+ )
+ )
+
+ @overload
+ def create(self, run: ExtractionPipelineRun | ExtractionPipelineRunWrite) -> ExtractionPipelineRun: ...
+
+ @overload
+ def create(
+ self, run: Sequence[ExtractionPipelineRun] | Sequence[ExtractionPipelineRunWrite]
+ ) -> ExtractionPipelineRunList: ...
+
+ def create(
+ self,
+ run: ExtractionPipelineRun
+ | ExtractionPipelineRunWrite
+ | Sequence[ExtractionPipelineRun]
+ | Sequence[ExtractionPipelineRunWrite],
+ ) -> ExtractionPipelineRun | ExtractionPipelineRunList:
+ """
+ `Create one or more extraction pipeline runs. `_
+
+ You can create an arbitrary number of extraction pipeline runs, and the SDK will split the request into multiple requests.
+
+ Args:
+ run (ExtractionPipelineRun | ExtractionPipelineRunWrite | Sequence[ExtractionPipelineRun] | Sequence[ExtractionPipelineRunWrite]): Extraction pipeline run or list of extraction pipeline runs to create.
+
+ Returns:
+ ExtractionPipelineRun | ExtractionPipelineRunList: Created extraction pipeline run(s)
+
+ Examples:
+
+ Report a new extraction pipeline run:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import ExtractionPipelineRunWrite
+ >>> client = CogniteClient()
+ >>> res = client.extraction_pipelines.runs.create(
+ ... ExtractionPipelineRunWrite(status="success", extpipe_external_id="extId"))
+ """
+ return run_sync(self.__async_client.extraction_pipelines.runs.create(run=run))
diff --git a/cognite/client/_sync_api/files.py b/cognite/client/_sync_api/files.py
new file mode 100644
index 0000000000..9b4b267e11
--- /dev/null
+++ b/cognite/client/_sync_api/files.py
@@ -0,0 +1,964 @@
+"""
+===============================================================================
+6c1c42132e1b371d885eb22ff47f5467
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import AsyncIterator, Iterator, Sequence
+from pathlib import Path
+from typing import Any, BinaryIO, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import (
+ FileMetadata,
+ FileMetadataFilter,
+ FileMetadataList,
+ FileMetadataUpdate,
+ FileMetadataWrite,
+ FileMultipartUploadSession,
+ GeoLocation,
+ GeoLocationFilter,
+ Label,
+ LabelFilter,
+ TimestampRange,
+)
+from cognite.client.data_classes.data_modeling import NodeId
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+
+class SyncFilesAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[FileMetadata]: ...
+
+ @overload
+ def __call__(self, chunk_size: int) -> Iterator[FileMetadataList]: ...
+
+ def __call__(
+ self,
+ chunk_size: int | None = None,
+ name: str | None = None,
+ mime_type: str | None = None,
+ metadata: dict[str, str] | None = None,
+ asset_ids: Sequence[int] | None = None,
+ asset_external_ids: SequenceNotStr[str] | None = None,
+ asset_subtree_ids: int | Sequence[int] | None = None,
+ asset_subtree_external_ids: str | SequenceNotStr[str] | None = None,
+ data_set_ids: int | Sequence[int] | None = None,
+ data_set_external_ids: str | SequenceNotStr[str] | None = None,
+ labels: LabelFilter | None = None,
+ geo_location: GeoLocationFilter | None = None,
+ source: str | None = None,
+ created_time: dict[str, Any] | TimestampRange | None = None,
+ last_updated_time: dict[str, Any] | TimestampRange | None = None,
+ source_created_time: dict[str, Any] | TimestampRange | None = None,
+ source_modified_time: dict[str, Any] | TimestampRange | None = None,
+ uploaded_time: dict[str, Any] | TimestampRange | None = None,
+ external_id_prefix: str | None = None,
+ directory_prefix: str | None = None,
+ uploaded: bool | None = None,
+ limit: int | None = None,
+ ) -> Iterator[FileMetadata | FileMetadataList]:
+ """
+ Iterate over files
+
+ Fetches file metadata objects as they are iterated over, so you keep a limited number of metadata objects in memory.
+
+ Args:
+ chunk_size (int | None): Number of files to return in each chunk. Defaults to yielding one file at a time.
+ name (str | None): Name of the file.
+ mime_type (str | None): File type. E.g. text/plain, application/pdf, ...
+ metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value
+ asset_ids (Sequence[int] | None): Only include files that reference these specific asset IDs.
+ asset_external_ids (SequenceNotStr[str] | None): Only include files that reference these specific asset external IDs.
+ asset_subtree_ids (int | Sequence[int] | None): Only include files that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include files that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ data_set_ids (int | Sequence[int] | None): Return only files in the specified data set(s) with this id / these ids.
+ data_set_external_ids (str | SequenceNotStr[str] | None): Return only files in the specified data set(s) with this external id / these external ids.
+ labels (LabelFilter | None): Return only the files matching the specified label(s).
+ geo_location (GeoLocationFilter | None): Only include files matching the specified geographic relation.
+ source (str | None): The source of this file.
+ created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ source_created_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceCreatedTime field has been set and is within the specified range.
+ source_modified_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceModifiedTime field has been set and is within the specified range.
+ uploaded_time (dict[str, Any] | TimestampRange | None): Range between two timestamps
+ external_id_prefix (str | None): External Id provided by client. Should be unique within the project.
+ directory_prefix (str | None): Filter by this (case-sensitive) prefix for the directory provided by the client.
+ uploaded (bool | None): Whether or not the actual file is uploaded. This field is returned only by the API; it has no effect in a POST body.
+ limit (int | None): Maximum number of files to return. Defaults to returning all items.
+
+ Yields:
+ FileMetadata | FileMetadataList: yields FileMetadata one by one if chunk_size is not specified, else FileMetadataList objects.
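+
+ Examples:
+
+ Iterate over file metadata one by one (an illustrative sketch mirroring the other iterator APIs):
+
+ >>> from cognite.client import CogniteClient
+ >>> client = CogniteClient()
+ >>> for file_metadata in client.files():
+ ... file_metadata # do something with the file metadata
+
+ Iterate over chunks of file metadata to reduce memory load:
+
+ >>> for file_list in client.files(chunk_size=2500):
+ ... file_list # do something with the list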
+ """
+ yield from SyncIterator(
+ self.__async_client.files(
+ chunk_size=chunk_size,
+ name=name,
+ mime_type=mime_type,
+ metadata=metadata,
+ asset_ids=asset_ids,
+ asset_external_ids=asset_external_ids,
+ asset_subtree_ids=asset_subtree_ids,
+ asset_subtree_external_ids=asset_subtree_external_ids,
+ data_set_ids=data_set_ids,
+ data_set_external_ids=data_set_external_ids,
+ labels=labels,
+ geo_location=geo_location,
+ source=source,
+ created_time=created_time,
+ last_updated_time=last_updated_time,
+ source_created_time=source_created_time,
+ source_modified_time=source_modified_time,
+ uploaded_time=uploaded_time,
+ external_id_prefix=external_id_prefix,
+ directory_prefix=directory_prefix,
+ uploaded=uploaded,
+ limit=limit,
+ )
+ )
+
+ def create(
+ self, file_metadata: FileMetadata | FileMetadataWrite, overwrite: bool = False
+ ) -> tuple[FileMetadata, str]:
+ """
+ Create file without uploading content.
+
+ Args:
+ file_metadata (FileMetadata | FileMetadataWrite): File metadata for the file to create.
+ overwrite (bool): If 'overwrite' is set to true, and the POST body content specifies an 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings.
+
+ Returns:
+ tuple[FileMetadata, str]: Tuple containing the file metadata and upload url of the created file.
+
+ Examples:
+
+ Create a file:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import FileMetadataWrite
+ >>> client = CogniteClient()
+ >>> file_metadata = FileMetadataWrite(name="MyFile")
+ >>> res = client.files.create(file_metadata)
+ """
+ return run_sync(self.__async_client.files.create(file_metadata=file_metadata, overwrite=overwrite))
+
+ def retrieve(
+ self, id: int | None = None, external_id: str | None = None, instance_id: NodeId | None = None
+ ) -> FileMetadata | None:
+ """
+ `Retrieve a single file metadata by id. `_
+
+ Args:
+ id (int | None): ID
+ external_id (str | None): External ID
+ instance_id (NodeId | None): Instance ID
+
+ Returns:
+ FileMetadata | None: Requested file metadata or None if it does not exist.
+
+ Examples:
+
+ Get file metadata by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.files.retrieve(id=1)
+
+ Get file metadata by external id:
+
+ >>> res = client.files.retrieve(external_id="1")
+ """
+ return run_sync(self.__async_client.files.retrieve(id=id, external_id=external_id, instance_id=instance_id))
+
+ def retrieve_multiple(
+ self,
+ ids: Sequence[int] | None = None,
+ external_ids: SequenceNotStr[str] | None = None,
+ instance_ids: Sequence[NodeId] | None = None,
+ ignore_unknown_ids: bool = False,
+ ) -> FileMetadataList:
+ """
+ `Retrieve multiple file metadatas by id. `_
+
+ Args:
+ ids (Sequence[int] | None): IDs
+ external_ids (SequenceNotStr[str] | None): External IDs
+ instance_ids (Sequence[NodeId] | None): Instance IDs
+ ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+
+ Returns:
+ FileMetadataList: The requested file metadatas.
+
+ Examples:
+
+ Get file metadatas by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.files.retrieve_multiple(ids=[1, 2, 3])
+
+ Get file metadatas by external id:
+
+ >>> res = client.files.retrieve_multiple(external_ids=["abc", "def"])
+ """
+ return run_sync(
+ self.__async_client.files.retrieve_multiple(
+ ids=ids, external_ids=external_ids, instance_ids=instance_ids, ignore_unknown_ids=ignore_unknown_ids
+ )
+ )
+
+ def aggregate_count(self, filter: FileMetadataFilter | dict[str, Any] | None = None) -> int:
+ """
+ `Aggregate files `_
+
+ Args:
+ filter (FileMetadataFilter | dict[str, Any] | None): Filter on file metadata filter with exact match
+
+ Returns:
+ int: Count of files matching the filter.
+
+ Examples:
+
+ Get the count of files that have been uploaded:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> aggregate_uploaded = client.files.aggregate_count(filter={"uploaded": True})
+ """
+ return run_sync(self.__async_client.files.aggregate_count(filter=filter))
+
+ def delete(
+ self,
+ id: int | Sequence[int] | None = None,
+ external_id: str | SequenceNotStr[str] | None = None,
+ ignore_unknown_ids: bool = False,
+ ) -> None:
+ """
+ `Delete files `_
+
+ Args:
+ id (int | Sequence[int] | None): Id or list of ids
+ external_id (str | SequenceNotStr[str] | None): str or list of str
+ ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+
+ Examples:
+
+ Delete files by id or external id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.files.delete(id=[1,2,3], external_id="3")
+ """
+ return run_sync(
+ self.__async_client.files.delete(id=id, external_id=external_id, ignore_unknown_ids=ignore_unknown_ids)
+ )
+
+ @overload
+ def update(
+ self,
+ item: FileMetadata | FileMetadataWrite | FileMetadataUpdate,
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> FileMetadata: ...
+
+ @overload
+ def update(
+ self,
+ item: Sequence[FileMetadata | FileMetadataWrite | FileMetadataUpdate],
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> FileMetadataList: ...
+
+ def update(
+ self,
+ item: FileMetadata
+ | FileMetadataWrite
+ | FileMetadataUpdate
+ | Sequence[FileMetadata | FileMetadataWrite | FileMetadataUpdate],
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> FileMetadata | FileMetadataList:
+ """
+ `Update files `_
+ Currently, a full replacement of labels on a file is not supported (only partial add/remove updates). See the example below for how to perform a partial labels update.
+
+ Args:
+ item (FileMetadata | FileMetadataWrite | FileMetadataUpdate | Sequence[FileMetadata | FileMetadataWrite | FileMetadataUpdate]): file(s) to update.
+ mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (FilesMetadata or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+
+ Returns:
+ FileMetadata | FileMetadataList: The updated files.
+
+ Examples:
+
+ Update file metadata that you have fetched. This will perform a full update of the file metadata:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> file_metadata = client.files.retrieve(id=1)
+ >>> file_metadata.description = "New description"
+ >>> res = client.files.update(file_metadata)
+
+ Perform a partial update on file metadata, updating the source and adding a new field to metadata:
+
+ >>> from cognite.client.data_classes import FileMetadataUpdate
+ >>> my_update = FileMetadataUpdate(id=1).source.set("new source").metadata.add({"key": "value"})
+ >>> res = client.files.update(my_update)
+
+ Attach labels to a file:
+
+ >>> from cognite.client.data_classes import FileMetadataUpdate
+ >>> my_update = FileMetadataUpdate(id=1).labels.add(["PUMP", "VERIFIED"])
+ >>> res = client.files.update(my_update)
+
+ Detach a single label from a file:
+
+ >>> from cognite.client.data_classes import FileMetadataUpdate
+ >>> my_update = FileMetadataUpdate(id=1).labels.remove("PUMP")
+ >>> res = client.files.update(my_update)
+ """
+ return run_sync(self.__async_client.files.update(item=item, mode=mode))
+
+ def search(
+ self,
+ name: str | None = None,
+ filter: FileMetadataFilter | dict[str, Any] | None = None,
+ limit: int = DEFAULT_LIMIT_READ,
+ ) -> FileMetadataList:
+ """
+ `Search for files. `_
+ Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required.
+
+ Args:
+ name (str | None): Prefix and fuzzy search on name.
+ filter (FileMetadataFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields.
+ limit (int): Max number of results to return.
+
+ Returns:
+ FileMetadataList: List of requested files metadata.
+
+ Examples:
+
+ Search for a file:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.files.search(name="some name")
+
+ Search for a file with an attached label:
+
+ >>> from cognite.client.data_classes import FileMetadataFilter, LabelFilter
+ >>> my_label_filter = LabelFilter(contains_all=["WELL LOG"])
+ >>> res = client.files.search(name="xyz", filter=FileMetadataFilter(labels=my_label_filter))
+ """
+ return run_sync(self.__async_client.files.search(name=name, filter=filter, limit=limit))
+
+ def upload_content(
+ self, path: Path, external_id: str | None = None, instance_id: NodeId | None = None
+ ) -> FileMetadata:
+ """
+ `Upload a file content `_
+
+ Args:
+ path (Path): Path to the file you wish to upload.
+ external_id (str | None): The external ID provided by the client. Must be unique within the project.
+ instance_id (NodeId | None): Instance ID of the file.
+
+ Returns:
+ FileMetadata: The file metadata of the file whose content was uploaded.
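+
+ Examples:
+
+ Upload content for a file whose metadata already exists in CDF (an illustrative sketch; the path and "my_file_xid" are placeholders):
+
+ >>> from pathlib import Path
+ >>> from cognite.client import CogniteClient
+ >>> client = CogniteClient()
+ >>> res = client.files.upload_content(Path("/path/to/file.txt"), external_id="my_file_xid")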
+ """
+ return run_sync(
+ self.__async_client.files.upload_content(path=path, external_id=external_id, instance_id=instance_id)
+ )
+
+ def upload(
+ self,
+ path: Path,
+ external_id: str | None = None,
+ name: str | None = None,
+ source: str | None = None,
+ mime_type: str | None = None,
+ metadata: dict[str, str] | None = None,
+ directory: str | None = None,
+ asset_ids: Sequence[int] | None = None,
+ source_created_time: int | None = None,
+ source_modified_time: int | None = None,
+ data_set_id: int | None = None,
+ labels: Sequence[Label] | None = None,
+ geo_location: GeoLocation | None = None,
+ security_categories: Sequence[int] | None = None,
+ recursive: bool = False,
+ overwrite: bool = False,
+ ) -> FileMetadata | FileMetadataList:
+ """
+ `Upload a file `_
+
+ Args:
+ path (Path): Path to the file you wish to upload. If path is a directory, this method will upload all files in that directory.
+ external_id (str | None): The external ID provided by the client. Must be unique within the project.
+ name (str | None): Name of the file.
+ source (str | None): The source of the file.
+ mime_type (str | None): File type. E.g. text/plain, application/pdf, ...
+ metadata (dict[str, str] | None): Customizable extra data about the file. String key -> String value.
+ directory (str | None): The directory to be associated with this file. Must be an absolute, unix-style path.
+ asset_ids (Sequence[int] | None): IDs of the assets this file relates to.
+ source_created_time (int | None): The timestamp for when the file was originally created in the source system.
+ source_modified_time (int | None): The timestamp for when the file was last modified in the source system.
+ data_set_id (int | None): ID of the data set.
+ labels (Sequence[Label] | None): A list of the labels associated with this resource item.
+ geo_location (GeoLocation | None): The geographic metadata of the file.
+ security_categories (Sequence[int] | None): Security categories to attach to this file.
+ recursive (bool): If path is a directory, upload all contained files recursively.
+ overwrite (bool): If 'overwrite' is set to true, and the POST body content specifies an 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings.
+
+ Returns:
+ FileMetadata | FileMetadataList: The file metadata of the uploaded file(s).
+
+ Examples:
+
+ Upload a file in a given path:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> from pathlib import Path
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> my_file = Path("/path/to/file.txt")
+ >>> res = client.files.upload(my_file, name="my_file")
+
+ If name is omitted, this method will use the name of the file (file.txt in the example above):
+
+ >>> res = client.files.upload(my_file)
+
+ You can also upload all files in a directory by setting path to the path of a directory
+ (filenames will be automatically used for `name`):
+
+ >>> upload_dir = Path("/path/to/my/directory")
+ >>> res = client.files.upload(upload_dir)
+
+ You can also upload all files in a directory recursively by passing `recursive=True`:
+
+ >>> res = client.files.upload(upload_dir, recursive=True)
+
+ Upload a file with a label:
+
+ >>> from cognite.client.data_classes import Label
+ >>> res = client.files.upload(my_file, name="my_file", labels=[Label(external_id="WELL LOG")])
+
+ Upload a file with a geo_location:
+
+ >>> from cognite.client.data_classes import GeoLocation, Geometry
+ >>> geometry = Geometry(type="LineString", coordinates=[[30, 10], [10, 30], [40, 40]])
+ >>> res = client.files.upload(my_file, geo_location=GeoLocation(type="Feature", geometry=geometry))
+ """
+ return run_sync(
+ self.__async_client.files.upload(
+ path=path,
+ external_id=external_id,
+ name=name,
+ source=source,
+ mime_type=mime_type,
+ metadata=metadata,
+ directory=directory,
+ asset_ids=asset_ids,
+ source_created_time=source_created_time,
+ source_modified_time=source_modified_time,
+ data_set_id=data_set_id,
+ labels=labels,
+ geo_location=geo_location,
+ security_categories=security_categories,
+ recursive=recursive,
+ overwrite=overwrite,
+ )
+ )
+
+ def upload_content_bytes(
+ self, content: str | bytes | BinaryIO, external_id: str | None = None, instance_id: NodeId | None = None
+ ) -> FileMetadata:
+ """
+ Upload bytes or string (UTF-8 assumed).
+
+ Note that the maximum file size is 5GiB. In order to upload larger files use `multipart_upload_content_session`.
+
+ Args:
+ content (str | bytes | BinaryIO): The content to upload.
+ external_id (str | None): The external ID provided by the client. Must be unique within the project.
+ instance_id (NodeId | None): Instance ID of the file.
+
+ Returns:
+ FileMetadata: The file metadata of the file whose content was uploaded.
+
+ Examples:
+
+ Finish a file creation by uploading the content using external_id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.files.upload_content_bytes(
+ ... b"some content", external_id="my_file_xid")
+
+ ...or by using instance_id:
+
+ >>> from cognite.client.data_classes.data_modeling import NodeId
+ >>> res = client.files.upload_content_bytes(
+ ... b"some content", instance_id=NodeId("my-space", "my_file_xid"))
+ """
+ return run_sync(
+ self.__async_client.files.upload_content_bytes(
+ content=content, external_id=external_id, instance_id=instance_id
+ )
+ )
+
+ def upload_bytes(
+ self,
+ content: str | bytes | BinaryIO | AsyncIterator[bytes],
+ name: str,
+ external_id: str | None = None,
+ source: str | None = None,
+ mime_type: str | None = None,
+ metadata: dict[str, str] | None = None,
+ directory: str | None = None,
+ asset_ids: Sequence[int] | None = None,
+ data_set_id: int | None = None,
+ labels: Sequence[Label] | None = None,
+ geo_location: GeoLocation | None = None,
+ source_created_time: int | None = None,
+ source_modified_time: int | None = None,
+ security_categories: Sequence[int] | None = None,
+ overwrite: bool = False,
+ ) -> FileMetadata:
+ """
+ Upload bytes or string.
+
+ You can also pass a file handle to 'content'. The file must be opened in binary mode or an error will be raised.
+
+ Note that the maximum file size is 5GiB. In order to upload larger files use `multipart_upload_session`.
+
+ Args:
+ content (str | bytes | BinaryIO | AsyncIterator[bytes]): The content to upload.
+ name (str): Name of the file.
+ external_id (str | None): The external ID provided by the client. Must be unique within the project.
+ source (str | None): The source of the file.
+ mime_type (str | None): File type. E.g. text/plain, application/pdf,...
+ metadata (dict[str, str] | None): Customizable extra data about the file. String key -> String value.
+ directory (str | None): The directory to be associated with this file. Must be an absolute, unix-style path.
+ asset_ids (Sequence[int] | None): IDs of the assets this file relates to.
+ data_set_id (int | None): Id of the data set.
+ labels (Sequence[Label] | None): A list of the labels associated with this resource item.
+ geo_location (GeoLocation | None): The geographic metadata of the file.
+ source_created_time (int | None): The timestamp for when the file was originally created in the source system.
+ source_modified_time (int | None): The timestamp for when the file was last modified in the source system.
+ security_categories (Sequence[int] | None): Security categories to attach to this file.
+ overwrite (bool): If 'overwrite' is set to true, and the POST body content specifies an 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings.
+
+ Returns:
+ FileMetadata: The metadata of the uploaded file.
+
+ Examples:
+
+ Upload a file from memory:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.files.upload_bytes(b"some content", name="my_file", asset_ids=[1,2,3])
+ """
+ return run_sync(
+ self.__async_client.files.upload_bytes(
+ content=content,
+ name=name,
+ external_id=external_id,
+ source=source,
+ mime_type=mime_type,
+ metadata=metadata,
+ directory=directory,
+ asset_ids=asset_ids,
+ data_set_id=data_set_id,
+ labels=labels,
+ geo_location=geo_location,
+ source_created_time=source_created_time,
+ source_modified_time=source_modified_time,
+ security_categories=security_categories,
+ overwrite=overwrite,
+ )
+ )
+
+ def multipart_upload_session(
+ self,
+ name: str,
+ parts: int,
+ external_id: str | None = None,
+ source: str | None = None,
+ mime_type: str | None = None,
+ metadata: dict[str, str] | None = None,
+ directory: str | None = None,
+ asset_ids: Sequence[int] | None = None,
+ data_set_id: int | None = None,
+ labels: Sequence[Label] | None = None,
+ geo_location: GeoLocation | None = None,
+ source_created_time: int | None = None,
+ source_modified_time: int | None = None,
+ security_categories: Sequence[int] | None = None,
+ overwrite: bool = False,
+ ) -> FileMultipartUploadSession:
+ """
+ Begin uploading a file in multiple parts. This allows uploading files larger than 5GiB.
+ Note that the size of each part may not exceed 4000MiB, and the size of each part except the last
+ must be greater than 5MiB.
+
+ The file chunks may be uploaded in any order, and in parallel, but the client must ensure that
+ the parts are stored in the correct order by uploading each chunk to the correct upload URL.
+
+ This returns a context manager you must enter (using the `with` keyword), then call `upload_part`
+ for each part before exiting.
+
+ Args:
+ name (str): Name of the file.
+ parts (int): The number of parts to upload, must be between 1 and 250.
+ external_id (str | None): The external ID provided by the client. Must be unique within the project.
+ source (str | None): The source of the file.
+ mime_type (str | None): File type. E.g. text/plain, application/pdf,...
+ metadata (dict[str, str] | None): Customizable extra data about the file. String key -> String value.
+ directory (str | None): The directory to be associated with this file. Must be an absolute, unix-style path.
+ asset_ids (Sequence[int] | None): IDs of the assets this file relates to.
+ data_set_id (int | None): Id of the data set.
+ labels (Sequence[Label] | None): A list of the labels associated with this resource item.
+ geo_location (GeoLocation | None): The geographic metadata of the file.
+ source_created_time (int | None): The timestamp for when the file was originally created in the source system.
+ source_modified_time (int | None): The timestamp for when the file was last modified in the source system.
+ security_categories (Sequence[int] | None): Security categories to attach to this file.
+ overwrite (bool): If 'overwrite' is set to true, and the POST body content specifies an 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings.
+
+ Returns:
+ FileMultipartUploadSession: Object containing metadata about the created file, and information needed to upload the file content. Use this object to manage the file upload, and `exit` it once all parts are uploaded.
+
+ Examples:
+
+ Upload binary data in two chunks:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> with client.files.multipart_upload_session("my_file.txt", parts=2) as session:
+ ... # Note that the minimum chunk size is 5 MiB.
+ ... session.upload_part(0, "hello" * 1_200_000)
+ ... session.upload_part(1, " world")
+ """
+ return run_sync(
+ self.__async_client.files.multipart_upload_session(
+ name=name,
+ parts=parts,
+ external_id=external_id,
+ source=source,
+ mime_type=mime_type,
+ metadata=metadata,
+ directory=directory,
+ asset_ids=asset_ids,
+ data_set_id=data_set_id,
+ labels=labels,
+ geo_location=geo_location,
+ source_created_time=source_created_time,
+ source_modified_time=source_modified_time,
+ security_categories=security_categories,
+ overwrite=overwrite,
+ )
+ )
+
+ def multipart_upload_content_session(
+ self, parts: int, external_id: str | None = None, instance_id: NodeId | None = None
+ ) -> FileMultipartUploadSession:
+ """
+ Begin uploading a file in multiple parts whose metadata is already created in CDF. This allows uploading files larger than 5GiB.
+ Note that the size of each part may not exceed 4000MiB, and the size of each part except the last
+ must be greater than 5MiB.
+
+ The file chunks may be uploaded in any order, and in parallel, but the client must ensure that
+ the parts are stored in the correct order by uploading each chunk to the correct upload URL.
+
+ This returns a context manager (that also supports async) you must enter (using the `with` keyword, or `async with`), then call `upload_part`
+ for each part before exiting, which will automatically finalize the multipart upload.
+
+ Args:
+ parts (int): The number of parts to upload, must be between 1 and 250.
+ external_id (str | None): The external ID provided by the client. Must be unique within the project.
+ instance_id (NodeId | None): Instance ID of the file.
+
+ Returns:
+ FileMultipartUploadSession: Object containing metadata about the created file, and information needed to upload the file content. Use this object to manage the file upload, and `exit` it once all parts are uploaded.
+
+ Examples:
+
+ Upload binary data in two chunks:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> with client.files.multipart_upload_content_session(external_id="external-id", parts=2) as session:
+ ... # Note that the minimum chunk size is 5 MiB.
+ ... session.upload_part(0, "hello" * 1_200_000)
+ ... session.upload_part(1, " world")
+ """
+ return run_sync(
+ self.__async_client.files.multipart_upload_content_session(
+ parts=parts, external_id=external_id, instance_id=instance_id
+ )
+ )
+
+ def retrieve_download_urls(
+ self,
+ id: int | Sequence[int] | None = None,
+ external_id: str | SequenceNotStr[str] | None = None,
+ instance_id: NodeId | Sequence[NodeId] | None = None,
+ extended_expiration: bool = False,
+ ) -> dict[int | str | NodeId, str]:
+ """
+ Get download links by id or external id
+
+ Args:
+ id (int | Sequence[int] | None): Id or list of ids.
+ external_id (str | SequenceNotStr[str] | None): External id or list of external ids.
+ instance_id (NodeId | Sequence[NodeId] | None): Instance id or list of instance ids.
+ extended_expiration (bool): Extend expiration time of download url to 1 hour. Defaults to false.
+
+ Returns:
+ dict[int | str | NodeId, str]: Dictionary containing download urls.
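+
+ Examples:
+
+ Get download urls by id and external id (a minimal sketch; the identifiers are placeholders):
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> urls = client.files.retrieve_download_urls(id=[1, 2], external_id="file-xid")
+ >>> # the returned dictionary maps each requested identifier to its download url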
+ """
+ return run_sync(
+ self.__async_client.files.retrieve_download_urls(
+ id=id, external_id=external_id, instance_id=instance_id, extended_expiration=extended_expiration
+ )
+ )
+
+ def download(
+ self,
+ directory: str | Path,
+ id: int | Sequence[int] | None = None,
+ external_id: str | SequenceNotStr[str] | None = None,
+ instance_id: NodeId | Sequence[NodeId] | None = None,
+ keep_directory_structure: bool = False,
+ resolve_duplicate_file_names: bool = False,
+ ) -> None:
+ """
+ `Download files by id or external id. `_
+
+ This method will stream all files to disk, never keeping more than 2MB in memory per worker.
+ The files will be stored in the provided directory using the file name retrieved from the file metadata in CDF.
+ You can also choose to keep the directory structure from CDF so that the files will be stored in subdirectories
+ matching the directory attribute on the files. When missing, the (root) directory is used.
+ By default, when several files would be written to the same local path, only one of them is kept.
+ Pass resolve_duplicate_file_names=True to instead resolve the collision by appending a number to the file name.
+
+ Warning:
+ When downloading several files at once, be aware that file name collisions cause all but one of the
+ colliding files to be skipped. A warning listing the affected files is issued when this happens.
+
+ Args:
+ directory (str | Path): Directory to download the file(s) to.
+ id (int | Sequence[int] | None): Id or list of ids
+ external_id (str | SequenceNotStr[str] | None): External ID or list of external ids.
+ instance_id (NodeId | Sequence[NodeId] | None): Instance ID or list of instance ids.
+ keep_directory_structure (bool): Whether or not to keep the directory hierarchy in CDF,
+ creating subdirectories as needed below the given directory.
+ resolve_duplicate_file_names (bool): Whether or not to resolve duplicate file names by appending a number to the file name.
+
+ Examples:
+
+ Download files by id and external id into directory 'my_directory':
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.files.download(directory="my_directory", id=[1,2,3], external_id=["abc", "def"])
+
+ Download files by id to the current directory:
+
+ >>> client.files.download(directory=".", id=[1,2,3])
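+
+ Keep the directory structure from CDF and number duplicate file names instead of keeping only one of them
+ (a sketch using the arguments documented above):
+
+ >>> client.files.download(
+ ... directory="my_directory",
+ ... id=[1, 2, 3],
+ ... keep_directory_structure=True,
+ ... resolve_duplicate_file_names=True,
+ ... )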
+ """
+ return run_sync(
+ self.__async_client.files.download(
+ directory=directory,
+ id=id,
+ external_id=external_id,
+ instance_id=instance_id,
+ keep_directory_structure=keep_directory_structure,
+ resolve_duplicate_file_names=resolve_duplicate_file_names,
+ )
+ )
+
+ def download_to_path(
+ self, path: Path, id: int | None = None, external_id: str | None = None, instance_id: NodeId | None = None
+ ) -> None:
+ """
+ Download a file to a specific target.
+
+ Args:
+ path (Path): The path in which to place the file.
+ id (int | None): Id of the file to download.
+ external_id (str | None): External id of the file to download.
+ instance_id (NodeId | None): Instance id of the file to download.
+
+ Examples:
+
+ Download a file by id:
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.files.download_to_path("~/mydir/my_downloaded_file.txt", id=123)
+ """
+ return run_sync(
+ self.__async_client.files.download_to_path(
+ path=path, id=id, external_id=external_id, instance_id=instance_id
+ )
+ )
+
+ def download_bytes(
+ self, id: int | None = None, external_id: str | None = None, instance_id: NodeId | None = None
+ ) -> bytes:
+ """
+ Download a file as bytes.
+
+ Args:
+ id (int | None): Id of the file
+ external_id (str | None): External id of the file
+ instance_id (NodeId | None): Instance id of the file
+
+ Examples:
+
+ Download a file's content into memory:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> file_content = client.files.download_bytes(id=1)
+
+ Returns:
+ bytes: The file in binary format
+ """
+ return run_sync(
+ self.__async_client.files.download_bytes(id=id, external_id=external_id, instance_id=instance_id)
+ )
+
+ def list(
+ self,
+ name: str | None = None,
+ mime_type: str | None = None,
+ metadata: dict[str, str] | None = None,
+ asset_ids: Sequence[int] | None = None,
+ asset_external_ids: SequenceNotStr[str] | None = None,
+ asset_subtree_ids: int | Sequence[int] | None = None,
+ asset_subtree_external_ids: str | SequenceNotStr[str] | None = None,
+ data_set_ids: int | Sequence[int] | None = None,
+ data_set_external_ids: str | SequenceNotStr[str] | None = None,
+ labels: LabelFilter | None = None,
+ geo_location: GeoLocationFilter | None = None,
+ source: str | None = None,
+ created_time: dict[str, Any] | TimestampRange | None = None,
+ last_updated_time: dict[str, Any] | TimestampRange | None = None,
+ source_created_time: dict[str, Any] | TimestampRange | None = None,
+ source_modified_time: dict[str, Any] | TimestampRange | None = None,
+ uploaded_time: dict[str, Any] | TimestampRange | None = None,
+ external_id_prefix: str | None = None,
+ directory_prefix: str | None = None,
+ uploaded: bool | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ partitions: int | None = None,
+ ) -> FileMetadataList:
+ """
+ `List files `_
+
+ Args:
+ name (str | None): Name of the file.
+ mime_type (str | None): File type. E.g. text/plain, application/pdf, ..
+ metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value
+ asset_ids (Sequence[int] | None): Only include files that reference these specific asset IDs.
+ asset_external_ids (SequenceNotStr[str] | None): Only include files that reference these specific asset external IDs.
+ asset_subtree_ids (int | Sequence[int] | None): Only include files that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include files that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ data_set_ids (int | Sequence[int] | None): Return only files in the specified data set(s) with this id / these ids.
+ data_set_external_ids (str | SequenceNotStr[str] | None): Return only files in the specified data set(s) with this external id / these external ids.
+ labels (LabelFilter | None): Return only the files matching the specified label filter(s).
+ geo_location (GeoLocationFilter | None): Only include files matching the specified geographic relation.
+ source (str | None): The source of this event.
+ created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ source_created_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceCreatedTime field has been set and is within the specified range.
+ source_modified_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceModifiedTime field has been set and is within the specified range.
+ uploaded_time (dict[str, Any] | TimestampRange | None): Range between two timestamps
+ external_id_prefix (str | None): External Id provided by client. Should be unique within the project.
+ directory_prefix (str | None): Filter by this (case-sensitive) prefix for the directory provided by the client.
+ uploaded (bool | None): Whether or not the actual file is uploaded. This field is returned only by the API; it has no effect in a post body.
+ limit (int | None): Max number of files to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`).
+
+ Returns:
+ FileMetadataList: The requested files.
+
+ Examples:
+
+ List files metadata and filter on external id prefix:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> file_list = client.files.list(limit=5, external_id_prefix="prefix")
+
+ Iterate over files metadata, one-by-one:
+
+ >>> for file_metadata in client.files():
+ ... file_metadata # do something with the file metadata
+
+ Iterate over chunks of files metadata to reduce memory load:
+
+ >>> for file_list in client.files(chunk_size=2500):
+ ... file_list # do something with the files
+
+ Filter files based on labels:
+
+ >>> from cognite.client.data_classes import LabelFilter
+ >>> my_label_filter = LabelFilter(contains_all=["WELL LOG", "VERIFIED"])
+ >>> file_list = client.files.list(labels=my_label_filter)
+
+ Filter files based on geoLocation:
+
+ >>> from cognite.client.data_classes import GeoLocationFilter, GeometryFilter
+ >>> my_geo_location_filter = GeoLocationFilter(relation="intersects", shape=GeometryFilter(type="Point", coordinates=[35,10]))
+ >>> file_list = client.files.list(geo_location=my_geo_location_filter)
+ """
+ return run_sync(
+ self.__async_client.files.list(
+ name=name,
+ mime_type=mime_type,
+ metadata=metadata,
+ asset_ids=asset_ids,
+ asset_external_ids=asset_external_ids,
+ asset_subtree_ids=asset_subtree_ids,
+ asset_subtree_external_ids=asset_subtree_external_ids,
+ data_set_ids=data_set_ids,
+ data_set_external_ids=data_set_external_ids,
+ labels=labels,
+ geo_location=geo_location,
+ source=source,
+ created_time=created_time,
+ last_updated_time=last_updated_time,
+ source_created_time=source_created_time,
+ source_modified_time=source_modified_time,
+ uploaded_time=uploaded_time,
+ external_id_prefix=external_id_prefix,
+ directory_prefix=directory_prefix,
+ uploaded=uploaded,
+ limit=limit,
+ partitions=partitions,
+ )
+ )
diff --git a/cognite/client/_sync_api/functions/__init__.py b/cognite/client/_sync_api/functions/__init__.py
new file mode 100644
index 0000000000..5b8b53e461
--- /dev/null
+++ b/cognite/client/_sync_api/functions/__init__.py
@@ -0,0 +1,448 @@
+"""
+===============================================================================
+f9f881cace1e6f653087b987c4df3c92
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import TYPE_CHECKING, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api.functions.calls import SyncFunctionCallsAPI
+from cognite.client._sync_api.functions.schedules import SyncFunctionSchedulesAPI
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import (
+ Function,
+ FunctionCall,
+ FunctionList,
+ FunctionsLimits,
+ TimestampRange,
+)
+from cognite.client.data_classes.functions import (
+ HANDLER_FILE_NAME,
+ FunctionHandle,
+ FunctionsStatus,
+ FunctionStatus,
+ FunctionWrite,
+ RunTime,
+)
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncFunctionsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+ self.calls = SyncFunctionCallsAPI(async_client)
+ self.schedules = SyncFunctionSchedulesAPI(async_client)
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[Function]: ...
+
+ @overload
+ def __call__(self, chunk_size: int) -> Iterator[FunctionList]: ...
+
+ def __call__(
+ self,
+ chunk_size: int | None = None,
+ name: str | None = None,
+ owner: str | None = None,
+ file_id: int | None = None,
+ status: FunctionStatus | None = None,
+ external_id_prefix: str | None = None,
+ created_time: dict[Literal["min", "max"], int] | TimestampRange | None = None,
+ metadata: dict[str, str] | None = None,
+ limit: int | None = None,
+ ) -> Iterator[Function | FunctionList]:
+ """
+ Iterate over functions.
+
+ Args:
+ chunk_size (int | None): Number of functions to yield per chunk. Defaults to yielding functions one by one.
+ name (str | None): The name of the function.
+ owner (str | None): Owner of the function.
+ file_id (int | None): The file ID of the zip-file used to create the function.
+ status (FunctionStatus | None): Status of the function. Possible values: ["Queued", "Deploying", "Ready", "Failed"].
+ external_id_prefix (str | None): External ID prefix to filter on.
+ created_time (dict[Literal['min', 'max'], int] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ metadata (dict[str, str] | None): Custom, application-specific metadata key/value pairs to filter on.
+ limit (int | None): Maximum number of functions to return. Defaults to yielding all functions.
+
+ Yields:
+ Function | FunctionList: An iterator over functions.
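+
+ Examples:
+
+ Iterate over functions one-by-one (a minimal sketch; any of the filters above can be passed as keyword arguments):
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> for function in client.functions():
+ ... function # do something with the function
+
+ Iterate over chunks of functions to reduce memory load:
+
+ >>> for function_list in client.functions(chunk_size=100):
+ ... function_list # do something with the functions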
+ """
+ yield from SyncIterator(
+ self.__async_client.functions(
+ chunk_size=chunk_size,
+ name=name,
+ owner=owner,
+ file_id=file_id,
+ status=status,
+ external_id_prefix=external_id_prefix,
+ created_time=created_time,
+ metadata=metadata,
+ limit=limit,
+ )
+ )
+
+ def create(
+ self,
+ name: str | FunctionWrite,
+ folder: str | None = None,
+ file_id: int | None = None,
+ function_path: str = HANDLER_FILE_NAME,
+ function_handle: FunctionHandle | None = None,
+ external_id: str | None = None,
+ description: str | None = None,
+ owner: str | None = None,
+ secrets: dict[str, str] | None = None,
+ env_vars: dict[str, str] | None = None,
+ cpu: float | None = None,
+ memory: float | None = None,
+ runtime: RunTime | None = None,
+ metadata: dict[str, str] | None = None,
+ index_url: str | None = None,
+ extra_index_urls: list[str] | None = None,
+ skip_folder_validation: bool = False,
+ data_set_id: int | None = None,
+ ) -> Function:
+ """
+ `When creating a function, `_
+ the source code can be specified in one of three ways:
+
+ - Via the `folder` argument, which is the path to the folder where the source code is located. `function_path` must point to a python file in the folder within which a function named `handle` must be defined.
+ - Via the `file_id` argument, which is the ID of a zip-file uploaded to the files API. `function_path` must point to a python file in the zipped folder within which a function named `handle` must be defined.
+ - Via the `function_handle` argument, which is a reference to a function object, which must be named `handle`.
+
+ The function named `handle` is the entrypoint of the created function. Valid arguments to `handle` are `data`, `client`, `secrets` and `function_call_info`:
+ - If the user calls the function with input data, this is passed through the `data` argument.
+ - If the user gives one or more secrets when creating the function, these are passed through the `secrets` argument.
+ - Data about the function call can be accessed via the argument `function_call_info`, which is a dictionary with keys `function_id`, `call_id`, and, if the call is scheduled, `schedule_id` and `scheduled_time`.
+
+ By default, the function is deployed with the latest version of cognite-sdk. If a specific version is desired, it can be specified either in a requirements.txt file when deploying via the `folder` argument or between `[requirements]` tags when deploying via the `function_handle` argument (see example below).
+
+ For help with troubleshooting, please see `this page. `_
+
+ Args:
+ name (str | FunctionWrite): The name of the function or a FunctionWrite object. If a FunctionWrite
+ object is passed, all other arguments are ignored.
+ folder (str | None): Path to the folder where the function source code is located.
+ file_id (int | None): File ID of the code uploaded to the Files API.
+ function_path (str): Relative path from the root folder to the file containing the `handle` function. Defaults to `handler.py`. Must be on POSIX path format.
+ function_handle (FunctionHandle | None): Reference to a function object, which must be named `handle`.
+ external_id (str | None): External id of the function.
+ description (str | None): Description of the function.
+ owner (str | None): Owner of this function. Typically used to know who created it.
+ secrets (dict[str, str] | None): Additional secrets as key/value pairs. These can e.g. be passwords to simulators or other data sources. Keys must be lowercase characters, numbers or dashes (-) and at most 15 characters. You can create at most 30 secrets, and all keys must be unique.
+ env_vars (dict[str, str] | None): Environment variables as key/value pairs. Keys can contain only letters, numbers or the underscore character. You can create at most 100 environment variables.
+ cpu (float | None): Number of CPU cores per function. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used.
+ memory (float | None): Memory per function measured in GB. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used.
+ runtime (RunTime | None): The function runtime. Valid values are ["py310", "py311", "py312", `None`], and `None` translates to the API default which will change over time. The runtime "py312" resolves to the latest version of the Python 3.12 series.
+ metadata (dict[str, str] | None): Metadata for the function as key/value pairs. Keys and values can be at most 32 and 512 characters long, respectively. You can have at most 16 key-value pairs, with a maximum size of 512 bytes.
+ index_url (str | None): Index URL for Python Package Manager to use. Be aware of the intrinsic security implications of using the `index_url` option. `More information can be found on official docs, `_
+ extra_index_urls (list[str] | None): Extra Index URLs for Python Package Manager to use. Be aware of the intrinsic security implications of using the `extra_index_urls` option. `More information can be found on official docs, `_
+ skip_folder_validation (bool): When creating a function using the 'folder' argument, pass True to skip the extra validation step that attempts to import the module. Skipping can be useful when your function requires several heavy packages to already be installed locally. Defaults to False.
+ data_set_id (int | None): Data set to upload the function code to. Note: Does not affect the function itself.
+
+ Returns:
+ Function: The created function.
+
+ Examples:
+
+ Create function with source code in folder:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> function = client.functions.create(
+ ... name="myfunction",
+ ... folder="path/to/code",
+ ... function_path="path/to/function.py")
+
+ Create function with file_id from already uploaded source code:
+
+ >>> function = client.functions.create(
+ ... name="myfunction", file_id=123, function_path="path/to/function.py")
+
+ Create function with predefined function object named `handle`:
+
+ >>> function = client.functions.create(name="myfunction", function_handle=handle)
+
+ Create function with predefined function object named `handle` with dependencies:
+
+ >>> def handle(client, data):
+ ...     '''
+ ...     [requirements]
+ ...     numpy
+ ...     [/requirements]
+ ...     '''
+ ...     pass
+ >>>
+ >>> function = client.functions.create(name="myfunction", function_handle=handle)
+
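+ Create function with a `handle` that accepts all the optional arguments described above
+ (a minimal sketch; the body just echoes its input):
+
+ >>> def handle(client, data, secrets, function_call_info):
+ ...     # 'data' is the JSON-serializable input given at call time,
+ ...     # 'secrets' holds the key/value pairs given when the function was created,
+ ...     # 'function_call_info' is a dict with e.g. 'function_id' and 'call_id'.
+ ...     return {"echo": data, "call_id": function_call_info["call_id"]}
+ >>>
+ >>> function = client.functions.create(name="myfunction", function_handle=handle)
+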
+ .. note::
+ When using a predefined function object, you can list dependencies between the tags `[requirements]` and `[/requirements]` in the function's docstring.
+ The dependencies will be parsed and validated in accordance with the requirement format specified in `PEP 508 `_.
+ """
+ return run_sync(
+ self.__async_client.functions.create(
+ name=name,
+ folder=folder,
+ file_id=file_id,
+ function_path=function_path,
+ function_handle=function_handle,
+ external_id=external_id,
+ description=description,
+ owner=owner,
+ secrets=secrets,
+ env_vars=env_vars,
+ cpu=cpu,
+ memory=memory,
+ runtime=runtime,
+ metadata=metadata,
+ index_url=index_url,
+ extra_index_urls=extra_index_urls,
+ skip_folder_validation=skip_folder_validation,
+ data_set_id=data_set_id,
+ )
+ )
+
+ def delete(
+ self, id: int | Sequence[int] | None = None, external_id: str | SequenceNotStr[str] | None = None
+ ) -> None:
+ """
+ `Delete one or more functions. `_
+
+ Args:
+ id (int | Sequence[int] | None): Id or list of ids.
+ external_id (str | SequenceNotStr[str] | None): External ID or list of external ids.
+
+ Example:
+
+ Delete functions by id or external id::
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.functions.delete(id=[1,2,3], external_id="function3")
+ """
+ return run_sync(self.__async_client.functions.delete(id=id, external_id=external_id))
+
+ def list(
+ self,
+ name: str | None = None,
+ owner: str | None = None,
+ file_id: int | None = None,
+ status: FunctionStatus | None = None,
+ external_id_prefix: str | None = None,
+ created_time: dict[Literal["min", "max"], int] | TimestampRange | None = None,
+ metadata: dict[str, str] | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ ) -> FunctionList:
+ """
+ `List all functions. `_
+
+ Args:
+ name (str | None): The name of the function.
+ owner (str | None): Owner of the function.
+ file_id (int | None): The file ID of the zip-file used to create the function.
+ status (FunctionStatus | None): Status of the function. Possible values: ["Queued", "Deploying", "Ready", "Failed"].
+ external_id_prefix (str | None): External ID prefix to filter on.
+ created_time (dict[Literal['min', 'max'], int] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32, value 512 characters, up to 16 key-value pairs. Maximum size of entire metadata is 4096 bytes.
+ limit (int | None): Maximum number of functions to return. Pass in -1, float('inf') or None to list all.
+
+ Returns:
+ FunctionList: List of functions
+
+ Example:
+
+ List functions::
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> functions_list = client.functions.list()
+ """
+ return run_sync(
+ self.__async_client.functions.list(
+ name=name,
+ owner=owner,
+ file_id=file_id,
+ status=status,
+ external_id_prefix=external_id_prefix,
+ created_time=created_time,
+ metadata=metadata,
+ limit=limit,
+ )
+ )
+
+ def retrieve(self, id: int | None = None, external_id: str | None = None) -> Function | None:
+ """
+ `Retrieve a single function by id. `_
+
+ Args:
+ id (int | None): ID
+ external_id (str | None): External ID
+
+ Returns:
+ Function | None: Requested function or None if it does not exist.
+
+ Examples:
+
+ Get function by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.functions.retrieve(id=1)
+
+ Get function by external id:
+
+ >>> res = client.functions.retrieve(external_id="abc")
+ """
+ return run_sync(self.__async_client.functions.retrieve(id=id, external_id=external_id))
+
+ def retrieve_multiple(
+ self,
+ ids: Sequence[int] | None = None,
+ external_ids: SequenceNotStr[str] | None = None,
+ ignore_unknown_ids: bool = False,
+ ) -> FunctionList:
+ """
+ `Retrieve multiple functions by id. `_
+
+ Args:
+ ids (Sequence[int] | None): IDs
+ external_ids (SequenceNotStr[str] | None): External IDs
+ ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+
+ Returns:
+ FunctionList: The requested functions.
+
+ Examples:
+
+ Get function by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.functions.retrieve_multiple(ids=[1, 2, 3])
+
+ Get functions by external id:
+
+ >>> res = client.functions.retrieve_multiple(external_ids=["func1", "func2"])
+ """
+ return run_sync(
+ self.__async_client.functions.retrieve_multiple(
+ ids=ids, external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids
+ )
+ )
+
+ def call(
+ self,
+ id: int | None = None,
+ external_id: str | None = None,
+ data: dict[str, object] | None = None,
+ wait: bool = True,
+ nonce: str | None = None,
+ ) -> FunctionCall:
+ """
+ `Call a function by its ID or external ID. `_.
+
+ Args:
+ id (int | None): ID
+ external_id (str | None): External ID
+ data (dict[str, object] | None): Input data to the function (JSON serializable). This data is passed deserialized into the function through one of the arguments called data. **WARNING:** Secrets or other confidential information should not be passed via this argument. There is a dedicated `secrets` argument in FunctionsAPI.create() for this purpose.
+ wait (bool): Wait until the function call is finished. Defaults to True.
+ nonce (str | None): Nonce retrieved from sessions API when creating a session. This will be used to bind the session before executing the function. If not provided, a new session will be created based on the client credentials.
+
+ Tip:
+ You can create a session via the Sessions API, using the client.iam.sessions.create() method.
+
+ Returns:
+ FunctionCall: A function call object.
+
+ Examples:
+
+ Call a function by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> call = client.functions.call(id=1)
+
+ Call a function directly on the `Function` object:
+
+ >>> func = client.functions.retrieve(id=1)
+ >>> call = func.call()
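+
+ Call a function with input data, binding the call to a session created up front
+ (a sketch; the session is created via the Sessions API mentioned in the tip above):
+
+ >>> session = client.iam.sessions.create()
+ >>> call = client.functions.call(id=1, data={"bar": "foo"}, nonce=session.nonce)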
+ """
+ return run_sync(
+ self.__async_client.functions.call(id=id, external_id=external_id, data=data, wait=wait, nonce=nonce)
+ )
+
+ def limits(self) -> FunctionsLimits:
+ """
+ `Get service limits. `_.
+
+ Returns:
+ FunctionsLimits: A function limits object.
+
+ Examples:
+
+ Get the service limits:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> limits = client.functions.limits()
+ """
+ return run_sync(self.__async_client.functions.limits())
+
+ def activate(self) -> FunctionsStatus:
+ """
+ `Activate functions for the Project. `_.
+
+ Note:
+ May take some time to take effect (hours).
+
+ Returns:
+ FunctionsStatus: A function activation status.
+
+ Examples:
+
+ Call activate:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> status = client.functions.activate()
+ """
+ return run_sync(self.__async_client.functions.activate())
+
+ def status(self) -> FunctionsStatus:
+ """
+ `Functions activation status for the Project. `_.
+
+ Returns:
+ FunctionsStatus: A function activation status.
+
+ Examples:
+
+ Call status:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> status = client.functions.status()
+ """
+ return run_sync(self.__async_client.functions.status())
diff --git a/cognite/client/_sync_api/functions/calls.py b/cognite/client/_sync_api/functions/calls.py
new file mode 100644
index 0000000000..1c3a526a31
--- /dev/null
+++ b/cognite/client/_sync_api/functions/calls.py
@@ -0,0 +1,174 @@
+"""
+===============================================================================
+7fed9b140979eb9672e692ff65c9dba7
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import FunctionCall, FunctionCallList, FunctionCallLog
+from cognite.client.utils._async_helpers import run_sync
+
+
+class SyncFunctionCallsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ def list(
+ self,
+ function_id: int | None = None,
+ function_external_id: str | None = None,
+ status: str | None = None,
+ schedule_id: int | None = None,
+ start_time: dict[str, int] | None = None,
+ end_time: dict[str, int] | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ ) -> FunctionCallList:
+ """
+ `List all calls associated with a specific function id. `_ Either function_id or function_external_id must be specified.
+
+ Args:
+ function_id (int | None): ID of the function on which the calls were made.
+ function_external_id (str | None): External ID of the function on which the calls were made.
+ status (str | None): Status of the call. Possible values ["Running", "Failed", "Completed", "Timeout"].
+ schedule_id (int | None): Schedule id to which the call belongs (if any).
+ start_time (dict[str, int] | None): Start time of the call. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ end_time (dict[str, int] | None): End time of the call. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ limit (int | None): Maximum number of function calls to list. Pass in -1, float('inf') or None to list all Function Calls.
+
+ Returns:
+ FunctionCallList: List of function calls
+
+ Examples:
+
+ List function calls:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> calls = client.functions.calls.list(function_id=1)
+
+ List function calls directly on a function object:
+
+ >>> func = client.functions.retrieve(id=1)
+ >>> calls = func.list_calls()
+ """
+ return run_sync(
+ self.__async_client.functions.calls.list(
+ function_id=function_id,
+ function_external_id=function_external_id,
+ status=status,
+ schedule_id=schedule_id,
+ start_time=start_time,
+ end_time=end_time,
+ limit=limit,
+ )
+ )
+
+ def retrieve(
+ self, call_id: int, function_id: int | None = None, function_external_id: str | None = None
+ ) -> FunctionCall | None:
+ """
+ `Retrieve a single function call by id. `_
+
+ Args:
+ call_id (int): ID of the call.
+ function_id (int | None): ID of the function on which the call was made.
+ function_external_id (str | None): External ID of the function on which the call was made.
+
+ Returns:
+ FunctionCall | None: Requested function call or None if either call ID or function identifier is not found.
+
+ Examples:
+
+ Retrieve single function call by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> call = client.functions.calls.retrieve(call_id=2, function_id=1)
+
+ Retrieve function call directly on a function object:
+
+ >>> func = client.functions.retrieve(id=1)
+ >>> call = func.retrieve_call(id=2)
+ """
+ return run_sync(
+ self.__async_client.functions.calls.retrieve(
+ call_id=call_id, function_id=function_id, function_external_id=function_external_id
+ )
+ )
+
+ def get_response(
+ self, call_id: int, function_id: int | None = None, function_external_id: str | None = None
+ ) -> dict[str, object] | None:
+ """
+ `Retrieve the response from a function call. `_
+
+ Args:
+ call_id (int): ID of the call.
+ function_id (int | None): ID of the function on which the call was made.
+ function_external_id (str | None): External ID of the function on which the call was made.
+
+ Returns:
+ dict[str, object] | None: Response from the function call.
+
+ Examples:
+
+ Retrieve function call response by call ID:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> response = client.functions.calls.get_response(call_id=2, function_id=1)
+
+ Retrieve function call response directly on a call object:
+
+ >>> call = client.functions.calls.retrieve(call_id=2, function_id=1)
+ >>> response = call.get_response()
+ """
+ return run_sync(
+ self.__async_client.functions.calls.get_response(
+ call_id=call_id, function_id=function_id, function_external_id=function_external_id
+ )
+ )
+
+ def get_logs(
+ self, call_id: int, function_id: int | None = None, function_external_id: str | None = None
+ ) -> FunctionCallLog:
+ """
+ `Retrieve logs for function call. `_
+
+ Args:
+ call_id (int): ID of the call.
+ function_id (int | None): ID of the function on which the call was made.
+ function_external_id (str | None): External ID of the function on which the call was made.
+
+ Returns:
+ FunctionCallLog: Log for the function call.
+
+ Examples:
+
+ Retrieve function call logs by call ID:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> logs = client.functions.calls.get_logs(call_id=2, function_id=1)
+
+ Retrieve function call logs directly on a call object:
+
+ >>> call = client.functions.calls.retrieve(call_id=2, function_id=1)
+ >>> logs = call.get_logs()
+ """
+ return run_sync(
+ self.__async_client.functions.calls.get_logs(
+ call_id=call_id, function_id=function_id, function_external_id=function_external_id
+ )
+ )
diff --git a/cognite/client/_sync_api/functions/schedules.py b/cognite/client/_sync_api/functions/schedules.py
new file mode 100644
index 0000000000..f6da07b012
--- /dev/null
+++ b/cognite/client/_sync_api/functions/schedules.py
@@ -0,0 +1,286 @@
+"""
+===============================================================================
+e069532ec367104abca73d4fad0704ab
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import (
+ ClientCredentials,
+ FunctionSchedule,
+ FunctionSchedulesList,
+ TimestampRange,
+)
+from cognite.client.data_classes.functions import FunctionScheduleWrite
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+
+
+class SyncFunctionSchedulesAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[FunctionSchedule]: ...
+
+ @overload
+ def __call__(self, chunk_size: int) -> Iterator[FunctionSchedulesList]: ...
+
+ def __call__(
+ self,
+ chunk_size: int | None = None,
+ name: str | None = None,
+ function_id: int | None = None,
+ function_external_id: str | None = None,
+ created_time: dict[str, int] | TimestampRange | None = None,
+ cron_expression: str | None = None,
+ limit: int | None = None,
+ ) -> Iterator[FunctionSchedule | FunctionSchedulesList]:
+ """
+ Iterate over function schedules
+
+ Args:
+ chunk_size (int | None): The number of schedules to return in each chunk. Defaults to yielding one schedule at a time.
+ name (str | None): Name of the function schedule.
+ function_id (int | None): ID of the function the schedules are linked to.
+ function_external_id (str | None): External ID of the function the schedules are linked to.
+ created_time (dict[str, int] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ cron_expression (str | None): Cron expression.
+ limit (int | None): Maximum number of schedules to return. Defaults to returning all schedules.
+
+ Yields:
+ FunctionSchedule | FunctionSchedulesList: Function schedules.
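+
+ Examples:
+
+ Iterate over the schedules of a given function, one-by-one (a minimal sketch; function_id=123 is a placeholder):
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> for schedule in client.functions.schedules(function_id=123):
+ ... schedule # do something with the schedule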
+ """
+ yield from SyncIterator(
+ self.__async_client.functions.schedules(
+ chunk_size=chunk_size,
+ name=name,
+ function_id=function_id,
+ function_external_id=function_external_id,
+ created_time=created_time,
+ cron_expression=cron_expression,
+ limit=limit,
+ )
+ )
+
+ @overload
+ def retrieve(self, id: int, ignore_unknown_ids: bool = False) -> FunctionSchedule | None: ...
+
+ @overload
+ def retrieve(self, id: Sequence[int], ignore_unknown_ids: bool = False) -> FunctionSchedulesList: ...
+
+ def retrieve(
+ self, id: int | Sequence[int], ignore_unknown_ids: bool = False
+ ) -> FunctionSchedule | None | FunctionSchedulesList:
+ """
+ `Retrieve a single function schedule by id. `_
+
+ Args:
+ id (int | Sequence[int]): Schedule ID
+ ignore_unknown_ids (bool): Ignore IDs that are not found rather than throw an exception.
+
+ Returns:
+ FunctionSchedule | None | FunctionSchedulesList: Requested function schedule or None if not found.
+
+ Examples:
+
+ Get function schedule by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.functions.schedules.retrieve(id=1)
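+
+ Get several function schedules by id, ignoring unknown ids (a sketch based on the overloads above):
+
+ >>> res = client.functions.schedules.retrieve(id=[1, 2, 3], ignore_unknown_ids=True)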
+ """
+ return run_sync(self.__async_client.functions.schedules.retrieve(id=id, ignore_unknown_ids=ignore_unknown_ids))
+
+ def list(
+ self,
+ name: str | None = None,
+ function_id: int | None = None,
+ function_external_id: str | None = None,
+ created_time: dict[str, int] | TimestampRange | None = None,
+ cron_expression: str | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ ) -> FunctionSchedulesList:
+ """
+ `List all schedules associated with a specific project. `_
+
+ Args:
+ name (str | None): Name of the function schedule.
+ function_id (int | None): ID of the function the schedules are linked to.
+ function_external_id (str | None): External ID of the function the schedules are linked to.
+ created_time (dict[str, int] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ cron_expression (str | None): Cron expression.
+ limit (int | None): Maximum number of schedules to list. Pass in -1, float('inf') or None to list all.
+
+ Returns:
+ FunctionSchedulesList: List of function schedules
+
+ Examples:
+
+ List function schedules:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> schedules = client.functions.schedules.list()
+
+ List schedules directly on a function object to get only schedules associated with this particular function:
+
+ >>> func = client.functions.retrieve(id=1)
+ >>> schedules = func.list_schedules(limit=None)
+ """
+ return run_sync(
+ self.__async_client.functions.schedules.list(
+ name=name,
+ function_id=function_id,
+ function_external_id=function_external_id,
+ created_time=created_time,
+ cron_expression=cron_expression,
+ limit=limit,
+ )
+ )
+
+ def create(
+ self,
+ name: str | FunctionScheduleWrite,
+ cron_expression: str | None = None,
+ function_id: int | None = None,
+ function_external_id: str | None = None,
+ client_credentials: dict[str, str] | ClientCredentials | None = None,
+ description: str | None = None,
+ data: dict[str, object] | None = None,
+ ) -> FunctionSchedule:
+ """
+ `Create a schedule associated with a specific project. `_
+
+ Args:
+ name (str | FunctionScheduleWrite): Name of the schedule or FunctionSchedule object. If a function schedule object is passed, the other arguments are ignored except for the client_credentials argument.
+ cron_expression (str | None): Cron expression.
+ function_id (int | None): Id of the function to attach the schedule to.
+ function_external_id (str | None): (DEPRECATED) External id of the function to attach the schedule to.
+ Note: Will be automatically converted to (internal) ID, as schedules must be bound to an ID.
+ client_credentials (dict[str, str] | ClientCredentials | None): Instance of ClientCredentials
+ or a dictionary containing client credentials: 'client_id' and 'client_secret'.
+ description (str | None): Description of the schedule.
+ data (dict[str, object] | None): Data to be passed to the scheduled run.
+
+ Returns:
+ FunctionSchedule: Created function schedule.
+
+ Note:
+ There are several ways to authenticate the function schedule — the order of priority is as follows:
+ 1. ``nonce`` (if provided in the ``FunctionScheduleWrite`` object)
+ 2. ``client_credentials`` (if provided)
+ 3. The credentials of *this* AsyncCogniteClient.
+
+ Warning:
+ Do not pass secrets or other confidential information via the ``data`` argument. There is a dedicated
+ ``secrets`` argument in FunctionsAPI.create() for this purpose.
+
+ Passing the reference to the Function by ``function_external_id`` is offered purely as a convenience.
+ The API requires that all schedules are attached to a Function by (internal) ID for authentication
+ and security purposes, so the lookup to resolve the ID is first done on behalf of the user.
+
+ Examples:
+
+ Create a function schedule that runs using specified client credentials (**recommended**):
+
+ >>> import os
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import ClientCredentials
+ >>> client = CogniteClient()
+ >>> schedule = client.functions.schedules.create(
+ ... name="My schedule",
+ ... function_id=123,
+ ... cron_expression="*/5 * * * *",
+ ... client_credentials=ClientCredentials("my-client-id", os.environ["MY_CLIENT_SECRET"]),
+ ... description="This schedule does magic stuff.",
+ ... data={"magic": "stuff"},
+ ... )
+
+ You may also create a schedule that runs with your *current* credentials, i.e. the same credentials you used
+ to instantiate the client (that you're using right now). **Note**: Unless you happen to already use
+ client credentials, *this is not a recommended way to create schedules*, as it will create an explicit dependency
+ on your user account, which it will run the function "on behalf of" (until the schedule is eventually removed):
+
+ >>> schedule = client.functions.schedules.create(
+ ... name="My schedule",
+ ... function_id=456,
+ ... cron_expression="*/5 * * * *",
+ ... description="A schedule just used for some temporary testing.",
+ ... )
+
+ Create a function schedule with a oneshot session (typically used for testing purposes):
+
+ >>> from cognite.client.data_classes.functions import FunctionScheduleWrite
+ >>> session = client.iam.sessions.create(session_type="ONESHOT_TOKEN_EXCHANGE")
+ >>> schedule = client.functions.schedules.create(
+ ... FunctionScheduleWrite(
+ ... name="My schedule",
+ ... function_id=456,
+ ... cron_expression="*/5 * * * *",
+ ... description="A schedule just used for some temporary testing.",
+ ... nonce=session.nonce
+ ... ),
+ ... )
+ """
+ return run_sync(
+ self.__async_client.functions.schedules.create(
+ name=name,
+ cron_expression=cron_expression,
+ function_id=function_id,
+ function_external_id=function_external_id,
+ client_credentials=client_credentials,
+ description=description,
+ data=data,
+ )
+ )
+
+ def delete(self, id: int) -> None:
+ """
+ `Delete a schedule associated with a specific project. `_
+
+ Args:
+ id (int): Id of the schedule
+
+ Examples:
+
+ Delete function schedule:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.functions.schedules.delete(id=123)
+ """
+ return run_sync(self.__async_client.functions.schedules.delete(id=id))
+
+ def get_input_data(self, id: int) -> dict[str, object] | None:
+ """
+ `Retrieve the input data to the associated function. `_
+
+ Args:
+ id (int): Id of the schedule
+
+ Returns:
+ dict[str, object] | None: Input data to the associated function or None if not set. This data is passed deserialized into the function through the data argument.
+
+ Examples:
+
+ Get schedule input data:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.functions.schedules.get_input_data(id=123)
+ """
+ return run_sync(self.__async_client.functions.schedules.get_input_data(id=id))
diff --git a/cognite/client/_sync_api/geospatial.py b/cognite/client/_sync_api/geospatial.py
new file mode 100644
index 0000000000..b37e327ccd
--- /dev/null
+++ b/cognite/client/_sync_api/geospatial.py
@@ -0,0 +1,1014 @@
+"""
+===============================================================================
+2278ee58d5848b6f27ba865ac38c4d47
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from pathlib import Path
+from typing import Any, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.geospatial import (
+ CoordinateReferenceSystem,
+ CoordinateReferenceSystemList,
+ CoordinateReferenceSystemWrite,
+ Feature,
+ FeatureAggregateList,
+ FeatureList,
+ FeatureType,
+ FeatureTypeList,
+ FeatureTypePatch,
+ FeatureTypeWrite,
+ FeatureWrite,
+ FeatureWriteList,
+ GeospatialComputedResponse,
+ GeospatialComputeFunction,
+ OrderSpec,
+ RasterMetadata,
+)
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+
+class SyncGeospatialAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def create_feature_types(self, feature_type: FeatureType | FeatureTypeWrite) -> FeatureType: ...
+
+ @overload
+ def create_feature_types(
+ self, feature_type: Sequence[FeatureType] | Sequence[FeatureTypeWrite]
+ ) -> FeatureTypeList: ...
+
+ def create_feature_types(
+ self, feature_type: FeatureType | FeatureTypeWrite | Sequence[FeatureType] | Sequence[FeatureTypeWrite]
+ ) -> FeatureType | FeatureTypeList:
+ """
+ `Creates feature types`
+
+
+ Args:
+ feature_type (FeatureType | FeatureTypeWrite | Sequence[FeatureType] | Sequence[FeatureTypeWrite]): feature type definition or list of feature type definitions to create.
+
+ Returns:
+ FeatureType | FeatureTypeList: Created feature type definition(s)
+
+ Examples:
+
+ Create new type definitions:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.geospatial import FeatureTypeWrite
+ >>> client = CogniteClient()
+ >>> feature_types = [
+ ... FeatureTypeWrite(external_id="wells", properties={"location": {"type": "POINT", "srid": 4326}}),
+ ... FeatureTypeWrite(
+ ... external_id="cities",
+ ... properties={"name": {"type": "STRING", "size": 10}},
+ ... search_spec={"name_index": {"properties": ["name"]}}
+ ... )
+ ... ]
+ >>> res = client.geospatial.create_feature_types(feature_types)
+ """
+ return run_sync(self.__async_client.geospatial.create_feature_types(feature_type=feature_type))
+
+ def delete_feature_types(self, external_id: str | SequenceNotStr[str], recursive: bool = False) -> None:
+ """
+ `Delete one or more feature types`
+
+
+ Args:
+ external_id (str | SequenceNotStr[str]): External ID or list of external ids
+ recursive (bool): if `True`, the features of the deleted feature types will also be dropped
+
+ Examples:
+
+ Delete feature type definitions by external id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.geospatial.delete_feature_types(external_id=["wells", "cities"])
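+
+ Delete a feature type together with all of its features, using the `recursive` flag documented above:
+
+ >>> client.geospatial.delete_feature_types(external_id="wells", recursive=True)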
+ """
+ return run_sync(
+ self.__async_client.geospatial.delete_feature_types(external_id=external_id, recursive=recursive)
+ )
+
+ def list_feature_types(self) -> FeatureTypeList:
+ """
+ `List feature types`
+
+
+ Returns:
+ FeatureTypeList: List of feature types
+
+ Examples:
+
+ Iterate over feature type definitions:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> for feature_type in client.geospatial.list_feature_types():
+ ... feature_type # do something with the feature type definition
+ """
+ return run_sync(self.__async_client.geospatial.list_feature_types())
+
+ @overload
+ def retrieve_feature_types(self, external_id: str) -> FeatureType: ...
+
+ @overload
+ def retrieve_feature_types(self, external_id: list[str]) -> FeatureTypeList: ...
+
+ def retrieve_feature_types(self, external_id: str | list[str]) -> FeatureType | FeatureTypeList:
+ """
+ `Retrieve feature types`
+
+
+ Args:
+ external_id (str | list[str]): External ID
+
+ Returns:
+ FeatureType | FeatureTypeList: The requested feature type(s).
+
+ Examples:
+
+ Get a feature type by external id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.geospatial.retrieve_feature_types(external_id="1")
+ """
+ return run_sync(self.__async_client.geospatial.retrieve_feature_types(external_id=external_id))
+
+ def patch_feature_types(self, patch: FeatureTypePatch | Sequence[FeatureTypePatch]) -> FeatureTypeList:
+ """
+ `Patch feature types`
+
+
+ Args:
+ patch (FeatureTypePatch | Sequence[FeatureTypePatch]): the patch to apply
+
+ Returns:
+ FeatureTypeList: The patched feature types.
+
+ Examples:
+
+ Add one property to a feature type and add indexes:
+
+ >>> from cognite.client.data_classes.geospatial import FeatureTypePatch, Patches
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.geospatial.patch_feature_types(
+ ... patch=FeatureTypePatch(
+ ... external_id="wells",
+ ... property_patches=Patches(add={"altitude": {"type": "DOUBLE"}}),
+ ... search_spec_patches=Patches(
+ ... add={
+ ... "altitude_idx": {"properties": ["altitude"]},
+ ... "composite_idx": {"properties": ["location", "altitude"]}
+ ... }
+ ... )
+ ... )
+ ... )
+
+ Add an additional index to an existing property:
+
+ >>> from cognite.client.data_classes.geospatial import FeatureTypePatch, Patches
+ >>> res = client.geospatial.patch_feature_types(
+ ... patch=FeatureTypePatch(
+ ... external_id="wells",
+ ... search_spec_patches=Patches(add={"location_idx": {"properties": ["location"]}})
+ ... ))
+ """
+ return run_sync(self.__async_client.geospatial.patch_feature_types(patch=patch))
+
+ @overload
+ def create_features(
+ self,
+ feature_type_external_id: str,
+ feature: Feature | FeatureWrite,
+ allow_crs_transformation: bool = False,
+ chunk_size: int | None = None,
+ ) -> Feature: ...
+
+ @overload
+ def create_features(
+ self,
+ feature_type_external_id: str,
+ feature: Sequence[Feature] | Sequence[FeatureWrite] | FeatureList | FeatureWriteList,
+ allow_crs_transformation: bool = False,
+ chunk_size: int | None = None,
+ ) -> FeatureList: ...
+
+ def create_features(
+ self,
+ feature_type_external_id: str,
+ feature: Feature | FeatureWrite | Sequence[Feature] | Sequence[FeatureWrite] | FeatureList | FeatureWriteList,
+ allow_crs_transformation: bool = False,
+ chunk_size: int | None = None,
+ ) -> Feature | FeatureList:
+ """
+ `Creates features`
+
+
+ Args:
+ feature_type_external_id (str): Feature type definition for the features to create.
+ feature (Feature | FeatureWrite | Sequence[Feature] | Sequence[FeatureWrite] | FeatureList | FeatureWriteList): one feature or a list of features to create or a FeatureList object
+ allow_crs_transformation (bool): If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, requests with geometries in a Coordinate Reference System different from the one defined in the feature type will result in a CogniteAPIError exception.
+ chunk_size (int | None): maximum number of items in a single request to the api
+
+ Returns:
+ Feature | FeatureList: Created features
+
+ Examples:
+
+ Create a new feature type and corresponding feature:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.geospatial import FeatureTypeWrite, FeatureWrite
+ >>> client = CogniteClient()
+ >>> feature_types = [
+ ... FeatureTypeWrite(
+ ... external_id="my_feature_type",
+ ... properties={
+ ... "location": {"type": "POINT", "srid": 4326},
+ ... "temperature": {"type": "DOUBLE"}
+ ... }
+ ... )
+ ... ]
+ >>> res = client.geospatial.create_feature_types(feature_types)
+ >>> res = client.geospatial.create_features(
+ ... feature_type_external_id="my_feature_type",
+ ... feature=FeatureWrite(
+ ... external_id="my_feature",
+ ... location={"wkt": "POINT(1 1)"},
+ ... temperature=12.4
+ ... )
+ ... )
+ """
+ return run_sync(
+ self.__async_client.geospatial.create_features(
+ feature_type_external_id=feature_type_external_id,
+ feature=feature,
+ allow_crs_transformation=allow_crs_transformation,
+ chunk_size=chunk_size,
+ )
+ )
+
+ def delete_features(
+ self, feature_type_external_id: str, external_id: str | SequenceNotStr[str] | None = None
+ ) -> None:
+ """
+ `Delete one or more features`
+
+
+ Args:
+ feature_type_external_id (str): External ID of the feature type the features belong to.
+ external_id (str | SequenceNotStr[str] | None): External ID or list of external ids
+
+ Examples:
+
+ Delete features by external id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.geospatial.delete_features(
+ ... feature_type_external_id="my_feature_type",
+ ... external_id="my_feature"
+ ... )
+ """
+ return run_sync(
+ self.__async_client.geospatial.delete_features(
+ feature_type_external_id=feature_type_external_id, external_id=external_id
+ )
+ )
+
+ @overload
+ def retrieve_features(
+ self, feature_type_external_id: str, external_id: str, properties: dict[str, Any] | None = None
+ ) -> Feature: ...
+
+ @overload
+ def retrieve_features(
+ self, feature_type_external_id: str, external_id: list[str], properties: dict[str, Any] | None = None
+ ) -> FeatureList: ...
+
+ def retrieve_features(
+ self, feature_type_external_id: str, external_id: str | list[str], properties: dict[str, Any] | None = None
+ ) -> FeatureList | Feature:
+ """
+ `Retrieve features`
+
+
+ Args:
+ feature_type_external_id (str): External ID of the feature type the features belong to.
+ external_id (str | list[str]): External ID or list of external ids
+ properties (dict[str, Any] | None): the output property selection
+
+ Returns:
+ FeatureList | Feature: The requested feature(s).
+
+ Examples:
+
+ Retrieve one feature by its external id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.geospatial.retrieve_features(
+ ... feature_type_external_id="my_feature_type",
+ ... external_id="my_feature"
+ ... )
+ """
+ return run_sync(
+ self.__async_client.geospatial.retrieve_features(
+ feature_type_external_id=feature_type_external_id, external_id=external_id, properties=properties
+ )
+ )
+
+ @overload
+ def update_features(
+ self,
+ feature_type_external_id: str,
+ feature: Feature | FeatureWrite,
+ allow_crs_transformation: bool = False,
+ chunk_size: int | None = None,
+ ) -> Feature: ...
+
+ @overload
+ def update_features(
+ self,
+ feature_type_external_id: str,
+ feature: Sequence[Feature] | Sequence[FeatureWrite],
+ allow_crs_transformation: bool = False,
+ chunk_size: int | None = None,
+ ) -> FeatureList: ...
+
+ def update_features(
+ self,
+ feature_type_external_id: str,
+ feature: Feature | FeatureWrite | Sequence[Feature] | Sequence[FeatureWrite],
+ allow_crs_transformation: bool = False,
+ chunk_size: int | None = None,
+ ) -> Feature | FeatureList:
+ """
+ `Update features`
+
+
+ Args:
+            feature_type_external_id (str): The external id of the feature type the features belong to.
+ feature (Feature | FeatureWrite | Sequence[Feature] | Sequence[FeatureWrite]): feature or list of features.
+ allow_crs_transformation (bool): If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception.
+ chunk_size (int | None): maximum number of items in a single request to the api
+
+ Returns:
+ Feature | FeatureList: Updated features
+
+ Examples:
+
+ Update one feature:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> from cognite.client.data_classes.geospatial import Feature
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> my_feature = client.geospatial.create_features(
+ ... feature_type_external_id="my_feature_type",
+ ... feature=Feature(external_id="my_feature", temperature=12.4)
+ ... )
+ >>> my_updated_feature = client.geospatial.update_features(
+ ... feature_type_external_id="my_feature_type",
+ ... feature=Feature(external_id="my_feature", temperature=6.237)
+ ... )
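+
+            Update several features in one call, assuming both features already exist:
+
+                >>> res = client.geospatial.update_features(
+                ...     feature_type_external_id="my_feature_type",
+                ...     feature=[
+                ...         Feature(external_id="my_feature", temperature=6.237),
+                ...         Feature(external_id="my_other_feature", temperature=7.9)
+                ...     ]
+                ... )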
+ """
+ return run_sync(
+ self.__async_client.geospatial.update_features(
+ feature_type_external_id=feature_type_external_id,
+ feature=feature,
+ allow_crs_transformation=allow_crs_transformation,
+ chunk_size=chunk_size,
+ )
+ )
+
+ def list_features(
+ self,
+ feature_type_external_id: str,
+ filter: dict[str, Any] | None = None,
+ properties: dict[str, Any] | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ allow_crs_transformation: bool = False,
+ ) -> FeatureList:
+ """
+ `List features`
+
+
+        This method allows you to filter all features.
+
+ Args:
+ feature_type_external_id (str): the feature type to list features for
+ filter (dict[str, Any] | None): the list filter
+ properties (dict[str, Any] | None): the output property selection
+ limit (int | None): Maximum number of features to return. Defaults to 25. Set to -1, float("inf") or None to return all features.
+ allow_crs_transformation (bool): If true, then input geometries if existing in the filter will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception.
+
+ Returns:
+ FeatureList: The filtered features
+
+ Examples:
+
+ List features:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> from cognite.client.data_classes.geospatial import Feature
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> my_feature_type = client.geospatial.retrieve_feature_types(
+ ... external_id="my_feature_type"
+ ... )
+ >>> my_feature = client.geospatial.create_features(
+                ...     feature_type_external_id=my_feature_type.external_id,
+ ... feature=Feature(
+ ... external_id="my_feature",
+ ... temperature=12.4,
+ ... location={"wkt": "POINT(0 1)"}
+ ... )
+ ... )
+ >>> res = client.geospatial.list_features(
+ ... feature_type_external_id="my_feature_type",
+ ... filter={"range": {"property": "temperature", "gt": 12.0}}
+ ... )
+ >>> for f in res:
+                ...     f # do something with the features
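+
+            List every feature of the type by disabling the limit:
+
+                >>> res = client.geospatial.list_features(
+                ...     feature_type_external_id="my_feature_type",
+                ...     filter={},
+                ...     limit=None
+                ... )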
+
+ Search for features and select output properties:
+
+ >>> res = client.geospatial.list_features(
+                ...     feature_type_external_id=my_feature_type.external_id,
+ ... filter={},
+ ... properties={"temperature": {}, "pressure": {}}
+ ... )
+
+ Search for features with spatial filters:
+
+ >>> res = client.geospatial.list_features(
+                ...     feature_type_external_id=my_feature_type.external_id,
+ ... filter={"stWithin": {
+ ... "property": "location",
+ ... "value": {"wkt": "POLYGON((0 0, 0 1, 1 1, 0 0))"}
+ ... }}
+ ... )
+ """
+ return run_sync(
+ self.__async_client.geospatial.list_features(
+ feature_type_external_id=feature_type_external_id,
+ filter=filter,
+ properties=properties,
+ limit=limit,
+ allow_crs_transformation=allow_crs_transformation,
+ )
+ )
+
+ def search_features(
+ self,
+ feature_type_external_id: str,
+ filter: dict[str, Any] | None = None,
+ properties: dict[str, Any] | None = None,
+ limit: int = DEFAULT_LIMIT_READ,
+ order_by: Sequence[OrderSpec] | None = None,
+ allow_crs_transformation: bool = False,
+ allow_dimensionality_mismatch: bool = False,
+ ) -> FeatureList:
+ """
+ `Search for features`
+
+
+        This method allows you to order the results by one or more properties of the feature type.
+ However, the number of items returned is limited to 1000 and there is no support for cursors yet.
+ If you need to return more than 1000 items, use the `stream_features(...)` method instead.
+
+ Args:
+ feature_type_external_id (str): The feature type to search for
+ filter (dict[str, Any] | None): The search filter
+ properties (dict[str, Any] | None): The output property selection
+ limit (int): Maximum number of results
+ order_by (Sequence[OrderSpec] | None): The order specification
+ allow_crs_transformation (bool): If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception.
+ allow_dimensionality_mismatch (bool): Indicating if the spatial filter operators allow input geometries with a different dimensionality than the properties they are applied to. Defaults to False.
+
+ Returns:
+ FeatureList: the filtered features
+
+ Examples:
+
+ Search for features:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> from cognite.client.data_classes.geospatial import Feature, OrderSpec
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> my_feature_type = client.geospatial.retrieve_feature_types(
+ ... external_id="my_feature_type"
+ ... )
+ >>> my_feature = client.geospatial.create_features(
+                ...     feature_type_external_id=my_feature_type.external_id,
+ ... feature=Feature(
+ ... external_id="my_feature",
+ ... temperature=12.4,
+ ... location={"wkt": "POINT(0 1)"}
+ ... )
+ ... )
+ >>> res = client.geospatial.search_features(
+ ... feature_type_external_id="my_feature_type",
+ ... filter={"range": {"property": "temperature", "gt": 12.0}}
+ ... )
+ >>> for f in res:
+                ...     f # do something with the features
+
+ Search for features and select output properties:
+
+ >>> res = client.geospatial.search_features(
+                ...     feature_type_external_id=my_feature_type.external_id,
+ ... filter={},
+ ... properties={"temperature": {}, "pressure": {}}
+ ... )
+
+ Search for features and do CRS conversion on an output property:
+
+ >>> res = client.geospatial.search_features(
+                ...     feature_type_external_id=my_feature_type.external_id,
+ ... filter={},
+ ... properties={"location": {"srid": 3995}}
+ ... )
+
+ Search for features and order results:
+
+ >>> res = client.geospatial.search_features(
+                ...     feature_type_external_id=my_feature_type.external_id,
+ ... filter={},
+ ... order_by=[
+ ... OrderSpec("temperature", "ASC"),
+ ... OrderSpec("pressure", "DESC")]
+ ... )
+
+ Search for features with spatial filters:
+
+ >>> res = client.geospatial.search_features(
+                ...     feature_type_external_id=my_feature_type.external_id,
+ ... filter={"stWithin": {
+ ... "property": "location",
+ ... "value": {"wkt": "POLYGON((0 0, 0 1, 1 1, 0 0))"}
+ ... }}
+ ... )
+
+ Combining multiple filters:
+
+ >>> res = client.geospatial.search_features(
+                ...     feature_type_external_id=my_feature_type.external_id,
+ ... filter={"and": [
+ ... {"range": {"property": "temperature", "gt": 12.0}},
+ ... {"stWithin": {
+ ... "property": "location",
+ ... "value": {"wkt": "POLYGON((0 0, 0 1, 1 1, 0 0))"}
+ ... }}
+ ... ]}
+ ... )
+
+ >>> res = client.geospatial.search_features(
+                ...     feature_type_external_id=my_feature_type.external_id,
+ ... filter={"or": [
+ ... {"range": {"property": "temperature", "gt": 12.0}},
+ ... {"stWithin": {
+ ... "property": "location",
+ ... "value": {"wkt": "POLYGON((0 0, 0 1, 1 1, 0 0))"}
+ ... }}
+ ... ]}
+ ... )
+ """
+ return run_sync(
+ self.__async_client.geospatial.search_features(
+ feature_type_external_id=feature_type_external_id,
+ filter=filter,
+ properties=properties,
+ limit=limit,
+ order_by=order_by,
+ allow_crs_transformation=allow_crs_transformation,
+ allow_dimensionality_mismatch=allow_dimensionality_mismatch,
+ )
+ )
+
+ def stream_features(
+ self,
+ feature_type_external_id: str,
+ filter: dict[str, Any] | None = None,
+ properties: dict[str, Any] | None = None,
+ allow_crs_transformation: bool = False,
+ allow_dimensionality_mismatch: bool = False,
+ ) -> Iterator[Feature]:
+ """
+ `Stream features`
+
+
+        This method allows you to return any number of items, until the underlying
+        API call times out. The order of the result items is not deterministic.
+ If you need to order the results, use the `search_features(...)` method instead.
+
+ Args:
+ feature_type_external_id (str): the feature type to search for
+ filter (dict[str, Any] | None): the search filter
+ properties (dict[str, Any] | None): the output property selection
+ allow_crs_transformation (bool): If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception.
+ allow_dimensionality_mismatch (bool): Indicating if the spatial filter operators allow input geometries with a different dimensionality than the properties they are applied to. Defaults to False.
+
+ Yields:
+ Feature: a generator for the filtered features
+
+ Examples:
+
+ Stream features:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> from cognite.client.data_classes.geospatial import Feature
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> my_feature = client.geospatial.create_features(
+ ... feature_type_external_id="my_feature_type",
+ ... feature=Feature(external_id="my_feature", temperature=12.4)
+ ... )
+ >>> features = client.geospatial.stream_features(
+ ... feature_type_external_id="my_feature_type",
+ ... filter={"range": {"property": "temperature", "gt": 12.0}}
+ ... )
+ >>> for f in features:
+                ...     f # do something with the feature
+
+ Stream features and select output properties:
+
+ >>> features = client.geospatial.stream_features(
+ ... feature_type_external_id="my_feature_type",
+ ... filter={},
+ ... properties={"temperature": {}, "pressure": {}}
+ ... )
+ >>> for f in features:
+                ...     f # do something with the feature
+ """
+ yield from SyncIterator(
+ self.__async_client.geospatial.stream_features(
+ feature_type_external_id=feature_type_external_id,
+ filter=filter,
+ properties=properties,
+ allow_crs_transformation=allow_crs_transformation,
+ allow_dimensionality_mismatch=allow_dimensionality_mismatch,
+ )
+ )
+
+ def aggregate_features(
+ self,
+ feature_type_external_id: str,
+ filter: dict[str, Any] | None = None,
+ group_by: SequenceNotStr[str] | None = None,
+ order_by: Sequence[OrderSpec] | None = None,
+ output: dict[str, Any] | None = None,
+ ) -> FeatureAggregateList:
+ """
+ `Aggregate filtered features`
+
+
+ Args:
+ feature_type_external_id (str): the feature type to filter features from
+ filter (dict[str, Any] | None): the search filter
+ group_by (SequenceNotStr[str] | None): list of properties to group by with
+ order_by (Sequence[OrderSpec] | None): the order specification
+ output (dict[str, Any] | None): the aggregate output
+
+ Returns:
+ FeatureAggregateList: the filtered features
+
+ Examples:
+
+ Aggregate property of features:
+
+                >>> from cognite.client import CogniteClient, AsyncCogniteClient
+                >>> from cognite.client.data_classes.geospatial import Feature, OrderSpec
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> my_feature = client.geospatial.create_features(
+ ... feature_type_external_id="my_feature_type",
+ ... feature=Feature(external_id="my_feature", temperature=12.4)
+ ... )
+ >>> res = client.geospatial.aggregate_features(
+ ... feature_type_external_id="my_feature_type",
+ ... filter={"range": {"property": "temperature", "gt": 12.0}},
+ ... group_by=["category"],
+ ... order_by=[OrderSpec("category", "ASC")],
+ ... output={"min_temperature": {"min": {"property": "temperature"}},
+ ... "max_volume": {"max": {"property": "volume"}}
+ ... }
+ ... )
+ >>> for a in res:
+                ...     a # loop over aggregates in different groups
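+
+            Aggregate without grouping (a sketch computing a single minimum over all matching features):
+
+                >>> res = client.geospatial.aggregate_features(
+                ...     feature_type_external_id="my_feature_type",
+                ...     filter={},
+                ...     output={"min_temperature": {"min": {"property": "temperature"}}}
+                ... )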
+ """
+ return run_sync(
+ self.__async_client.geospatial.aggregate_features(
+ feature_type_external_id=feature_type_external_id,
+ filter=filter,
+ group_by=group_by,
+ order_by=order_by,
+ output=output,
+ )
+ )
+
+ def get_coordinate_reference_systems(self, srids: int | Sequence[int]) -> CoordinateReferenceSystemList:
+ """
+ `Get Coordinate Reference Systems`
+
+
+ Args:
+            srids (int | Sequence[int]): SRID or list of SRIDs
+
+ Returns:
+ CoordinateReferenceSystemList: Requested CRSs.
+
+ Examples:
+
+ Get two CRS definitions:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> crs = client.geospatial.get_coordinate_reference_systems(srids=[4326, 4327])
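+
+            A single SRID can also be passed directly:
+
+                >>> crs = client.geospatial.get_coordinate_reference_systems(srids=4326)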
+ """
+ return run_sync(self.__async_client.geospatial.get_coordinate_reference_systems(srids=srids))
+
+ def list_coordinate_reference_systems(self, only_custom: bool = False) -> CoordinateReferenceSystemList:
+ """
+ `List Coordinate Reference Systems`
+
+
+ Args:
+ only_custom (bool): list only custom CRSs or not
+
+ Returns:
+ CoordinateReferenceSystemList: list of CRSs.
+
+ Examples:
+
+ Fetch all custom CRSs:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> crs = client.geospatial.list_coordinate_reference_systems(only_custom=True)
+ """
+ return run_sync(self.__async_client.geospatial.list_coordinate_reference_systems(only_custom=only_custom))
+
+ def create_coordinate_reference_systems(
+ self,
+ crs: CoordinateReferenceSystem
+ | CoordinateReferenceSystemWrite
+ | Sequence[CoordinateReferenceSystem]
+ | Sequence[CoordinateReferenceSystemWrite],
+ ) -> CoordinateReferenceSystemList:
+ """
+ `Create Coordinate Reference System`
+
+
+ Args:
+ crs (CoordinateReferenceSystem | CoordinateReferenceSystemWrite | Sequence[CoordinateReferenceSystem] | Sequence[CoordinateReferenceSystemWrite]): a CoordinateReferenceSystem or a list of CoordinateReferenceSystem
+
+ Returns:
+ CoordinateReferenceSystemList: list of CRSs.
+
+ Examples:
+
+ Create a custom CRS:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import CoordinateReferenceSystemWrite
+ >>> client = CogniteClient()
+ >>> custom_crs = CoordinateReferenceSystemWrite(
+ ... srid = 121111,
+ ... wkt=(
+ ... 'PROJCS["NTF (Paris) / Lambert zone II",'
+ ... ' GEOGCS["NTF (Paris)",'
+ ... ' DATUM["Nouvelle_Triangulation_Francaise_Paris",'
+ ... ' SPHEROID["Clarke 1880 (IGN)",6378249.2,293.4660212936265,'
+ ... ' AUTHORITY["EPSG","7011"]],'
+ ... ' TOWGS84[-168,-60,320,0,0,0,0],'
+ ... ' AUTHORITY["EPSG","6807"]],'
+ ... ' PRIMEM["Paris",2.33722917,'
+ ... ' AUTHORITY["EPSG","8903"]],'
+ ... ' UNIT["grad",0.01570796326794897,'
+ ... ' AUTHORITY["EPSG","9105"]], '
+ ... ' AUTHORITY["EPSG","4807"]],'
+ ... ' PROJECTION["Lambert_Conformal_Conic_1SP"],'
+ ... ' PARAMETER["latitude_of_origin",52],'
+ ... ' PARAMETER["central_meridian",0],'
+ ... ' PARAMETER["scale_factor",0.99987742],'
+ ... ' PARAMETER["false_easting",600000],'
+ ... ' PARAMETER["false_northing",2200000],'
+ ... ' UNIT["metre",1,'
+ ... ' AUTHORITY["EPSG","9001"]],'
+ ... ' AXIS["X",EAST],'
+ ... ' AXIS["Y",NORTH],'
+ ... ' AUTHORITY["EPSG","27572"]]'
+ ... ),
+ ... proj_string=(
+ ... '+proj=lcc +lat_1=46.8 +lat_0=46.8 +lon_0=0 +k_0=0.99987742 '
+ ... '+x_0=600000 +y_0=2200000 +a=6378249.2 +b=6356515 '
+ ... '+towgs84=-168,-60,320,0,0,0,0 +pm=paris +units=m +no_defs'
+ ... )
+ ... )
+ >>> crs = client.geospatial.create_coordinate_reference_systems(custom_crs)
+ """
+ return run_sync(self.__async_client.geospatial.create_coordinate_reference_systems(crs=crs))
+
+ def delete_coordinate_reference_systems(self, srids: int | Sequence[int]) -> None:
+ """
+ `Delete Coordinate Reference System`
+
+
+ Args:
+            srids (int | Sequence[int]): SRID or list of SRIDs
+
+ Examples:
+
+ Delete a custom CRS:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> crs = client.geospatial.delete_coordinate_reference_systems(srids=[121111])
+ """
+ return run_sync(self.__async_client.geospatial.delete_coordinate_reference_systems(srids=srids))
+
+ def put_raster(
+ self,
+ feature_type_external_id: str,
+ feature_external_id: str,
+ raster_property_name: str,
+ raster_format: str,
+ raster_srid: int,
+ file: str | Path,
+ allow_crs_transformation: bool = False,
+ raster_scale_x: float | None = None,
+ raster_scale_y: float | None = None,
+ ) -> RasterMetadata:
+ """
+ `Put raster `
+
+ Args:
+            feature_type_external_id (str): The external id of the feature type the feature belongs to.
+            feature_external_id (str): The external id of the feature to put the raster into.
+ raster_property_name (str): the raster property name
+ raster_format (str): the raster input format
+ raster_srid (int): the associated SRID for the raster
+ file (str | Path): the path to the file of the raster
+ allow_crs_transformation (bool): When the parameter is false, requests with rasters in Coordinate Reference System different from the one defined in the feature type will result in bad request response code.
+ raster_scale_x (float | None): the X component of the pixel width in units of coordinate reference system
+ raster_scale_y (float | None): the Y component of the pixel height in units of coordinate reference system
+
+ Returns:
+ RasterMetadata: the raster metadata if it was ingested successfully
+
+ Examples:
+
+ Put a raster in a feature raster property:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+                >>> feature_type = ...
+                >>> feature = ...
+                >>> raster_property_name = ...
+                >>> file = ...
+ >>> metadata = client.geospatial.put_raster(feature_type.external_id, feature.external_id,
+ ... raster_property_name, "XYZ", 3857, file)
+ """
+ return run_sync(
+ self.__async_client.geospatial.put_raster(
+ feature_type_external_id=feature_type_external_id,
+ feature_external_id=feature_external_id,
+ raster_property_name=raster_property_name,
+ raster_format=raster_format,
+ raster_srid=raster_srid,
+ file=file,
+ allow_crs_transformation=allow_crs_transformation,
+ raster_scale_x=raster_scale_x,
+ raster_scale_y=raster_scale_y,
+ )
+ )
+
+ def delete_raster(self, feature_type_external_id: str, feature_external_id: str, raster_property_name: str) -> None:
+ """
+ `Delete raster `
+
+ Args:
+            feature_type_external_id (str): The external id of the feature type the feature belongs to.
+            feature_external_id (str): The external id of the feature holding the raster.
+ raster_property_name (str): the raster property name
+
+ Examples:
+
+ Delete a raster in a feature raster property:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> feature_type = ...
+ >>> feature = ...
+ >>> raster_property_name = ...
+ >>> client.geospatial.delete_raster(feature_type.external_id, feature.external_id, raster_property_name)
+ """
+ return run_sync(
+ self.__async_client.geospatial.delete_raster(
+ feature_type_external_id=feature_type_external_id,
+ feature_external_id=feature_external_id,
+ raster_property_name=raster_property_name,
+ )
+ )
+
+ def get_raster(
+ self,
+ feature_type_external_id: str,
+ feature_external_id: str,
+ raster_property_name: str,
+ raster_format: str,
+ raster_options: dict[str, Any] | None = None,
+ raster_srid: int | None = None,
+ raster_scale_x: float | None = None,
+ raster_scale_y: float | None = None,
+ allow_crs_transformation: bool = False,
+ ) -> bytes:
+ """
+ `Get raster `
+
+ Args:
+            feature_type_external_id (str): The external id of the feature type the feature belongs to.
+            feature_external_id (str): The external id of the feature holding the raster.
+ raster_property_name (str): the raster property name
+ raster_format (str): the raster output format
+ raster_options (dict[str, Any] | None): GDAL raster creation key-value options
+ raster_srid (int | None): the SRID for the output raster
+ raster_scale_x (float | None): the X component of the output pixel width in units of coordinate reference system
+ raster_scale_y (float | None): the Y component of the output pixel height in units of coordinate reference system
+ allow_crs_transformation (bool): When the parameter is false, requests with output rasters in Coordinate Reference System different from the one defined in the feature type will result in bad request response code.
+
+ Returns:
+ bytes: the raster data
+
+ Examples:
+
+ Get a raster from a feature raster property:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> feature_type = ...
+ >>> feature = ...
+ >>> raster_property_name = ...
+ >>> raster_data = client.geospatial.get_raster(feature_type.external_id, feature.external_id,
+ ... raster_property_name, "XYZ", {"SIGNIFICANT_DIGITS": "4"})
+ """
+ return run_sync(
+ self.__async_client.geospatial.get_raster(
+ feature_type_external_id=feature_type_external_id,
+ feature_external_id=feature_external_id,
+ raster_property_name=raster_property_name,
+ raster_format=raster_format,
+ raster_options=raster_options,
+ raster_srid=raster_srid,
+ raster_scale_x=raster_scale_x,
+ raster_scale_y=raster_scale_y,
+ allow_crs_transformation=allow_crs_transformation,
+ )
+ )
+
+ def compute(self, output: dict[str, GeospatialComputeFunction]) -> GeospatialComputedResponse:
+ """
+ `Compute `
+
+ Args:
+            output (dict[str, GeospatialComputeFunction]): Mapping of output keys to the compute functions to evaluate.
+
+ Returns:
+ GeospatialComputedResponse: Mapping of keys to computed items.
+
+ Examples:
+
+ Compute the transformation of an ewkt geometry from one SRID to another:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.geospatial import GeospatialGeometryTransformComputeFunction, GeospatialGeometryValueComputeFunction
+ >>> client = CogniteClient()
+ >>> compute_function = GeospatialGeometryTransformComputeFunction(GeospatialGeometryValueComputeFunction("SRID=4326;POLYGON((0 0,10 0,10 10,0 10,0 0))"), srid=23031)
+ >>> compute_result = client.geospatial.compute(output = {"output": compute_function})
+ """
+ return run_sync(self.__async_client.geospatial.compute(output=output))
diff --git a/cognite/client/_sync_api/hosted_extractors/__init__.py b/cognite/client/_sync_api/hosted_extractors/__init__.py
new file mode 100644
index 0000000000..62603921c0
--- /dev/null
+++ b/cognite/client/_sync_api/hosted_extractors/__init__.py
@@ -0,0 +1,31 @@
+"""
+===============================================================================
+a13269ade1cded310610304c48e405b6
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._sync_api.hosted_extractors.destinations import SyncDestinationsAPI
+from cognite.client._sync_api.hosted_extractors.jobs import SyncJobsAPI
+from cognite.client._sync_api.hosted_extractors.mappings import SyncMappingsAPI
+from cognite.client._sync_api.hosted_extractors.sources import SyncSourcesAPI
+from cognite.client._sync_api_client import SyncAPIClient
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncHostedExtractorsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+ self.sources = SyncSourcesAPI(async_client)
+ self.destinations = SyncDestinationsAPI(async_client)
+ self.jobs = SyncJobsAPI(async_client)
+ self.mappings = SyncMappingsAPI(async_client)
diff --git a/cognite/client/_sync_api/hosted_extractors/destinations.py b/cognite/client/_sync_api/hosted_extractors/destinations.py
new file mode 100644
index 0000000000..a72d83bba1
--- /dev/null
+++ b/cognite/client/_sync_api/hosted_extractors/destinations.py
@@ -0,0 +1,219 @@
+"""
+===============================================================================
+b9cc50d0274c8bd1cef9f8f73abb5509
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import TYPE_CHECKING, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.hosted_extractors.destinations import (
+ Destination,
+ DestinationList,
+ DestinationUpdate,
+ DestinationWrite,
+)
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncDestinationsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[Destination]: ...
+
+ @overload
+    def __call__(self, chunk_size: int) -> Iterator[DestinationList]: ...
+
+ def __call__(
+ self, chunk_size: int | None = None, limit: int | None = None
+ ) -> Iterator[Destination | DestinationList]:
+ """
+ Iterate over destinations
+
+        Fetches destinations as they are iterated over, so you keep a limited number of destinations in memory.
+
+ Args:
+            chunk_size (int | None): Number of destinations to return in each chunk. Defaults to yielding one destination at a time.
+            limit (int | None): Maximum number of destinations to return. Defaults to returning all items.
+
+ Yields:
+ Destination | DestinationList: yields Destination one by one if chunk_size is not specified, else DestinationList objects.
+ """
+ yield from SyncIterator(self.__async_client.hosted_extractors.destinations(chunk_size=chunk_size, limit=limit))
+
+ @overload
+ def retrieve(self, external_ids: str, ignore_unknown_ids: bool = False) -> Destination: ...
+
+ @overload
+ def retrieve(self, external_ids: SequenceNotStr[str], ignore_unknown_ids: bool = False) -> DestinationList: ...
+
+ def retrieve(
+ self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bool = False
+ ) -> Destination | DestinationList:
+ """
+ `Retrieve one or more destinations. `_
+
+ Args:
+ external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type.
+ ignore_unknown_ids (bool): Ignore external IDs that are not found
+
+
+ Returns:
+ Destination | DestinationList: Requested destinations
+
+ Examples:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.hosted_extractors.destinations.retrieve('myDestination')
+
+ Get multiple destinations by id:
+
+ >>> res = client.hosted_extractors.destinations.retrieve(["myDestination", "myDestination2"], ignore_unknown_ids=True)
+ """
+ return run_sync(
+ self.__async_client.hosted_extractors.destinations.retrieve(
+ external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids
+ )
+ )
+
+ def delete(
+ self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bool = False, force: bool = False
+ ) -> None:
+ """
+        `Delete one or more destinations `_
+
+ Args:
+ external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type.
+ ignore_unknown_ids (bool): Ignore external IDs that are not found
+ force (bool): Delete any jobs associated with each item.
+
+ Examples:
+
+ Delete destinations by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.hosted_extractors.destinations.delete(["myDest", "MyDest2"])
+ """
+ return run_sync(
+ self.__async_client.hosted_extractors.destinations.delete(
+ external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids, force=force
+ )
+ )
+
+ @overload
+ def create(self, items: DestinationWrite) -> Destination: ...
+
+ @overload
+ def create(self, items: Sequence[DestinationWrite]) -> DestinationList: ...
+
+ def create(self, items: DestinationWrite | Sequence[DestinationWrite]) -> Destination | DestinationList:
+ """
+ `Create one or more destinations. `_
+
+ Args:
+ items (DestinationWrite | Sequence[DestinationWrite]): Destination(s) to create.
+
+ Returns:
+ Destination | DestinationList: Created destination(s)
+
+ Examples:
+
+ Create new destination:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.hosted_extractors import DestinationWrite, SessionWrite
+ >>> client = CogniteClient()
+ >>> destination = DestinationWrite(external_id='my_dest', credentials=SessionWrite("my_nonce"), target_data_set_id=123)
+ >>> res = client.hosted_extractors.destinations.create(destination)
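+
+            Create several destinations in one call (a sketch; the nonces and data set id are placeholders):
+
+                >>> destinations = [
+                ...     DestinationWrite(external_id="my_dest", credentials=SessionWrite("my_nonce"), target_data_set_id=123),
+                ...     DestinationWrite(external_id="my_dest2", credentials=SessionWrite("my_other_nonce"), target_data_set_id=123),
+                ... ]
+                >>> res = client.hosted_extractors.destinations.create(destinations)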
+ """
+ return run_sync(self.__async_client.hosted_extractors.destinations.create(items=items))
+
+ @overload
+ def update(
+ self,
+ items: DestinationWrite | DestinationUpdate,
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> Destination: ...
+
+ @overload
+ def update(
+ self,
+ items: Sequence[DestinationWrite | DestinationUpdate],
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> DestinationList: ...
+
+ def update(
+ self,
+ items: DestinationWrite | DestinationUpdate | Sequence[DestinationWrite | DestinationUpdate],
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> Destination | DestinationList:
+ """
+ `Update one or more destinations. `_
+
+ Args:
+ items (DestinationWrite | DestinationUpdate | Sequence[DestinationWrite | DestinationUpdate]): Destination(s) to update.
+ mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (DestinationWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+
+ Returns:
+ Destination | DestinationList: Updated destination(s)
+
+ Examples:
+
+ Update destination:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.hosted_extractors import DestinationUpdate
+ >>> client = CogniteClient()
+ >>> destination = DestinationUpdate('my_dest').target_data_set_id.set(123)
+ >>> res = client.hosted_extractors.destinations.update(destination)
+ """
+ return run_sync(self.__async_client.hosted_extractors.destinations.update(items=items, mode=mode))
+
+ def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> DestinationList:
+ """
+ `List destinations `_
+
+ Args:
+ limit (int | None): Maximum number of destinations to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+
+ Returns:
+ DestinationList: List of requested destinations
+
+ Examples:
+
+ List destinations:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> destination_list = client.hosted_extractors.destinations.list(limit=5)
+
+ Iterate over destinations, one-by-one:
+
+ >>> for destination in client.hosted_extractors.destinations():
+ ... destination # do something with the destination
+
+ Iterate over chunks of destinations to reduce memory load:
+
+ >>> for destination_list in client.hosted_extractors.destinations(chunk_size=25):
+                ...     destination_list # do something with the destinations
+ """
+ return run_sync(self.__async_client.hosted_extractors.destinations.list(limit=limit))
diff --git a/cognite/client/_sync_api/hosted_extractors/jobs.py b/cognite/client/_sync_api/hosted_extractors/jobs.py
new file mode 100644
index 0000000000..4d26031f92
--- /dev/null
+++ b/cognite/client/_sync_api/hosted_extractors/jobs.py
@@ -0,0 +1,282 @@
+"""
+===============================================================================
+6ca49a702912a31decfb46b9c6b61e22
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import TYPE_CHECKING, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.hosted_extractors.jobs import (
+ Job,
+ JobList,
+ JobLogsList,
+ JobMetricsList,
+ JobUpdate,
+ JobWrite,
+)
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncJobsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[Job]: ...
+
+ @overload
+ def __call__(self, chunk_size: int) -> Iterator[JobList]: ...
+
+ def __call__(self, chunk_size: int | None = None, limit: int | None = None) -> Iterator[Job | JobList]:
+ """
+ Iterate over jobs
+
+ Fetches jobs as they are iterated over, so you keep a limited number of jobs in memory.
+
+ Args:
+            chunk_size (int | None): Number of jobs to return in each chunk. Defaults to yielding one job at a time.
+ limit (int | None): Maximum number of jobs to return. Defaults to returning all items.
+
+ Yields:
+ Job | JobList: yields Job one by one if chunk_size is not specified, else JobList objects.
+ """
+ yield from SyncIterator(self.__async_client.hosted_extractors.jobs(chunk_size=chunk_size, limit=limit))
+
+ @overload
+ def retrieve(self, external_ids: str, ignore_unknown_ids: bool = False) -> Job | None: ...
+
+ @overload
+ def retrieve(self, external_ids: SequenceNotStr[str], ignore_unknown_ids: bool = False) -> JobList: ...
+
+ def retrieve(
+ self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bool = False
+ ) -> Job | None | JobList:
+ """
+ `Retrieve one or more jobs. `_
+
+ Args:
+ external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the job type.
+ ignore_unknown_ids (bool): Ignore external IDs that are not found
+
+ Returns:
+ Job | None | JobList: Requested jobs
+
+ Examples:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.hosted_extractors.jobs.retrieve('myJob')
+
+ Get multiple jobs by id:
+
+ >>> res = client.hosted_extractors.jobs.retrieve(["myJob", "myOtherJob"], ignore_unknown_ids=True)
+ """
+ return run_sync(
+ self.__async_client.hosted_extractors.jobs.retrieve(
+ external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids
+ )
+ )
+
+ def delete(self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bool = False) -> None:
+ """
+ `Delete one or more jobs `_
+
+ Args:
+ external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type.
+ ignore_unknown_ids (bool): Ignore external IDs that are not found
+ Examples:
+
+ Delete jobs by external id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.hosted_extractors.jobs.delete(["myMQTTJob", "MyEventHubJob"])
+ """
+ return run_sync(
+ self.__async_client.hosted_extractors.jobs.delete(
+ external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids
+ )
+ )
+
+ @overload
+ def create(self, items: JobWrite) -> Job: ...
+
+ @overload
+ def create(self, items: Sequence[JobWrite]) -> JobList: ...
+
+ def create(self, items: JobWrite | Sequence[JobWrite]) -> Job | JobList:
+ """
+ `Create one or more jobs. `_
+
+ Args:
+ items (JobWrite | Sequence[JobWrite]): Job(s) to create.
+
+ Returns:
+ Job | JobList: Created job(s)
+
+ Examples:
+
+ Create new job:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.hosted_extractors import EventHubSourceWrite
+ >>> client = CogniteClient()
+ >>> job_write = EventHubSourceWrite('my_event_hub', 'http://myeventhub.com', "My EventHub", 'my_key', 'my_value')
+ >>> job = client.hosted_extractors.jobs.create(job_write)
+ """
+ return run_sync(self.__async_client.hosted_extractors.jobs.create(items=items))
+
+ @overload
+ def update(
+ self,
+ items: JobWrite | JobUpdate,
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> Job: ...
+
+ @overload
+ def update(
+ self,
+ items: Sequence[JobWrite | JobUpdate],
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> JobList: ...
+
+ def update(
+ self,
+ items: JobWrite | JobUpdate | Sequence[JobWrite | JobUpdate],
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> Job | JobList:
+ """
+ `Update one or more jobs. `_
+
+ Args:
+ items (JobWrite | JobUpdate | Sequence[JobWrite | JobUpdate]): Job(s) to update.
+ mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (JobWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+
+ Returns:
+ Job | JobList: Updated job(s)
+
+ Examples:
+
+ Update job:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.hosted_extractors import EventHubSourceUpdate
+ >>> client = CogniteClient()
+ >>> job = EventHubSourceUpdate('my_event_hub').event_hub_name.set("My Updated EventHub")
+ >>> updated_job = client.hosted_extractors.jobs.update(job)
+ """
+ return run_sync(self.__async_client.hosted_extractors.jobs.update(items=items, mode=mode))
+
+ def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> JobList:
+ """
+ `List jobs `_
+
+ Args:
+ limit (int | None): Maximum number of jobs to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+
+ Returns:
+ JobList: List of requested jobs
+
+ Examples:
+
+ List jobs:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> job_list = client.hosted_extractors.jobs.list(limit=5)
+
+ Iterate over jobs, one-by-one:
+
+ >>> for job in client.hosted_extractors.jobs():
+ ... job # do something with the job
+
+ Iterate over chunks of jobs to reduce memory load:
+
+ >>> for job_list in client.hosted_extractors.jobs(chunk_size=25):
+ ... job_list # do something with the jobs
+ """
+ return run_sync(self.__async_client.hosted_extractors.jobs.list(limit=limit))
+
+ def list_logs(
+ self,
+ job: str | None = None,
+ source: str | None = None,
+ destination: str | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ ) -> JobLogsList:
+ """
+ `List job logs. `_
+
+ Args:
+ job (str | None): Require returned logs to belong to the job given by this external ID.
+            source (str | None): Require returned logs to belong to any job with the source given by this external ID.
+            destination (str | None): Require returned logs to belong to any job with the destination given by this external ID.
+ limit (int | None): Maximum number of logs to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+
+ Returns:
+ JobLogsList: List of requested job logs
+
+ Examples:
+
+            Request logs for a specific job:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.hosted_extractors.jobs.list_logs(job="myJob")
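+
+            Request logs for all jobs using a given source (hypothetical external id):
+
+                >>> res = client.hosted_extractors.jobs.list_logs(source="mySource")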
+ """
+ return run_sync(
+ self.__async_client.hosted_extractors.jobs.list_logs(
+ job=job, source=source, destination=destination, limit=limit
+ )
+ )
+
+ def list_metrics(
+ self,
+ job: str | None = None,
+ source: str | None = None,
+ destination: str | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ ) -> JobMetricsList:
+ """
+ `List job metrics. `_
+
+ Args:
+ job (str | None): Require returned metrics to belong to the job given by this external ID.
+            source (str | None): Require returned metrics to belong to any job with the source given by this external ID.
+            destination (str | None): Require returned metrics to belong to any job with the destination given by this external ID.
+ limit (int | None): Maximum number of metrics to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+
+ Returns:
+ JobMetricsList: List of requested job metrics
+
+ Examples:
+
+            Request metrics for a specific job:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.hosted_extractors.jobs.list_metrics(job="myJob")
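+
+            Request metrics for all jobs writing to a given destination (hypothetical external id):
+
+                >>> res = client.hosted_extractors.jobs.list_metrics(destination="myDestination")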
+ """
+ return run_sync(
+ self.__async_client.hosted_extractors.jobs.list_metrics(
+ job=job, source=source, destination=destination, limit=limit
+ )
+ )
diff --git a/cognite/client/_sync_api/hosted_extractors/mappings.py b/cognite/client/_sync_api/hosted_extractors/mappings.py
new file mode 100644
index 0000000000..8b5eaef658
--- /dev/null
+++ b/cognite/client/_sync_api/hosted_extractors/mappings.py
@@ -0,0 +1,201 @@
+"""
+===============================================================================
+095cf76b161e9f80a1d645fe4494034b
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import TYPE_CHECKING, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.hosted_extractors import Mapping, MappingList, MappingUpdate, MappingWrite
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncMappingsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[Mapping]: ...
+
+ @overload
+    def __call__(self, chunk_size: int) -> Iterator[MappingList]: ...
+
+ def __call__(self, chunk_size: int | None = None, limit: int | None = None) -> Iterator[Mapping | MappingList]:
+ """
+ Iterate over mappings
+
+        Fetches mappings as they are iterated over, so you keep a limited number of mappings in memory.
+
+ Args:
+ chunk_size (int | None): Number of Mappings to return in each chunk. Defaults to yielding one mapping at a time.
+ limit (int | None): Maximum number of mappings to return. Defaults to returning all items.
+
+ Yields:
+ Mapping | MappingList: yields Mapping one by one if chunk_size is not specified, else MappingList objects.
+ """
+ yield from SyncIterator(self.__async_client.hosted_extractors.mappings(chunk_size=chunk_size, limit=limit))
+
+ @overload
+ def retrieve(self, external_ids: str, ignore_unknown_ids: bool = False) -> Mapping: ...
+
+ @overload
+ def retrieve(self, external_ids: SequenceNotStr[str], ignore_unknown_ids: bool = False) -> MappingList: ...
+
+ def retrieve(
+ self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bool = False
+ ) -> Mapping | MappingList:
+ """
+ `Retrieve one or more mappings. `_
+
+ Args:
+ external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type.
+ ignore_unknown_ids (bool): Ignore external IDs that are not found
+
+
+ Returns:
+ Mapping | MappingList: Requested mappings
+
+ Examples:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.hosted_extractors.mappings.retrieve('myMapping')
+
+ Get multiple mappings by id:
+
+ >>> res = client.hosted_extractors.mappings.retrieve(["myMapping", "myMapping2"], ignore_unknown_ids=True)
+ """
+ return run_sync(
+ self.__async_client.hosted_extractors.mappings.retrieve(
+ external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids
+ )
+ )
+
+ def delete(
+ self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bool = False, force: bool = False
+ ) -> None:
+ """
+ `Delete one or more mappings `_
+
+ Args:
+ external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type.
+ ignore_unknown_ids (bool): Ignore external IDs that are not found
+ force (bool): Delete any jobs associated with each item.
+
+ Examples:
+
+ Delete mappings by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.hosted_extractors.mappings.delete(["myMapping", "MyMapping2"])
+ """
+ return run_sync(
+ self.__async_client.hosted_extractors.mappings.delete(
+ external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids, force=force
+ )
+ )
+
+ @overload
+ def create(self, items: MappingWrite) -> Mapping: ...
+
+ @overload
+ def create(self, items: Sequence[MappingWrite]) -> MappingList: ...
+
+ def create(self, items: MappingWrite | Sequence[MappingWrite]) -> Mapping | MappingList:
+ """
+ `Create one or more mappings. `_
+
+ Args:
+ items (MappingWrite | Sequence[MappingWrite]): Mapping(s) to create.
+
+ Returns:
+ Mapping | MappingList: Created mapping(s)
+
+ Examples:
+
+ Create new mapping:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.hosted_extractors import MappingWrite, CustomMapping
+ >>> client = CogniteClient()
+ >>> mapping = MappingWrite(external_id="my_mapping", mapping=CustomMapping("some expression"), published=True, input="json")
+ >>> res = client.hosted_extractors.mappings.create(mapping)
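+
+            Create several mappings at once (a sketch; the expressions are placeholders):
+
+                >>> mappings = [
+                ...     MappingWrite(external_id="mapping_1", mapping=CustomMapping("some expression"), published=True, input="json"),
+                ...     MappingWrite(external_id="mapping_2", mapping=CustomMapping("another expression"), published=False, input="json"),
+                ... ]
+                >>> res = client.hosted_extractors.mappings.create(mappings)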
+ """
+ return run_sync(self.__async_client.hosted_extractors.mappings.create(items=items))
+
+ @overload
+ def update(self, items: MappingWrite | MappingUpdate) -> Mapping: ...
+
+ @overload
+ def update(self, items: Sequence[MappingWrite | MappingUpdate]) -> MappingList: ...
+
+ def update(
+ self, items: MappingWrite | MappingUpdate | Sequence[MappingWrite | MappingUpdate]
+ ) -> Mapping | MappingList:
+ """
+ `Update one or more mappings. `_
+
+ Args:
+ items (MappingWrite | MappingUpdate | Sequence[MappingWrite | MappingUpdate]): Mapping(s) to update.
+
+ Returns:
+ Mapping | MappingList: Updated mapping(s)
+
+ Examples:
+
+ Update mapping:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.hosted_extractors import MappingUpdate
+ >>> client = CogniteClient()
+ >>> mapping = MappingUpdate('my_mapping').published.set(False)
+ >>> res = client.hosted_extractors.mappings.update(mapping)
+ """
+ return run_sync(self.__async_client.hosted_extractors.mappings.update(items=items))
+
+ def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> MappingList:
+ """
+ `List mappings `_
+
+ Args:
+ limit (int | None): Maximum number of mappings to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+
+ Returns:
+ MappingList: List of requested mappings
+
+ Examples:
+
+ List mappings:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> mapping_list = client.hosted_extractors.mappings.list(limit=5)
+
+ Iterate over mappings, one-by-one:
+
+ >>> for mapping in client.hosted_extractors.mappings():
+ ... mapping # do something with the mapping
+
+ Iterate over chunks of mappings to reduce memory load:
+
+ >>> for mapping_list in client.hosted_extractors.mappings(chunk_size=25):
+ ... mapping_list # do something with the mappings
+ """
+ return run_sync(self.__async_client.hosted_extractors.mappings.list(limit=limit))
diff --git a/cognite/client/_sync_api/hosted_extractors/sources.py b/cognite/client/_sync_api/hosted_extractors/sources.py
new file mode 100644
index 0000000000..23f9a720d3
--- /dev/null
+++ b/cognite/client/_sync_api/hosted_extractors/sources.py
@@ -0,0 +1,210 @@
+"""
+===============================================================================
+05d0853992db787473021dbdd653b725
+This file is auto-generated from the Async API modules, - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import TYPE_CHECKING, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.hosted_extractors.sources import Source, SourceList, SourceUpdate, SourceWrite
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncSourcesAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[Source]: ...
+
+ @overload
+ def __call__(self, chunk_size: int) -> Iterator[SourceList]: ...
+
+ def __call__(self, chunk_size: int | None = None, limit: int | None = None) -> Iterator[Source | SourceList]:
+ """
+ Iterate over sources
+
+ Fetches sources as they are iterated over, so you keep a limited number of sources in memory.
+
+ Args:
+            chunk_size (int | None): Number of sources to return in each chunk. Defaults to yielding one source at a time.
+ limit (int | None): Maximum number of sources to return. Defaults to returning all items.
+
+ Yields:
+ Source | SourceList: yields Source one by one if chunk_size is not specified, else SourceList objects.
+ """
+ yield from SyncIterator(self.__async_client.hosted_extractors.sources(chunk_size=chunk_size, limit=limit))
+
+ @overload
+ def retrieve(self, external_ids: str, ignore_unknown_ids: bool = False) -> Source: ...
+
+ @overload
+ def retrieve(self, external_ids: SequenceNotStr[str], ignore_unknown_ids: bool = False) -> SourceList: ...
+
+ def retrieve(
+ self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bool = False
+ ) -> Source | SourceList:
+ """
+ `Retrieve one or more sources. `_
+
+ Args:
+ external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type.
+ ignore_unknown_ids (bool): Ignore external IDs that are not found rather than throw an exception.
+
+ Returns:
+ Source | SourceList: Requested sources
+
+ Examples:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.hosted_extractors.sources.retrieve('myMQTTSource')
+
+ Get multiple sources by id:
+
+ >>> res = client.hosted_extractors.sources.retrieve(["myMQTTSource", "MyEventHubSource"], ignore_unknown_ids=True)
+ """
+ return run_sync(
+ self.__async_client.hosted_extractors.sources.retrieve(
+ external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids
+ )
+ )
+
+ def delete(
+ self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bool = False, force: bool = False
+ ) -> None:
+ """
+ `Delete one or more sources `_
+
+ Args:
+ external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type.
+ ignore_unknown_ids (bool): Ignore external IDs that are not found rather than throw an exception.
+ force (bool): Delete any jobs associated with each item.
+ Examples:
+
+ Delete sources by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.hosted_extractors.sources.delete(["myMQTTSource", "MyEventHubSource"])
+ """
+ return run_sync(
+ self.__async_client.hosted_extractors.sources.delete(
+ external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids, force=force
+ )
+ )
+
+ @overload
+ def create(self, items: SourceWrite) -> Source: ...
+
+ @overload
+ def create(self, items: Sequence[SourceWrite]) -> SourceList: ...
+
+ def create(self, items: SourceWrite | Sequence[SourceWrite]) -> Source | SourceList:
+ """
+ `Create one or more sources. `_
+
+ Args:
+ items (SourceWrite | Sequence[SourceWrite]): Source(s) to create.
+
+ Returns:
+ Source | SourceList: Created source(s)
+
+ Examples:
+
+ Create new source:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.hosted_extractors import EventHubSourceWrite
+ >>> client = CogniteClient()
+ >>> source = EventHubSourceWrite('my_event_hub', 'http://myeventhub.com', "My EventHub", 'my_key', 'my_value')
+ >>> res = client.hosted_extractors.sources.create(source)
+ """
+ return run_sync(self.__async_client.hosted_extractors.sources.create(items=items))
+
+ @overload
+ def update(
+ self,
+ items: SourceWrite | SourceUpdate,
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> Source: ...
+
+ @overload
+ def update(
+ self,
+ items: Sequence[SourceWrite | SourceUpdate],
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> SourceList: ...
+
+ def update(
+ self,
+ items: SourceWrite | SourceUpdate | Sequence[SourceWrite | SourceUpdate],
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> Source | SourceList:
+ """
+ `Update one or more sources. `_
+
+ Args:
+ items (SourceWrite | SourceUpdate | Sequence[SourceWrite | SourceUpdate]): Source(s) to update.
+ mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (SourceWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+
+ Returns:
+ Source | SourceList: Updated source(s)
+
+ Examples:
+
+ Update source:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.hosted_extractors import EventHubSourceUpdate
+ >>> client = CogniteClient()
+ >>> source = EventHubSourceUpdate('my_event_hub').event_hub_name.set("My Updated EventHub")
+ >>> res = client.hosted_extractors.sources.update(source)
+ """
+ return run_sync(self.__async_client.hosted_extractors.sources.update(items=items, mode=mode))
+
+ def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> SourceList:
+ """
+ `List sources `_
+
+ Args:
+ limit (int | None): Maximum number of sources to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+
+ Returns:
+ SourceList: List of requested sources
+
+ Examples:
+
+ List sources:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> source_list = client.hosted_extractors.sources.list(limit=5)
+
+ Iterate over sources, one-by-one:
+
+ >>> for source in client.hosted_extractors.sources():
+ ... source # do something with the source
+
+ Iterate over chunks of sources to reduce memory load:
+
+ >>> for source_list in client.hosted_extractors.sources(chunk_size=25):
+ ... source_list # do something with the sources
+ """
+ return run_sync(self.__async_client.hosted_extractors.sources.list(limit=limit))
diff --git a/cognite/client/_sync_api/iam/__init__.py b/cognite/client/_sync_api/iam/__init__.py
new file mode 100644
index 0000000000..316affb07f
--- /dev/null
+++ b/cognite/client/_sync_api/iam/__init__.py
@@ -0,0 +1,155 @@
+"""
+===============================================================================
+4757d559b846e5066a596fd5502a1c79
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._api.iam import ComparableCapability
+from cognite.client._sync_api.iam.groups import SyncGroupsAPI
+from cognite.client._sync_api.iam.security_categories import SyncSecurityCategoriesAPI
+from cognite.client._sync_api.iam.sessions import SyncSessionsAPI
+from cognite.client._sync_api.iam.token import SyncTokenAPI
+from cognite.client._sync_api.org_apis.principals import SyncPrincipalsAPI
+from cognite.client._sync_api.user_profiles import SyncUserProfilesAPI
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.capabilities import (
+ Capability,
+)
+from cognite.client.utils._async_helpers import run_sync
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncIAMAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+ self.groups = SyncGroupsAPI(async_client)
+ self.security_categories = SyncSecurityCategoriesAPI(async_client)
+ self.sessions = SyncSessionsAPI(async_client)
+ self.user_profiles = SyncUserProfilesAPI(async_client)
+ self.principals = SyncPrincipalsAPI(async_client)
+ self.token = SyncTokenAPI(async_client)
+
+ def compare_capabilities(
+ self,
+ existing_capabilities: ComparableCapability,
+ desired_capabilities: ComparableCapability,
+ project: str | None = None,
+ ) -> list[Capability]:
+ """
+ Helper method to compare capabilities across two groups (of capabilities) to find which are missing from the first.
+
+ Note:
+ Capabilities that are no longer in use by the API will be ignored. These have names prefixed with `Legacy` and
+ all inherit from the base class `LegacyCapability`. If you want to check for these, you must do so manually.
+
+ Tip:
+ If you just want to check against your existing capabilities, you may use the helper method
+ ``client.iam.verify_capabilities`` instead.
+
+ Args:
+ existing_capabilities (ComparableCapability): List of existing capabilities.
+ desired_capabilities (ComparableCapability): List of wanted capabilities to check against existing.
+ project (str | None): If a ProjectCapability or ProjectCapabilityList is passed, we need to know which CDF project
+ to pull capabilities from (existing might be from several). If project is not passed, and ProjectCapabilityList
+                is used, it will be inferred from the AsyncCogniteClient that was used to retrieve it (via token/inspect).
+
+ Returns:
+ list[Capability]: A flattened list of the missing capabilities, meaning they each have exactly 1 action, 1 scope, 1 id etc.
+
+ Examples:
+
+            Ensure that a user's groups grant read and write access for assets in all scopes,
+            and write access for events, scoped to a specific dataset with id=123:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.capabilities import AssetsAcl, EventsAcl
+ >>> client = CogniteClient()
+ >>> my_groups = client.iam.groups.list(all=False)
+ >>> to_check = [
+ ... AssetsAcl(
+ ... actions=[AssetsAcl.Action.Read, AssetsAcl.Action.Write],
+ ... scope=AssetsAcl.Scope.All()),
+ ... EventsAcl(
+ ... actions=[EventsAcl.Action.Write],
+ ... scope=EventsAcl.Scope.DataSet([123]),
+ ... )]
+ >>> missing = client.iam.compare_capabilities(
+ ... existing_capabilities=my_groups,
+ ... desired_capabilities=to_check)
+ >>> if missing:
+ ... pass # do something
+
+ Capabilities can also be passed as dictionaries:
+
+ >>> to_check = [
+ ... {'assetsAcl': {'actions': ['READ', 'WRITE'], 'scope': {'all': {}}}},
+ ... {'eventsAcl': {'actions': ['WRITE'], 'scope': {'datasetScope': {'ids': [123]}}}},
+ ... ]
+ >>> missing = client.iam.compare_capabilities(
+ ... existing_capabilities=my_groups,
+ ... desired_capabilities=to_check)
+
+            You may also load capabilities from a dict-representation directly into ACLs (access-control lists)
+ by using ``Capability.load``. This will also ensure that the capabilities are valid.
+
+ >>> from cognite.client.data_classes.capabilities import Capability
+ >>> acls = [Capability.load(cap) for cap in to_check]
+ """
+ return self.__async_client.iam.compare_capabilities(
+ existing_capabilities=existing_capabilities, desired_capabilities=desired_capabilities, project=project
+ )
+
+ def verify_capabilities(self, desired_capabilities: ComparableCapability) -> list[Capability]:
+ """
+ Helper method to compare your current capabilities with a set of desired capabilities and return any missing.
+
+ Args:
+ desired_capabilities (ComparableCapability): List of desired capabilities to check against existing.
+
+ Returns:
+ list[Capability]: A flattened list of the missing capabilities, meaning they each have exactly 1 action, 1 scope, 1 id etc.
+
+ Examples:
+
+            Ensure that the user's credentials grant read and write access for assets in all scopes,
+            and write access for events, scoped to a specific dataset with id=123:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.capabilities import AssetsAcl, EventsAcl
+ >>> client = CogniteClient()
+ >>> to_check = [
+ ... AssetsAcl(
+ ... actions=[AssetsAcl.Action.Read, AssetsAcl.Action.Write],
+ ... scope=AssetsAcl.Scope.All()),
+ ... EventsAcl(
+ ... actions=[EventsAcl.Action.Write],
+ ... scope=EventsAcl.Scope.DataSet([123]),
+ ... )]
+ >>> if missing := client.iam.verify_capabilities(to_check):
+ ... pass # do something
+
+ Capabilities can also be passed as dictionaries:
+
+ >>> to_check = [
+ ... {'assetsAcl': {'actions': ['READ', 'WRITE'], 'scope': {'all': {}}}},
+ ... {'eventsAcl': {'actions': ['WRITE'], 'scope': {'datasetScope': {'ids': [123]}}}},
+ ... ]
+ >>> missing = client.iam.verify_capabilities(to_check)
+
+            You may also load capabilities from a dict-representation directly into ACLs (access-control lists)
+ by using ``Capability.load``. This will also ensure that the capabilities are valid.
+
+ >>> from cognite.client.data_classes.capabilities import Capability
+ >>> acls = [Capability.load(cap) for cap in to_check]
+ """
+ return run_sync(self.__async_client.iam.verify_capabilities(desired_capabilities=desired_capabilities))
diff --git a/cognite/client/_sync_api/iam/groups.py b/cognite/client/_sync_api/iam/groups.py
new file mode 100644
index 0000000000..d198967141
--- /dev/null
+++ b/cognite/client/_sync_api/iam/groups.py
@@ -0,0 +1,140 @@
+"""
+===============================================================================
+f25241082c8f9272e0bb26f7b5814867
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Sequence
+from typing import TYPE_CHECKING, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import Group, GroupList
+from cognite.client.data_classes.iam import GroupWrite
+from cognite.client.utils._async_helpers import run_sync
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncGroupsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ def list(self, all: bool = False) -> GroupList:
+ """
+ `List groups. `_
+
+ Args:
+ all (bool): Whether to get all groups, only available with the groups:list acl.
+
+ Returns:
+ GroupList: List of groups.
+
+ Example:
+
+ List your own groups:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> my_groups = client.iam.groups.list()
+
+ List all groups:
+
+ >>> all_groups = client.iam.groups.list(all=True)
+ """
+ return run_sync(self.__async_client.iam.groups.list(all=all))
+
+ @overload
+ def create(self, group: Group | GroupWrite) -> Group: ...
+
+ @overload
+ def create(self, group: Sequence[Group] | Sequence[GroupWrite]) -> GroupList: ...
+
+ def create(self, group: Group | GroupWrite | Sequence[Group] | Sequence[GroupWrite]) -> Group | GroupList:
+ """
+ `Create one or more groups. `_
+
+ Args:
+ group (Group | GroupWrite | Sequence[Group] | Sequence[GroupWrite]): Group or list of groups to create.
+
+        Returns:
+ Group | GroupList: The created group(s).
+
+ Example:
+
+ Create a group without any members:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import GroupWrite
+ >>> from cognite.client.data_classes.capabilities import AssetsAcl, EventsAcl
+ >>> client = CogniteClient()
+ >>> my_capabilities = [
+ ... AssetsAcl([AssetsAcl.Action.Read], AssetsAcl.Scope.All()),
+ ... EventsAcl([EventsAcl.Action.Write], EventsAcl.Scope.DataSet([123, 456]))]
+ >>> my_group = GroupWrite(name="My Group", capabilities=my_capabilities)
+ >>> res = client.iam.groups.create(my_group)
+
+ Create a group whose members are managed externally (by your company's identity provider (IdP)).
+ This is done by using the ``source_id`` field. If this is the same ID as a group in the IdP,
+ a user in that group will implicitly be a part of this group as well.
+
+ >>> grp = GroupWrite(
+ ... name="Externally managed group",
+ ... capabilities=my_capabilities,
+ ... source_id="b7c9a5a4...")
+ >>> res = client.iam.groups.create(grp)
+
+ Create a group whose members are managed internally by Cognite. This group may grant access through
+ listing specific users or include them all. This is done by passing the ``members`` field, either a
+ list of strings with the unique user identifiers or as the constant ``ALL_USER_ACCOUNTS``. To find the
+ user identifiers, you may use the UserProfilesAPI: ``client.iam.user_profiles.list()``.
+
+ >>> from cognite.client.data_classes import ALL_USER_ACCOUNTS
+ >>> all_group = GroupWrite(
+ ... name="Everyone is welcome!",
+ ... capabilities=my_capabilities,
+ ... members=ALL_USER_ACCOUNTS,
+ ... )
+ >>> user_list_group = GroupWrite(
+                ...     name="Specific users only",
+ ... capabilities=my_capabilities,
+ ... members=["XRsSD1k3mTIKG", "M0SxY6bM9Jl"])
+ >>> res = client.iam.groups.create([user_list_group, all_group])
+
+ Capabilities are often defined in configuration files, like YAML or JSON. You may convert capabilities
+            from a dict-representation directly into ACLs (access-control lists) by using ``Capability.load``.
+ This will also ensure that the capabilities are valid.
+
+ >>> from cognite.client.data_classes.capabilities import Capability
+ >>> unparsed_capabilities = [
+ ... {'assetsAcl': {'actions': ['READ', 'WRITE'], 'scope': {'all': {}}}},
+ ... {'eventsAcl': {'actions': ['WRITE'], 'scope': {'datasetScope': {'ids': [123]}}}},
+ ... ]
+ >>> acls = [Capability.load(cap) for cap in unparsed_capabilities]
+ >>> group = GroupWrite(name="Another group", capabilities=acls)
+ """
+ return run_sync(self.__async_client.iam.groups.create(group=group))
+
+ def delete(self, id: int | Sequence[int]) -> None:
+ """
+ `Delete one or more groups. `_
+
+ Args:
+ id (int | Sequence[int]): ID or list of IDs of groups to delete.
+
+ Example:
+
+ Delete group::
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.iam.groups.delete(1)
+ """
+ return run_sync(self.__async_client.iam.groups.delete(id=id))
diff --git a/cognite/client/_sync_api/iam/security_categories.py b/cognite/client/_sync_api/iam/security_categories.py
new file mode 100644
index 0000000000..5924edd3a8
--- /dev/null
+++ b/cognite/client/_sync_api/iam/security_categories.py
@@ -0,0 +1,100 @@
+"""
+===============================================================================
+7f0398db4522da948046b7d45c0017ad
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Sequence
+from typing import overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import SecurityCategory, SecurityCategoryList
+from cognite.client.data_classes.iam import SecurityCategoryWrite
+from cognite.client.utils._async_helpers import run_sync
+
+
+class SyncSecurityCategoriesAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> SecurityCategoryList:
+ """
+ `List security categories. `_
+
+ Args:
+ limit (int | None): Max number of security categories to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+
+ Returns:
+ SecurityCategoryList: List of security categories
+
+ Example:
+
+ List security categories::
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.iam.security_categories.list()
+ """
+ return run_sync(self.__async_client.iam.security_categories.list(limit=limit))
+
+ @overload
+ def create(self, security_category: SecurityCategory | SecurityCategoryWrite) -> SecurityCategory: ...
+
+ @overload
+ def create(
+ self, security_category: Sequence[SecurityCategory] | Sequence[SecurityCategoryWrite]
+ ) -> SecurityCategoryList: ...
+
+ def create(
+ self,
+ security_category: SecurityCategory
+ | SecurityCategoryWrite
+ | Sequence[SecurityCategory]
+ | Sequence[SecurityCategoryWrite],
+ ) -> SecurityCategory | SecurityCategoryList:
+ """
+ `Create one or more security categories. `_
+
+ Args:
+ security_category (SecurityCategory | SecurityCategoryWrite | Sequence[SecurityCategory] | Sequence[SecurityCategoryWrite]): Security category or list of categories to create.
+
+ Returns:
+ SecurityCategory | SecurityCategoryList: The created security category or categories.
+
+ Example:
+
+ Create security category::
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import SecurityCategoryWrite
+ >>> client = CogniteClient()
+ >>> my_category = SecurityCategoryWrite(name="My Category")
+ >>> res = client.iam.security_categories.create(my_category)
+ """
+ return run_sync(self.__async_client.iam.security_categories.create(security_category=security_category))
+
+ def delete(self, id: int | Sequence[int]) -> None:
+ """
+ `Delete one or more security categories. `_
+
+ Args:
+ id (int | Sequence[int]): ID or list of IDs of security categories to delete.
+
+ Example:
+
+ Delete security category::
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.iam.security_categories.delete(1)
+ """
+ return run_sync(self.__async_client.iam.security_categories.delete(id=id))
diff --git a/cognite/client/_sync_api/iam/sessions.py b/cognite/client/_sync_api/iam/sessions.py
new file mode 100644
index 0000000000..e0d609cf86
--- /dev/null
+++ b/cognite/client/_sync_api/iam/sessions.py
@@ -0,0 +1,109 @@
+"""
+===============================================================================
+edd85437841df66e4e84593b2edf78d2
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Sequence
+from typing import TYPE_CHECKING, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import ClientCredentials, CreatedSession, Session, SessionList
+from cognite.client.data_classes.iam import SessionStatus, SessionType
+from cognite.client.utils._async_helpers import run_sync
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncSessionsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ def create(
+ self,
+ client_credentials: ClientCredentials | None = None,
+ session_type: SessionType | Literal["DEFAULT"] = "DEFAULT",
+ ) -> CreatedSession:
+ """
+ `Create a session. `_
+
+ Args:
+ client_credentials (ClientCredentials | None): The client credentials to create the session. This is required
+ if session_type is set to 'CLIENT_CREDENTIALS'.
+ session_type (SessionType | Literal['DEFAULT']): The type of session to create. Can be
+ either 'CLIENT_CREDENTIALS', 'TOKEN_EXCHANGE', 'ONESHOT_TOKEN_EXCHANGE' or 'DEFAULT'.
+                Defaults to 'DEFAULT', which will use *this* AsyncCogniteClient object to create the session.
+ If this client was created using a token, 'TOKEN_EXCHANGE' will be used, and if
+ this client was created using client credentials, 'CLIENT_CREDENTIALS' will be used.
+
+ Session Types:
+
+ * **client_credentials**: Credentials for a session using client credentials from an identity provider.
+ * **token_exchange**: Credentials for a session using token exchange to reuse the user's credentials.
+ * **one_shot_token_exchange**: Credentials for a session using one-shot token exchange to reuse the user's credentials. One-shot sessions are short-lived sessions that are not refreshed and do not require support for token exchange from the identity provider.
+
+ Returns:
+ CreatedSession: The object with token inspection details.
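+
+        Example:
+
+            Create a session using client credentials (a minimal sketch; the environment variable
+            names used for the client id and secret are placeholders):
+
+                >>> import os
+                >>> from cognite.client import CogniteClient
+                >>> from cognite.client.data_classes import ClientCredentials
+                >>> client = CogniteClient()
+                >>> session = client.iam.sessions.create(
+                ...     ClientCredentials(os.environ["IDP_CLIENT_ID"], os.environ["IDP_CLIENT_SECRET"]),
+                ...     session_type="CLIENT_CREDENTIALS")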
+ """
+ return run_sync(
+ self.__async_client.iam.sessions.create(client_credentials=client_credentials, session_type=session_type)
+ )
+
+ @overload
+ def revoke(self, id: int) -> Session: ...
+
+ @overload
+ def revoke(self, id: Sequence[int]) -> SessionList: ...
+
+ def revoke(self, id: int | Sequence[int]) -> Session | SessionList:
+ """
+ `Revoke access to a session. Revocation of a session may in some cases take up to 1 hour to take effect. `_
+
+ Args:
+ id (int | Sequence[int]): Id or list of session ids
+
+ Returns:
+ Session | SessionList: List of revoked sessions. If the user does not have the sessionsAcl:LIST capability, then only the session IDs will be present in the response.
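+
+        Example:
+
+            Revoke a single session (the id used here is a placeholder):
+
+                >>> from cognite.client import CogniteClient
+                >>> client = CogniteClient()
+                >>> res = client.iam.sessions.revoke(id=123)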
+ """
+ return run_sync(self.__async_client.iam.sessions.revoke(id=id))
+
+ @overload
+ def retrieve(self, id: int) -> Session: ...
+
+ @overload
+ def retrieve(self, id: Sequence[int]) -> SessionList: ...
+
+ def retrieve(self, id: int | Sequence[int]) -> Session | SessionList:
+ """
+ `Retrieves sessions with given IDs. `_
+
+ The request will fail if any of the IDs does not belong to an existing session.
+
+ Args:
+ id (int | Sequence[int]): Id or list of session ids
+
+ Returns:
+ Session | SessionList: Session or list of sessions.
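+
+        Example:
+
+            Retrieve multiple sessions by id (the ids used here are placeholders):
+
+                >>> from cognite.client import CogniteClient
+                >>> client = CogniteClient()
+                >>> res = client.iam.sessions.retrieve(id=[123, 456])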
+ """
+ return run_sync(self.__async_client.iam.sessions.retrieve(id=id))
+
+ def list(self, status: SessionStatus | None = None, limit: int = DEFAULT_LIMIT_READ) -> SessionList:
+ """
+ `List all sessions in the current project. `_
+
+ Args:
+ status (SessionStatus | None): If given, only sessions with the given status are returned.
+ limit (int): Max number of sessions to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+
+ Returns:
+ SessionList: a list of sessions in the current project.
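+
+        Example:
+
+            List sessions in the current project:
+
+                >>> from cognite.client import CogniteClient
+                >>> client = CogniteClient()
+                >>> session_list = client.iam.sessions.list(limit=5)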
+ """
+ return run_sync(self.__async_client.iam.sessions.list(status=status, limit=limit))
diff --git a/cognite/client/_sync_api/iam/token.py b/cognite/client/_sync_api/iam/token.py
new file mode 100644
index 0000000000..6be8edab7e
--- /dev/null
+++ b/cognite/client/_sync_api/iam/token.py
@@ -0,0 +1,40 @@
+"""
+===============================================================================
+5268479111509d912fc224eb231afa08
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.iam import TokenInspection
+from cognite.client.utils._async_helpers import run_sync
+
+
+class SyncTokenAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ def inspect(self) -> TokenInspection:
+ """
+ Inspect a token.
+
+ Get details about which projects it belongs to and which capabilities are granted to it.
+
+ Returns:
+ TokenInspection: The object with token inspection details.
+
+ Example:
+
+ Inspect token::
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.iam.token.inspect()
+ """
+ return run_sync(self.__async_client.iam.token.inspect())
diff --git a/cognite/client/_sync_api/labels.py b/cognite/client/_sync_api/labels.py
new file mode 100644
index 0000000000..1a846d1ce6
--- /dev/null
+++ b/cognite/client/_sync_api/labels.py
@@ -0,0 +1,205 @@
+"""
+===============================================================================
+1f594af9469b8c7776ec026b65f20394
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import (
+ LabelDefinition,
+ LabelDefinitionList,
+ LabelDefinitionWrite,
+)
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+
+class SyncLabelsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[LabelDefinition]: ...
+
+ @overload
+ def __call__(self, chunk_size: int) -> Iterator[LabelDefinitionList]: ...
+
+ def __call__(
+ self,
+ chunk_size: int | None = None,
+ name: str | None = None,
+ external_id_prefix: str | None = None,
+ limit: int | None = None,
+ data_set_ids: int | Sequence[int] | None = None,
+ data_set_external_ids: str | SequenceNotStr[str] | None = None,
+ ) -> Iterator[LabelDefinition | LabelDefinitionList]:
+ """
+ Iterate over Labels
+
+ Args:
+            chunk_size (int | None): Number of Labels to return in each chunk. Defaults to yielding one Label at a time.
+ name (str | None): returns the label definitions matching that name
+ external_id_prefix (str | None): filter label definitions with external ids starting with the prefix specified
+            limit (int | None): Maximum number of label definitions to return. Defaults to returning all labels.
+ data_set_ids (int | Sequence[int] | None): return only labels in the data sets with this id / these ids.
+ data_set_external_ids (str | SequenceNotStr[str] | None): return only labels in the data sets with this external id / these external ids.
+
+ Yields:
+ LabelDefinition | LabelDefinitionList: yields Labels one by one or in chunks.
+ """
+ yield from SyncIterator(
+ self.__async_client.labels(
+ chunk_size=chunk_size,
+ name=name,
+ external_id_prefix=external_id_prefix,
+ limit=limit,
+ data_set_ids=data_set_ids,
+ data_set_external_ids=data_set_external_ids,
+ )
+ )
+
+ @overload
+ def retrieve(self, external_id: str, ignore_unknown_ids: Literal[True]) -> LabelDefinition | None: ...
+
+ @overload
+ def retrieve(self, external_id: str, ignore_unknown_ids: Literal[False] = False) -> LabelDefinition: ...
+
+ @overload
+ def retrieve(self, external_id: SequenceNotStr[str], ignore_unknown_ids: bool = False) -> LabelDefinitionList: ...
+
+ def retrieve(
+ self, external_id: str | SequenceNotStr[str], ignore_unknown_ids: bool = False
+ ) -> LabelDefinition | LabelDefinitionList | None:
+ """
+ `Retrieve one or more label definitions by external id. `_
+
+ Args:
+ external_id (str | SequenceNotStr[str]): External ID or list of external ids
+ ignore_unknown_ids (bool): If True, ignore IDs and external IDs that are not found rather than throw an exception.
+
+ Returns:
+ LabelDefinition | LabelDefinitionList | None: The requested label definition(s)
+
+ Examples:
+
+ Get label by external id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.labels.retrieve(external_id="my_label", ignore_unknown_ids=True)
+ """
+ return run_sync(
+ self.__async_client.labels.retrieve(external_id=external_id, ignore_unknown_ids=ignore_unknown_ids)
+ )
+
+ def list(
+ self,
+ name: str | None = None,
+ external_id_prefix: str | None = None,
+ data_set_ids: int | Sequence[int] | None = None,
+ data_set_external_ids: str | SequenceNotStr[str] | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ ) -> LabelDefinitionList:
+ """
+ `List Labels `_
+
+ Args:
+ name (str | None): returns the label definitions matching that name
+ external_id_prefix (str | None): filter label definitions with external ids starting with the prefix specified
+ data_set_ids (int | Sequence[int] | None): return only labels in the data sets with this id / these ids.
+ data_set_external_ids (str | SequenceNotStr[str] | None): return only labels in the data sets with this external id / these external ids.
+ limit (int | None): Maximum number of label definitions to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+
+ Returns:
+ LabelDefinitionList: List of requested Labels
+
+ Examples:
+
+ List Labels and filter on name:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> label_list = client.labels.list(limit=5, name="Pump")
+
+ Iterate over label definitions, one-by-one:
+
+ >>> for label in client.labels():
+ ... label # do something with the label definition
+
+ Iterate over chunks of label definitions to reduce memory load:
+
+ >>> for label_list in client.labels(chunk_size=2500):
+                ...     label_list # do something with the label definitions
+ """
+ return run_sync(
+ self.__async_client.labels.list(
+ name=name,
+ external_id_prefix=external_id_prefix,
+ data_set_ids=data_set_ids,
+ data_set_external_ids=data_set_external_ids,
+ limit=limit,
+ )
+ )
+
+ @overload
+ def create(self, label: LabelDefinition | LabelDefinitionWrite) -> LabelDefinition: ...
+
+ @overload
+ def create(self, label: Sequence[LabelDefinition | LabelDefinitionWrite]) -> LabelDefinitionList: ...
+
+ def create(
+ self, label: LabelDefinition | LabelDefinitionWrite | Sequence[LabelDefinition | LabelDefinitionWrite]
+ ) -> LabelDefinition | LabelDefinitionList:
+ """
+ `Create one or more label definitions. `_
+
+ Args:
+ label (LabelDefinition | LabelDefinitionWrite | Sequence[LabelDefinition | LabelDefinitionWrite]): The label definition(s) to create.
+
+ Returns:
+ LabelDefinition | LabelDefinitionList: Created label definition(s)
+
+ Raises:
+ TypeError: Function input 'label' is of the wrong type
+
+ Examples:
+
+ Create new label definitions:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import LabelDefinitionWrite
+ >>> client = CogniteClient()
+ >>> labels = [LabelDefinitionWrite(external_id="ROTATING_EQUIPMENT", name="Rotating equipment"), LabelDefinitionWrite(external_id="PUMP", name="pump")]
+ >>> res = client.labels.create(labels)
+ """
+ return run_sync(self.__async_client.labels.create(label=label))
+
+ def delete(self, external_id: str | SequenceNotStr[str] | None = None) -> None:
+ """
+ `Delete one or more label definitions `_
+
+ Args:
+ external_id (str | SequenceNotStr[str] | None): One or more label external ids
+
+ Examples:
+
+ Delete label definitions by external id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.labels.delete(external_id=["big_pump", "small_pump"])
+ """
+ return run_sync(self.__async_client.labels.delete(external_id=external_id))
diff --git a/cognite/client/_sync_api/org_apis/principals.py b/cognite/client/_sync_api/org_apis/principals.py
new file mode 100644
index 0000000000..53b3791335
--- /dev/null
+++ b/cognite/client/_sync_api/org_apis/principals.py
@@ -0,0 +1,116 @@
+"""
+===============================================================================
+c58c6fb6020424956202225e2d5994fc
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Sequence
+from typing import overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.principals import Principal, PrincipalList
+from cognite.client.utils._async_helpers import run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+
+class SyncPrincipalsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ def me(self) -> Principal:
+ """
+ `Get the current caller's information. `_
+
+ Returns:
+ Principal: The principal of the user running the code, i.e. the
+ principal *this* AsyncCogniteClient was instantiated with.
+
+ Examples:
+
+            Get your own principal:
+
+            >>> from cognite.client import CogniteClient
+            >>> client = CogniteClient()
+            >>> # async_client = AsyncCogniteClient() # another option
+            >>> res = client.iam.principals.me()
+ """
+ return run_sync(self.__async_client.iam.principals.me())
+
+ @overload
+ def retrieve(self, id: str) -> Principal | None: ...
+
+ @overload
+ def retrieve(self, *, external_id: str) -> Principal | None: ...
+
+ @overload
+ def retrieve(self, id: SequenceNotStr[str], *, ignore_unknown_ids: bool = False) -> PrincipalList: ...
+
+ @overload
+ def retrieve(self, *, external_id: SequenceNotStr[str], ignore_unknown_ids: bool = False) -> PrincipalList: ...
+
+ @overload
+ def retrieve(self, id: None = None, *, ignore_unknown_ids: bool = False) -> PrincipalList: ...
+
+ @overload
+ def retrieve(self, *, external_id: None = None, ignore_unknown_ids: bool = False) -> PrincipalList: ...
+
+ def retrieve(
+ self,
+ id: str | Sequence[str] | None = None,
+ external_id: str | Sequence[str] | None = None,
+ ignore_unknown_ids: bool = False,
+ ) -> Principal | PrincipalList | None:
+ """
+ `Retrieve principal by reference in the organization `_
+
+ Args:
+ id (str | Sequence[str] | None): The ID(s) of the principal(s) to retrieve.
+ external_id (str | Sequence[str] | None): The external ID(s) of the principal to retrieve.
+ ignore_unknown_ids (bool): This is only relevant when retrieving multiple principals. If set to True,
+ the method will return the principals that were found and ignore the ones that were not found.
+ If set to False, the method will raise a CogniteAPIError if any of the
+ specified principals were not found. Defaults to False.
+
+ Returns:
+ Principal | PrincipalList | None: The principal(s) with the specified ID(s) or external ID(s).
+
+ Examples:
+
+            Retrieve a principal by ID:
+
+            >>> from cognite.client import CogniteClient
+            >>> client = CogniteClient()
+            >>> # async_client = AsyncCogniteClient() # another option
+            >>> res = client.iam.principals.retrieve(id="20u3of8-1234-5678-90ab-cdef12345678")
+
+            Retrieve a principal by external ID:
+
+            >>> res = client.iam.principals.retrieve(external_id="my_external_id")
+ """
+ return run_sync(
+ self.__async_client.iam.principals.retrieve(
+ id=id, external_id=external_id, ignore_unknown_ids=ignore_unknown_ids
+ )
+ )
+
+ def list(self, types: str | Sequence[str] | None = None, limit: int = DEFAULT_LIMIT_READ) -> PrincipalList:
+ """
+ `List principals in the organization `_
+
+ Args:
+ types (str | Sequence[str] | None): Filter by principal type(s). Defaults to None, which means no filtering.
+ limit (int): The maximum number of principals to return. Defaults to 25.
+
+ Returns:
+            PrincipalList: The list of principals in the organization matching the given filter.
+
+ Examples:
+
+            List principals in the organization:
+
+            >>> from cognite.client import CogniteClient
+            >>> client = CogniteClient()
+            >>> # async_client = AsyncCogniteClient() # another option
+            >>> res = client.iam.principals.list(types="USER", limit=10)
+ """
+ return run_sync(self.__async_client.iam.principals.list(types=types, limit=limit))
diff --git a/cognite/client/_sync_api/postgres_gateway/__init__.py b/cognite/client/_sync_api/postgres_gateway/__init__.py
new file mode 100644
index 0000000000..df15957f5b
--- /dev/null
+++ b/cognite/client/_sync_api/postgres_gateway/__init__.py
@@ -0,0 +1,27 @@
+"""
+===============================================================================
+506bda1e5a8fa5d128a4da3ae05bb18b
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._sync_api.postgres_gateway.tables import SyncTablesAPI
+from cognite.client._sync_api.postgres_gateway.users import SyncUsersAPI
+from cognite.client._sync_api_client import SyncAPIClient
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncPostgresGatewaysAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+ self.users = SyncUsersAPI(async_client)
+ self.tables = SyncTablesAPI(async_client)
diff --git a/cognite/client/_sync_api/postgres_gateway/tables.py b/cognite/client/_sync_api/postgres_gateway/tables.py
new file mode 100644
index 0000000000..a1a02d3954
--- /dev/null
+++ b/cognite/client/_sync_api/postgres_gateway/tables.py
@@ -0,0 +1,193 @@
+"""
+===============================================================================
+cee739aab6c29319b2333faea5e5a88d
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import TYPE_CHECKING, Literal, overload
+
+import cognite.client.data_classes.postgres_gateway.tables as pg
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncTablesAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[pg.Table]: ...
+
+ @overload
+ def __call__(self, chunk_size: int) -> Iterator[pg.TableList]: ...
+
+ def __call__(self, chunk_size: int | None = None, limit: int | None = None) -> Iterator[pg.Table | pg.TableList]:
+ """
+ Iterate over custom tables
+
+ Fetches custom tables as they are iterated over, so you keep a limited number of custom tables in memory.
+
+ Args:
+ chunk_size (int | None): Number of custom tables to return in each chunk. Defaults to yielding one custom table at a time.
+ limit (int | None): Maximum number of custom tables to return. Defaults to return all.
+
+ Yields:
+ pg.Table | pg.TableList: yields Table one by one if chunk_size is not specified, else TableList objects.
+ """
+ yield from SyncIterator(self.__async_client.postgres_gateway.tables(chunk_size=chunk_size, limit=limit))
+
+ @overload
+ def create(self, username: str, items: pg.TableWrite) -> pg.Table: ...
+
+ @overload
+ def create(self, username: str, items: Sequence[pg.TableWrite]) -> pg.TableList: ...
+
+ def create(self, username: str, items: pg.TableWrite | Sequence[pg.TableWrite]) -> pg.Table | pg.TableList:
+ """
+ `Create tables `_
+
+ Args:
+            username (str): The username (a.k.a. database) to be managed from the API
+ items (pg.TableWrite | Sequence[pg.TableWrite]): The table(s) to create
+
+ Returns:
+ pg.Table | pg.TableList: Created tables
+
+ Examples:
+
+ Create custom table:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.data_modeling import ViewId
+ >>> from cognite.client.data_classes.postgres_gateway import ViewTableWrite
+ >>> client = CogniteClient()
+ >>> table = ViewTableWrite(tablename="myCustom", options=ViewId(space="mySpace", external_id="myExternalId", version="v1"))
+                >>> res = client.postgres_gateway.tables.create("myUserName", table)
+ """
+ return run_sync(self.__async_client.postgres_gateway.tables.create(username=username, items=items))
+
+ @overload
+ def retrieve(self, username: str, tablename: str, ignore_unknown_ids: Literal[False] = False) -> pg.Table: ...
+
+ @overload
+ def retrieve(self, username: str, tablename: str, ignore_unknown_ids: Literal[True]) -> pg.Table | None: ...
+
+ @overload
+ def retrieve(
+ self, username: str, tablename: SequenceNotStr[str], ignore_unknown_ids: bool = False
+ ) -> pg.TableList: ...
+
+ def retrieve(
+ self, username: str, tablename: str | SequenceNotStr[str], ignore_unknown_ids: bool = False
+ ) -> pg.Table | pg.TableList | None:
+ """
+ `Retrieve a list of tables by their tables names `_
+
+ Retrieve a list of Postgres tables for a user by their table names, optionally ignoring unknown table names
+
+ Args:
+ username (str): The username (a.k.a. database) to be managed from the API
+ tablename (str | SequenceNotStr[str]): The name of the table(s) to be retrieved
+ ignore_unknown_ids (bool): Ignore table names not found
+
+ Returns:
+ pg.Table | pg.TableList | None: Foreign tables
+
+ Examples:
+
+ Retrieve custom table:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.postgres_gateway.tables.retrieve("myUserName", 'myCustom')
+
+            Get multiple custom tables by table name:
+
+ >>> res = client.postgres_gateway.tables.retrieve("myUserName", ["myCustom", "myCustom2"])
+ """
+ return run_sync(
+ self.__async_client.postgres_gateway.tables.retrieve(
+ username=username, tablename=tablename, ignore_unknown_ids=ignore_unknown_ids
+ )
+ )
+
+ def delete(self, username: str, tablename: str | SequenceNotStr[str], ignore_unknown_ids: bool = False) -> None:
+ """
+ `Delete postgres table(s) `_
+
+ Args:
+            username (str): The username (a.k.a. database) to be managed from the API
+ tablename (str | SequenceNotStr[str]): The name of the table(s) to be deleted
+ ignore_unknown_ids (bool): Ignore table names that are not found
+
+ Examples:
+
+ Delete custom table:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.postgres_gateway.tables.delete("myUserName", ["myCustom", "myCustom2"])
+ """
+ return run_sync(
+ self.__async_client.postgres_gateway.tables.delete(
+ username=username, tablename=tablename, ignore_unknown_ids=ignore_unknown_ids
+ )
+ )
+
+ def list(
+ self,
+ username: str,
+ include_built_ins: Literal["yes", "no"] | None = "no",
+ limit: int | None = DEFAULT_LIMIT_READ,
+ ) -> pg.TableList:
+ """
+ `List postgres tables `_
+
+ List all tables in a given project.
+
+ Args:
+            username (str): The username (a.k.a. database) to be managed from the API
+            include_built_ins (Literal['yes', 'no'] | None): Whether the API should return built-in tables or not
+ limit (int | None): Limits the number of results to be returned.
+
+ Returns:
+ pg.TableList: Foreign tables
+
+ Examples:
+
+ List tables:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> custom_table_list = client.postgres_gateway.tables.list("myUserName", limit=5)
+
+ Iterate over tables, one-by-one:
+
+ >>> for table in client.postgres_gateway.tables():
+ ... table # do something with the custom table
+
+ Iterate over chunks of tables to reduce memory load:
+
+ >>> for table_list in client.postgres_gateway.tables(chunk_size=25):
+ ... table_list # do something with the custom tables
+ """
+ return run_sync(
+ self.__async_client.postgres_gateway.tables.list(
+ username=username, include_built_ins=include_built_ins, limit=limit
+ )
+ )
diff --git a/cognite/client/_sync_api/postgres_gateway/users.py b/cognite/client/_sync_api/postgres_gateway/users.py
new file mode 100644
index 0000000000..abe567627f
--- /dev/null
+++ b/cognite/client/_sync_api/postgres_gateway/users.py
@@ -0,0 +1,219 @@
+"""
+===============================================================================
+a98416ce52e9dbf7e93c562767fc169e
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import TYPE_CHECKING, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.postgres_gateway.users import (
+ User,
+ UserCreated,
+ UserCreatedList,
+ UserList,
+ UserUpdate,
+ UserWrite,
+)
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncUsersAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[User]: ...
+
+ @overload
+ def __call__(self, chunk_size: int) -> Iterator[UserList]: ...
+
+ def __call__(self, chunk_size: int | None = None, limit: int | None = None) -> Iterator[User | UserList]:
+ """
+ Iterate over users
+
+        Fetches users as they are iterated over, so you keep a limited number of users in memory.
+
+ Args:
+ chunk_size (int | None): Number of users to return in each chunk. Defaults to yielding one user at a time.
+ limit (int | None): Maximum number of users to return. Defaults to return all.
+
+ Yields:
+ User | UserList: yields User one by one if chunk_size is not specified, else UserList objects.
+ """
+ yield from SyncIterator(self.__async_client.postgres_gateway.users(chunk_size=chunk_size, limit=limit))
+
+ @overload
+ def create(self, user: UserWrite) -> UserCreated: ...
+
+ @overload
+ def create(self, user: Sequence[UserWrite]) -> UserCreatedList: ...
+
+ def create(self, user: UserWrite | Sequence[UserWrite]) -> UserCreated | UserCreatedList:
+ """
+ `Create Users `_
+
+ Create postgres users.
+
+ Args:
+ user (UserWrite | Sequence[UserWrite]): The user(s) to create.
+
+ Returns:
+ UserCreated | UserCreatedList: The created user(s)
+
+ Examples:
+
+ Create user:
+
+ >>> import os
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.postgres_gateway import UserWrite, SessionCredentials
+ >>> from cognite.client.data_classes import ClientCredentials
+ >>> client = CogniteClient()
+ >>> session = client.iam.sessions.create(
+ ... ClientCredentials(os.environ["IDP_CLIENT_ID"], os.environ["IDP_CLIENT_SECRET"]),
+ ... session_type="CLIENT_CREDENTIALS"
+ ... )
+ >>> user = UserWrite(credentials=SessionCredentials(nonce=session.nonce))
+ >>> res = client.postgres_gateway.users.create(user)
+ """
+ return run_sync(self.__async_client.postgres_gateway.users.create(user=user))
+
+ @overload
+ def update(self, items: UserUpdate | UserWrite) -> User: ...
+
+ @overload
+ def update(self, items: Sequence[UserUpdate | UserWrite]) -> UserList: ...
+
+ def update(self, items: UserUpdate | UserWrite | Sequence[UserUpdate | UserWrite]) -> User | UserList:
+ """
+ `Update users `_
+
+ Update postgres users
+
+ Args:
+ items (UserUpdate | UserWrite | Sequence[UserUpdate | UserWrite]): The user(s) to update.
+
+ Returns:
+ User | UserList: The updated user(s)
+
+ Examples:
+
+ Update user:
+
+ >>> import os
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.postgres_gateway import UserUpdate, SessionCredentials
+ >>> from cognite.client.data_classes import ClientCredentials
+ >>> client = CogniteClient()
+ >>> session = client.iam.sessions.create(
+ ... ClientCredentials(os.environ["IDP_CLIENT_ID"], os.environ["IDP_CLIENT_SECRET"]),
+ ... session_type="CLIENT_CREDENTIALS"
+ ... )
+ >>> update = UserUpdate('myUser').credentials.set(SessionCredentials(nonce=session.nonce))
+ >>> res = client.postgres_gateway.users.update(update)
+ """
+ return run_sync(self.__async_client.postgres_gateway.users.update(items=items))
+
+ def delete(self, username: str | SequenceNotStr[str], ignore_unknown_ids: bool = False) -> None:
+ """
+ `Delete postgres user(s) `_
+
+ Delete postgres users
+
+ Args:
+ username (str | SequenceNotStr[str]): Usernames of the users to delete.
+ ignore_unknown_ids (bool): Ignore usernames that are not found
+
+
+ Examples:
+
+ Delete users:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.postgres_gateway.users.delete(["myUser", "myUser2"])
+ """
+ return run_sync(
+ self.__async_client.postgres_gateway.users.delete(username=username, ignore_unknown_ids=ignore_unknown_ids)
+ )
+
+ @overload
+ def retrieve(self, username: str, ignore_unknown_ids: bool = False) -> User: ...
+
+ @overload
+ def retrieve(self, username: SequenceNotStr[str], ignore_unknown_ids: bool = False) -> UserList: ...
+
+ def retrieve(self, username: str | SequenceNotStr[str], ignore_unknown_ids: bool = False) -> User | UserList:
+ """
+ `Retrieve a list of users by their usernames `_
+
+ Retrieve a list of postgres users by their usernames, optionally ignoring unknown usernames
+
+ Args:
+ username (str | SequenceNotStr[str]): Usernames of the users to retrieve.
+ ignore_unknown_ids (bool): Ignore usernames that are not found
+
+ Returns:
+ User | UserList: The retrieved user(s).
+
+ Examples:
+
+ Retrieve user:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.postgres_gateway.users.retrieve("myUser", ignore_unknown_ids=True)
+ """
+ return run_sync(
+ self.__async_client.postgres_gateway.users.retrieve(
+ username=username, ignore_unknown_ids=ignore_unknown_ids
+ )
+ )
+
+ def list(self, limit: int = DEFAULT_LIMIT_READ) -> UserList:
+ """
+ `Fetch scoped users `_
+
+ List all users in a given project.
+
+ Args:
+ limit (int): Limits the number of results to be returned.
+
+ Returns:
+ UserList: A list of users
+
+ Examples:
+
+ List users:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> user_list = client.postgres_gateway.users.list(limit=5)
+
+ Iterate over users, one-by-one:
+
+ >>> for user in client.postgres_gateway.users():
+ ... user # do something with the user
+
+ Iterate over chunks of users to reduce memory load:
+
+ >>> for user_list in client.postgres_gateway.users(chunk_size=25):
+ ... user_list # do something with the users
+ """
+ return run_sync(self.__async_client.postgres_gateway.users.list(limit=limit))
diff --git a/cognite/client/_sync_api/raw/__init__.py b/cognite/client/_sync_api/raw/__init__.py
new file mode 100644
index 0000000000..4a7de2f137
--- /dev/null
+++ b/cognite/client/_sync_api/raw/__init__.py
@@ -0,0 +1,29 @@
+"""
+===============================================================================
+c4064290ea82271873bbc2c0a330fc30
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._sync_api.raw.databases import SyncRawDatabasesAPI
+from cognite.client._sync_api.raw.rows import SyncRawRowsAPI
+from cognite.client._sync_api.raw.tables import SyncRawTablesAPI
+from cognite.client._sync_api_client import SyncAPIClient
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncRawAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+ self.databases = SyncRawDatabasesAPI(async_client)
+ self.tables = SyncRawTablesAPI(async_client)
+ self.rows = SyncRawRowsAPI(async_client)
diff --git a/cognite/client/_sync_api/raw/databases.py b/cognite/client/_sync_api/raw/databases.py
new file mode 100644
index 0000000000..43fe28bd08
--- /dev/null
+++ b/cognite/client/_sync_api/raw/databases.py
@@ -0,0 +1,123 @@
+"""
+===============================================================================
+06bccd4498118fae6c78fda684de0367
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator
+from typing import overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.raw import Database, DatabaseList
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+
+class SyncRawDatabasesAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[Database]: ...
+
+ @overload
+ def __call__(self, chunk_size: int) -> Iterator[DatabaseList]: ...
+
+ def __call__(self, chunk_size: int | None = None, limit: int | None = None) -> Iterator[Database | DatabaseList]:
+ """
+ Iterate over databases
+
+ Fetches dbs as they are iterated over, so you keep a limited number of dbs in memory.
+
+ Args:
+            chunk_size (int | None): Number of dbs to return in each chunk. Defaults to yielding one db at a time.
+ limit (int | None): Maximum number of dbs to return. Defaults to return all items.
+
+ Yields:
+            Database | DatabaseList: yields Database one by one if chunk_size is not specified, else DatabaseList objects.
+ """
+ yield from SyncIterator(self.__async_client.raw.databases(chunk_size=chunk_size, limit=limit))
+
+ @overload
+ def create(self, name: str) -> Database: ...
+
+ @overload
+ def create(self, name: list[str]) -> DatabaseList: ...
+
+ def create(self, name: str | list[str]) -> Database | DatabaseList:
+ """
+ `Create one or more databases. `_
+
+ Args:
+ name (str | list[str]): A db name or list of db names to create.
+
+ Returns:
+ Database | DatabaseList: Database or list of databases that has been created.
+
+ Examples:
+
+ Create a new database:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.raw.databases.create("db1")
+ """
+ return run_sync(self.__async_client.raw.databases.create(name=name))
+
+ def delete(self, name: str | SequenceNotStr[str], recursive: bool = False) -> None:
+ """
+ `Delete one or more databases. `_
+
+ Args:
+ name (str | SequenceNotStr[str]): A db name or list of db names to delete.
+ recursive (bool): Recursively delete all tables in the database(s).
+
+ Examples:
+
+ Delete a list of databases:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.raw.databases.delete(["db1", "db2"])
+ """
+ return run_sync(self.__async_client.raw.databases.delete(name=name, recursive=recursive))
+
+ def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> DatabaseList:
+ """
+ `List databases `_
+
+ Args:
+ limit (int | None): Maximum number of databases to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+
+ Returns:
+ DatabaseList: List of requested databases.
+
+ Examples:
+
+ List the first 5 databases:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> db_list = client.raw.databases.list(limit=5)
+
+ Iterate over databases, one-by-one:
+
+ >>> for db in client.raw.databases():
+ ... db # do something with the db
+
+ Iterate over chunks of databases to reduce memory load:
+
+ >>> for db_list in client.raw.databases(chunk_size=2500):
+ ... db_list # do something with the dbs
+ """
+ return run_sync(self.__async_client.raw.databases.list(limit=limit))
diff --git a/cognite/client/_sync_api/raw/rows.py b/cognite/client/_sync_api/raw/rows.py
new file mode 100644
index 0000000000..d30532e5d5
--- /dev/null
+++ b/cognite/client/_sync_api/raw/rows.py
@@ -0,0 +1,343 @@
+"""
+===============================================================================
+d83244377c50c368e66b22af80198a2c
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import TYPE_CHECKING, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes.raw import Row, RowList, RowWrite
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+ import pandas as pd
+
+
+class SyncRawRowsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, db_name: str, table_name: str, *, chunk_size: None, partitions: None) -> Iterator[Row]: ...
+
+ @overload
+ def __call__(self, db_name: str, table_name: str, *, chunk_size: None, partitions: int) -> Iterator[RowList]: ...
+
+ @overload
+ def __call__(self, db_name: str, table_name: str, *, chunk_size: int, partitions: None) -> Iterator[RowList]: ...
+
+ def __call__(
+ self,
+ db_name: str,
+ table_name: str,
+ chunk_size: int | None = None,
+ limit: int | None = None,
+ min_last_updated_time: int | None = None,
+ max_last_updated_time: int | None = None,
+ columns: list[str] | None = None,
+ partitions: int | None = None,
+ ) -> Iterator[Row | RowList]:
+ """
+ Iterate over rows.
+
+ Fetches rows as they are iterated over, so you keep a limited number of rows in memory.
+
+ Note:
+ When iterating using partitions > 1, the memory usage is bounded at 2 x partitions x chunk_size. This is implemented
+ by throttling retrieval when the caller's code can't keep up.
+
+ Args:
+ db_name (str): Name of the database
+ table_name (str): Name of the table to iterate over rows for
+ chunk_size (int | None): Number of rows to return in each chunk (may be lower). Defaults to yielding one row at a time.
+ Note: When used together with 'partitions' the default is 10000 (matching the API limit) and there's an implicit minimum of 1000 rows.
+ limit (int | None): Maximum number of rows to return. Can be used with partitions. Defaults to returning all items.
+ min_last_updated_time (int | None): Rows must have been last updated after this time (exclusive). ms since epoch.
+ max_last_updated_time (int | None): Rows must have been last updated before this time (inclusive). ms since epoch.
+ columns (list[str] | None): List of column keys. Set to `None` for retrieving all, use [] to retrieve only row keys.
+ partitions (int | None): Retrieve rows in parallel using this number of workers. Defaults to not use concurrency.
+ The setting is capped at ``global_config.max_workers`` and _can_ be used with a finite limit. To prevent unexpected
+ problems and maximize read throughput, check out `concurrency limits in the API documentation. `_
+
+ Yields:
+ Row | RowList: An iterator yielding the requested row or rows.
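+
+ Examples:
+
+ A minimal sketch of iterating over recently updated rows while fetching only their keys ('db1'/'t1' and the timestamp are placeholders; assumes a configured client):
+
+ >>> from cognite.client import CogniteClient
+ >>> client = CogniteClient()
+ >>> for row in client.raw.rows("db1", "t1", min_last_updated_time=1700000000000, columns=[]):
+ ... row # only row keys are fetched when columns=[]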
+ """
+ yield from SyncIterator(
+ self.__async_client.raw.rows(
+ db_name=db_name,
+ table_name=table_name,
+ chunk_size=chunk_size,
+ limit=limit,
+ min_last_updated_time=min_last_updated_time,
+ max_last_updated_time=max_last_updated_time,
+ columns=columns,
+ partitions=partitions,
+ )
+ )
+
+ def insert(
+ self,
+ db_name: str,
+ table_name: str,
+ row: Sequence[Row] | Sequence[RowWrite] | Row | RowWrite | dict,
+ ensure_parent: bool = False,
+ ) -> None:
+ """
+ `Insert one or more rows into a table. `_
+
+ Args:
+ db_name (str): Name of the database.
+ table_name (str): Name of the table.
+ row (Sequence[Row] | Sequence[RowWrite] | Row | RowWrite | dict): The row(s) to insert
+ ensure_parent (bool): Create database/table if they don't already exist.
+
+ Examples:
+
+ Insert new rows into a table:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import RowWrite
+ >>> client = CogniteClient()
+ >>> rows = [RowWrite(key="r1", columns={"col1": "val1", "col2": "val1"}),
+ ... RowWrite(key="r2", columns={"col1": "val2", "col2": "val2"})]
+ >>> client.raw.rows.insert("db1", "table1", rows)
+
+ You may also insert a dictionary directly:
+
+ >>> rows = {
+ ... "key-1": {"col1": 1, "col2": 2},
+ ... "key-2": {"col1": 3, "col2": 4, "col3": "high five"},
+ ... }
+ >>> client.raw.rows.insert("db1", "table1", rows)
+ """
+ return run_sync(
+ self.__async_client.raw.rows.insert(
+ db_name=db_name, table_name=table_name, row=row, ensure_parent=ensure_parent
+ )
+ )
+
+ def insert_dataframe(
+ self, db_name: str, table_name: str, dataframe: pd.DataFrame, ensure_parent: bool = False, dropna: bool = True
+ ) -> None:
+ """
+ `Insert pandas dataframe into a table `_
+
+ Uses index for row keys.
+
+ Args:
+ db_name (str): Name of the database.
+ table_name (str): Name of the table.
+ dataframe (pd.DataFrame): The dataframe to insert. Index will be used as row keys.
+ ensure_parent (bool): Create database/table if they don't already exist.
+ dropna (bool): Remove NaNs (but keep None's in dtype=object columns) before inserting. Done individually per column. Default: True
+
+ Examples:
+
+ Insert new rows into a table:
+
+ >>> import pandas as pd
+ >>> from cognite.client import CogniteClient
+ >>>
+ >>> client = CogniteClient()
+ >>> df = pd.DataFrame(
+ ... {"col-a": [1, 3, None], "col-b": [2, -1, 9]},
+ ... index=["r1", "r2", "r3"])
+ >>> res = client.raw.rows.insert_dataframe(
+ ... "db1", "table1", df, dropna=True)
+ """
+ return run_sync(
+ self.__async_client.raw.rows.insert_dataframe(
+ db_name=db_name, table_name=table_name, dataframe=dataframe, ensure_parent=ensure_parent, dropna=dropna
+ )
+ )
+
+ def delete(self, db_name: str, table_name: str, key: str | SequenceNotStr[str]) -> None:
+ """
+ `Delete rows from a table. `_
+
+ Args:
+ db_name (str): Name of the database.
+ table_name (str): Name of the table.
+ key (str | SequenceNotStr[str]): The key(s) of the row(s) to delete.
+
+ Examples:
+
+ Delete rows from table:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> keys_to_delete = ["k1", "k2", "k3"]
+ >>> client.raw.rows.delete("db1", "table1", keys_to_delete)
+ """
+ return run_sync(self.__async_client.raw.rows.delete(db_name=db_name, table_name=table_name, key=key))
+
+ def retrieve(self, db_name: str, table_name: str, key: str) -> Row | None:
+ """
+ `Retrieve a single row by key. `_
+
+ Args:
+ db_name (str): Name of the database.
+ table_name (str): Name of the table.
+ key (str): The key of the row to retrieve.
+
+ Returns:
+ Row | None: The requested row.
+
+ Examples:
+
+ Retrieve a row with key 'k1' from table 't1' in database 'db1':
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> row = client.raw.rows.retrieve("db1", "t1", "k1")
+
+ You may access the data directly on the row (like a dict), or use '.get' when keys can be missing:
+
+ >>> val1 = row["col1"]
+ >>> val2 = row.get("col2")
+ """
+ return run_sync(self.__async_client.raw.rows.retrieve(db_name=db_name, table_name=table_name, key=key))
+
+ def retrieve_dataframe(
+ self,
+ db_name: str,
+ table_name: str,
+ min_last_updated_time: int | None = None,
+ max_last_updated_time: int | None = None,
+ columns: list[str] | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ partitions: int | None = None,
+ last_updated_time_in_index: bool = False,
+ infer_dtypes: bool = True,
+ ) -> pd.DataFrame:
+ """
+ `Retrieve rows in a table as a pandas dataframe. `_
+
+ Rowkeys are used as the index.
+
+ Args:
+ db_name (str): Name of the database.
+ table_name (str): Name of the table.
+ min_last_updated_time (int | None): Rows must have been last updated after this time. ms since epoch.
+ max_last_updated_time (int | None): Rows must have been last updated before this time. ms since epoch.
+ columns (list[str] | None): List of column keys. Set to `None` for retrieving all, use [] to retrieve only row keys.
+ limit (int | None): The number of rows to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ partitions (int | None): Retrieve rows in parallel using this number of workers. Can be used together with a (large) finite limit.
+ When partitions is not passed, it defaults to 1 (i.e. no concurrency) for a finite limit, and to ``global_config.max_workers`` for an unlimited query
+ (any value passed is capped at this). To prevent unexpected problems and maximize read throughput, check out
+ `concurrency limits in the API documentation. `_
+ last_updated_time_in_index (bool): Use a MultiIndex with row keys and last_updated_time as index.
+ infer_dtypes (bool): If True, pandas will try to infer dtypes of the columns. Defaults to True.
+
+ Returns:
+ pd.DataFrame: The requested rows in a pandas dataframe.
+
+ Examples:
+
+ Get dataframe:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> df = client.raw.rows.retrieve_dataframe("db1", "t1", limit=5)
+ """
+ return run_sync(
+ self.__async_client.raw.rows.retrieve_dataframe(
+ db_name=db_name,
+ table_name=table_name,
+ min_last_updated_time=min_last_updated_time,
+ max_last_updated_time=max_last_updated_time,
+ columns=columns,
+ limit=limit,
+ partitions=partitions,
+ last_updated_time_in_index=last_updated_time_in_index,
+ infer_dtypes=infer_dtypes,
+ )
+ )
+
+ def list(
+ self,
+ db_name: str,
+ table_name: str,
+ min_last_updated_time: int | None = None,
+ max_last_updated_time: int | None = None,
+ columns: list[str] | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ partitions: int | None = None,
+ ) -> RowList:
+ """
+ `List rows in a table. `_
+
+ Args:
+ db_name (str): Name of the database.
+ table_name (str): Name of the table.
+ min_last_updated_time (int | None): Rows must have been last updated after this time (exclusive). ms since epoch.
+ max_last_updated_time (int | None): Rows must have been last updated before this time (inclusive). ms since epoch.
+ columns (list[str] | None): List of column keys. Set to `None` for retrieving all, use [] to retrieve only row keys.
+ limit (int | None): The number of rows to retrieve. Can be used with partitions. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ partitions (int | None): Retrieve rows in parallel using this number of workers. Can be used together with a (large) finite limit.
+ When partitions is not passed, it defaults to 1, i.e. no concurrency for a finite limit and ``global_config.max_workers`` for an unlimited query
+ (will be capped at this value). To prevent unexpected problems and maximize read throughput, check out
+ `concurrency limits in the API documentation. `_
+
+ Returns:
+ RowList: The requested rows.
+
+ Examples:
+
+ List a few rows:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> row_list = client.raw.rows.list("db1", "tbl1", limit=5)
+
+ Read an entire table efficiently by using concurrency (default behavior when ``limit=None``):
+
+ >>> row_list = client.raw.rows.list("db1", "tbl1", limit=None)
+
+ Iterate through all rows one-by-one to reduce memory load (no concurrency used):
+
+ >>> for row in client.raw.rows("db1", "t1", columns=["col1","col2"]):
+ ... val1 = row["col1"] # You may access the data directly
+ ... val2 = row.get("col2") # ...or use '.get' when keys can be missing
+
+ Iterate through all rows, one chunk at a time, to reduce memory load (no concurrency used):
+
+ >>> for row_list in client.raw.rows("db1", "t1", chunk_size=2500):
+ ... row_list # Do something with the rows
+
+ Iterate through a massive table to reduce memory load while using concurrency for high throughput.
+ Note: ``partitions`` must be specified for concurrency to be used (this is different from ``list()``
+ to keep backward compatibility). Supplying a finite ``limit`` does not affect concurrency settings
+ (except for very small values).
+
+ >>> rows_iterator = client.raw.rows(
+ ... db_name="db1", table_name="t1", partitions=5, chunk_size=5000, limit=1_000_000
+ ... )
+ >>> for row_list in rows_iterator:
+ ... row_list # Do something with the rows
+ """
+ return run_sync(
+ self.__async_client.raw.rows.list(
+ db_name=db_name,
+ table_name=table_name,
+ min_last_updated_time=min_last_updated_time,
+ max_last_updated_time=max_last_updated_time,
+ columns=columns,
+ limit=limit,
+ partitions=partitions,
+ )
+ )
diff --git a/cognite/client/_sync_api/raw/tables.py b/cognite/client/_sync_api/raw/tables.py
new file mode 100644
index 0000000000..75e96b95b9
--- /dev/null
+++ b/cognite/client/_sync_api/raw/tables.py
@@ -0,0 +1,128 @@
+"""
+===============================================================================
+854dbb194c522a53e14d24961ede0b14
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator
+from typing import overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import raw
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+
+class SyncRawTablesAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, db_name: str, chunk_size: None = None) -> Iterator[raw.Table]: ...
+
+ @overload
+ def __call__(self, db_name: str, chunk_size: int) -> Iterator[raw.TableList]: ...
+
+ def __call__(
+ self, db_name: str, chunk_size: int | None = None, limit: int | None = None
+ ) -> Iterator[raw.Table | raw.TableList]:
+ """
+ Iterate over tables
+
+ Fetches tables as they are iterated over, so you keep a limited number of tables in memory.
+
+ Args:
+ db_name (str): Name of the database to iterate over tables for
+ chunk_size (int | None): Number of tables to return in each chunk. Defaults to yielding one table at a time.
+ limit (int | None): Maximum number of tables to return. Defaults to returning all items.
+
+ Yields:
+ raw.Table | raw.TableList: yields raw.Table one by one if chunk_size is not specified, else raw.TableList objects.
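+
+ Examples:
+
+ A minimal sketch of iterating over the first tables in a database ('db1' is a placeholder; assumes a configured client):
+
+ >>> from cognite.client import CogniteClient
+ >>> client = CogniteClient()
+ >>> for table in client.raw.tables("db1", limit=10):
+ ... table # do something with the table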
+ """
+ yield from SyncIterator(self.__async_client.raw.tables(db_name=db_name, chunk_size=chunk_size, limit=limit))
+
+ @overload
+ def create(self, db_name: str, name: str) -> raw.Table: ...
+
+ @overload
+ def create(self, db_name: str, name: list[str]) -> raw.TableList: ...
+
+ def create(self, db_name: str, name: str | list[str]) -> raw.Table | raw.TableList:
+ """
+ `Create one or more tables. `_
+
+ Args:
+ db_name (str): Database to create the tables in.
+ name (str | list[str]): A table name or list of table names to create.
+
+ Returns:
+ raw.Table | raw.TableList: raw.Table or list of tables that have been created.
+
+ Examples:
+
+ Create a new table in a database:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.raw.tables.create("db1", "table1")
+ """
+ return run_sync(self.__async_client.raw.tables.create(db_name=db_name, name=name))
+
+ def delete(self, db_name: str, name: str | SequenceNotStr[str]) -> None:
+ """
+ `Delete one or more tables. `_
+
+ Args:
+ db_name (str): Database to delete tables from.
+ name (str | SequenceNotStr[str]): A table name or list of table names to delete.
+
+ Examples:
+
+ Delete a list of tables:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.raw.tables.delete("db1", ["table1", "table2"])
+ """
+ return run_sync(self.__async_client.raw.tables.delete(db_name=db_name, name=name))
+
+ def list(self, db_name: str, limit: int | None = DEFAULT_LIMIT_READ) -> raw.TableList:
+ """
+ `List tables `_
+
+ Args:
+ db_name (str): The database to list tables from.
+ limit (int | None): Maximum number of tables to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+
+ Returns:
+ raw.TableList: List of requested tables.
+
+ Examples:
+
+ List the first 5 tables:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> table_list = client.raw.tables.list("db1", limit=5)
+
+ Iterate over tables, one-by-one:
+
+ >>> for table in client.raw.tables(db_name="db1"):
+ ... table # do something with the table
+
+ Iterate over chunks of tables to reduce memory load:
+
+ >>> for table_list in client.raw.tables(db_name="db1", chunk_size=25):
+ ... table_list # do something with the tables
+ """
+ return run_sync(self.__async_client.raw.tables.list(db_name=db_name, limit=limit))
diff --git a/cognite/client/_sync_api/relationships.py b/cognite/client/_sync_api/relationships.py
new file mode 100644
index 0000000000..9d7df9f976
--- /dev/null
+++ b/cognite/client/_sync_api/relationships.py
@@ -0,0 +1,416 @@
+"""
+===============================================================================
+c16ec03a20daca0713c052b395d984dc
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator, Sequence
+from typing import TYPE_CHECKING, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import (
+ Relationship,
+ RelationshipList,
+ RelationshipUpdate,
+ RelationshipWrite,
+)
+from cognite.client.data_classes.labels import LabelFilter
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncRelationshipsAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[Relationship]: ...
+
+ @overload
+ def __call__(self, chunk_size: int) -> Iterator[RelationshipList]: ...
+
+ def __call__(
+ self,
+ chunk_size: int | None = None,
+ source_external_ids: SequenceNotStr[str] | None = None,
+ source_types: SequenceNotStr[str] | None = None,
+ target_external_ids: SequenceNotStr[str] | None = None,
+ target_types: SequenceNotStr[str] | None = None,
+ data_set_ids: int | Sequence[int] | None = None,
+ data_set_external_ids: str | SequenceNotStr[str] | None = None,
+ start_time: dict[str, int] | None = None,
+ end_time: dict[str, int] | None = None,
+ confidence: dict[str, int] | None = None,
+ last_updated_time: dict[str, int] | None = None,
+ created_time: dict[str, int] | None = None,
+ active_at_time: dict[str, int] | None = None,
+ labels: LabelFilter | None = None,
+ limit: int | None = None,
+ fetch_resources: bool = False,
+ ) -> Iterator[Relationship | RelationshipList]:
+ """
+ Iterate over relationships
+
+ Fetches relationships as they are iterated over, so you keep a limited number of relationships in memory.
+
+ Args:
+ chunk_size (int | None): Number of Relationships to return in each chunk. Defaults to yielding one relationship at a time.
+ source_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their source External Id field
+ source_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their source Type field
+ target_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their target External Id field
+ target_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their target Type field
+ data_set_ids (int | Sequence[int] | None): Return only relationships in the specified data set(s) with this id / these ids.
+ data_set_external_ids (str | SequenceNotStr[str] | None): Return only relationships in the specified data set(s) with this external id / these external ids.
+ start_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive)
+ end_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive)
+ confidence (dict[str, int] | None): Range to filter the field for (inclusive).
+ last_updated_time (dict[str, int] | None): Range to filter the field for (inclusive).
+ created_time (dict[str, int] | None): Range to filter the field for (inclusive).
+ active_at_time (dict[str, int] | None): Limits results to those active at any point within the given time range, i.e. if there is any overlap in the intervals [activeAtTime.min, activeAtTime.max] and [startTime, endTime], where both intervals are inclusive. If a relationship does not have a startTime, it is regarded as active from the beginning of time by this filter. If it does not have an endTime, it will be regarded as active until the end of time. Similarly, if a min is not supplied to the filter, the min will be implicitly set to the beginning of time, and if a max is not supplied, the max will be implicitly set to the end of time.
+ labels (LabelFilter | None): Return only the resource matching the specified label constraints.
+ limit (int | None): Maximum number of relationships to return. Defaults to returning all items.
+ fetch_resources (bool): If true, will try to return the full resources referenced by the relationship in the source and target fields.
+
+ Yields:
+ Relationship | RelationshipList: yields Relationship one by one if chunk_size is not specified, else RelationshipList objects.
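+
+ Examples:
+
+ A minimal sketch of iterating over relationships in chunks, filtered on source type (assumes a configured client):
+
+ >>> from cognite.client import CogniteClient
+ >>> client = CogniteClient()
+ >>> for rel_list in client.relationships(chunk_size=1000, source_types=["asset"]):
+ ... rel_list # do something with each chunk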
+ """
+ yield from SyncIterator(
+ self.__async_client.relationships(
+ chunk_size=chunk_size,
+ source_external_ids=source_external_ids,
+ source_types=source_types,
+ target_external_ids=target_external_ids,
+ target_types=target_types,
+ data_set_ids=data_set_ids,
+ data_set_external_ids=data_set_external_ids,
+ start_time=start_time,
+ end_time=end_time,
+ confidence=confidence,
+ last_updated_time=last_updated_time,
+ created_time=created_time,
+ active_at_time=active_at_time,
+ labels=labels,
+ limit=limit,
+ fetch_resources=fetch_resources,
+ )
+ )
+
+ def retrieve(self, external_id: str, fetch_resources: bool = False) -> Relationship | None:
+ """
+ Retrieve a single relationship by external id.
+
+ Args:
+ external_id (str): External ID
+ fetch_resources (bool): If true, will try to return the full resources referenced by the relationship in the source and target fields.
+
+ Returns:
+ Relationship | None: Requested relationship or None if it does not exist.
+
+ Examples:
+
+ Get relationship by external id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.relationships.retrieve(external_id="1")
+ """
+ return run_sync(
+ self.__async_client.relationships.retrieve(external_id=external_id, fetch_resources=fetch_resources)
+ )
+
+ def retrieve_multiple(
+ self, external_ids: SequenceNotStr[str], fetch_resources: bool = False, ignore_unknown_ids: bool = False
+ ) -> RelationshipList:
+ """
+ `Retrieve multiple relationships by external id. `_
+
+ Args:
+ external_ids (SequenceNotStr[str]): External IDs
+ fetch_resources (bool): If true, will try to return the full resources referenced by the relationship in the
+ source and target fields.
+ ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+
+ Returns:
+ RelationshipList: The requested relationships.
+
+ Examples:
+
+ Get relationships by external id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.relationships.retrieve_multiple(external_ids=["abc", "def"])
+ """
+ return run_sync(
+ self.__async_client.relationships.retrieve_multiple(
+ external_ids=external_ids, fetch_resources=fetch_resources, ignore_unknown_ids=ignore_unknown_ids
+ )
+ )
+
+ def list(
+ self,
+ source_external_ids: SequenceNotStr[str] | None = None,
+ source_types: SequenceNotStr[str] | None = None,
+ target_external_ids: SequenceNotStr[str] | None = None,
+ target_types: SequenceNotStr[str] | None = None,
+ data_set_ids: int | Sequence[int] | None = None,
+ data_set_external_ids: str | SequenceNotStr[str] | None = None,
+ start_time: dict[str, int] | None = None,
+ end_time: dict[str, int] | None = None,
+ confidence: dict[str, int] | None = None,
+ last_updated_time: dict[str, int] | None = None,
+ created_time: dict[str, int] | None = None,
+ active_at_time: dict[str, int] | None = None,
+ labels: LabelFilter | None = None,
+ limit: int | None = DEFAULT_LIMIT_READ,
+ partitions: int | None = None,
+ fetch_resources: bool = False,
+ ) -> RelationshipList:
+ """
+ `Lists relationships stored in the project based on a query filter given in the payload of this request. Up to 1000 relationships can be retrieved in one operation. `_
+
+ Args:
+ source_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their source External Id field
+ source_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their source Type field
+ target_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their target External Id field
+ target_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their target Type field
+ data_set_ids (int | Sequence[int] | None): Return only relationships in the specified data set(s) with this id / these ids.
+ data_set_external_ids (str | SequenceNotStr[str] | None): Return only relationships in the specified data set(s) with this external id / these external ids.
+ start_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive)
+ end_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive)
+ confidence (dict[str, int] | None): Range to filter the field for (inclusive).
+ last_updated_time (dict[str, int] | None): Range to filter the field for (inclusive).
+ created_time (dict[str, int] | None): Range to filter the field for (inclusive).
+ active_at_time (dict[str, int] | None): Limits results to those active at any point within the given time range, i.e. if there is any overlap in the intervals [activeAtTime.min, activeAtTime.max] and [startTime, endTime], where both intervals are inclusive. If a relationship does not have a startTime, it is regarded as active from the beginning of time by this filter. If it does not have an endTime, it will be regarded as active until the end of time. Similarly, if a min is not supplied to the filter, the min will be implicitly set to the beginning of time, and if a max is not supplied, the max will be implicitly set to the end of time.
+ labels (LabelFilter | None): Return only the resource matching the specified label constraints.
+ limit (int | None): Maximum number of relationships to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ partitions (int | None): Retrieve relationships in parallel using this number of workers. Also requires `limit=None` to be passed.
+ fetch_resources (bool): if true, will try to return the full resources referenced by the relationship in the source and target fields.
+
+ Returns:
+ RelationshipList: List of requested relationships
+
+ Examples:
+
+ List relationships:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> relationship_list = client.relationships.list(limit=5)
+
+ Iterate over relationships, one-by-one:
+
+ >>> for relationship in client.relationships():
+ ... relationship # do something with the relationship
+ """
+ return run_sync(
+ self.__async_client.relationships.list(
+ source_external_ids=source_external_ids,
+ source_types=source_types,
+ target_external_ids=target_external_ids,
+ target_types=target_types,
+ data_set_ids=data_set_ids,
+ data_set_external_ids=data_set_external_ids,
+ start_time=start_time,
+ end_time=end_time,
+ confidence=confidence,
+ last_updated_time=last_updated_time,
+ created_time=created_time,
+ active_at_time=active_at_time,
+ labels=labels,
+ limit=limit,
+ partitions=partitions,
+ fetch_resources=fetch_resources,
+ )
+ )
+
+ @overload
+ def create(self, relationship: Relationship | RelationshipWrite) -> Relationship: ...
+
+ @overload
+ def create(self, relationship: Sequence[Relationship | RelationshipWrite]) -> RelationshipList: ...
+
+ def create(
+ self, relationship: Relationship | RelationshipWrite | Sequence[Relationship | RelationshipWrite]
+ ) -> Relationship | RelationshipList:
+ """
+ `Create one or more relationships. `_
+
+ Args:
+ relationship (Relationship | RelationshipWrite | Sequence[Relationship | RelationshipWrite]): Relationship or list of relationships to create.
+
+ Returns:
+ Relationship | RelationshipList: Created relationship(s)
+
+ Note:
+ - The source_type and target_type field in the Relationship(s) can be any string among "Asset", "TimeSeries", "File", "Event", "Sequence".
+ - Do not provide the value for the source and target arguments of the Relationship class, only source_external_id / source_type and target_external_id / target_type. These (source and target) are used as part of fetching actual resources specified in other fields.
+
+ Examples:
+
+ Create a new relationship specifying object type and external id for source and target:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import RelationshipWrite
+ >>> client = CogniteClient()
+ >>> flowrel1 = RelationshipWrite(
+ ... external_id="flow_1",
+ ... source_external_id="source_ext_id",
+ ... source_type="asset",
+ ... target_external_id="target_ext_id",
+ ... target_type="event",
+ ... confidence=0.1,
+ ... data_set_id=1234
+ ... )
+ >>> flowrel2 = RelationshipWrite(
+ ... external_id="flow_2",
+ ... source_external_id="source_ext_id",
+ ... source_type="asset",
+ ... target_external_id="target_ext_id",
+ ... target_type="event",
+ ... confidence=0.1,
+ ... data_set_id=1234
+ ... )
+ >>> res = client.relationships.create([flowrel1,flowrel2])
+ """
+ return run_sync(self.__async_client.relationships.create(relationship=relationship))
+
+ @overload
+ def update(self, item: Relationship | RelationshipWrite | RelationshipUpdate) -> Relationship: ...
+
+ @overload
+ def update(self, item: Sequence[Relationship | RelationshipWrite | RelationshipUpdate]) -> RelationshipList: ...
+
+ def update(
+ self,
+ item: Relationship
+ | RelationshipWrite
+ | RelationshipUpdate
+ | Sequence[Relationship | RelationshipWrite | RelationshipUpdate],
+ mode: Literal["replace_ignore_null", "patch", "replace"] = "replace_ignore_null",
+ ) -> Relationship | RelationshipList:
+ """
+ `Update one or more relationships `_
+ Currently, a full replacement of labels on a relationship is not supported (only partial add/remove updates). See the example below on how to perform partial labels update.
+
+ Args:
+ item (Relationship | RelationshipWrite | RelationshipUpdate | Sequence[Relationship | RelationshipWrite | RelationshipUpdate]): Relationship(s) to update
+ mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Relationship or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+
+ Returns:
+ Relationship | RelationshipList: Updated relationship(s)
+
+ Examples:
+ Update a relationship that you have fetched. This will perform a full update of the relationship:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> rel = client.relationships.retrieve(external_id="flow1")
+ >>> rel.confidence = 0.75
+ >>> res = client.relationships.update(rel)
+
+ Perform a partial update on a relationship, setting a source_external_id and a confidence:
+
+ >>> from cognite.client.data_classes import RelationshipUpdate
+ >>> my_update = RelationshipUpdate(external_id="flow_1").source_external_id.set("alternate_source").confidence.set(0.97)
+ >>> res1 = client.relationships.update(my_update)
+ >>> # Remove an already set optional field like so
+ >>> another_update = RelationshipUpdate(external_id="flow_1").confidence.set(None)
+ >>> res2 = client.relationships.update(another_update)
+
+ Attach labels to a relationship:
+
+ >>> from cognite.client.data_classes import RelationshipUpdate
+ >>> my_update = RelationshipUpdate(external_id="flow_1").labels.add(["PUMP", "VERIFIED"])
+ >>> res = client.relationships.update(my_update)
+
+ Detach a single label from a relationship:
+
+ >>> from cognite.client.data_classes import RelationshipUpdate
+ >>> my_update = RelationshipUpdate(external_id="flow_1").labels.remove("PUMP")
+ >>> res = client.relationships.update(my_update)
+ """
+ return run_sync(self.__async_client.relationships.update(item=item, mode=mode))
+
+ @overload
+ def upsert(
+ self, item: Sequence[Relationship | RelationshipWrite], mode: Literal["patch", "replace"] = "patch"
+ ) -> RelationshipList: ...
+
+ @overload
+ def upsert(
+ self, item: Relationship | RelationshipWrite, mode: Literal["patch", "replace"] = "patch"
+ ) -> Relationship: ...
+
+ def upsert(
+ self,
+ item: Relationship | RelationshipWrite | Sequence[Relationship | RelationshipWrite],
+ mode: Literal["patch", "replace"] = "patch",
+ ) -> Relationship | RelationshipList:
+ """
+ Upsert relationships, i.e., update if it exists, and create if it does not exist.
+ Note this is a convenience method that handles the upserting for you by first calling update on all items,
+ and if any of them fail because they do not exist, it will create them instead.
+
+ For more details, see :ref:`appendix-upsert`.
+
+ Args:
+ item (Relationship | RelationshipWrite | Sequence[Relationship | RelationshipWrite]): Relationship or list of relationships to upsert.
+ mode (Literal['patch', 'replace']): Whether to patch or replace in the case the relationships are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified.
+
+ Returns:
+ Relationship | RelationshipList: The upserted relationship(s).
+
+ Examples:
+
+ Upsert for relationships:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import RelationshipWrite
+ >>> client = CogniteClient()
+ >>> existing_relationship = client.relationships.retrieve(external_id="foo")
+ >>> existing_relationship.description = "New description"
+ >>> new_relationship = RelationshipWrite(
+ ... external_id="new_relationship",
+ ... source_external_id="new_source",
+ ... source_type="asset",
+ ... target_external_id="new_target",
+ ... target_type="event"
+ ... )
+ >>> res = client.relationships.upsert([existing_relationship, new_relationship], mode="replace")
+ """
+ return run_sync(self.__async_client.relationships.upsert(item=item, mode=mode))
+
+ def delete(self, external_id: str | SequenceNotStr[str], ignore_unknown_ids: bool = False) -> None:
+ """
+ `Delete one or more relationships. `_
+
+ Args:
+ external_id (str | SequenceNotStr[str]): External ID or list of external ids
+ ignore_unknown_ids (bool): Ignore external IDs that are not found rather than throw an exception.
+
+ Examples:
+
+ Delete relationships by external id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.relationships.delete(external_id=["a","b"])
+ """
+ return run_sync(
+ self.__async_client.relationships.delete(external_id=external_id, ignore_unknown_ids=ignore_unknown_ids)
+ )
diff --git a/cognite/client/_sync_api/sequence_data.py b/cognite/client/_sync_api/sequence_data.py
new file mode 100644
index 0000000000..2d145b0ad8
--- /dev/null
+++ b/cognite/client/_sync_api/sequence_data.py
@@ -0,0 +1,326 @@
+"""
+===============================================================================
+9812a01728a87fc7f57b745b3b175322
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+import typing
+from typing import TYPE_CHECKING, Any, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import SequenceRows, SequenceRowsList
+from cognite.client.utils._async_helpers import run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+ import pandas as pd
+
+
+class SyncSequencesDataAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+
+ def insert(
+ self,
+ rows: SequenceRows
+ | dict[int, typing.Sequence[int | float | str]]
+ | typing.Sequence[tuple[int, typing.Sequence[int | float | str]]]
+ | typing.Sequence[dict[str, Any]],
+ columns: SequenceNotStr[str] | None = None,
+ id: int | None = None,
+ external_id: str | None = None,
+ ) -> None:
+ """
+ `Insert rows into a sequence `_
+
+ Args:
+ rows (SequenceRows | dict[int, typing.Sequence[int | float | str]] | typing.Sequence[tuple[int, typing.Sequence[int | float | str]]] | typing.Sequence[dict[str, Any]]): The rows you wish to insert. Can either be a list of tuples, a list of {"rowNumber":... ,"values": ...} objects, a dictionary of rowNumber: data, or a SequenceData object. See examples below.
+ columns (SequenceNotStr[str] | None): List of external id for the columns of the sequence.
+ id (int | None): Id of sequence to insert rows into.
+ external_id (str | None): External id of sequence to insert rows into.
+
+ Examples:
+ Your rows of data can be a list of tuples where the first element is the rownumber and the second element is the data to be inserted:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes import SequenceWrite, SequenceColumnWrite
+ >>> client = CogniteClient()
+ >>> seq = client.sequences.create(
+ ... SequenceWrite(
+ ... columns=[
+ ... SequenceColumnWrite(value_type="STRING", external_id="col_a"),
+ ... SequenceColumnWrite(value_type="DOUBLE", external_id ="col_b")
+ ... ],
+ ... )
+ ... )
+ >>> data = [(1, ['pi',3.14]), (2, ['e',2.72]) ]
+ >>> client.sequences.data.insert(columns=["col_a","col_b"], rows=data, id=1)
+
+ They can also be provided as a list of API-style objects with a rowNumber and values field:
+
+ >>> data = [{"rowNumber": 123, "values": ['str',3]}, {"rowNumber": 456, "values": ["bar",42]} ]
+ >>> client.sequences.data.insert(data, id=1, columns=["col_a","col_b"]) # implicit columns are retrieved from metadata
+
+ Or they can be given as a dictionary with row number as the key, and the value is the data to be inserted at that row:
+
+ >>> data = {123 : ['str',3], 456 : ['bar',42] }
+ >>> client.sequences.data.insert(columns=['stringColumn','intColumn'], rows=data, id=1)
+
+ Finally, they can be a SequenceData object retrieved from another request. In this case columns from this object are used as well.
+
+ >>> data = client.sequences.data.retrieve(id=2,start=0,end=10)
+ >>> client.sequences.data.insert(rows=data, id=1,columns=None)
+ """
+ return run_sync(
+ self.__async_client.sequences.data.insert(rows=rows, columns=columns, id=id, external_id=external_id)
+ )
+
+ def insert_dataframe(
+ self, dataframe: pd.DataFrame, id: int | None = None, external_id: str | None = None, dropna: bool = True
+ ) -> None:
+ """
+ `Insert a Pandas dataframe. `_
+
+ The index of the dataframe must contain the row numbers. The names of the remaining columns specify the column external ids.
+ The sequence and columns must already exist.
+
+ Args:
+ dataframe (pd.DataFrame): Pandas DataFrame object containing the sequence data.
+ id (int | None): Id of sequence to insert rows into.
+ external_id (str | None): External id of sequence to insert rows into.
+ dropna (bool): Whether to drop rows where all values are missing. Default: True.
+
+ Examples:
+ Insert three rows into columns 'col_a' and 'col_b' of the sequence with id=123:
+
+ >>> from cognite.client import CogniteClient
+ >>> import pandas as pd
+ >>> client = CogniteClient()
+ >>> df = pd.DataFrame({'col_a': [1, 2, 3], 'col_b': [4, 5, 6]}, index=[1, 2, 3])
+ >>> client.sequences.data.insert_dataframe(df, id=123)
+ """
+ return run_sync(
+ self.__async_client.sequences.data.insert_dataframe(
+ dataframe=dataframe, id=id, external_id=external_id, dropna=dropna
+ )
+ )
+
+ def delete(self, rows: typing.Sequence[int], id: int | None = None, external_id: str | None = None) -> None:
+ """
+ `Delete rows from a sequence `_
+
+ Args:
+ rows (typing.Sequence[int]): List of row numbers.
+ id (int | None): Id of sequence to delete rows from.
+ external_id (str | None): External id of sequence to delete rows from.
+
+ Examples:
+
+ Delete rows from a sequence:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.sequences.data.delete(id=1, rows=[1,2,42])
+ """
+ return run_sync(self.__async_client.sequences.data.delete(rows=rows, id=id, external_id=external_id))
+
+ def delete_range(self, start: int, end: int | None, id: int | None = None, external_id: str | None = None) -> None:
+ """
+ `Delete a range of rows from a sequence. Note this operation is potentially slow, as it retrieves each row before deleting. `_
+
+ Args:
+ start (int): Row number to start from (inclusive).
+ end (int | None): Upper limit on the row number (exclusive). Set to None or -1 to delete all rows until end of sequence.
+ id (int | None): Id of sequence to delete rows from.
+ external_id (str | None): External id of sequence to delete rows from.
+
+ Examples:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> client.sequences.data.delete_range(id=1, start=0, end=None)
+ """
+ return run_sync(
+ self.__async_client.sequences.data.delete_range(start=start, end=end, id=id, external_id=external_id)
+ )
+
+ @overload
+ def retrieve(
+ self,
+ *,
+ external_id: str,
+ start: int = 0,
+ end: int | None = None,
+ columns: SequenceNotStr[str] | None = None,
+ limit: int | None = None,
+ ) -> SequenceRows: ...
+
+ @overload
+ def retrieve(
+ self,
+ *,
+ external_id: SequenceNotStr[str],
+ start: int = 0,
+ end: int | None = None,
+ columns: SequenceNotStr[str] | None = None,
+ limit: int | None = None,
+ ) -> SequenceRowsList: ...
+
+ @overload
+ def retrieve(
+ self,
+ *,
+ id: int,
+ start: int = 0,
+ end: int | None = None,
+ columns: SequenceNotStr[str] | None = None,
+ limit: int | None = None,
+ ) -> SequenceRows: ...
+
+ @overload
+ def retrieve(
+ self,
+ *,
+ id: typing.Sequence[int],
+ start: int = 0,
+ end: int | None = None,
+ columns: SequenceNotStr[str] | None = None,
+ limit: int | None = None,
+ ) -> SequenceRowsList: ...
+
+ @overload
+ def retrieve(
+ self,
+ *,
+ id: typing.Sequence[int] | int,
+ external_id: SequenceNotStr[str] | str,
+ start: int = 0,
+ end: int | None = None,
+ columns: SequenceNotStr[str] | None = None,
+ limit: int | None = None,
+ ) -> SequenceRowsList: ...
+
+ def retrieve(
+ self,
+ external_id: str | SequenceNotStr[str] | None = None,
+ id: int | typing.Sequence[int] | None = None,
+ start: int = 0,
+ end: int | None = None,
+ columns: SequenceNotStr[str] | None = None,
+ limit: int | None = None,
+ ) -> SequenceRows | SequenceRowsList:
+ """
+ `Retrieve data from a sequence `_
+
+ Args:
+ external_id (str | SequenceNotStr[str] | None): The external id(s) of the sequence(s) to retrieve from.
+ id (int | typing.Sequence[int] | None): The internal id(s) of the sequence(s) to retrieve from.
+ start (int): Row number to start from (inclusive).
+ end (int | None): Upper limit on the row number (exclusive). Set to None or -1 to get all rows until end of sequence.
+ columns (SequenceNotStr[str] | None): List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved.
+ limit (int | None): Maximum number of rows to return per sequence. Pass None to fetch all (possibly limited by 'end').
+
+ Returns:
+ SequenceRows | SequenceRowsList: SequenceRows if a single identifier was given, else SequenceRowsList
+
+ Examples:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.sequences.data.retrieve(id=1)
+ >>> tuples = [(r,v) for r,v in res.items()] # You can use this iterator in for loops and list comprehensions,
+ >>> single_value = res[23] # ... get the values at a single row number,
+ >>> col = res.get_column(external_id='columnExtId') # ... get the array of values for a specific column,
+ >>> df = res.to_pandas() # ... or convert the result to a dataframe
+ """
+ return run_sync(
+ self.__async_client.sequences.data.retrieve(
+ external_id=external_id, id=id, start=start, end=end, columns=columns, limit=limit
+ )
+ )
+
+ def retrieve_last_row(
+ self,
+ id: int | None = None,
+ external_id: str | None = None,
+ columns: SequenceNotStr[str] | None = None,
+ before: int | None = None,
+ ) -> SequenceRows:
+ """
+ `Retrieves the last row (i.e the row with the highest row number) in a sequence. `_
+
+ Args:
+ id (int | None): Id of the sequence.
+ external_id (str | None): External id of the sequence.
+ columns (SequenceNotStr[str] | None): List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved.
+ before (int | None): Get the latest row before this row number.
+
+ Returns:
+ SequenceRows: A SequenceRows object containing the requested row.
+
+ Examples:
+
+ Getting the latest row in a sequence before row number 1000:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.sequences.data.retrieve_last_row(id=1, before=1000)
+ """
+ return run_sync(
+ self.__async_client.sequences.data.retrieve_last_row(
+ id=id, external_id=external_id, columns=columns, before=before
+ )
+ )
+
+ def retrieve_dataframe(
+ self,
+ start: int,
+ end: int | None,
+ columns: list[str] | None = None,
+ external_id: str | None = None,
+ column_names: str | None = None,
+ id: int | None = None,
+ limit: int | None = None,
+ ) -> pd.DataFrame:
+ """
+ `Retrieve data from a sequence as a pandas dataframe `_
+
+ Args:
+ start (int): (inclusive) row number to start from.
+ end (int | None): (exclusive) upper limit on the row number. Set to None or -1 to get all rows until end of sequence.
+ columns (list[str] | None): List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved.
+ external_id (str | None): External id of sequence.
+ column_names (str | None): Which field(s) to use as column header. Can use "externalId", "id", "columnExternalId", "id|columnExternalId" or "externalId|columnExternalId". Default is "externalId|columnExternalId" for queries on more than one sequence, and "columnExternalId" for queries on a single sequence.
+ id (int | None): Id of sequence
+ limit (int | None): Maximum number of rows to return per sequence.
+
+ Returns:
+ pd.DataFrame: The requested sequence data in a pandas DataFrame
+
+ Examples:
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> df = client.sequences.data.retrieve_dataframe(id=1, start=0, end=None)
+ """
+ return run_sync(
+ self.__async_client.sequences.data.retrieve_dataframe(
+ start=start,
+ end=end,
+ columns=columns,
+ external_id=external_id,
+ column_names=column_names,
+ id=id,
+ limit=limit,
+ )
+ )
diff --git a/cognite/client/_sync_api/sequences.py b/cognite/client/_sync_api/sequences.py
new file mode 100644
index 0000000000..257f2d7156
--- /dev/null
+++ b/cognite/client/_sync_api/sequences.py
@@ -0,0 +1,725 @@
+"""
+===============================================================================
+487393dca45db624fbbc00b67ff1d158
+This file is auto-generated from the Async API modules - do not edit manually!
+===============================================================================
+"""
+
+from __future__ import annotations
+
+import typing
+from collections.abc import Iterator
+from typing import TYPE_CHECKING, Any, Literal, overload
+
+from cognite.client import AsyncCogniteClient
+from cognite.client._api.sequences import SortSpec
+from cognite.client._constants import DEFAULT_LIMIT_READ
+from cognite.client._sync_api.sequence_data import SyncSequencesDataAPI
+from cognite.client._sync_api_client import SyncAPIClient
+from cognite.client.data_classes import Sequence, SequenceFilter, SequenceList, SequenceUpdate
+from cognite.client.data_classes.aggregations import AggregationFilter, UniqueResultList
+from cognite.client.data_classes.filters import Filter
+from cognite.client.data_classes.sequences import (
+ SequenceProperty,
+ SequenceWrite,
+)
+from cognite.client.data_classes.shared import TimestampRange
+from cognite.client.utils._async_helpers import SyncIterator, run_sync
+from cognite.client.utils.useful_types import SequenceNotStr
+
+if TYPE_CHECKING:
+ from cognite.client import AsyncCogniteClient
+
+
+class SyncSequencesAPI(SyncAPIClient):
+ """Auto-generated, do not modify manually."""
+
+ def __init__(self, async_client: AsyncCogniteClient):
+ self.__async_client = async_client
+ self.data = SyncSequencesDataAPI(async_client)
+
+ @overload
+ def __call__(self, chunk_size: None = None) -> Iterator[Sequence]: ...
+
+ @overload
+ def __call__(self, chunk_size: int) -> Iterator[SequenceList]: ...
+
+ def __call__(
+ self,
+ chunk_size: int | None = None,
+ name: str | None = None,
+ external_id_prefix: str | None = None,
+ metadata: dict[str, str] | None = None,
+ asset_ids: typing.Sequence[int] | None = None,
+ asset_subtree_ids: int | typing.Sequence[int] | None = None,
+ asset_subtree_external_ids: str | SequenceNotStr[str] | None = None,
+ data_set_ids: int | typing.Sequence[int] | None = None,
+ data_set_external_ids: str | SequenceNotStr[str] | None = None,
+ created_time: dict[str, Any] | None = None,
+ last_updated_time: dict[str, Any] | None = None,
+ limit: int | None = None,
+ advanced_filter: Filter | dict[str, Any] | None = None,
+ sort: SortSpec | list[SortSpec] | None = None,
+ ) -> Iterator[Sequence | SequenceList]:
+ """
+ Iterate over sequences
+
+ Fetches sequences as they are iterated over, so you keep a limited number of objects in memory.
+
+ Args:
+ chunk_size (int | None): Number of sequences to return in each chunk. Defaults to yielding one sequence at a time.
+ name (str | None): Filter out sequences that do not have this *exact* name.
+ external_id_prefix (str | None): Filter out sequences that do not have this string as the start of the externalId
+ metadata (dict[str, str] | None): Filter out sequences that do not match these metadata fields and values (case-sensitive). Format is {"key1":"value1","key2":"value2"}.
+ asset_ids (typing.Sequence[int] | None): Filter out sequences that are not linked to any of these assets.
+ asset_subtree_ids (int | typing.Sequence[int] | None): Only include sequences that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include sequences that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ data_set_ids (int | typing.Sequence[int] | None): Return only sequences in the specified data set(s) with this id / these ids.
+ data_set_external_ids (str | SequenceNotStr[str] | None): Return only sequences in the specified data set(s) with this external id / these external ids.
+ created_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ last_updated_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ limit (int | None): Max number of sequences to return. Defaults to returning all items.
+ advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not.
+ sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
+
+ Yields:
+ Sequence | SequenceList: yields Sequence one by one if chunk_size is not specified, else SequenceList objects.
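+
+ Examples:
+
+ A minimal sketch of iterating over sequences in chunks, restricted to a data set ('my_data_set' is a placeholder; assumes a configured client):
+
+ >>> from cognite.client import CogniteClient
+ >>> client = CogniteClient()
+ >>> for seq_list in client.sequences(chunk_size=100, data_set_external_ids="my_data_set"):
+ ... seq_list # do something with each chunk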
+ """
+ yield from SyncIterator(
+ self.__async_client.sequences(
+ chunk_size=chunk_size,
+ name=name,
+ external_id_prefix=external_id_prefix,
+ metadata=metadata,
+ asset_ids=asset_ids,
+ asset_subtree_ids=asset_subtree_ids,
+ asset_subtree_external_ids=asset_subtree_external_ids,
+ data_set_ids=data_set_ids,
+ data_set_external_ids=data_set_external_ids,
+ created_time=created_time,
+ last_updated_time=last_updated_time,
+ limit=limit,
+ advanced_filter=advanced_filter,
+ sort=sort,
+ )
+ )
+
+ def retrieve(self, id: int | None = None, external_id: str | None = None) -> Sequence | None:
+ """
+ `Retrieve a single sequence by id. `_
+
+ Args:
+ id (int | None): ID
+ external_id (str | None): External ID
+
+ Returns:
+ Sequence | None: Requested sequence or None if it does not exist.
+
+ Examples:
+
+ Get sequence by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.sequences.retrieve(id=1)
+
+ Get sequence by external id:
+
+ >>> res = client.sequences.retrieve(external_id="1")
+ """
+ return run_sync(self.__async_client.sequences.retrieve(id=id, external_id=external_id))
+
+ def retrieve_multiple(
+ self,
+ ids: typing.Sequence[int] | None = None,
+ external_ids: SequenceNotStr[str] | None = None,
+ ignore_unknown_ids: bool = False,
+ ) -> SequenceList:
+ """
+ `Retrieve multiple sequences by id. `_
+
+ Args:
+ ids (typing.Sequence[int] | None): IDs
+ external_ids (SequenceNotStr[str] | None): External IDs
+ ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+
+ Returns:
+ SequenceList: The requested sequences.
+
+ Examples:
+
+ Get sequences by id:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> res = client.sequences.retrieve_multiple(ids=[1, 2, 3])
+
+ Get sequences by external id:
+
+ >>> res = client.sequences.retrieve_multiple(external_ids=["abc", "def"])
+ """
+ return run_sync(
+ self.__async_client.sequences.retrieve_multiple(
+ ids=ids, external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids
+ )
+ )
+
+ def aggregate_count(
+ self,
+ advanced_filter: Filter | dict[str, Any] | None = None,
+ filter: SequenceFilter | dict[str, Any] | None = None,
+ ) -> int:
+ """
+ `Count of sequences matching the specified filters and search. `_
+
+ Args:
+ advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count.
+ filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down sequences to count requiring exact match.
+
+ Returns:
+ int: The number of sequences matching the specified filters and search.
+
+ Examples:
+
+ Count the number of sequences in your CDF project:
+
+ >>> from cognite.client import CogniteClient, AsyncCogniteClient
+ >>> client = CogniteClient()
+ >>> # async_client = AsyncCogniteClient() # another option
+ >>> count = client.sequences.aggregate_count()
+
+ Count the number of sequences with external id prefixed with "mapping:" in your CDF project:
+
+ >>> from cognite.client.data_classes import filters
+ >>> from cognite.client.data_classes.sequences import SequenceProperty
+ >>> is_mapping = filters.Prefix(SequenceProperty.external_id, "mapping:")
+ >>> count = client.sequences.aggregate_count(advanced_filter=is_mapping)
+ """
+ return run_sync(self.__async_client.sequences.aggregate_count(advanced_filter=advanced_filter, filter=filter))
+
+ def aggregate_cardinality_values(
+ self,
+ property: SequenceProperty | str | list[str],
+ advanced_filter: Filter | dict[str, Any] | None = None,
+ aggregate_filter: AggregationFilter | dict[str, Any] | None = None,
+ filter: SequenceFilter | dict[str, Any] | None = None,
+ ) -> int:
+ """
+ `Find approximate property count for sequences. `_
+
+ Args:
+ property (SequenceProperty | str | list[str]): The property to count the cardinality of.
+ advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count cardinality.
+ aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
+ filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down the sequences to count requiring exact match.
+
+ Returns:
+ int: The number of properties matching the specified filters and search.
+
+ Examples:
+
+ Count the number of different values for the metadata key "efficiency" used for sequences in your CDF project:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.sequences import SequenceProperty
+ >>> client = CogniteClient()
+ >>> count = client.sequences.aggregate_cardinality_values(SequenceProperty.metadata_key("efficiency"))
+
+ Count the number of timezones (metadata key) for sequences with the word "critical" in the description
+ in your CDF project, but exclude timezones from America:
+
+ >>> from cognite.client.data_classes import filters, aggregations as aggs
+ >>> from cognite.client.data_classes.sequences import SequenceProperty
+ >>> not_america = aggs.Not(aggs.Prefix("america"))
+ >>> is_critical = filters.Search(SequenceProperty.description, "critical")
+ >>> timezone_count = client.sequences.aggregate_cardinality_values(
+ ... SequenceProperty.metadata_key("timezone"),
+ ... advanced_filter=is_critical,
+ ... aggregate_filter=not_america)
+ """
+ return run_sync(
+ self.__async_client.sequences.aggregate_cardinality_values(
+ property=property, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter
+ )
+ )
+
+ def aggregate_cardinality_properties(
+ self,
+ path: SequenceProperty | str | list[str],
+ advanced_filter: Filter | dict[str, Any] | None = None,
+ aggregate_filter: AggregationFilter | dict[str, Any] | None = None,
+ filter: SequenceFilter | dict[str, Any] | None = None,
+ ) -> int:
+ """
+ `Find approximate path count for sequences. `_
+
+ Args:
+ path (SequenceProperty | str | list[str]): The scope in every document to aggregate properties. The only value allowed for now is ["metadata"], meaning that only metadata properties (i.e. keys) are aggregated.
+ advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count cardinality.
+ aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
+ filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down the sequences to count requiring exact match.
+
+ Returns:
+ int: The number of properties matching the specified filters and search.
+
+ Examples:
+
+ Count the number of different metadata keys in your CDF project:
+
+ >>> from cognite.client import CogniteClient
+ >>> from cognite.client.data_classes.sequences import SequenceProperty
+ >>> client = CogniteClient()
+ >>> count = client.sequences.aggregate_cardinality_properties(SequenceProperty.metadata)
+ """
+ return run_sync(
+ self.__async_client.sequences.aggregate_cardinality_properties(
+ path=path, advanced_filter=advanced_filter, aggregate_filter=aggregate_filter, filter=filter
+ )
+ )
+
+ def aggregate_unique_values(
+ self,
+ property: SequenceProperty | str | list[str],
+ advanced_filter: Filter | dict[str, Any] | None = None,
+ aggregate_filter: AggregationFilter | dict[str, Any] | None = None,
+ filter: SequenceFilter | dict[str, Any] | None = None,
+ ) -> UniqueResultList:
+ """
+ `Get unique paths with counts for sequences.