diff --git a/README.md b/README.md
index e3175f97..f1351e8e 100644
--- a/README.md
+++ b/README.md
@@ -4,4 +4,5 @@ This is the official Python SDK for Koyeb, a platform that allows you to deploy
# Modules
-- `koyeb.api`: Contains the API client and methods to interact with Koyeb's REST API. [Documentation](./koyeb/api_README.md)
+- `koyeb.api`: Contains the API client and methods to interact with Koyeb's REST API. [Documentation](./docs/api.md)
+- `koyeb.sandbox`: Contains the Sandbox module, an interactive execution environment for running arbitrary code on Koyeb. [Documentation](./docs/sandbox.md)
diff --git a/bumpver.toml b/bumpver.toml
index 56db0e16..cb66d24a 100644
--- a/bumpver.toml
+++ b/bumpver.toml
@@ -13,3 +13,4 @@ push = false
"koyeb/api/__init__.py" = ['__version__ = "{version}"']
"koyeb/api/api_client.py" = ['OpenAPI-Generator/{version}']
"koyeb/api/configuration.py" = ['SDK Package Version: {version}']
+"koyeb/sandbox/__init__.py" = ['__version__ = "{version}"']
diff --git a/docs/api.md b/docs/api.md
index 1b2e6d44..f1209b37 100644
--- a/docs/api.md
+++ b/docs/api.md
@@ -51201,13 +51201,55 @@ Do not edit the class manually.
```python
@validate_call
def query_logs(
- type: Optional[StrictStr] = None,
- app_id: Optional[StrictStr] = None,
- service_id: Optional[StrictStr] = None,
- deployment_id: Optional[StrictStr] = None,
- instance_id: Optional[StrictStr] = None,
- stream: Optional[StrictStr] = None,
- regional_deployment_id: Optional[StrictStr] = None,
+ type: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\"."
+ )] = None,
+ app_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ service_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ deployment_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ regional_deployment_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ instance_id: Annotated[
+ Optional[StrictStr],
+ Field(description="Deprecated, prefer using instance_ids instead."
+ )] = None,
+ instance_ids: Annotated[
+ Optional[List[StrictStr]],
+ Field(
+ description=
+ "(Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ stream: Annotated[
+ Optional[StrictStr],
+ Field(description="Deprecated, prefer using streams instead.")] = None,
+ streams: Annotated[
+ Optional[List[StrictStr]],
+ Field(
+ description=
+ "(Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs)."
+ )] = None,
start: Annotated[
Optional[datetime],
Field(
@@ -51239,6 +51281,12 @@ def query_logs(
description=
"(Optional) Looks for this string in logs. Can't be used with `regex`."
)] = None,
+ regions: Annotated[
+ Optional[List[StrictStr]],
+ Field(
+ description=
+ "(Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"])."
+ )] = None,
_request_timeout: Union[None, Annotated[StrictFloat,
Field(gt=0)],
Tuple[Annotated[StrictFloat,
@@ -51256,19 +51304,22 @@ Query logs
**Arguments**:
-- `type` (`str`):
-- `app_id` (`str`):
-- `service_id` (`str`):
-- `deployment_id` (`str`):
-- `instance_id` (`str`):
-- `stream` (`str`):
-- `regional_deployment_id` (`str`):
+- `type` (`str`): Type of logs to retrieve, either "build" or "runtime". Defaults to "runtime".
+- `app_id` (`str`): (Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `service_id` (`str`): (Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `deployment_id` (`str`): (Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `regional_deployment_id` (`str`): (Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `instance_id` (`str`): Deprecated, prefer using instance_ids instead.
+- `instance_ids` (`List[str]`): (Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `stream` (`str`): Deprecated, prefer using streams instead.
+- `streams` (`List[str]`): (Optional) Filter on stream: either "stdout", "stderr" or "koyeb" (for system logs).
- `start` (`datetime`): (Optional) Must always be before `end`. Defaults to 15 minutes ago.
- `end` (`datetime`): (Optional) Must always be after `start`. Defaults to now.
- `order` (`str`): (Optional) `asc` or `desc`. Defaults to `desc`.
- `limit` (`str`): (Optional) Defaults to 100. Maximum of 1000.
- `regex` (`str`): (Optional) Apply a regex to filter logs. Can't be used with `text`.
- `text` (`str`): (Optional) Looks for this string in logs. Can't be used with `regex`.
+- `regions` (`List[str]`): (Optional) Filter on the provided regions (e.g. ["fra", "was"]).
- `_request_timeout` (`int, tuple(int, int), optional`): timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
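+
+A hedged usage sketch of the new filters. `logs_api` stands in for an already-constructed logs API object; its construction is not shown here and is an assumption:
+
+```python
+response = logs_api.query_logs(
+    service_id="my-service-id",          # at least one id filter must be set
+    streams=["stdout", "stderr"],        # replaces the deprecated `stream`
+    instance_ids=["inst-1", "inst-2"],   # replaces the deprecated `instance_id`
+    regions=["fra", "was"],              # filter by region
+)
+```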
@@ -51295,13 +51346,55 @@ Returns the result object.
```python
@validate_call
def query_logs_with_http_info(
- type: Optional[StrictStr] = None,
- app_id: Optional[StrictStr] = None,
- service_id: Optional[StrictStr] = None,
- deployment_id: Optional[StrictStr] = None,
- instance_id: Optional[StrictStr] = None,
- stream: Optional[StrictStr] = None,
- regional_deployment_id: Optional[StrictStr] = None,
+ type: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\"."
+ )] = None,
+ app_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ service_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ deployment_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ regional_deployment_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ instance_id: Annotated[
+ Optional[StrictStr],
+ Field(description="Deprecated, prefer using instance_ids instead."
+ )] = None,
+ instance_ids: Annotated[
+ Optional[List[StrictStr]],
+ Field(
+ description=
+ "(Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ stream: Annotated[
+ Optional[StrictStr],
+ Field(description="Deprecated, prefer using streams instead.")] = None,
+ streams: Annotated[
+ Optional[List[StrictStr]],
+ Field(
+ description=
+ "(Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs)."
+ )] = None,
start: Annotated[
Optional[datetime],
Field(
@@ -51333,6 +51426,12 @@ def query_logs_with_http_info(
description=
"(Optional) Looks for this string in logs. Can't be used with `regex`."
)] = None,
+ regions: Annotated[
+ Optional[List[StrictStr]],
+ Field(
+ description=
+ "(Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"])."
+ )] = None,
_request_timeout: Union[None, Annotated[StrictFloat,
Field(gt=0)],
Tuple[Annotated[StrictFloat,
@@ -51350,19 +51449,22 @@ Query logs
**Arguments**:
-- `type` (`str`):
-- `app_id` (`str`):
-- `service_id` (`str`):
-- `deployment_id` (`str`):
-- `instance_id` (`str`):
-- `stream` (`str`):
-- `regional_deployment_id` (`str`):
+- `type` (`str`): Type of logs to retrieve, either "build" or "runtime". Defaults to "runtime".
+- `app_id` (`str`): (Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `service_id` (`str`): (Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `deployment_id` (`str`): (Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `regional_deployment_id` (`str`): (Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `instance_id` (`str`): Deprecated, prefer using instance_ids instead.
+- `instance_ids` (`List[str]`): (Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `stream` (`str`): Deprecated, prefer using streams instead.
+- `streams` (`List[str]`): (Optional) Filter on stream: either "stdout", "stderr" or "koyeb" (for system logs).
- `start` (`datetime`): (Optional) Must always be before `end`. Defaults to 15 minutes ago.
- `end` (`datetime`): (Optional) Must always be after `start`. Defaults to now.
- `order` (`str`): (Optional) `asc` or `desc`. Defaults to `desc`.
- `limit` (`str`): (Optional) Defaults to 100. Maximum of 1000.
- `regex` (`str`): (Optional) Apply a regex to filter logs. Can't be used with `text`.
- `text` (`str`): (Optional) Looks for this string in logs. Can't be used with `regex`.
+- `regions` (`List[str]`): (Optional) Filter on the provided regions (e.g. ["fra", "was"]).
- `_request_timeout` (`int, tuple(int, int), optional`): timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
@@ -51389,13 +51491,55 @@ Returns the result object.
```python
@validate_call
def query_logs_without_preload_content(
- type: Optional[StrictStr] = None,
- app_id: Optional[StrictStr] = None,
- service_id: Optional[StrictStr] = None,
- deployment_id: Optional[StrictStr] = None,
- instance_id: Optional[StrictStr] = None,
- stream: Optional[StrictStr] = None,
- regional_deployment_id: Optional[StrictStr] = None,
+ type: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\"."
+ )] = None,
+ app_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ service_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ deployment_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ regional_deployment_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ instance_id: Annotated[
+ Optional[StrictStr],
+ Field(description="Deprecated, prefer using instance_ids instead."
+ )] = None,
+ instance_ids: Annotated[
+ Optional[List[StrictStr]],
+ Field(
+ description=
+ "(Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ stream: Annotated[
+ Optional[StrictStr],
+ Field(description="Deprecated, prefer using streams instead.")] = None,
+ streams: Annotated[
+ Optional[List[StrictStr]],
+ Field(
+ description=
+ "(Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs)."
+ )] = None,
start: Annotated[
Optional[datetime],
Field(
@@ -51427,6 +51571,12 @@ def query_logs_without_preload_content(
description=
"(Optional) Looks for this string in logs. Can't be used with `regex`."
)] = None,
+ regions: Annotated[
+ Optional[List[StrictStr]],
+ Field(
+ description=
+ "(Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"])."
+ )] = None,
_request_timeout: Union[None, Annotated[StrictFloat,
Field(gt=0)],
Tuple[Annotated[StrictFloat,
@@ -51444,19 +51594,22 @@ Query logs
**Arguments**:
-- `type` (`str`):
-- `app_id` (`str`):
-- `service_id` (`str`):
-- `deployment_id` (`str`):
-- `instance_id` (`str`):
-- `stream` (`str`):
-- `regional_deployment_id` (`str`):
+- `type` (`str`): Type of logs to retrieve, either "build" or "runtime". Defaults to "runtime".
+- `app_id` (`str`): (Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `service_id` (`str`): (Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `deployment_id` (`str`): (Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `regional_deployment_id` (`str`): (Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `instance_id` (`str`): Deprecated, prefer using instance_ids instead.
+- `instance_ids` (`List[str]`): (Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `stream` (`str`): Deprecated, prefer using streams instead.
+- `streams` (`List[str]`): (Optional) Filter on stream: either "stdout", "stderr" or "koyeb" (for system logs).
- `start` (`datetime`): (Optional) Must always be before `end`. Defaults to 15 minutes ago.
- `end` (`datetime`): (Optional) Must always be after `start`. Defaults to now.
- `order` (`str`): (Optional) `asc` or `desc`. Defaults to `desc`.
- `limit` (`str`): (Optional) Defaults to 100. Maximum of 1000.
- `regex` (`str`): (Optional) Apply a regex to filter logs. Can't be used with `text`.
- `text` (`str`): (Optional) Looks for this string in logs. Can't be used with `regex`.
+- `regions` (`List[str]`): (Optional) Filter on the provided regions (e.g. ["fra", "was"]).
- `_request_timeout` (`int, tuple(int, int), optional`): timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
@@ -51483,15 +51636,62 @@ Returns the result object.
```python
@validate_call
def tail_logs(
- type: Optional[StrictStr] = None,
- app_id: Optional[StrictStr] = None,
- service_id: Optional[StrictStr] = None,
- deployment_id: Optional[StrictStr] = None,
- regional_deployment_id: Optional[StrictStr] = None,
- instance_id: Optional[StrictStr] = None,
- stream: Optional[StrictStr] = None,
- start: Optional[datetime] = None,
- limit: Optional[StrictStr] = None,
+ type: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\"."
+ )] = None,
+ app_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ service_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ deployment_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ regional_deployment_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ instance_id: Annotated[
+ Optional[StrictStr],
+ Field(description="Deprecated, prefer using instance_ids instead."
+ )] = None,
+ instance_ids: Annotated[
+ Optional[List[StrictStr]],
+ Field(
+ description=
+ "(Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ stream: Annotated[
+ Optional[StrictStr],
+ Field(description="Deprecated, prefer using streams instead.")] = None,
+ streams: Annotated[
+ Optional[List[StrictStr]],
+ Field(
+ description=
+ "(Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs)."
+ )] = None,
+ start: Annotated[
+ Optional[datetime],
+ Field(description="(Optional) Defaults to 24 hours ago.")] = None,
+ limit: Annotated[
+ Optional[StrictStr],
+ Field(description="(Optional) Defaults to 1000. Maximum of 1000."
+ )] = None,
regex: Annotated[
Optional[StrictStr],
Field(
@@ -51504,6 +51704,12 @@ def tail_logs(
description=
"(Optional) Looks for this string in logs. Can't be used with `regex`."
)] = None,
+ regions: Annotated[
+ Optional[List[StrictStr]],
+ Field(
+ description=
+ "(Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"])."
+ )] = None,
_request_timeout: Union[None, Annotated[StrictFloat,
Field(gt=0)],
Tuple[Annotated[StrictFloat,
@@ -51521,17 +51727,20 @@ Tails logs
**Arguments**:
-- `type` (`str`):
-- `app_id` (`str`):
-- `service_id` (`str`):
-- `deployment_id` (`str`):
-- `regional_deployment_id` (`str`):
-- `instance_id` (`str`):
-- `stream` (`str`):
-- `start` (`datetime`):
-- `limit` (`str`):
+- `type` (`str`): Type of logs to retrieve, either "build" or "runtime". Defaults to "runtime".
+- `app_id` (`str`): (Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `service_id` (`str`): (Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `deployment_id` (`str`): (Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `regional_deployment_id` (`str`): (Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `instance_id` (`str`): Deprecated, prefer using instance_ids instead.
+- `instance_ids` (`List[str]`): (Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `stream` (`str`): Deprecated, prefer using streams instead.
+- `streams` (`List[str]`): (Optional) Filter on stream: either "stdout", "stderr" or "koyeb" (for system logs).
+- `start` (`datetime`): (Optional) Defaults to 24 hours ago.
+- `limit` (`str`): (Optional) Defaults to 1000. Maximum of 1000.
- `regex` (`str`): (Optional) Apply a regex to filter logs. Can't be used with `text`.
- `text` (`str`): (Optional) Looks for this string in logs. Can't be used with `regex`.
+- `regions` (`List[str]`): (Optional) Filter on the provided regions (e.g. ["fra", "was"]).
- `_request_timeout` (`int, tuple(int, int), optional`): timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
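+
+A hedged usage sketch mirroring the one for `query_logs`; `logs_api` is again an assumed, already-constructed logs API object:
+
+```python
+response = logs_api.tail_logs(
+    app_id="my-app-id",
+    streams=["koyeb"],   # "koyeb" selects system logs
+    regions=["fra"],
+)
+```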
@@ -51558,15 +51767,62 @@ Returns the result object.
```python
@validate_call
def tail_logs_with_http_info(
- type: Optional[StrictStr] = None,
- app_id: Optional[StrictStr] = None,
- service_id: Optional[StrictStr] = None,
- deployment_id: Optional[StrictStr] = None,
- regional_deployment_id: Optional[StrictStr] = None,
- instance_id: Optional[StrictStr] = None,
- stream: Optional[StrictStr] = None,
- start: Optional[datetime] = None,
- limit: Optional[StrictStr] = None,
+ type: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\"."
+ )] = None,
+ app_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ service_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ deployment_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ regional_deployment_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ instance_id: Annotated[
+ Optional[StrictStr],
+ Field(description="Deprecated, prefer using instance_ids instead."
+ )] = None,
+ instance_ids: Annotated[
+ Optional[List[StrictStr]],
+ Field(
+ description=
+ "(Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ stream: Annotated[
+ Optional[StrictStr],
+ Field(description="Deprecated, prefer using streams instead.")] = None,
+ streams: Annotated[
+ Optional[List[StrictStr]],
+ Field(
+ description=
+ "(Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs)."
+ )] = None,
+ start: Annotated[
+ Optional[datetime],
+ Field(description="(Optional) Defaults to 24 hours ago.")] = None,
+ limit: Annotated[
+ Optional[StrictStr],
+ Field(description="(Optional) Defaults to 1000. Maximum of 1000."
+ )] = None,
regex: Annotated[
Optional[StrictStr],
Field(
@@ -51579,6 +51835,12 @@ def tail_logs_with_http_info(
description=
"(Optional) Looks for this string in logs. Can't be used with `regex`."
)] = None,
+ regions: Annotated[
+ Optional[List[StrictStr]],
+ Field(
+ description=
+ "(Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"])."
+ )] = None,
_request_timeout: Union[None, Annotated[StrictFloat,
Field(gt=0)],
Tuple[Annotated[StrictFloat,
@@ -51596,17 +51858,20 @@ Tails logs
**Arguments**:
-- `type` (`str`):
-- `app_id` (`str`):
-- `service_id` (`str`):
-- `deployment_id` (`str`):
-- `regional_deployment_id` (`str`):
-- `instance_id` (`str`):
-- `stream` (`str`):
-- `start` (`datetime`):
-- `limit` (`str`):
+- `type` (`str`): Type of logs to retrieve, either "build" or "runtime". Defaults to "runtime".
+- `app_id` (`str`): (Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `service_id` (`str`): (Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `deployment_id` (`str`): (Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `regional_deployment_id` (`str`): (Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `instance_id` (`str`): Deprecated, prefer using instance_ids instead.
+- `instance_ids` (`List[str]`): (Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `stream` (`str`): Deprecated, prefer using streams instead.
+- `streams` (`List[str]`): (Optional) Filter on stream: either "stdout", "stderr" or "koyeb" (for system logs).
+- `start` (`datetime`): (Optional) Defaults to 24 hours ago.
+- `limit` (`str`): (Optional) Defaults to 1000. Maximum of 1000.
- `regex` (`str`): (Optional) Apply a regex to filter logs. Can't be used with `text`.
- `text` (`str`): (Optional) Looks for this string in logs. Can't be used with `regex`.
+- `regions` (`List[str]`): (Optional) Filter on the provided regions (e.g. ["fra", "was"]).
- `_request_timeout` (`int, tuple(int, int), optional`): timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
@@ -51633,15 +51898,62 @@ Returns the result object.
```python
@validate_call
def tail_logs_without_preload_content(
- type: Optional[StrictStr] = None,
- app_id: Optional[StrictStr] = None,
- service_id: Optional[StrictStr] = None,
- deployment_id: Optional[StrictStr] = None,
- regional_deployment_id: Optional[StrictStr] = None,
- instance_id: Optional[StrictStr] = None,
- stream: Optional[StrictStr] = None,
- start: Optional[datetime] = None,
- limit: Optional[StrictStr] = None,
+ type: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\"."
+ )] = None,
+ app_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ service_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ deployment_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ regional_deployment_id: Annotated[
+ Optional[StrictStr],
+ Field(
+ description=
+ "(Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ instance_id: Annotated[
+ Optional[StrictStr],
+ Field(description="Deprecated, prefer using instance_ids instead."
+ )] = None,
+ instance_ids: Annotated[
+ Optional[List[StrictStr]],
+ Field(
+ description=
+ "(Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set."
+ )] = None,
+ stream: Annotated[
+ Optional[StrictStr],
+ Field(description="Deprecated, prefer using streams instead.")] = None,
+ streams: Annotated[
+ Optional[List[StrictStr]],
+ Field(
+ description=
+ "(Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs)."
+ )] = None,
+ start: Annotated[
+ Optional[datetime],
+ Field(description="(Optional) Defaults to 24 hours ago.")] = None,
+ limit: Annotated[
+ Optional[StrictStr],
+ Field(description="(Optional) Defaults to 1000. Maximum of 1000."
+ )] = None,
regex: Annotated[
Optional[StrictStr],
Field(
@@ -51654,6 +51966,12 @@ def tail_logs_without_preload_content(
description=
"(Optional) Looks for this string in logs. Can't be used with `regex`."
)] = None,
+ regions: Annotated[
+ Optional[List[StrictStr]],
+ Field(
+ description=
+ "(Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"])."
+ )] = None,
_request_timeout: Union[None, Annotated[StrictFloat,
Field(gt=0)],
Tuple[Annotated[StrictFloat,
@@ -51671,17 +51989,20 @@ Tails logs
**Arguments**:
-- `type` (`str`):
-- `app_id` (`str`):
-- `service_id` (`str`):
-- `deployment_id` (`str`):
-- `regional_deployment_id` (`str`):
-- `instance_id` (`str`):
-- `stream` (`str`):
-- `start` (`datetime`):
-- `limit` (`str`):
+- `type` (`str`): Type of logs to retrieve, either "build" or "runtime". Defaults to "runtime".
+- `app_id` (`str`): (Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `service_id` (`str`): (Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `deployment_id` (`str`): (Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `regional_deployment_id` (`str`): (Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `instance_id` (`str`): Deprecated, prefer using instance_ids instead.
+- `instance_ids` (`List[str]`): (Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+- `stream` (`str`): Deprecated, prefer using streams instead.
+- `streams` (`List[str]`): (Optional) Filter on stream: either "stdout", "stderr" or "koyeb" (for system logs).
+- `start` (`datetime`): (Optional) Defaults to 24 hours ago.
+- `limit` (`str`): (Optional) Defaults to 1000. Maximum of 1000.
- `regex` (`str`): (Optional) Apply a regex to filter logs. Can't be used with `text`.
- `text` (`str`): (Optional) Looks for this string in logs. Can't be used with `regex`.
+- `regions` (`List[str]`): (Optional) Filter on the provided regions (e.g. ["fra", "was"]).
- `_request_timeout` (`int, tuple(int, int), optional`): timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
diff --git a/docs/sandbox.md b/docs/sandbox.md
new file mode 100644
index 00000000..257268ac
--- /dev/null
+++ b/docs/sandbox.md
@@ -0,0 +1,2535 @@
+
+
+# koyeb/sandbox
+
+Koyeb Sandbox - Interactive execution environment for running arbitrary code on Koyeb
+
+
+
+# koyeb/sandbox.exec
+
+Command execution utilities for Koyeb Sandbox instances, using the SandboxClient HTTP API.
+
+
+
+## CommandStatus Objects
+
+```python
+class CommandStatus(str, Enum)
+```
+
+Command execution status
+
+
+
+## CommandResult Objects
+
+```python
+@dataclass
+class CommandResult()
+```
+
+Result of a command execution using Koyeb API models
+
+
+
+#### success
+
+```python
+@property
+def success() -> bool
+```
+
+Check if command executed successfully
+
+
+
+#### output
+
+```python
+@property
+def output() -> str
+```
+
+Get combined stdout and stderr output
+
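+A minimal sketch of inspecting a result (assuming a `sandbox` instance created as shown later in this document):
+
+```python
+result = sandbox.exec("echo hello")
+if result.success:        # True when the command executed successfully
+    print(result.output)  # combined stdout and stderr
+```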
+
+
+## SandboxCommandError Objects
+
+```python
+class SandboxCommandError(SandboxError)
+```
+
+Raised when command execution fails
+
+
+
+## SandboxExecutor Objects
+
+```python
+class SandboxExecutor()
+```
+
+Synchronous command execution interface for Koyeb Sandbox instances.
+Bound to a specific sandbox instance.
+
+For async usage, use AsyncSandboxExecutor instead.
+
+
+
+#### \_\_call\_\_
+
+```python
+def __call__(
+ command: str,
+ cwd: Optional[str] = None,
+ env: Optional[Dict[str, str]] = None,
+ timeout: int = 30,
+ on_stdout: Optional[Callable[[str], None]] = None,
+ on_stderr: Optional[Callable[[str], None]] = None) -> CommandResult
+```
+
+Execute a command in a shell synchronously. Supports streaming output via callbacks.
+
+**Arguments**:
+
+- `command` - Command to execute as a string (e.g., "python -c 'print(2+2)'")
+- `cwd` - Working directory for the command
+- `env` - Environment variables for the command
+- `timeout` - Command timeout in seconds (enforced for HTTP requests)
+- `on_stdout` - Optional callback for streaming stdout chunks
+- `on_stderr` - Optional callback for streaming stderr chunks
+
+
+**Returns**:
+
+- `CommandResult` - Result of the command execution
+
+
+**Example**:
+
+ ```python
+ # Synchronous execution
+ result = sandbox.exec("echo hello")
+
+ # With streaming callbacks
+ result = sandbox.exec(
+ "echo hello; sleep 1; echo world",
+ on_stdout=lambda data: print(f"OUT: {data}"),
+ on_stderr=lambda data: print(f"ERR: {data}"),
+ )
+ ```
+
+
+
+## AsyncSandboxExecutor Objects
+
+```python
+class AsyncSandboxExecutor(SandboxExecutor)
+```
+
+Async command execution interface for Koyeb Sandbox instances.
+Bound to a specific sandbox instance.
+
+Inherits from SandboxExecutor and provides async command execution.
+
+
+
+#### \_\_call\_\_
+
+```python
+async def __call__(
+ command: str,
+ cwd: Optional[str] = None,
+ env: Optional[Dict[str, str]] = None,
+ timeout: int = 30,
+ on_stdout: Optional[Callable[[str], None]] = None,
+ on_stderr: Optional[Callable[[str], None]] = None) -> CommandResult
+```
+
+Execute a command in a shell asynchronously. Supports streaming output via callbacks.
+
+**Arguments**:
+
+- `command` - Command to execute as a string (e.g., "python -c 'print(2+2)'")
+- `cwd` - Working directory for the command
+- `env` - Environment variables for the command
+- `timeout` - Command timeout in seconds (enforced for HTTP requests)
+- `on_stdout` - Optional callback for streaming stdout chunks
+- `on_stderr` - Optional callback for streaming stderr chunks
+
+
+**Returns**:
+
+- `CommandResult` - Result of the command execution
+
+
+**Example**:
+
+ ```python
+ # Async execution
+ result = await sandbox.exec("echo hello")
+
+ # With streaming callbacks
+ result = await sandbox.exec(
+ "echo hello; sleep 1; echo world",
+ on_stdout=lambda data: print(f"OUT: {data}"),
+ on_stderr=lambda data: print(f"ERR: {data}"),
+ )
+ ```
+
+
+
+# koyeb/sandbox.filesystem
+
+Filesystem operations for Koyeb Sandbox instances, using the SandboxClient HTTP API.
+
+
+
+## SandboxFilesystemError Objects
+
+```python
+class SandboxFilesystemError(SandboxError)
+```
+
+Base exception for filesystem operations
+
+
+
+## SandboxFileNotFoundError Objects
+
+```python
+class SandboxFileNotFoundError(SandboxFilesystemError)
+```
+
+Raised when a file or directory is not found
+
+
+
+## SandboxFileExistsError Objects
+
+```python
+class SandboxFileExistsError(SandboxFilesystemError)
+```
+
+Raised when a file already exists
+
+
+
+## FileInfo Objects
+
+```python
+@dataclass
+class FileInfo()
+```
+
+File information
+
+
+
+## SandboxFilesystem Objects
+
+```python
+class SandboxFilesystem()
+```
+
+Synchronous filesystem operations for Koyeb Sandbox instances, using the SandboxClient HTTP API.
+
+For async usage, use AsyncSandboxFilesystem instead.
+
+
+
+#### write\_file
+
+```python
+def write_file(path: str,
+ content: Union[str, bytes],
+ encoding: str = "utf-8") -> None
+```
+
+Write content to a file synchronously.
+
+**Arguments**:
+
+- `path` - Absolute path to the file
+- `content` - Content to write (string or bytes)
+- `encoding` - File encoding (default: "utf-8"). Use "base64" for binary data.
+
+
+
+#### read\_file
+
+```python
+def read_file(path: str, encoding: str = "utf-8") -> FileInfo
+```
+
+Read a file from the sandbox synchronously.
+
+**Arguments**:
+
+- `path` - Absolute path to the file
+- `encoding` - File encoding (default: "utf-8"). Use "base64" for binary data.
+
+
+**Returns**:
+
+- `FileInfo` - Object with content and encoding
+
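+A minimal round-trip sketch (assuming an existing `sandbox` instance):
+
+```python
+fs = sandbox.filesystem
+fs.write_file("/tmp/hello.txt", "hello world")
+info = fs.read_file("/tmp/hello.txt")
+print(info.content)  # "hello world"
+```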
+
+
+#### mkdir
+
+```python
+def mkdir(path: str, recursive: bool = False) -> None
+```
+
+Create a directory synchronously.
+
+**Arguments**:
+
+- `path` - Absolute path to the directory
+- `recursive` - Create parent directories if needed (default: False; currently unused, as the API always creates parent directories)
+
+
+
+#### list\_dir
+
+```python
+def list_dir(path: str = ".") -> List[str]
+```
+
+List contents of a directory synchronously.
+
+**Arguments**:
+
+- `path` - Path to the directory (default: current directory)
+
+
+**Returns**:
+
+- `List[str]` - Names of files and directories within the specified path.
+
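+For example (assuming an existing `sandbox` instance):
+
+```python
+fs = sandbox.filesystem
+fs.mkdir("/tmp/project")
+print(fs.list_dir("/tmp"))  # e.g. ["project", ...]
+```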
+
+
+#### delete\_file
+
+```python
+def delete_file(path: str) -> None
+```
+
+Delete a file synchronously.
+
+**Arguments**:
+
+- `path` - Absolute path to the file
+
+
+
+#### delete\_dir
+
+```python
+def delete_dir(path: str) -> None
+```
+
+Delete a directory synchronously.
+
+**Arguments**:
+
+- `path` - Absolute path to the directory
+
+
+
+#### rename\_file
+
+```python
+def rename_file(old_path: str, new_path: str) -> None
+```
+
+Rename a file synchronously.
+
+**Arguments**:
+
+- `old_path` - Current file path
+- `new_path` - New file path
+
+
+
+#### move\_file
+
+```python
+def move_file(source_path: str, destination_path: str) -> None
+```
+
+Move a file to a different directory synchronously.
+
+**Arguments**:
+
+- `source_path` - Current file path
+- `destination_path` - Destination path
+
+
+
+#### write\_files
+
+```python
+def write_files(files: List[Dict[str, str]]) -> None
+```
+
+Write multiple files in a single operation synchronously.
+
+**Arguments**:
+
+- `files` - List of dictionaries, each with 'path', 'content', and optional 'encoding'.
+
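+A sketch of the expected shape (assuming an existing `sandbox` instance; `b64_data` is a base64-encoded string standing in for a binary payload):
+
+```python
+sandbox.filesystem.write_files([
+    {"path": "/app/main.py", "content": "print('hi')"},
+    {"path": "/app/logo.png", "content": b64_data, "encoding": "base64"},
+])
+```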
+
+
+#### exists
+
+```python
+def exists(path: str) -> bool
+```
+
+Check if file/directory exists synchronously
+
+
+
+#### is\_file
+
+```python
+def is_file(path: str) -> bool
+```
+
+Check if path is a file synchronously
+
+
+
+#### is\_dir
+
+```python
+def is_dir(path: str) -> bool
+```
+
+Check if path is a directory synchronously
+
+
+
+#### upload\_file
+
+```python
+def upload_file(local_path: str,
+ remote_path: str,
+ encoding: str = "utf-8") -> None
+```
+
+Upload a local file to the sandbox synchronously.
+
+**Arguments**:
+
+- `local_path` - Path to the local file
+- `remote_path` - Destination path in the sandbox
+- `encoding` - File encoding (default: "utf-8"). Use "base64" for binary files.
+
+
+**Raises**:
+
+- `SandboxFileNotFoundError` - If local file doesn't exist
+- `UnicodeDecodeError` - If file cannot be decoded with specified encoding
+
+
+
+#### download\_file
+
+```python
+def download_file(remote_path: str,
+ local_path: str,
+ encoding: str = "utf-8") -> None
+```
+
+Download a file from the sandbox to a local path synchronously.
+
+**Arguments**:
+
+- `remote_path` - Path to the file in the sandbox
+- `local_path` - Destination path on the local filesystem
+- `encoding` - File encoding (default: "utf-8"). Use "base64" for binary files.
+
+
+**Raises**:
+
+- `SandboxFileNotFoundError` - If remote file doesn't exist
+
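+For example (assuming an existing `sandbox` instance):
+
+```python
+fs = sandbox.filesystem
+fs.upload_file("./notes.txt", "/tmp/notes.txt")
+fs.download_file("/tmp/notes.txt", "./notes-copy.txt")
+```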
+
+
+#### ls
+
+```python
+def ls(path: str = ".") -> List[str]
+```
+
+List directory contents synchronously.
+
+**Arguments**:
+
+- `path` - Path to list
+
+
+**Returns**:
+
+ List of file/directory names
+
+
+
+#### rm
+
+```python
+def rm(path: str, recursive: bool = False) -> None
+```
+
+Remove file or directory synchronously.
+
+**Arguments**:
+
+- `path` - Path to remove
+- `recursive` - Remove recursively
+
+
+
+#### open
+
+```python
+def open(path: str, mode: str = "r") -> SandboxFileIO
+```
+
+Open a file in the sandbox synchronously.
+
+**Arguments**:
+
+- `path` - Path to the file
+- `mode` - Open mode ('r', 'w', 'a', etc.)
+
+
+**Returns**:
+
+- `SandboxFileIO` - File handle
+
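+A minimal sketch of the file-handle workflow (assuming an existing `sandbox` instance):
+
+```python
+f = sandbox.filesystem.open("/tmp/notes.txt", "w")
+f.write("first line\n")
+f.close()
+
+f = sandbox.filesystem.open("/tmp/notes.txt")  # mode defaults to "r"
+print(f.read())
+f.close()
+```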
+
+
+## AsyncSandboxFilesystem Objects
+
+```python
+class AsyncSandboxFilesystem(SandboxFilesystem)
+```
+
+Async filesystem operations for Koyeb Sandbox instances.
+Inherits from SandboxFilesystem and provides async methods.
+
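+A minimal async sketch (assuming `sandbox` is an `AsyncSandbox`, documented below, and that its `filesystem` property returns this async variant):
+
+```python
+import asyncio
+
+async def main() -> None:
+    fs = sandbox.filesystem
+    await fs.write_file("/tmp/hello.txt", "hello")
+    info = await fs.read_file("/tmp/hello.txt")
+    print(info.content)
+
+asyncio.run(main())
+```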
+
+
+#### write\_file
+
+```python
+@async_wrapper("write_file")
+async def write_file(path: str,
+ content: Union[str, bytes],
+ encoding: str = "utf-8") -> None
+```
+
+Write content to a file asynchronously.
+
+**Arguments**:
+
+- `path` - Absolute path to the file
+- `content` - Content to write (string or bytes)
+- `encoding` - File encoding (default: "utf-8"). Use "base64" for binary data.
+
+
+
+#### read\_file
+
+```python
+@async_wrapper("read_file")
+async def read_file(path: str, encoding: str = "utf-8") -> FileInfo
+```
+
+Read a file from the sandbox asynchronously.
+
+**Arguments**:
+
+- `path` - Absolute path to the file
+- `encoding` - File encoding (default: "utf-8"). Use "base64" for binary data.
+
+
+**Returns**:
+
+- `FileInfo` - Object with content and encoding
+
+
+
+#### mkdir
+
+```python
+@async_wrapper("mkdir")
+async def mkdir(path: str, recursive: bool = False) -> None
+```
+
+Create a directory asynchronously.
+
+**Arguments**:
+
+- `path` - Absolute path to the directory
+- `recursive` - Create parent directories if needed (default: False; currently unused, as the API always creates parent directories)
+
+
+
+#### list\_dir
+
+```python
+@async_wrapper("list_dir")
+async def list_dir(path: str = ".") -> List[str]
+```
+
+List contents of a directory asynchronously.
+
+**Arguments**:
+
+- `path` - Path to the directory (default: current directory)
+
+
+**Returns**:
+
+- `List[str]` - Names of files and directories within the specified path.
+
+
+
+#### delete\_file
+
+```python
+@async_wrapper("delete_file")
+async def delete_file(path: str) -> None
+```
+
+Delete a file asynchronously.
+
+**Arguments**:
+
+- `path` - Absolute path to the file
+
+
+
+#### delete\_dir
+
+```python
+@async_wrapper("delete_dir")
+async def delete_dir(path: str) -> None
+```
+
+Delete a directory asynchronously.
+
+**Arguments**:
+
+- `path` - Absolute path to the directory
+
+
+
+#### rename\_file
+
+```python
+@async_wrapper("rename_file")
+async def rename_file(old_path: str, new_path: str) -> None
+```
+
+Rename a file asynchronously.
+
+**Arguments**:
+
+- `old_path` - Current file path
+- `new_path` - New file path
+
+
+
+#### move\_file
+
+```python
+@async_wrapper("move_file")
+async def move_file(source_path: str, destination_path: str) -> None
+```
+
+Move a file to a different directory asynchronously.
+
+**Arguments**:
+
+- `source_path` - Current file path
+- `destination_path` - Destination path
+
+
+
+#### write\_files
+
+```python
+async def write_files(files: List[Dict[str, str]]) -> None
+```
+
+Write multiple files in a single operation asynchronously.
+
+**Arguments**:
+
+- `files` - List of dictionaries, each with 'path', 'content', and optional 'encoding'.
+
+
+
+#### exists
+
+```python
+@async_wrapper("exists")
+async def exists(path: str) -> bool
+```
+
+Check if file/directory exists asynchronously
+
+
+
+#### is\_file
+
+```python
+@async_wrapper("is_file")
+async def is_file(path: str) -> bool
+```
+
+Check if path is a file asynchronously
+
+
+
+#### is\_dir
+
+```python
+@async_wrapper("is_dir")
+async def is_dir(path: str) -> bool
+```
+
+Check if path is a directory asynchronously
+
+
+
+#### upload\_file
+
+```python
+@async_wrapper("upload_file")
+async def upload_file(local_path: str,
+ remote_path: str,
+ encoding: str = "utf-8") -> None
+```
+
+Upload a local file to the sandbox asynchronously.
+
+**Arguments**:
+
+- `local_path` - Path to the local file
+- `remote_path` - Destination path in the sandbox
+- `encoding` - File encoding (default: "utf-8"). Use "base64" for binary files.
+
+
+
+#### download\_file
+
+```python
+@async_wrapper("download_file")
+async def download_file(remote_path: str,
+ local_path: str,
+ encoding: str = "utf-8") -> None
+```
+
+Download a file from the sandbox to a local path asynchronously.
+
+**Arguments**:
+
+- `remote_path` - Path to the file in the sandbox
+- `local_path` - Destination path on the local filesystem
+- `encoding` - File encoding (default: "utf-8"). Use "base64" for binary files.
+
+
+
+#### ls
+
+```python
+async def ls(path: str = ".") -> List[str]
+```
+
+List directory contents asynchronously.
+
+**Arguments**:
+
+- `path` - Path to list
+
+
+**Returns**:
+
+ List of file/directory names
+
+
+
+#### rm
+
+```python
+@async_wrapper("rm")
+async def rm(path: str, recursive: bool = False) -> None
+```
+
+Remove file or directory asynchronously.
+
+**Arguments**:
+
+- `path` - Path to remove
+- `recursive` - Remove recursively
+
+
+
+#### open
+
+```python
+def open(path: str, mode: str = "r") -> AsyncSandboxFileIO
+```
+
+Open a file in the sandbox asynchronously.
+
+**Arguments**:
+
+- `path` - Path to the file
+- `mode` - Open mode ('r', 'w', 'a', etc.)
+
+
+**Returns**:
+
+- `AsyncSandboxFileIO` - Async file handle
+
+
+
+## SandboxFileIO Objects
+
+```python
+class SandboxFileIO()
+```
+
+Synchronous file I/O handle for sandbox files
+
+
+
+#### read
+
+```python
+def read() -> str
+```
+
+Read file content synchronously
+
+
+
+#### write
+
+```python
+def write(content: str) -> None
+```
+
+Write content to file synchronously
+
+
+
+#### close
+
+```python
+def close() -> None
+```
+
+Close the file
+
+
+
+## AsyncSandboxFileIO Objects
+
+```python
+class AsyncSandboxFileIO()
+```
+
+Async file I/O handle for sandbox files
+
+
+
+#### read
+
+```python
+async def read() -> str
+```
+
+Read file content asynchronously
+
+
+
+#### write
+
+```python
+async def write(content: str) -> None
+```
+
+Write content to file asynchronously
+
+
+
+#### close
+
+```python
+def close() -> None
+```
+
+Close the file
+
+
+
+# koyeb/sandbox.sandbox
+
+Koyeb Sandbox - Python SDK for creating and managing Koyeb sandboxes
+
+
+
+## ProcessInfo Objects
+
+```python
+@dataclass
+class ProcessInfo()
+```
+
+Type definition for process information returned by list_processes.
+
+
+
+#### id
+
+Process ID (UUID string)
+
+
+
+#### command
+
+The command that was executed
+
+
+
+#### status
+
+Process status (e.g., "running", "completed")
+
+
+
+#### pid
+
+OS process ID (if running)
+
+
+
+#### exit\_code
+
+Exit code (if completed)
+
+
+
+#### started\_at
+
+ISO 8601 timestamp when process started
+
+
+
+## ExposedPort Objects
+
+```python
+@dataclass
+class ExposedPort()
+```
+
+Result of exposing a port via TCP proxy.
+
+
+
+## Sandbox Objects
+
+```python
+class Sandbox()
+```
+
+Synchronous sandbox for running code on Koyeb infrastructure.
+Provides creation and deletion, with health polling to wait for readiness.
+
+
+
+#### id
+
+```python
+@property
+def id() -> str
+```
+
+Get the service ID of the sandbox.
+
+
+
+#### create
+
+```python
+@classmethod
+def create(cls,
+ image: str = "koyeb/sandbox",
+ name: str = "quick-sandbox",
+ wait_ready: bool = True,
+ instance_type: str = "micro",
+ exposed_port_protocol: Optional[str] = None,
+ env: Optional[Dict[str, str]] = None,
+ region: Optional[str] = None,
+ api_token: Optional[str] = None,
+ timeout: int = 300,
+ idle_timeout: Optional[IdleTimeout] = None,
+ enable_tcp_proxy: bool = False,
+ privileged: bool = False) -> Sandbox
+```
+
+Create a new sandbox instance.
+
+**Arguments**:
+
+- `image` - Docker image to use (default: koyeb/sandbox)
+- `name` - Name of the sandbox
+- `wait_ready` - Wait for sandbox to be ready (default: True)
+- `instance_type` - Instance type (default: micro)
+- `exposed_port_protocol` - Protocol to expose ports with ("http" or "http2").
+ If None, defaults to "http".
+ If provided, must be one of "http" or "http2".
+- `env` - Environment variables
+- `region` - Region to deploy to (if None, defaults to "na")
+- `api_token` - Koyeb API token (if None, will try to get from KOYEB_API_TOKEN env var)
+- `timeout` - Timeout for sandbox creation in seconds
+- `idle_timeout` - Idle timeout configuration for scale-to-zero
+ - None: Auto-enable (light_sleep=300s, deep_sleep=600s)
+ - 0: Disable scale-to-zero (keep always-on)
+ - int > 0: Deep sleep only (e.g., 600 for 600s deep sleep)
+ - dict: Explicit configuration with {"light_sleep": 300, "deep_sleep": 600}
+- `enable_tcp_proxy` - If True, enables TCP proxy for direct TCP access to port 3031
+- `privileged` - If True, run the container in privileged mode (default: False)
+
+
+**Returns**:
+
+- `Sandbox` - A new Sandbox instance
+
+
+**Raises**:
+
+- `ValueError` - If API token is not provided
+- `SandboxTimeoutError` - If wait_ready is True and sandbox does not become ready within timeout
+
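+A creation sketch covering the documented `idle_timeout` forms (assumes `KOYEB_API_TOKEN` is set in the environment; the import path is an assumption):
+
+```python
+from koyeb.sandbox import Sandbox  # assumed import path
+
+# Each call is shown independently to illustrate one idle_timeout form.
+sandbox = Sandbox.create(name="demo", idle_timeout=None)  # auto: light_sleep=300s, deep_sleep=600s
+sandbox = Sandbox.create(name="demo", idle_timeout=0)     # disable scale-to-zero (always-on)
+sandbox = Sandbox.create(name="demo", idle_timeout=600)   # deep sleep only, after 600s
+sandbox = Sandbox.create(name="demo", idle_timeout={"light_sleep": 300, "deep_sleep": 600})
+```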
+
+
+#### get\_from\_id
+
+```python
+@classmethod
+def get_from_id(cls, id: str, api_token: Optional[str] = None) -> "Sandbox"
+```
+
+Get a sandbox by service ID.
+
+**Arguments**:
+
+- `id` - Service ID of the sandbox
+- `api_token` - Koyeb API token (if None, will try to get from KOYEB_API_TOKEN env var)
+
+
+**Returns**:
+
+- `Sandbox` - The Sandbox instance
+
+
+**Raises**:
+
+- `ValueError` - If API token is not provided or id is invalid
+- `SandboxError` - If sandbox is not found or retrieval fails
+
+
+
+#### wait\_ready
+
+```python
+def wait_ready(timeout: int = DEFAULT_INSTANCE_WAIT_TIMEOUT,
+ poll_interval: float = DEFAULT_POLL_INTERVAL) -> bool
+```
+
+Wait for the sandbox to become ready by polling its health.
+
+**Arguments**:
+
+- `timeout` - Maximum time to wait in seconds
+- `poll_interval` - Time between health checks in seconds
+
+
+**Returns**:
+
+- `bool` - True if sandbox became ready, False if timeout
+
+
+
+#### wait\_tcp\_proxy\_ready
+
+```python
+def wait_tcp_proxy_ready(timeout: int = DEFAULT_INSTANCE_WAIT_TIMEOUT,
+ poll_interval: float = DEFAULT_POLL_INTERVAL) -> bool
+```
+
+Wait for TCP proxy to become ready and available.
+
+Polls the deployment metadata until the TCP proxy information is available.
+This is useful when enable_tcp_proxy=True was set during sandbox creation,
+as the proxy information may not be immediately available.
+
+**Arguments**:
+
+- `timeout` - Maximum time to wait in seconds
+- `poll_interval` - Time between checks in seconds
+
+
+**Returns**:
+
+- `bool` - True if TCP proxy became ready, False if timeout
+
+
+
+#### delete
+
+```python
+def delete() -> None
+```
+
+Delete the sandbox instance.
+
+
+
+#### get\_domain
+
+```python
+def get_domain() -> Optional[str]
+```
+
+Get the public domain of the sandbox.
+
+Returns the domain name (e.g., "app-name-org.koyeb.app") without protocol or path.
+To construct the URL, use: f"https://{sandbox.get_domain()}"
+
+**Returns**:
+
+- `Optional[str]` - The domain name or None if unavailable
+
+
+
+#### get\_tcp\_proxy\_info
+
+```python
+def get_tcp_proxy_info() -> Optional[tuple[str, int]]
+```
+
+Get the TCP proxy host and port for the sandbox.
+
+Returns the TCP proxy host and port as a tuple (host, port) for direct TCP access to port 3031.
+This is only available if enable_tcp_proxy=True was set when creating the sandbox.
+
+**Returns**:
+
+ Optional[tuple[str, int]]: A tuple of (host, port) or None if unavailable
+
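+For example (assuming the sandbox was created with enable_tcp_proxy=True):
+
+```python
+proxy = sandbox.get_tcp_proxy_info()
+if proxy is not None:
+    host, port = proxy
+    print(f"connect to {host}:{port}")  # forwarded to port 3031 in the sandbox
+```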
+
+
+#### is\_healthy
+
+```python
+def is_healthy() -> bool
+```
+
+Check if sandbox is healthy and ready for operations
+
+
+
+#### filesystem
+
+```python
+@property
+def filesystem() -> "SandboxFilesystem"
+```
+
+Get filesystem operations interface
+
+
+
+#### exec
+
+```python
+@property
+def exec() -> "SandboxExecutor"
+```
+
+Get command execution interface
+
+
+
+#### expose\_port
+
+```python
+def expose_port(port: int) -> ExposedPort
+```
+
+Expose a port to external connections via TCP proxy.
+
+Binds the specified internal port to the TCP proxy, allowing external
+connections to reach services running on that port inside the sandbox.
+Automatically unbinds any existing port before binding the new one.
+
+**Arguments**:
+
+- `port` - The internal port number to expose (must be a valid port number between 1 and 65535)
+
+
+**Returns**:
+
+- `ExposedPort` - An object with `port` and `exposed_at` attributes:
+ - port: The exposed port number
+ - exposed_at: The full URL with https:// protocol (e.g., "https://app-name-org.koyeb.app")
+
+
+**Raises**:
+
+- `ValueError` - If port is not in valid range [1, 65535]
+- `SandboxError` - If the port binding operation fails
+
+
+**Notes**:
+
+ - Only one port can be exposed at a time
+ - Any existing port binding is automatically unbound before binding the new port
+ - The port must be available and accessible within the sandbox environment
+ - The TCP proxy is accessed via get_tcp_proxy_info() which returns (host, port)
+
+
+**Example**:
+
+ >>> result = sandbox.expose_port(8080)
+ >>> result.port
+ 8080
+ >>> result.exposed_at
+ 'https://app-name-org.koyeb.app'
+
+
+
+#### unexpose\_port
+
+```python
+def unexpose_port() -> None
+```
+
+Unexpose a port from external connections.
+
+Removes the TCP proxy port binding, stopping traffic forwarding to the
+previously bound port.
+
+**Raises**:
+
+- `SandboxError` - If the port unbinding operation fails
+
+
+**Notes**:
+
+ - After unexposing, the TCP proxy will no longer forward traffic
+ - Safe to call even if no port is currently bound
+
+
+
+#### launch\_process
+
+```python
+def launch_process(cmd: str,
+ cwd: Optional[str] = None,
+ env: Optional[Dict[str, str]] = None) -> str
+```
+
+Launch a background process in the sandbox.
+
+Starts a long-running background process that continues executing even after
+the method returns. Use this for servers, workers, or other long-running tasks.
+
+**Arguments**:
+
+- `cmd` - The shell command to execute as a background process
+- `cwd` - Optional working directory for the process
+- `env` - Optional environment variables to set/override for the process
+
+
+**Returns**:
+
+- `str` - The unique process ID (UUID string) that can be used to manage the process
+
+
+**Raises**:
+
+- `SandboxError` - If the process launch fails
+
+
+**Example**:
+
+ >>> process_id = sandbox.launch_process("python -u server.py")
+ >>> print(f"Started process: {process_id}")
+
+
+
+#### kill\_process
+
+```python
+def kill_process(process_id: str) -> None
+```
+
+Kill a background process by its ID.
+
+Terminates a running background process. This sends a SIGTERM signal to the process,
+allowing it to clean up gracefully. If the process doesn't terminate within a timeout,
+it will be forcefully killed with SIGKILL.
+
+**Arguments**:
+
+- `process_id` - The unique process ID (UUID string) to kill
+
+
+**Raises**:
+
+- `SandboxError` - If the process kill operation fails
+
+
+**Example**:
+
+ >>> sandbox.kill_process("550e8400-e29b-41d4-a716-446655440000")
+
+
+
+#### list\_processes
+
+```python
+def list_processes() -> List[ProcessInfo]
+```
+
+List all background processes.
+
+Returns information about all currently running and recently completed background
+processes. This includes both active processes and processes that have completed
+(which remain in memory until server restart).
+
+**Returns**:
+
+- `List[ProcessInfo]` - List of process objects, each containing:
+ - id: Process ID (UUID string)
+ - command: The command that was executed
+ - status: Process status (e.g., "running", "completed")
+ - pid: OS process ID (if running)
+ - exit_code: Exit code (if completed)
+ - started_at: ISO 8601 timestamp when process started
+ - completed_at: ISO 8601 timestamp when process completed (if applicable)
+
+
+**Raises**:
+
+- `SandboxError` - If listing processes fails
+
+
+**Example**:
+
+ >>> processes = sandbox.list_processes()
+ >>> for process in processes:
+ ... print(f"{process.id}: {process.command} - {process.status}")
+
+
+
+#### kill\_all\_processes
+
+```python
+def kill_all_processes() -> int
+```
+
+Kill all running background processes.
+
+Convenience method that lists all processes and kills them all. This is useful
+for cleanup operations.
+
+**Returns**:
+
+- `int` - The number of processes that were killed
+
+
+**Raises**:
+
+- `SandboxError` - If listing or killing processes fails
+
+
+**Example**:
+
+ >>> count = sandbox.kill_all_processes()
+ >>> print(f"Killed {count} processes")
+
+
+
+#### \_\_enter\_\_
+
+```python
+def __enter__() -> "Sandbox"
+```
+
+Context manager entry - returns self.
+
+
+
+#### \_\_exit\_\_
+
+```python
+def __exit__(exc_type, exc_val, exc_tb) -> None
+```
+
+Context manager exit - automatically deletes the sandbox.
+
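+A minimal sketch of the context-manager flow (import path assumed as in the creation sketch above):
+
+```python
+with Sandbox.create(name="demo") as sandbox:
+    print(sandbox.exec("echo hello").output)
+# the sandbox is deleted automatically on exit
+```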
+
+
+## AsyncSandbox Objects
+
+```python
+class AsyncSandbox(Sandbox)
+```
+
+Async sandbox for running code on Koyeb infrastructure.
+Inherits from Sandbox and provides async wrappers for all operations.
+
+
+
+#### get\_from\_id
+
+```python
+@classmethod
+async def get_from_id(cls,
+ id: str,
+ api_token: Optional[str] = None) -> "AsyncSandbox"
+```
+
+Get a sandbox by service ID asynchronously.
+
+**Arguments**:
+
+- `id` - Service ID of the sandbox
+- `api_token` - Koyeb API token (if None, will try to get from KOYEB_API_TOKEN env var)
+
+
+**Returns**:
+
+- `AsyncSandbox` - The AsyncSandbox instance
+
+
+**Raises**:
+
+- `ValueError` - If API token is not provided or id is invalid
+- `SandboxError` - If sandbox is not found or retrieval fails
+
+
+
+#### create
+
+```python
+@classmethod
+async def create(cls,
+ image: str = "koyeb/sandbox",
+ name: str = "quick-sandbox",
+ wait_ready: bool = True,
+ instance_type: str = "nano",
+ exposed_port_protocol: Optional[str] = None,
+ env: Optional[Dict[str, str]] = None,
+ region: Optional[str] = None,
+ api_token: Optional[str] = None,
+ timeout: int = 300,
+ idle_timeout: Optional[IdleTimeout] = None,
+ enable_tcp_proxy: bool = False,
+ privileged: bool = False) -> AsyncSandbox
+```
+
+Create a new sandbox instance with async support.
+
+**Arguments**:
+
+- `image` - Docker image to use (default: koyeb/sandbox)
+- `name` - Name of the sandbox
+- `wait_ready` - Wait for sandbox to be ready (default: True)
+- `instance_type` - Instance type (default: nano)
+- `exposed_port_protocol` - Protocol to expose ports with, either "http" or
+ "http2". Defaults to "http" when None.
+- `env` - Environment variables
+- `region` - Region to deploy to (default: "na")
+- `api_token` - Koyeb API token (if None, will try to get from KOYEB_API_TOKEN env var)
+- `timeout` - Timeout for sandbox creation in seconds
+- `idle_timeout` - Idle timeout configuration for scale-to-zero
+ - None: Auto-enable (light_sleep=300s, deep_sleep=600s)
+ - 0: Disable scale-to-zero (keep always-on)
+ - int > 0: Deep sleep only (e.g., 600 for 600s deep sleep)
+ - dict: Explicit configuration with {"light_sleep": 300, "deep_sleep": 600}
+- `enable_tcp_proxy` - If True, enables TCP proxy for direct TCP access to port 3031
+- `privileged` - If True, run the container in privileged mode (default: False)
+
+
+**Returns**:
+
+- `AsyncSandbox` - A new AsyncSandbox instance
+
+
+**Raises**:
+
+- `ValueError` - If API token is not provided
+- `SandboxTimeoutError` - If wait_ready is True and sandbox does not become ready within timeout
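+
+A hedged async sketch (argument values are illustrative; `api_token` may be
+omitted when `KOYEB_API_TOKEN` is set in the environment):
+
+**Example**:
+
+ >>> sandbox = await AsyncSandbox.create(
+ ... image="koyeb/sandbox",
+ ... name="quick-sandbox",
+ ... instance_type="nano",
+ ... )
+ >>> await sandbox.delete()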
+
+
+
+#### wait\_ready
+
+```python
+async def wait_ready(timeout: int = DEFAULT_INSTANCE_WAIT_TIMEOUT,
+ poll_interval: float = DEFAULT_POLL_INTERVAL) -> bool
+```
+
+Wait for sandbox to become ready with proper async polling.
+
+**Arguments**:
+
+- `timeout` - Maximum time to wait in seconds
+- `poll_interval` - Time between health checks in seconds
+
+
+**Returns**:
+
+- `bool` - True if sandbox became ready, False if timeout
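+
+When the sandbox was created with `wait_ready=False`, readiness can be awaited
+explicitly; a sketch (the timeout value is illustrative):
+
+**Example**:
+
+ >>> ready = await sandbox.wait_ready(timeout=120)
+ >>> if not ready:
+ ... raise RuntimeError("sandbox did not become ready in time")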
+
+
+
+#### wait\_tcp\_proxy\_ready
+
+```python
+async def wait_tcp_proxy_ready(
+ timeout: int = DEFAULT_INSTANCE_WAIT_TIMEOUT,
+ poll_interval: float = DEFAULT_POLL_INTERVAL) -> bool
+```
+
+Wait for TCP proxy to become ready and available asynchronously.
+
+Polls the deployment metadata until the TCP proxy information is available.
+This is useful when enable_tcp_proxy=True was set during sandbox creation,
+as the proxy information may not be immediately available.
+
+**Arguments**:
+
+- `timeout` - Maximum time to wait in seconds
+- `poll_interval` - Time between checks in seconds
+
+
+**Returns**:
+
+- `bool` - True if TCP proxy became ready, False if timeout
+
+
+
+#### delete
+
+```python
+@async_wrapper("delete")
+async def delete() -> None
+```
+
+Delete the sandbox instance asynchronously.
+
+
+
+#### is\_healthy
+
+```python
+@async_wrapper("is_healthy")
+async def is_healthy() -> bool
+```
+
+Check if the sandbox is healthy and ready for operations asynchronously.
+
+
+
+#### exec
+
+```python
+@property
+def exec() -> "AsyncSandboxExecutor"
+```
+
+Get the async command execution interface.
+
+
+
+#### filesystem
+
+```python
+@property
+def filesystem() -> "AsyncSandboxFilesystem"
+```
+
+Get the async filesystem operations interface.
+
+
+
+#### expose\_port
+
+```python
+@async_wrapper("expose_port")
+async def expose_port(port: int) -> ExposedPort
+```
+
+Expose a port to external connections via TCP proxy asynchronously.
+
+
+
+#### unexpose\_port
+
+```python
+@async_wrapper("unexpose_port")
+async def unexpose_port() -> None
+```
+
+Unexpose a port from external connections asynchronously.
+
+
+
+#### launch\_process
+
+```python
+@async_wrapper("launch_process")
+async def launch_process(cmd: str,
+ cwd: Optional[str] = None,
+ env: Optional[Dict[str, str]] = None) -> str
+```
+
+Launch a background process in the sandbox asynchronously.
+
+
+
+#### kill\_process
+
+```python
+@async_wrapper("kill_process")
+async def kill_process(process_id: str) -> None
+```
+
+Kill a background process by its ID asynchronously.
+
+
+
+#### list\_processes
+
+```python
+@async_wrapper("list_processes")
+async def list_processes() -> List[ProcessInfo]
+```
+
+List all background processes asynchronously.
+
+
+
+#### kill\_all\_processes
+
+```python
+async def kill_all_processes() -> int
+```
+
+Kill all running background processes asynchronously.
+
+
+
+#### \_\_aenter\_\_
+
+```python
+async def __aenter__() -> "AsyncSandbox"
+```
+
+Async context manager entry - returns self.
+
+
+
+#### \_\_aexit\_\_
+
+```python
+async def __aexit__(exc_type, exc_val, exc_tb) -> None
+```
+
+Async context manager exit - automatically deletes the sandbox.
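+
+Since `create` is a coroutine, the instance is awaited first, then used as an
+async context manager; a minimal sketch:
+
+**Example**:
+
+ >>> async with await AsyncSandbox.create(name="ctx-demo") as sandbox:
+ ... result = await sandbox.exec("echo 'inside'")
+ >>> # The sandbox is deleted automatically on exit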
+
+
+
+# koyeb/sandbox.utils
+
+Utility functions for Koyeb Sandbox
+
+
+
+#### DEFAULT\_INSTANCE\_WAIT\_TIMEOUT
+
+Default timeout, in seconds, to wait for a sandbox instance to become ready.
+
+
+
+#### DEFAULT\_POLL\_INTERVAL
+
+Default interval, in seconds, between readiness polls.
+
+
+
+#### DEFAULT\_COMMAND\_TIMEOUT
+
+Default timeout, in seconds, for command execution.
+
+
+
+#### DEFAULT\_HTTP\_TIMEOUT
+
+Default timeout, in seconds, for HTTP requests.
+
+
+
+## IdleTimeoutConfig Objects
+
+```python
+class IdleTimeoutConfig(TypedDict)
+```
+
+Configuration for idle timeout with light and deep sleep.
+
+
+
+#### light\_sleep
+
+Optional. When provided, `deep_sleep` must also be set.
+
+
+
+#### deep\_sleep
+
+Required
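+
+A hedged sketch of the dict form, mirroring the `idle_timeout` parameter of
+`create` (values in seconds):
+
+**Example**:
+
+ >>> idle = {"light_sleep": 300, "deep_sleep": 600}
+ >>> sandbox = Sandbox.create(name="sleepy-sandbox", idle_timeout=idle)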
+
+
+
+#### get\_api\_client
+
+```python
+def get_api_client(
+ api_token: Optional[str] = None,
+ host: Optional[str] = None
+) -> tuple[AppsApi, ServicesApi, InstancesApi, CatalogInstancesApi]
+```
+
+Get configured API clients for Koyeb operations.
+
+**Arguments**:
+
+- `api_token` - Koyeb API token. If not provided, will try to get from KOYEB_API_TOKEN env var
+- `host` - Koyeb API host URL. If not provided, will try to get from KOYEB_API_HOST env var (defaults to https://app.koyeb.com)
+
+
+**Returns**:
+
+ Tuple of (AppsApi, ServicesApi, InstancesApi, CatalogInstancesApi) instances
+
+
+**Raises**:
+
+- `ValueError` - If API token is not provided
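+
+A minimal sketch (assuming `KOYEB_API_TOKEN` is exported):
+
+**Example**:
+
+ >>> apps_api, services_api, instances_api, catalog_api = get_api_client()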
+
+
+
+#### build\_env\_vars
+
+```python
+def build_env_vars(env: Optional[Dict[str, str]]) -> List[DeploymentEnv]
+```
+
+Build environment variables list from dictionary.
+
+**Arguments**:
+
+- `env` - Dictionary of environment variables
+
+
+**Returns**:
+
+ List of DeploymentEnv objects
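+
+For instance, a two-entry mapping yields two `DeploymentEnv` objects:
+
+**Example**:
+
+ >>> env_vars = build_env_vars({"DEBUG": "true", "PORT": "3031"})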
+
+
+
+#### create\_docker\_source
+
+```python
+def create_docker_source(image: str,
+ command_args: List[str],
+ privileged: Optional[bool] = None) -> DockerSource
+```
+
+Create Docker source configuration.
+
+**Arguments**:
+
+- `image` - Docker image name
+- `command_args` - Command and arguments to run (optional, empty list means use image default)
+- `privileged` - If True, run the container in privileged mode (default: None/False)
+
+
+**Returns**:
+
+ DockerSource object
+
+
+
+#### create\_koyeb\_sandbox\_ports
+
+```python
+def create_koyeb_sandbox_ports(protocol: str = "http") -> List[DeploymentPort]
+```
+
+Create port configuration for koyeb/sandbox image.
+
+Creates two ports:
+- Port 3030 exposed on HTTP, mounted on /koyeb-sandbox/
+- Port 3031 exposed with the specified protocol, mounted on /
+
+**Arguments**:
+
+- `protocol` - Protocol to use for port 3031 ("http" or "http2"), defaults to "http"
+
+
+**Returns**:
+
+ List of DeploymentPort objects configured for koyeb/sandbox
+
+
+
+#### create\_koyeb\_sandbox\_proxy\_ports
+
+```python
+def create_koyeb_sandbox_proxy_ports() -> List[DeploymentProxyPort]
+```
+
+Create TCP proxy port configuration for koyeb/sandbox image.
+
+Creates proxy port for direct TCP access:
+- Port 3031 exposed via TCP proxy
+
+**Returns**:
+
+ List of DeploymentProxyPort objects configured for TCP proxy access
+
+
+
+#### create\_koyeb\_sandbox\_routes
+
+```python
+def create_koyeb_sandbox_routes() -> List[DeploymentRoute]
+```
+
+Create route configuration for koyeb/sandbox image to make it publicly accessible.
+
+Creates two routes:
+- Port 3030 accessible at /koyeb-sandbox/
+- Port 3031 accessible at /
+
+**Returns**:
+
+ List of DeploymentRoute objects configured for koyeb/sandbox
+
+
+
+#### create\_deployment\_definition
+
+```python
+def create_deployment_definition(
+ name: str,
+ docker_source: DockerSource,
+ env_vars: List[DeploymentEnv],
+ instance_type: str,
+ exposed_port_protocol: Optional[str] = None,
+ region: Optional[str] = None,
+ routes: Optional[List[DeploymentRoute]] = None,
+ idle_timeout: Optional[IdleTimeout] = None,
+ light_sleep_enabled: bool = True,
+ enable_tcp_proxy: bool = False) -> DeploymentDefinition
+```
+
+Create deployment definition for a sandbox service.
+
+**Arguments**:
+
+- `name` - Service name
+- `docker_source` - Docker configuration
+- `env_vars` - Environment variables
+- `instance_type` - Instance type
+- `exposed_port_protocol` - Protocol to expose ports with, either "http" or
+ "http2". Defaults to "http" when None.
+- `region` - Region to deploy to (defaults to "na")
+- `routes` - List of routes for public access
+- `idle_timeout` - Idle timeout configuration (see IdleTimeout type)
+- `light_sleep_enabled` - Whether light sleep is enabled for the instance type (default: True)
+- `enable_tcp_proxy` - If True, enables TCP proxy for direct TCP access to port 3031
+
+
+**Returns**:
+
+ DeploymentDefinition object
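+
+A hedged sketch of how the helpers in this module compose (argument values are
+illustrative):
+
+**Example**:
+
+ >>> source = create_docker_source("koyeb/sandbox", [])
+ >>> definition = create_deployment_definition(
+ ... name="my-sandbox",
+ ... docker_source=source,
+ ... env_vars=build_env_vars({"DEBUG": "true"}),
+ ... instance_type="nano",
+ ... )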
+
+
+
+#### get\_sandbox\_status
+
+```python
+def get_sandbox_status(instance_id: str,
+ api_token: Optional[str] = None) -> InstanceStatus
+```
+
+Get the current status of a sandbox instance.
+
+
+
+#### is\_sandbox\_healthy
+
+```python
+def is_sandbox_healthy(instance_id: str,
+ sandbox_url: str,
+ sandbox_secret: str,
+ api_token: Optional[str] = None) -> bool
+```
+
+Check if sandbox is healthy and ready for operations.
+
+This function requires both sandbox_url and sandbox_secret to properly check:
+1. The Koyeb instance status (via API) - using instance_id and api_token
+2. The sandbox executor health endpoint (via SandboxClient) - using sandbox_url and sandbox_secret
+
+**Arguments**:
+
+- `instance_id` - The Koyeb instance ID
+- `sandbox_url` - URL of the sandbox executor API (required)
+- `sandbox_secret` - Secret for sandbox executor authentication (required)
+- `api_token` - Koyeb API token
+
+
+**Returns**:
+
+- `bool` - True if sandbox is healthy, False otherwise
+
+
+**Raises**:
+
+- `ValueError` - If sandbox_url or sandbox_secret are not provided
+
+
+
+#### escape\_shell\_arg
+
+```python
+def escape_shell_arg(arg: str) -> str
+```
+
+Escape a shell argument for safe use in shell commands.
+
+**Arguments**:
+
+- `arg` - The argument to escape
+
+
+**Returns**:
+
+ Properly escaped shell argument
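+
+For instance, quoting a path containing spaces before interpolating it into a
+command (assumes an existing `sandbox`; the exact quoting style is an
+implementation detail):
+
+**Example**:
+
+ >>> safe = escape_shell_arg("/tmp/my file.txt")
+ >>> sandbox.exec(f"cat {safe}")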
+
+
+
+#### validate\_port
+
+```python
+def validate_port(port: int) -> None
+```
+
+Validate that a port number is in the valid range.
+
+**Arguments**:
+
+- `port` - Port number to validate
+
+
+**Raises**:
+
+- `ValueError` - If port is not in valid range [1, 65535]
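+
+For example, 3031 passes while 0 is rejected:
+
+**Example**:
+
+ >>> validate_port(3031)  # returns None
+ >>> validate_port(0)  # raises ValueError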
+
+
+
+#### check\_error\_message
+
+```python
+def check_error_message(error_msg: str, error_type: str) -> bool
+```
+
+Check if an error message matches a specific error type.
+Uses case-insensitive matching against known error patterns.
+
+**Arguments**:
+
+- `error_msg` - The error message to check
+- `error_type` - The type of error to check for (key in ERROR_MESSAGES)
+
+
+**Returns**:
+
+ True if error message matches the error type
+
+
+
+#### run\_sync\_in\_executor
+
+```python
+async def run_sync_in_executor(method: Callable[..., Any], *args: Any,
+ **kwargs: Any) -> Any
+```
+
+Run a synchronous method in an async executor.
+
+Helper function to wrap synchronous methods for async execution.
+Used by AsyncSandbox and AsyncSandboxFilesystem to wrap sync parent methods.
+
+**Arguments**:
+
+- `method` - The synchronous method to run
+- `*args` - Positional arguments for the method
+- `**kwargs` - Keyword arguments for the method
+
+
+**Returns**:
+
+ Result of the synchronous method call
+
+
+
+#### async\_wrapper
+
+```python
+def async_wrapper(method_name: str)
+```
+
+Decorator to automatically create async wrapper for sync methods.
+
+This decorator creates an async method that wraps a sync method from the parent class.
+The sync method is called via super() and executed in an executor.
+
+**Arguments**:
+
+- `method_name` - Name of the sync method to wrap (from parent class)
+
+**Usage**:
+
+ >>> @async_wrapper("delete")
+ ... async def delete(self) -> None:
+ ... """Delete the sandbox instance asynchronously."""
+ ... pass # Implementation is handled by the decorator
+
+
+
+#### create\_sandbox\_client
+
+```python
+def create_sandbox_client(sandbox_url: Optional[str],
+ sandbox_secret: Optional[str],
+ existing_client: Optional[Any] = None) -> Any
+```
+
+Create or return existing SandboxClient instance with validation.
+
+Helper function to create SandboxClient instances with consistent validation.
+Used by Sandbox, SandboxExecutor, and SandboxFilesystem to avoid duplication.
+
+**Arguments**:
+
+- `sandbox_url` - The sandbox URL (from _get_sandbox_url() or sandbox._get_sandbox_url())
+- `sandbox_secret` - The sandbox secret
+- `existing_client` - Existing client instance to return if not None
+
+
+**Returns**:
+
+- `SandboxClient` - Configured client instance
+
+
+**Raises**:
+
+- `SandboxError` - If sandbox URL or secret is not available
+
+
+
+## SandboxError Objects
+
+```python
+class SandboxError(Exception)
+```
+
+Base exception for sandbox operations
+
+
+
+## SandboxTimeoutError Objects
+
+```python
+class SandboxTimeoutError(SandboxError)
+```
+
+Raised when a sandbox operation times out
+
+
+
+# koyeb/sandbox.executor\_client
+
+Sandbox Executor API Client
+
+A simple Python client for interacting with the Sandbox Executor API.
+
+
+
+## SandboxClient Objects
+
+```python
+class SandboxClient()
+```
+
+Client for the Sandbox Executor API.
+
+
+
+#### \_\_init\_\_
+
+```python
+def __init__(base_url: str,
+ secret: str,
+ timeout: float = DEFAULT_HTTP_TIMEOUT)
+```
+
+Initialize the Sandbox Client.
+
+**Arguments**:
+
+- `base_url` - The base URL of the sandbox server (e.g., 'http://localhost:8080')
+- `secret` - The authentication secret/token
+- `timeout` - Request timeout in seconds (default: 30)
+
+
+
+#### close
+
+```python
+def close() -> None
+```
+
+Close the HTTP session and release resources.
+
+
+
+#### \_\_enter\_\_
+
+```python
+def __enter__()
+```
+
+Context manager entry - returns self.
+
+
+
+#### \_\_exit\_\_
+
+```python
+def __exit__(exc_type, exc_val, exc_tb) -> None
+```
+
+Context manager exit - automatically closes the session.
+
+
+
+#### \_\_del\_\_
+
+```python
+def __del__()
+```
+
+Clean up session on deletion (fallback, not guaranteed to run).
+
+
+
+#### health
+
+```python
+def health() -> Dict[str, str]
+```
+
+Check the health status of the server.
+
+**Returns**:
+
+ Dict with status information
+
+
+**Raises**:
+
+- `requests.HTTPError` - If the health check fails
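+
+A minimal sketch (the URL and secret are placeholders, as in the examples
+below):
+
+**Example**:
+
+ >>> client = SandboxClient("http://localhost:8080", "secret")
+ >>> client.health()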
+
+
+
+#### run
+
+```python
+def run(cmd: str,
+ cwd: Optional[str] = None,
+ env: Optional[Dict[str, str]] = None,
+ timeout: Optional[float] = None) -> Dict[str, Any]
+```
+
+Execute a shell command in the sandbox.
+
+**Arguments**:
+
+- `cmd` - The shell command to execute
+- `cwd` - Optional working directory for command execution
+- `env` - Optional environment variables to set/override
+- `timeout` - Optional timeout in seconds for the request
+
+
+**Returns**:
+
+ Dict containing stdout, stderr, error (if any), and exit code
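+
+A minimal sketch (placeholder URL and secret; keys follow the return value
+described above):
+
+**Example**:
+
+ >>> client = SandboxClient("http://localhost:8080", "secret")
+ >>> result = client.run("ls -la", cwd="/tmp")
+ >>> print(result["stdout"])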
+
+
+
+#### run\_streaming
+
+```python
+def run_streaming(cmd: str,
+ cwd: Optional[str] = None,
+ env: Optional[Dict[str, str]] = None,
+ timeout: Optional[float] = None) -> Iterator[Dict[str, Any]]
+```
+
+Execute a shell command in the sandbox and stream the output in real-time.
+
+This method uses Server-Sent Events (SSE) to stream command output line-by-line
+as it's produced. Use this for long-running commands where you want real-time
+output. For simple commands where buffered output is acceptable, use run() instead.
+
+**Arguments**:
+
+- `cmd` - The shell command to execute
+- `cwd` - Optional working directory for command execution
+- `env` - Optional environment variables to set/override
+- `timeout` - Optional timeout in seconds for the streaming request
+
+
+**Yields**:
+
+ Dict events with the following types:
+
+ - output event, as the command produces output:
+ `{"stream": "stdout"|"stderr", "data": "line of output"}`
+ - complete event, when the command finishes:
+ `{"code": <exit code>, "error": false}`
+ - error event, if the command fails to start:
+ `{"error": "error message"}`
+
+
+**Example**:
+
+ >>> client = SandboxClient("http://localhost:8080", "secret")
+ >>> for event in client.run_streaming("echo 'Hello'; sleep 1; echo 'World'"):
+ ... if "stream" in event:
+ ... print(f"{event['stream']}: {event['data']}")
+ ... elif "code" in event:
+ ... print(f"Exit code: {event['code']}")
+
+
+
+#### write\_file
+
+```python
+def write_file(path: str, content: str) -> Dict[str, Any]
+```
+
+Write content to a file.
+
+**Arguments**:
+
+- `path` - The file path to write to
+- `content` - The content to write
+
+
+**Returns**:
+
+ Dict with success status and error if any
+
+
+
+#### read\_file
+
+```python
+def read_file(path: str) -> Dict[str, Any]
+```
+
+Read content from a file.
+
+**Arguments**:
+
+- `path` - The file path to read from
+
+
+**Returns**:
+
+ Dict with file content and error if any
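+
+A round-trip sketch combining `write_file` and `read_file` (placeholder URL
+and secret):
+
+**Example**:
+
+ >>> client = SandboxClient("http://localhost:8080", "secret")
+ >>> client.write_file("/tmp/hello.txt", "Hello!")
+ >>> client.read_file("/tmp/hello.txt")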
+
+
+
+#### delete\_file
+
+```python
+def delete_file(path: str) -> Dict[str, Any]
+```
+
+Delete a file.
+
+**Arguments**:
+
+- `path` - The file path to delete
+
+
+**Returns**:
+
+ Dict with success status and error if any
+
+
+
+#### make\_dir
+
+```python
+def make_dir(path: str) -> Dict[str, Any]
+```
+
+Create a directory (including parent directories).
+
+**Arguments**:
+
+- `path` - The directory path to create
+
+
+**Returns**:
+
+ Dict with success status and error if any
+
+
+
+#### delete\_dir
+
+```python
+def delete_dir(path: str) -> Dict[str, Any]
+```
+
+Recursively delete a directory and all its contents.
+
+**Arguments**:
+
+- `path` - The directory path to delete
+
+
+**Returns**:
+
+ Dict with success status and error if any
+
+
+
+#### list\_dir
+
+```python
+def list_dir(path: str) -> Dict[str, Any]
+```
+
+List the contents of a directory.
+
+**Arguments**:
+
+- `path` - The directory path to list
+
+
+**Returns**:
+
+ Dict with entries list and error if any
+
+
+
+#### bind\_port
+
+```python
+def bind_port(port: int) -> Dict[str, Any]
+```
+
+Bind a port to the TCP proxy for external access.
+
+Configures the TCP proxy to forward traffic to the specified port inside the sandbox.
+This allows you to expose services running inside the sandbox to external connections.
+
+**Arguments**:
+
+- `port` - The port number to bind to (must be a valid port number)
+
+
+**Returns**:
+
+ Dict with success status, message, and port information
+
+
+**Notes**:
+
+ - Only one port can be bound at a time
+ - Binding a new port will override the previous binding
+ - The port must be available and accessible within the sandbox environment
+
+
+
+#### unbind\_port
+
+```python
+def unbind_port(port: Optional[int] = None) -> Dict[str, Any]
+```
+
+Unbind a port from the TCP proxy.
+
+Removes the TCP proxy port binding, stopping traffic forwarding to the previously bound port.
+
+**Arguments**:
+
+- `port` - Optional port number to unbind. If provided, it must match the currently bound port.
+ If not provided, any existing binding will be removed.
+
+
+**Returns**:
+
+ Dict with success status and message
+
+
+**Notes**:
+
+ - If a port is specified and doesn't match the currently bound port, the request will fail
+ - After unbinding, the TCP proxy will no longer forward traffic
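+
+A bind/unbind round trip (placeholder URL and secret; assumes the sandbox was
+created with `enable_tcp_proxy=True`):
+
+**Example**:
+
+ >>> client = SandboxClient("http://localhost:8080", "secret")
+ >>> client.bind_port(8000)
+ >>> client.unbind_port(8000)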
+
+
+
+#### start\_process
+
+```python
+def start_process(cmd: str,
+ cwd: Optional[str] = None,
+ env: Optional[Dict[str, str]] = None) -> Dict[str, Any]
+```
+
+Start a background process in the sandbox.
+
+Starts a long-running background process that continues executing even after
+the API call completes. Use this for servers, workers, or other long-running tasks.
+
+**Arguments**:
+
+- `cmd` - The shell command to execute as a background process
+- `cwd` - Optional working directory for the process
+- `env` - Optional environment variables to set/override for the process
+
+
+**Returns**:
+
+ Dict with process id and success status:
+ - id: The unique process ID (UUID string)
+ - success: True if the process was started successfully
+
+
+**Example**:
+
+ >>> client = SandboxClient("http://localhost:8080", "secret")
+ >>> result = client.start_process("python -u server.py")
+ >>> process_id = result["id"]
+ >>> print(f"Started process: {process_id}")
+
+
+
+#### kill\_process
+
+```python
+def kill_process(process_id: str) -> Dict[str, Any]
+```
+
+Kill a background process by its ID.
+
+Terminates a running background process. This sends a SIGTERM signal to the process,
+allowing it to clean up gracefully. If the process doesn't terminate within a timeout,
+it will be forcefully killed with SIGKILL.
+
+**Arguments**:
+
+- `process_id` - The unique process ID (UUID string) to kill
+
+
+**Returns**:
+
+ Dict with success status and error message if any
+
+
+**Example**:
+
+ >>> client = SandboxClient("http://localhost:8080", "secret")
+ >>> result = client.kill_process("550e8400-e29b-41d4-a716-446655440000")
+ >>> if result.get("success"):
+ ... print("Process killed successfully")
+
+
+
+#### list\_processes
+
+```python
+def list_processes() -> Dict[str, Any]
+```
+
+List all background processes.
+
+Returns information about all currently running and recently completed background
+processes. This includes both active processes and processes that have completed
+(which remain in memory until server restart).
+
+**Returns**:
+
+ Dict with a list of processes:
+ - processes: List of process objects, each containing:
+ - id: Process ID (UUID string)
+ - command: The command that was executed
+ - status: Process status (e.g., "running", "completed")
+ - pid: OS process ID (if running)
+ - exit_code: Exit code (if completed)
+ - started_at: ISO 8601 timestamp when process started
+ - completed_at: ISO 8601 timestamp when process completed (if applicable)
+
+
+**Example**:
+
+ >>> client = SandboxClient("http://localhost:8080", "secret")
+ >>> result = client.list_processes()
+ >>> for process in result.get("processes", []):
+ ... print(f"{process['id']}: {process['command']} - {process['status']}")
+
diff --git a/examples/00_run_all.py b/examples/00_run_all.py
new file mode 100644
index 00000000..01ec7598
--- /dev/null
+++ b/examples/00_run_all.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python3
+"""Run all synchronous example scripts in order"""
+
+import subprocess
+import sys
+import time
+from pathlib import Path
+
+
+def main():
+ # Get the examples directory
+ examples_dir = Path(__file__).parent
+
+ # Find all Python files, excluding this script and async variants
+ example_files = sorted(
+ [
+ f
+ for f in examples_dir.glob("*.py")
+ if f.name != "00_run_all.py"
+ and not f.name.endswith("_async.py")
+ ]
+ )
+
+ if not example_files:
+ print("No example files found to run")
+ return 0
+
+ print(f"Found {len(example_files)} example(s) to run\n")
+ print("=" * 70)
+
+ total_start = time.time()
+ results = []
+
+ for example_file in example_files:
+ example_name = example_file.name
+ print(f"\n▶ Running: {example_name}")
+ print("-" * 70)
+
+ start_time = time.time()
+
+ try:
+ # Run the example script
+ result = subprocess.run(
+ [sys.executable, str(example_file)],
+ capture_output=True,
+ text=True,
+ timeout=60, # 60 second timeout per script
+ )
+
+ elapsed_time = time.time() - start_time
+
+ # Print output
+ if result.stdout:
+ print(result.stdout)
+
+ # Check for errors
+ if result.returncode != 0:
+ print(f"\n❌ ERROR in {example_name}")
+ if result.stderr:
+ print("STDERR:")
+ print(result.stderr)
+
+ results.append(
+ {
+ "name": example_name,
+ "status": "FAILED",
+ "time": elapsed_time,
+ "error": result.stderr or "Non-zero exit code",
+ }
+ )
+
+ # Break on error
+ print("\n" + "=" * 70)
+ print("STOPPING: Error encountered")
+ print("=" * 70)
+ print_summary(results, time.time() - total_start)
+ return 1
+ else:
+ results.append(
+ {"name": example_name, "status": "PASSED", "time": elapsed_time}
+ )
+ print(f"✓ Completed in {elapsed_time:.2f}s")
+
+ except subprocess.TimeoutExpired:
+ elapsed_time = time.time() - start_time
+ print(f"\n❌ TIMEOUT in {example_name} after {elapsed_time:.2f}s")
+
+ results.append(
+ {
+ "name": example_name,
+ "status": "TIMEOUT",
+ "time": elapsed_time,
+ "error": "Script exceeded 60 second timeout",
+ }
+ )
+
+ # Break on timeout
+ print("\n" + "=" * 70)
+ print("STOPPING: Timeout encountered")
+ print("=" * 70)
+ print_summary(results, time.time() - total_start)
+ return 1
+
+ except Exception as e:
+ elapsed_time = time.time() - start_time
+ print(f"\n❌ EXCEPTION in {example_name}: {e}")
+
+ results.append(
+ {
+ "name": example_name,
+ "status": "ERROR",
+ "time": elapsed_time,
+ "error": str(e),
+ }
+ )
+
+ # Break on exception
+ print("\n" + "=" * 70)
+ print("STOPPING: Exception encountered")
+ print("=" * 70)
+ print_summary(results, time.time() - total_start)
+ return 1
+
+ total_time = time.time() - total_start
+
+ # Print summary
+ print("\n" + "=" * 70)
+ print("ALL EXAMPLES COMPLETED SUCCESSFULLY")
+ print("=" * 70)
+ print_summary(results, total_time)
+
+ return 0
+
+
+def print_summary(results, total_time):
+ """Print execution summary"""
+ print("\n📊 EXECUTION SUMMARY")
+ print("-" * 70)
+
+ for result in results:
+ status_symbol = {
+ "PASSED": "✓",
+ "FAILED": "❌",
+ "TIMEOUT": "⏱",
+ "ERROR": "❌",
+ }.get(result["status"], "?")
+
+ print(
+ f"{status_symbol} {result['name']:40s} {result['time']:>6.2f}s {result['status']}"
+ )
+
+ if "error" in result:
+ error_preview = result["error"].split("\n")[0][:50]
+ print(f" Error: {error_preview}")
+
+ print("-" * 70)
+ print(f"Total execution time: {total_time:.2f}s")
+
+ passed = sum(1 for r in results if r["status"] == "PASSED")
+ total = len(results)
+ print(f"Results: {passed}/{total} passed")
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/examples/00_run_all_async.py b/examples/00_run_all_async.py
new file mode 100644
index 00000000..6007e7c4
--- /dev/null
+++ b/examples/00_run_all_async.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python3
+"""Run all asynchronous example scripts in order"""
+
+import asyncio
+import sys
+import time
+from pathlib import Path
+
+
+async def run_example(example_file):
+ """Run a single example script and return results"""
+ example_name = example_file.name
+ print(f"\n▶ Running: {example_name}")
+ print("-" * 70)
+
+ start_time = time.time()
+
+ try:
+ # Run the example script
+ process = await asyncio.create_subprocess_exec(
+ sys.executable,
+ str(example_file),
+ stdout=asyncio.subprocess.PIPE,
+ stderr=asyncio.subprocess.PIPE
+ )
+
+ try:
+ stdout, stderr = await asyncio.wait_for(
+ process.communicate(),
+ timeout=60 # 60 second timeout per script
+ )
+ except asyncio.TimeoutError:
+ process.kill()
+ await process.wait()
+ elapsed_time = time.time() - start_time
+ print(f"\n❌ TIMEOUT in {example_name} after {elapsed_time:.2f}s")
+ return {
+ "name": example_name,
+ "status": "TIMEOUT",
+ "time": elapsed_time,
+ "error": "Script exceeded 60 second timeout"
+ }
+
+ elapsed_time = time.time() - start_time
+
+ # Print output
+ if stdout:
+ print(stdout.decode())
+
+ # Check for errors
+ if process.returncode != 0:
+ print(f"\n❌ ERROR in {example_name}")
+ if stderr:
+ print("STDERR:")
+ print(stderr.decode())
+
+ return {
+ "name": example_name,
+ "status": "FAILED",
+ "time": elapsed_time,
+ "error": stderr.decode() if stderr else "Non-zero exit code"
+ }
+ else:
+ print(f"✓ Completed in {elapsed_time:.2f}s")
+ return {
+ "name": example_name,
+ "status": "PASSED",
+ "time": elapsed_time
+ }
+
+ except Exception as e:
+ elapsed_time = time.time() - start_time
+ print(f"\n❌ EXCEPTION in {example_name}: {e}")
+
+ return {
+ "name": example_name,
+ "status": "ERROR",
+ "time": elapsed_time,
+ "error": str(e)
+ }
+
+
+async def main():
+ # Get the examples directory
+ examples_dir = Path(__file__).parent
+
+ # Find all async Python files, excluding this script
+ example_files = sorted([
+ f for f in examples_dir.glob("*_async.py")
+ if f.name != "00_run_all_async.py"
+ ])
+
+ if not example_files:
+ print("No async example files found to run")
+ return 0
+
+ print(f"Found {len(example_files)} async example(s) to run\n")
+ print("=" * 70)
+
+ total_start = time.time()
+ results = []
+
+ # Run examples sequentially to maintain order and stop on first error
+ for example_file in example_files:
+ result = await run_example(example_file)
+ results.append(result)
+
+ # Break on error
+ if result["status"] in ["FAILED", "TIMEOUT", "ERROR"]:
+ print("\n" + "=" * 70)
+ print("STOPPING: Error encountered")
+ print("=" * 70)
+ print_summary(results, time.time() - total_start)
+ return 1
+
+ total_time = time.time() - total_start
+
+ # Print summary
+ print("\n" + "=" * 70)
+ print("ALL ASYNC EXAMPLES COMPLETED SUCCESSFULLY")
+ print("=" * 70)
+ print_summary(results, total_time)
+
+ return 0
+
+
+def print_summary(results, total_time):
+ """Print execution summary"""
+ print("\n📊 EXECUTION SUMMARY")
+ print("-" * 70)
+
+ for result in results:
+ status_symbol = {
+ "PASSED": "✓",
+ "FAILED": "❌",
+ "TIMEOUT": "⏱",
+ "ERROR": "❌"
+ }.get(result["status"], "?")
+
+ print(f"{status_symbol} {result['name']:40s} {result['time']:>6.2f}s {result['status']}")
+
+ if "error" in result:
+ error_preview = result["error"].split("\n")[0][:50]
+ print(f" Error: {error_preview}")
+
+ print("-" * 70)
+ print(f"Total execution time: {total_time:.2f}s")
+
+ passed = sum(1 for r in results if r["status"] == "PASSED")
+ total = len(results)
+ print(f"Results: {passed}/{total} passed")
+
+
+if __name__ == "__main__":
+ sys.exit(asyncio.run(main()))
diff --git a/examples/01_create_sandbox.py b/examples/01_create_sandbox.py
new file mode 100644
index 00000000..1ca091c2
--- /dev/null
+++ b/examples/01_create_sandbox.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python3
+"""Create and manage a sandbox"""
+
+import os
+
+from koyeb import Sandbox
+
+
+def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = Sandbox.create(
+ image="koyeb/sandbox",
+ name="example-sandbox",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ # Check health
+ is_healthy = sandbox.is_healthy()
+ print(f"Healthy: {is_healthy}")
+
+ # Test command
+ result = sandbox.exec("echo 'Sandbox is ready!'")
+ print(result.stdout.strip())
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ sandbox.delete()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/01_create_sandbox_async.py b/examples/01_create_sandbox_async.py
new file mode 100644
index 00000000..dcdf0199
--- /dev/null
+++ b/examples/01_create_sandbox_async.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python3
+"""Create and manage a sandbox (async variant)"""
+
+import asyncio
+import os
+
+from koyeb import AsyncSandbox
+
+
+async def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = await AsyncSandbox.create(
+ image="koyeb/sandbox",
+ name="example-sandbox",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ # Check health
+ is_healthy = await sandbox.is_healthy()
+ print(f"Healthy: {is_healthy}")
+
+ # Test command
+ result = await sandbox.exec("echo 'Sandbox is ready!'")
+ print(result.stdout.strip())
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ await sandbox.delete()
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/examples/02_create_sandbox_with_timing.py b/examples/02_create_sandbox_with_timing.py
new file mode 100644
index 00000000..d711cdc7
--- /dev/null
+++ b/examples/02_create_sandbox_with_timing.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python3
+"""Create and manage a sandbox with detailed timing information for debugging"""
+
+import argparse
+import os
+import time
+from collections import defaultdict
+from datetime import datetime
+
+
+from koyeb import Sandbox
+
+
+class TimingTracker:
+ """Track timing information for operations"""
+ def __init__(self):
+ self.operations = []
+ self.categories = defaultdict(list)
+
+ def record(self, name, duration, category="general"):
+ """Record an operation's timing"""
+ self.operations.append({
+ 'name': name,
+ 'duration': duration,
+ 'category': category,
+ 'timestamp': datetime.now()
+ })
+ self.categories[category].append(duration)
+
+ def get_total_time(self):
+ """Get total time for all operations"""
+ return sum(op['duration'] for op in self.operations)
+
+ def get_category_total(self, category):
+ """Get total time for a specific category"""
+ return sum(self.categories[category])
+
+ def print_recap(self):
+ """Print a detailed recap of all timings"""
+ print("\n" + "="*70)
+ print(" TIMING SUMMARY")
+ print("="*70)
+
+ if not self.operations:
+ print("No operations recorded")
+ return
+
+ total_time = self.get_total_time()
+
+ # Print individual operations
+ print()
+
+ for op in self.operations:
+ percentage = (op['duration'] / total_time * 100) if total_time > 0 else 0
+ bar_length = int(percentage / 2) # 50 chars = 100%
+ bar = "█" * bar_length
+
+ print(f" {op['name']:<30} {op['duration']:6.2f}s {percentage:5.1f}% {bar}")
+
+ print()
+ print("-" * 70)
+ print(f" {'TOTAL':<30} {total_time:6.2f}s 100.0%")
+ print("="*70)
+
+
+def main(run_long_tests=False):
+ tracker = TimingTracker()
+
+ print("Starting sandbox operations...")
+
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ # Create sandbox with timing
+ print(" → Creating sandbox...")
+ create_start = time.time()
+ sandbox = Sandbox.create(
+ image="koyeb/sandbox",
+ name="example-sandbox-timed",
+ wait_ready=True,
+ api_token=api_token,
+ )
+ create_duration = time.time() - create_start
+ tracker.record("Sandbox creation", create_duration, "setup")
+ print(f" ✓ took {create_duration:.1f}s")
+
+ # Check health with timing
+ print(" → Checking sandbox health...")
+ health_start = time.time()
+ is_healthy = sandbox.is_healthy()
+ health_duration = time.time() - health_start
+ tracker.record("Health check", health_duration, "monitoring")
+ print(f" ✓ took {health_duration:.1f}s")
+
+ # Test command execution with timing
+ print(" → Executing initial test command...")
+ exec_start = time.time()
+ result = sandbox.exec("echo 'Sandbox is ready!'")
+ exec_duration = time.time() - exec_start
+ tracker.record("Initial exec command", exec_duration, "execution")
+ print(f" ✓ took {exec_duration:.1f}s")
+
+ if run_long_tests:
+ # Long test 1: Install a package
+ print(" → [LONG TEST] Installing a package...")
+ install_start = time.time()
+ result = sandbox.exec("pip install requests")
+ install_duration = time.time() - install_start
+ tracker.record("Package installation", install_duration, "long_tests")
+ print(f" ✓ took {install_duration:.1f}s")
+
+ # Long test 2: Run a computation
+ print(" → [LONG TEST] Running computation...")
+ compute_start = time.time()
+ result = sandbox.exec("python -c 'import time; sum(range(10000000)); time.sleep(2)'")
+ compute_duration = time.time() - compute_start
+ tracker.record("Heavy computation", compute_duration, "long_tests")
+ print(f" ✓ took {compute_duration:.1f}s")
+
+ # Long test 3: Multiple health checks
+ print(" → [LONG TEST] Multiple health checks...")
+ multi_check_start = time.time()
+ for _ in range(5):
+ sandbox.is_healthy()
+ time.sleep(0.5)
+ multi_check_duration = time.time() - multi_check_start
+ tracker.record("Multiple health checks (5x)", multi_check_duration, "long_tests")
+ print(f" ✓ took {multi_check_duration:.1f}s")
+
+ except Exception as e:
+ print(f"\n✗ Error occurred: {e}")
+ import traceback
+ traceback.print_exc()
+ finally:
+ if sandbox:
+ print(" → Deleting sandbox...")
+ delete_start = time.time()
+ sandbox.delete()
+ delete_duration = time.time() - delete_start
+ tracker.record("Sandbox deletion", delete_duration, "cleanup")
+ print(f" ✓ took {delete_duration:.1f}s")
+
+ print("\n✓ All operations completed")
+
+ # Print detailed recap
+ tracker.print_recap()
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Create and manage a sandbox with detailed timing information"
+ )
+ parser.add_argument(
+ "--long",
+ action="store_true",
+ help="Run longer tests (package installation, computation, etc.)"
+ )
+
+ args = parser.parse_args()
+ main(run_long_tests=args.long)
diff --git a/examples/02_create_sandbox_with_timing_async.py b/examples/02_create_sandbox_with_timing_async.py
new file mode 100644
index 00000000..90bce722
--- /dev/null
+++ b/examples/02_create_sandbox_with_timing_async.py
@@ -0,0 +1,174 @@
+#!/usr/bin/env python3
+"""Create and manage a sandbox with detailed timing information for debugging (async variant)"""
+
+import argparse
+import asyncio
+import os
+import time
+from collections import defaultdict
+from datetime import datetime
+
+from koyeb import AsyncSandbox
+
+
+class TimingTracker:
+ """Track timing information for operations"""
+
+ def __init__(self):
+ self.operations = []
+ self.categories = defaultdict(list)
+
+ def record(self, name, duration, category="general"):
+ """Record an operation's timing"""
+ self.operations.append(
+ {
+ "name": name,
+ "duration": duration,
+ "category": category,
+ "timestamp": datetime.now(),
+ }
+ )
+ self.categories[category].append(duration)
+
+ def get_total_time(self):
+ """Get total time for all operations"""
+ return sum(op["duration"] for op in self.operations)
+
+ def get_category_total(self, category):
+ """Get total time for a specific category"""
+ return sum(self.categories[category])
+
+ def print_recap(self):
+ """Print a detailed recap of all timings"""
+ print("\n" + "=" * 70)
+ print(" TIMING SUMMARY")
+ print("=" * 70)
+
+ if not self.operations:
+ print("No operations recorded")
+ return
+
+ total_time = self.get_total_time()
+
+ # Print individual operations
+ print()
+
+ for op in self.operations:
+ percentage = (op["duration"] / total_time * 100) if total_time > 0 else 0
+ bar_length = int(percentage / 2) # 50 chars = 100%
+ bar = "█" * bar_length
+
+ print(
+ f" {op['name']:<30} {op['duration']:6.2f}s {percentage:5.1f}% {bar}"
+ )
+
+ print()
+ print("-" * 70)
+ print(f" {'TOTAL':<30} {total_time:6.2f}s 100.0%")
+ print("=" * 70)
+
+
+async def main(run_long_tests=False):
+ tracker = TimingTracker()
+
+ print("Starting sandbox operations...")
+
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ # Create sandbox with timing
+ print(" → Creating sandbox...")
+ create_start = time.time()
+ sandbox = await AsyncSandbox.create(
+ image="koyeb/sandbox",
+ name="example-sandbox-timed",
+ wait_ready=True,
+ api_token=api_token,
+ )
+ create_duration = time.time() - create_start
+ tracker.record("Sandbox creation", create_duration, "setup")
+ print(f" ✓ took {create_duration:.1f}s")
+
+ # Check health with timing
+ print(" → Checking sandbox health...")
+ health_start = time.time()
+ await sandbox.is_healthy()
+ health_duration = time.time() - health_start
+ tracker.record("Health check", health_duration, "monitoring")
+ print(f" ✓ took {health_duration:.1f}s")
+
+ # Test command execution with timing
+ print(" → Executing initial test command...")
+ exec_start = time.time()
+ await sandbox.exec("echo 'Sandbox is ready!'")
+ exec_duration = time.time() - exec_start
+ tracker.record("Initial exec command", exec_duration, "execution")
+ print(f" ✓ took {exec_duration:.1f}s")
+
+ if run_long_tests:
+ # Long test 1: Install a package
+ print(" → [LONG TEST] Installing a package...")
+ install_start = time.time()
+ await sandbox.exec("pip install requests")
+ install_duration = time.time() - install_start
+ tracker.record("Package installation", install_duration, "long_tests")
+ print(f" ✓ took {install_duration:.1f}s")
+
+ # Long test 2: Run a computation
+ print(" → [LONG TEST] Running computation...")
+ compute_start = time.time()
+ await sandbox.exec(
+ "python -c 'import time; sum(range(10000000)); time.sleep(2)'"
+ )
+ compute_duration = time.time() - compute_start
+ tracker.record("Heavy computation", compute_duration, "long_tests")
+ print(f" ✓ took {compute_duration:.1f}s")
+
+ # Long test 3: Multiple health checks
+ print(" → [LONG TEST] Multiple health checks...")
+ multi_check_start = time.time()
+ for _ in range(5):
+ await sandbox.is_healthy()
+ await asyncio.sleep(0.5)
+ multi_check_duration = time.time() - multi_check_start
+ tracker.record(
+ "Multiple health checks (5x)", multi_check_duration, "long_tests"
+ )
+ print(f" ✓ took {multi_check_duration:.1f}s")
+
+ except Exception as e:
+ print(f"\n✗ Error occurred: {e}")
+ import traceback
+
+ traceback.print_exc()
+ finally:
+ if sandbox:
+ print(" → Deleting sandbox...")
+ delete_start = time.time()
+ await sandbox.delete()
+ delete_duration = time.time() - delete_start
+ tracker.record("Sandbox deletion", delete_duration, "cleanup")
+ print(f" ✓ took {delete_duration:.1f}s")
+
+ print("\n✓ All operations completed")
+
+ # Print detailed recap
+ tracker.print_recap()
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Create and manage a sandbox with detailed timing information"
+ )
+ parser.add_argument(
+ "--long",
+ action="store_true",
+ help="Run longer tests (package installation, computation, etc.)",
+ )
+
+ args = parser.parse_args()
+ asyncio.run(main(run_long_tests=args.long))
diff --git a/examples/03_basic_commands.py b/examples/03_basic_commands.py
new file mode 100644
index 00000000..6305b44a
--- /dev/null
+++ b/examples/03_basic_commands.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+"""Basic command execution"""
+
+import os
+
+from koyeb import Sandbox
+
+
+def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = Sandbox.create(
+ image="koyeb/sandbox",
+ name="basic-commands",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ # Simple command
+ result = sandbox.exec("echo 'Hello World'")
+ print(result.stdout.strip())
+
+ # Python command
+ result = sandbox.exec("python3 -c 'print(2 + 2)'")
+ print(result.stdout.strip())
+
+ # Multi-line Python script
+ result = sandbox.exec(
+ '''python3 -c "
+import sys
+print(f'Python version: {sys.version.split()[0]}')
+print(f'Platform: {sys.platform}')
+"'''
+ )
+ print(result.stdout.strip())
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ sandbox.delete()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/03_basic_commands_async.py b/examples/03_basic_commands_async.py
new file mode 100644
index 00000000..92c75749
--- /dev/null
+++ b/examples/03_basic_commands_async.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python3
+"""Basic command execution (async variant)"""
+
+import asyncio
+import os
+
+from koyeb import AsyncSandbox
+
+
+async def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = await AsyncSandbox.create(
+ image="koyeb/sandbox",
+ name="basic-commands",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ # Simple command
+ result = await sandbox.exec("echo 'Hello World'")
+ print(result.stdout.strip())
+
+ # Python command
+ result = await sandbox.exec("python3 -c 'print(2 + 2)'")
+ print(result.stdout.strip())
+
+ # Multi-line Python script
+ result = await sandbox.exec(
+ '''python3 -c "
+import sys
+print(f'Python version: {sys.version.split()[0]}')
+print(f'Platform: {sys.platform}')
+"'''
+ )
+ print(result.stdout.strip())
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ await sandbox.delete()
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/examples/04_streaming_output.py b/examples/04_streaming_output.py
new file mode 100644
index 00000000..9ce5e2d2
--- /dev/null
+++ b/examples/04_streaming_output.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python3
+"""Streaming command output"""
+
+import os
+
+from koyeb import Sandbox
+
+
+def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = Sandbox.create(
+ image="koyeb/sandbox",
+ name="streaming",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ # Stream output in real-time
+ result = sandbox.exec(
+ '''python3 -c "
+import time
+for i in range(5):
+ print(f'Line {i+1}')
+ time.sleep(0.5)
+"''',
+ on_stdout=lambda data: print(data.strip(), end=" "),
+ on_stderr=lambda data: print(f"ERR: {data.strip()}"),
+ )
+ print(f"\nExit code: {result.exit_code}")
+
+ # Stream a script
+ sandbox.filesystem.write_file(
+ "/tmp/counter.py",
+ "#!/usr/bin/env python3\nimport time\nfor i in range(1, 6):\n print(f'Count: {i}')\n time.sleep(0.3)\nprint('Done!')\n",
+ )
+ sandbox.exec("chmod +x /tmp/counter.py")
+
+ result = sandbox.exec(
+ "python3 /tmp/counter.py",
+ on_stdout=lambda data: print(data.strip()),
+ )
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ sandbox.delete()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/04_streaming_output_async.py b/examples/04_streaming_output_async.py
new file mode 100644
index 00000000..98b0270c
--- /dev/null
+++ b/examples/04_streaming_output_async.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+"""Streaming command output (async variant)"""
+
+import asyncio
+import os
+
+from koyeb import AsyncSandbox
+
+
+async def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = await AsyncSandbox.create(
+ image="koyeb/sandbox",
+ name="streaming",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ # Stream output in real-time
+ result = await sandbox.exec(
+ '''python3 -c "
+import time
+for i in range(5):
+ print(f'Line {i+1}')
+ time.sleep(0.5)
+"''',
+ on_stdout=lambda data: print(data.strip(), end=" "),
+ on_stderr=lambda data: print(f"ERR: {data.strip()}"),
+ )
+ print(f"\nExit code: {result.exit_code}")
+
+ # Stream a script
+ await sandbox.filesystem.write_file(
+ "/tmp/counter.py",
+ "#!/usr/bin/env python3\nimport time\nfor i in range(1, 6):\n print(f'Count: {i}')\n time.sleep(0.3)\nprint('Done!')\n",
+ )
+ await sandbox.exec("chmod +x /tmp/counter.py")
+
+ result = await sandbox.exec(
+ "python3 /tmp/counter.py",
+ on_stdout=lambda data: print(data.strip()),
+ )
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ await sandbox.delete()
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/examples/05_environment_variables.py b/examples/05_environment_variables.py
new file mode 100644
index 00000000..cfb1c8a8
--- /dev/null
+++ b/examples/05_environment_variables.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python3
+"""Environment variables in commands"""
+
+import os
+
+from koyeb import Sandbox
+
+
+def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = Sandbox.create(
+ image="koyeb/sandbox",
+ name="env-vars",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ # Set environment variables
+ env_vars = {"MY_VAR": "Hello", "DEBUG": "true"}
+ result = sandbox.exec("env | grep MY_VAR", env=env_vars)
+ print(result.stdout.strip())
+
+ # Use in Python command
+ result = sandbox.exec(
+ 'python3 -c "import os; print(os.getenv(\'MY_VAR\'))"',
+ env={"MY_VAR": "Hello from Python!"},
+ )
+ print(result.stdout.strip())
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ sandbox.delete()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/05_environment_variables_async.py b/examples/05_environment_variables_async.py
new file mode 100644
index 00000000..a91cd1af
--- /dev/null
+++ b/examples/05_environment_variables_async.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+"""Environment variables in commands (async variant)"""
+
+import asyncio
+import os
+
+from koyeb import AsyncSandbox
+
+
+async def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = await AsyncSandbox.create(
+ image="koyeb/sandbox",
+ name="env-vars",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ # Set environment variables
+ env_vars = {"MY_VAR": "Hello", "DEBUG": "true"}
+ result = await sandbox.exec("env | grep MY_VAR", env=env_vars)
+ print(result.stdout.strip())
+
+ # Use in Python command
+ result = await sandbox.exec(
+ 'python3 -c "import os; print(os.getenv(\'MY_VAR\'))"',
+ env={"MY_VAR": "Hello from Python!"},
+ )
+ print(result.stdout.strip())
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ await sandbox.delete()
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/examples/06_working_directory.py b/examples/06_working_directory.py
new file mode 100644
index 00000000..ae190d54
--- /dev/null
+++ b/examples/06_working_directory.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+"""Working directory for commands"""
+
+import os
+
+from koyeb import Sandbox
+
+
+def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = Sandbox.create(
+ image="koyeb/sandbox",
+ name="working-dir",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ # Setup: create directory structure
+ sandbox.exec("mkdir -p /tmp/my_project/src")
+ sandbox.exec("echo 'print(\\\"hello\\\")' > /tmp/my_project/src/main.py")
+
+ # Run command in specific directory
+ result = sandbox.exec("pwd", cwd="/tmp/my_project")
+ print(result.stdout.strip())
+
+ # List files in working directory
+ result = sandbox.exec("ls -la", cwd="/tmp/my_project")
+ print(result.stdout.strip())
+
+ # Use relative paths
+ result = sandbox.exec("cat src/main.py", cwd="/tmp/my_project")
+ print(result.stdout.strip())
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ sandbox.delete()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/06_working_directory_async.py b/examples/06_working_directory_async.py
new file mode 100644
index 00000000..967d9329
--- /dev/null
+++ b/examples/06_working_directory_async.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python3
+"""Working directory for commands (async variant)"""
+
+import asyncio
+import os
+
+from koyeb import AsyncSandbox
+
+
+async def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = await AsyncSandbox.create(
+ image="koyeb/sandbox",
+ name="working-dir",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ # Setup: create directory structure
+ await sandbox.exec("mkdir -p /tmp/my_project/src")
+ await sandbox.exec("echo 'print(\\\"hello\\\")' > /tmp/my_project/src/main.py")
+
+ # Run command in specific directory
+ result = await sandbox.exec("pwd", cwd="/tmp/my_project")
+ print(result.stdout.strip())
+
+ # List files in working directory
+ result = await sandbox.exec("ls -la", cwd="/tmp/my_project")
+ print(result.stdout.strip())
+
+ # Use relative paths
+ result = await sandbox.exec("cat src/main.py", cwd="/tmp/my_project")
+ print(result.stdout.strip())
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ await sandbox.delete()
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/examples/07_file_operations.py b/examples/07_file_operations.py
new file mode 100644
index 00000000..2593a465
--- /dev/null
+++ b/examples/07_file_operations.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python3
+"""Basic file operations"""
+
+import os
+
+from koyeb import Sandbox
+
+
+def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = Sandbox.create(
+ image="koyeb/sandbox",
+ name="file-ops",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ fs = sandbox.filesystem
+
+ # Write file
+ content = "Hello, Koyeb Sandbox!\nThis is a test file."
+ fs.write_file("/tmp/hello.txt", content)
+
+ # Read file
+ file_info = fs.read_file("/tmp/hello.txt")
+ print(file_info.content)
+
+ # Write Python script
+ python_code = "#!/usr/bin/env python3\nprint('Hello from Python!')\n"
+ fs.write_file("/tmp/script.py", python_code)
+ sandbox.exec("chmod +x /tmp/script.py")
+ result = sandbox.exec("/tmp/script.py")
+ print(result.stdout.strip())
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ sandbox.delete()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/07_file_operations_async.py b/examples/07_file_operations_async.py
new file mode 100644
index 00000000..2b683ce1
--- /dev/null
+++ b/examples/07_file_operations_async.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+"""Basic file operations (async variant)"""
+
+import asyncio
+import os
+
+from koyeb import AsyncSandbox
+
+
+async def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = await AsyncSandbox.create(
+ image="koyeb/sandbox",
+ name="file-ops",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ fs = sandbox.filesystem
+
+ # Write file
+ content = "Hello, Koyeb Sandbox!\nThis is a test file."
+ await fs.write_file("/tmp/hello.txt", content)
+
+ # Read file
+ file_info = await fs.read_file("/tmp/hello.txt")
+ print(file_info.content)
+
+ # Write Python script
+ python_code = "#!/usr/bin/env python3\nprint('Hello from Python!')\n"
+ await fs.write_file("/tmp/script.py", python_code)
+ await sandbox.exec("chmod +x /tmp/script.py")
+ result = await sandbox.exec("/tmp/script.py")
+ print(result.stdout.strip())
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ await sandbox.delete()
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/examples/08_directory_operations.py b/examples/08_directory_operations.py
new file mode 100644
index 00000000..e9c7b589
--- /dev/null
+++ b/examples/08_directory_operations.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+"""Directory operations"""
+
+import os
+
+from koyeb import Sandbox
+
+
+def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = Sandbox.create(
+ image="koyeb/sandbox",
+ name="directory-ops",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ fs = sandbox.filesystem
+
+ # Create directory
+ fs.mkdir("/tmp/my_project")
+
+ # Create nested directories
+ fs.mkdir("/tmp/my_project/src/utils", recursive=True)
+
+ # List directory
+ contents = fs.list_dir("/tmp/my_project")
+ print(f"Contents: {contents}")
+
+ # Create project structure
+ fs.mkdir("/tmp/my_project/src", recursive=True)
+ fs.mkdir("/tmp/my_project/tests", recursive=True)
+ fs.write_file("/tmp/my_project/src/main.py", "print('Hello')")
+ fs.write_file("/tmp/my_project/README.md", "# My Project")
+
+ # Check if path exists
+ exists = fs.exists("/tmp/my_project")
+ is_dir = fs.is_dir("/tmp/my_project")
+ is_file = fs.is_file("/tmp/my_project/src/main.py")
+ print(f"Exists: {exists}, Is dir: {is_dir}, Is file: {is_file}")
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ sandbox.delete()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/08_directory_operations_async.py b/examples/08_directory_operations_async.py
new file mode 100644
index 00000000..fd646b69
--- /dev/null
+++ b/examples/08_directory_operations_async.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python3
+"""Directory operations (async variant)"""
+
+import asyncio
+import os
+
+from koyeb import AsyncSandbox
+
+
+async def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = await AsyncSandbox.create(
+ image="koyeb/sandbox",
+ name="directory-ops",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ fs = sandbox.filesystem
+
+ # Create directory
+ await fs.mkdir("/tmp/my_project")
+
+ # Create nested directories
+ await fs.mkdir("/tmp/my_project/src/utils", recursive=True)
+
+ # List directory
+ contents = await fs.list_dir("/tmp/my_project")
+ print(f"Contents: {contents}")
+
+ # Create project structure
+ await fs.mkdir("/tmp/my_project/src", recursive=True)
+ await fs.mkdir("/tmp/my_project/tests", recursive=True)
+ await fs.write_file("/tmp/my_project/src/main.py", "print('Hello')")
+ await fs.write_file("/tmp/my_project/README.md", "# My Project")
+
+ # Check if path exists
+ exists = await fs.exists("/tmp/my_project")
+ is_dir = await fs.is_dir("/tmp/my_project")
+ is_file = await fs.is_file("/tmp/my_project/src/main.py")
+ print(f"Exists: {exists}, Is dir: {is_dir}, Is file: {is_file}")
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ await sandbox.delete()
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/examples/09_binary_files.py b/examples/09_binary_files.py
new file mode 100644
index 00000000..fa83f0e6
--- /dev/null
+++ b/examples/09_binary_files.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+"""Binary file operations"""
+
+import base64
+import os
+
+from koyeb import Sandbox
+
+
+def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = Sandbox.create(
+ image="koyeb/sandbox",
+ name="binary-files",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ fs = sandbox.filesystem
+
+ # Write binary data
+ binary_data = b"Binary data: \x00\x01\x02\x03\xff\xfe\xfd"
+ base64_data = base64.b64encode(binary_data).decode("utf-8")
+ fs.write_file("/tmp/binary.bin", base64_data, encoding="base64")
+
+ # Read binary data
+ file_info = fs.read_file("/tmp/binary.bin", encoding="base64")
+ decoded = base64.b64decode(file_info.content)
+ print(f"Original: {binary_data}")
+ print(f"Decoded: {decoded}")
+ assert binary_data == decoded
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ sandbox.delete()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/09_binary_files_async.py b/examples/09_binary_files_async.py
new file mode 100644
index 00000000..2de90ae5
--- /dev/null
+++ b/examples/09_binary_files_async.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+"""Binary file operations (async variant)"""
+
+import asyncio
+import base64
+import os
+
+from koyeb import AsyncSandbox
+
+
+async def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = await AsyncSandbox.create(
+ image="koyeb/sandbox",
+ name="binary-files",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ fs = sandbox.filesystem
+
+ # Write binary data
+ binary_data = b"Binary data: \x00\x01\x02\x03\xff\xfe\xfd"
+ base64_data = base64.b64encode(binary_data).decode("utf-8")
+ await fs.write_file("/tmp/binary.bin", base64_data, encoding="base64")
+
+ # Read binary data
+ file_info = await fs.read_file("/tmp/binary.bin", encoding="base64")
+ decoded = base64.b64decode(file_info.content)
+ print(f"Original: {binary_data}")
+ print(f"Decoded: {decoded}")
+ assert binary_data == decoded
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ await sandbox.delete()
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/examples/10_batch_operations.py b/examples/10_batch_operations.py
new file mode 100644
index 00000000..c987e0c0
--- /dev/null
+++ b/examples/10_batch_operations.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python3
+"""Batch file operations"""
+
+import os
+
+from koyeb import Sandbox
+
+
+def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = Sandbox.create(
+ image="koyeb/sandbox",
+ name="batch-ops",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ fs = sandbox.filesystem
+
+ # Write multiple files at once
+ files_to_create = [
+ {"path": "/tmp/file1.txt", "content": "Content of file 1"},
+ {"path": "/tmp/file2.txt", "content": "Content of file 2"},
+ {"path": "/tmp/file3.txt", "content": "Content of file 3"},
+ ]
+
+ fs.write_files(files_to_create)
+ print("Created 3 files")
+
+ # Verify
+ created_files = fs.ls("/tmp")
+ batch_files = [f for f in created_files if f.startswith("file")]
+ print(f"Files: {batch_files}")
+
+ # Create project structure
+ project_files = [
+ {"path": "/tmp/project/main.py", "content": "print('Hello')"},
+ {"path": "/tmp/project/utils.py", "content": "def helper(): pass"},
+ {"path": "/tmp/project/README.md", "content": "# My Project"},
+ ]
+
+ fs.mkdir("/tmp/project", recursive=True)
+ fs.write_files(project_files)
+ print("Created project structure")
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ sandbox.delete()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/10_batch_operations_async.py b/examples/10_batch_operations_async.py
new file mode 100644
index 00000000..e0b26176
--- /dev/null
+++ b/examples/10_batch_operations_async.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python3
+"""Batch file operations (async variant)"""
+
+import asyncio
+import os
+
+from koyeb import AsyncSandbox
+
+
+async def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = await AsyncSandbox.create(
+ image="koyeb/sandbox",
+ name="batch-ops",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ fs = sandbox.filesystem
+
+ # Write multiple files at once
+ files_to_create = [
+ {"path": "/tmp/file1.txt", "content": "Content of file 1"},
+ {"path": "/tmp/file2.txt", "content": "Content of file 2"},
+ {"path": "/tmp/file3.txt", "content": "Content of file 3"},
+ ]
+
+ await fs.write_files(files_to_create)
+ print("Created 3 files")
+
+ # Verify
+ created_files = await fs.ls("/tmp")
+ batch_files = [f for f in created_files if f.startswith("file")]
+ print(f"Files: {batch_files}")
+
+ # Create project structure
+ project_files = [
+ {"path": "/tmp/project/main.py", "content": "print('Hello')"},
+ {"path": "/tmp/project/utils.py", "content": "def helper(): pass"},
+ {"path": "/tmp/project/README.md", "content": "# My Project"},
+ ]
+
+ await fs.mkdir("/tmp/project", recursive=True)
+ await fs.write_files(project_files)
+ print("Created project structure")
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ await sandbox.delete()
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/examples/11_upload_download.py b/examples/11_upload_download.py
new file mode 100644
index 00000000..e0703d49
--- /dev/null
+++ b/examples/11_upload_download.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python3
+"""Upload and download files"""
+
+import os
+import tempfile
+
+from koyeb import Sandbox
+
+
+def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = Sandbox.create(
+ image="koyeb/sandbox",
+ name="upload-download",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ fs = sandbox.filesystem
+
+ # Upload local file to sandbox
+ with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".txt") as f:
+ f.write("This is a local file\n")
+ f.write("Uploaded to Koyeb Sandbox!")
+ local_file = f.name
+
+ try:
+ fs.upload_file(local_file, "/tmp/uploaded_file.txt")
+ uploaded_info = fs.read_file("/tmp/uploaded_file.txt")
+ print(uploaded_info.content)
+ finally:
+ os.unlink(local_file)
+
+ # Download file from sandbox
+ fs.write_file(
+ "/tmp/download_source.txt", "Download test content\nMultiple lines"
+ )
+
+ with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix="_downloaded.txt") as f:
+ download_path = f.name
+
+ try:
+ fs.download_file("/tmp/download_source.txt", download_path)
+ with open(download_path, "r") as f:
+ print(f.read())
+ finally:
+ os.unlink(download_path)
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ sandbox.delete()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/11_upload_download_async.py b/examples/11_upload_download_async.py
new file mode 100644
index 00000000..cf159a69
--- /dev/null
+++ b/examples/11_upload_download_async.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python3
+"""Upload and download files (async variant)"""
+
+import asyncio
+import os
+import tempfile
+
+from koyeb import AsyncSandbox
+
+
+async def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = await AsyncSandbox.create(
+ image="koyeb/sandbox",
+ name="upload-download",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ fs = sandbox.filesystem
+
+ # Upload local file to sandbox
+ with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".txt") as f:
+ f.write("This is a local file\n")
+ f.write("Uploaded to Koyeb Sandbox!")
+ local_file = f.name
+
+ try:
+ await fs.upload_file(local_file, "/tmp/uploaded_file.txt")
+ uploaded_info = await fs.read_file("/tmp/uploaded_file.txt")
+ print(uploaded_info.content)
+ finally:
+ os.unlink(local_file)
+
+ # Download file from sandbox
+ await fs.write_file(
+ "/tmp/download_source.txt", "Download test content\nMultiple lines"
+ )
+
+ with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix="_downloaded.txt") as f:
+ download_path = f.name
+
+ try:
+ await fs.download_file("/tmp/download_source.txt", download_path)
+ with open(download_path, "r") as f:
+ print(f.read())
+ finally:
+ os.unlink(download_path)
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ await sandbox.delete()
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/examples/12_file_manipulation.py b/examples/12_file_manipulation.py
new file mode 100644
index 00000000..6268706d
--- /dev/null
+++ b/examples/12_file_manipulation.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python3
+"""File manipulation operations"""
+
+import os
+
+from koyeb import Sandbox
+
+
+def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = Sandbox.create(
+ image="koyeb/sandbox",
+ name="file-manip",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ fs = sandbox.filesystem
+
+ # Setup
+ fs.write_file("/tmp/file1.txt", "Content of file 1")
+ fs.write_file("/tmp/file2.txt", "Content of file 2")
+ fs.mkdir("/tmp/test_dir", recursive=True)
+
+ # Rename file
+ fs.rename_file("/tmp/file1.txt", "/tmp/renamed_file.txt")
+ print(f"Renamed: {fs.exists('/tmp/renamed_file.txt')}")
+
+ # Move file
+ fs.move_file("/tmp/file2.txt", "/tmp/test_dir/moved_file.txt")
+ print(f"Moved: {fs.exists('/tmp/test_dir/moved_file.txt')}")
+
+ # Copy file (read + write)
+ original_content = fs.read_file("/tmp/renamed_file.txt")
+ fs.write_file("/tmp/test_dir/copied_file.txt", original_content.content)
+ print(f"Copied: {fs.exists('/tmp/test_dir/copied_file.txt')}")
+
+ # Delete file
+ fs.rm("/tmp/renamed_file.txt")
+ print(f"Deleted: {not fs.exists('/tmp/renamed_file.txt')}")
+
+ # Delete directory
+ fs.rm("/tmp/test_dir", recursive=True)
+ print(f"Directory deleted: {not fs.exists('/tmp/test_dir')}")
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ sandbox.delete()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/12_file_manipulation_async.py b/examples/12_file_manipulation_async.py
new file mode 100644
index 00000000..47871fa6
--- /dev/null
+++ b/examples/12_file_manipulation_async.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python3
+"""File manipulation operations (async variant)"""
+
+import asyncio
+import os
+
+from koyeb import AsyncSandbox
+
+
+async def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = await AsyncSandbox.create(
+ image="koyeb/sandbox",
+ name="file-manip",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ fs = sandbox.filesystem
+
+ # Setup
+ await fs.write_file("/tmp/file1.txt", "Content of file 1")
+ await fs.write_file("/tmp/file2.txt", "Content of file 2")
+ await fs.mkdir("/tmp/test_dir", recursive=True)
+
+ # Rename file
+ await fs.rename_file("/tmp/file1.txt", "/tmp/renamed_file.txt")
+ renamed_exists = await fs.exists("/tmp/renamed_file.txt")
+ print(f"Renamed: {renamed_exists}")
+
+ # Move file
+ await fs.move_file("/tmp/file2.txt", "/tmp/test_dir/moved_file.txt")
+ moved_exists = await fs.exists("/tmp/test_dir/moved_file.txt")
+ print(f"Moved: {moved_exists}")
+
+ # Copy file (read + write)
+ original_content = await fs.read_file("/tmp/renamed_file.txt")
+ await fs.write_file("/tmp/test_dir/copied_file.txt", original_content.content)
+ copied_exists = await fs.exists("/tmp/test_dir/copied_file.txt")
+ print(f"Copied: {copied_exists}")
+
+ # Delete file
+ await fs.rm("/tmp/renamed_file.txt")
+ deleted_check = not await fs.exists("/tmp/renamed_file.txt")
+ print(f"Deleted: {deleted_check}")
+
+ # Delete directory
+ await fs.rm("/tmp/test_dir", recursive=True)
+ dir_deleted_check = not await fs.exists("/tmp/test_dir")
+ print(f"Directory deleted: {dir_deleted_check}")
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ await sandbox.delete()
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/examples/13_background_processes.py b/examples/13_background_processes.py
new file mode 100755
index 00000000..a373ee36
--- /dev/null
+++ b/examples/13_background_processes.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python3
+"""Background process management"""
+
+import os
+import time
+
+from koyeb import Sandbox
+
+
+def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = Sandbox.create(
+ image="koyeb/sandbox",
+ name="background-processes",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ print("Launching background processes...")
+
+ # Launch a long-running process
+ process_id_1 = sandbox.launch_process(
+ "python3 -c 'import time; [print(f\"Process 1: {i}\") or time.sleep(1) for i in range(10)]'"
+ )
+ print(f"Launched process 1: {process_id_1}")
+
+ # Launch another process with a different command
+ process_id_2 = sandbox.launch_process(
+ "python3 -c 'import time; [print(f\"Process 2: {i}\") or time.sleep(1) for i in range(5)]'"
+ )
+ print(f"Launched process 2: {process_id_2}")
+
+ # Wait a bit for processes to start
+ time.sleep(2)
+
+ # List all processes
+ print("\nListing all processes:")
+ processes = sandbox.list_processes()
+ for process in processes:
+ print(f" ID: {process.id}")
+ print(f" Command: {process.command}")
+ print(f" Status: {process.status}")
+ if process.pid:
+ print(f" PID: {process.pid}")
+ print()
+
+ # Kill a specific process
+ print(f"Killing process {process_id_2}...")
+ sandbox.kill_process(process_id_2)
+ print("Process killed")
+
+ # Wait a bit
+ time.sleep(1)
+
+ # List processes again
+ print("\nListing processes after kill:")
+ processes = sandbox.list_processes()
+ for process in processes:
+ print(f" ID: {process.id}")
+ print(f" Command: {process.command}")
+ print(f" Status: {process.status}")
+ print()
+
+ # Launch a few more processes
+ process_id_3 = sandbox.launch_process("sleep 5")
+ process_id_4 = sandbox.launch_process("sleep 5")
+ print(f"Launched processes 3 and 4: {process_id_3}, {process_id_4}")
+
+ # Wait a bit
+ time.sleep(1)
+
+ # Kill all running processes
+ print("\nKilling all running processes...")
+ killed_count = sandbox.kill_all_processes()
+ print(f"Killed {killed_count} processes")
+
+ # Final list
+ print("\nFinal process list:")
+ processes = sandbox.list_processes()
+ for process in processes:
+ print(f" ID: {process.id}")
+ print(f" Command: {process.command}")
+ print(f" Status: {process.status}")
+ print()
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ sandbox.delete()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/13_background_processes_async.py b/examples/13_background_processes_async.py
new file mode 100755
index 00000000..169990de
--- /dev/null
+++ b/examples/13_background_processes_async.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python3
+"""Background process management (async variant)"""
+
+import asyncio
+import os
+
+from koyeb import AsyncSandbox
+
+
+async def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = await AsyncSandbox.create(
+ image="koyeb/sandbox",
+ name="background-processes",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ print("Launching background processes...")
+
+ # Launch a long-running process
+ process_id_1 = await sandbox.launch_process(
+ "python3 -c 'import time; [print(f\"Process 1: {i}\") or time.sleep(1) for i in range(10)]'"
+ )
+ print(f"Launched process 1: {process_id_1}")
+
+ # Launch another process with a different command
+ process_id_2 = await sandbox.launch_process(
+ "python3 -c 'import time; [print(f\"Process 2: {i}\") or time.sleep(1) for i in range(5)]'"
+ )
+ print(f"Launched process 2: {process_id_2}")
+
+ # Wait a bit for processes to start
+ await asyncio.sleep(2)
+
+ # List all processes
+ print("\nListing all processes:")
+ processes = await sandbox.list_processes()
+ for process in processes:
+ print(f" ID: {process.id}")
+ print(f" Command: {process.command}")
+ print(f" Status: {process.status}")
+ if process.pid:
+ print(f" PID: {process.pid}")
+ print()
+
+ # Kill a specific process
+ print(f"Killing process {process_id_2}...")
+ await sandbox.kill_process(process_id_2)
+ print("Process killed")
+
+ # Wait a bit
+ await asyncio.sleep(1)
+
+ # List processes again
+ print("\nListing processes after kill:")
+ processes = await sandbox.list_processes()
+ for process in processes:
+ print(f" ID: {process.id}")
+ print(f" Command: {process.command}")
+ print(f" Status: {process.status}")
+ print()
+
+ # Launch a few more processes
+ process_id_3 = await sandbox.launch_process("sleep 5")
+ process_id_4 = await sandbox.launch_process("sleep 5")
+ print(f"Launched processes 3 and 4: {process_id_3}, {process_id_4}")
+
+ # Wait a bit
+ await asyncio.sleep(1)
+
+ # Kill all running processes
+ print("\nKilling all running processes...")
+ killed_count = await sandbox.kill_all_processes()
+ print(f"Killed {killed_count} processes")
+
+ # Final list
+ print("\nFinal process list:")
+ processes = await sandbox.list_processes()
+ for process in processes:
+ print(f" ID: {process.id}")
+ print(f" Command: {process.command}")
+ print(f" Status: {process.status}")
+ print()
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ await sandbox.delete()
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/examples/14_expose_port.py b/examples/14_expose_port.py
new file mode 100755
index 00000000..af91ec6a
--- /dev/null
+++ b/examples/14_expose_port.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+"""Port exposure via TCP proxy"""
+
+import os
+import time
+
+import requests
+
+from koyeb import Sandbox
+
+
+def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = Sandbox.create(
+ image="koyeb/sandbox",
+ name="expose-port",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ # Create a test file to serve
+ print("\nCreating test file...")
+ sandbox.filesystem.write_file(
+ "/tmp/test.html", "Hello from Sandbox!
Port 8080
"
+ )
+ print("Test file created")
+
+ # Start a simple HTTP server on port 8080
+ print("\nStarting HTTP server on port 8080...")
+ process_id = sandbox.launch_process(
+ "python3 -m http.server 8080",
+ cwd="/tmp",
+ )
+ print(f"Server started with process ID: {process_id}")
+
+ # Wait for server to start
+ print("Waiting for server to start...")
+ time.sleep(3)
+
+ # Expose port 8080
+ print("\nExposing port 8080...")
+ exposed = sandbox.expose_port(8080)
+ print(f"Port exposed: {exposed.port}")
+ print(f"Exposed at: {exposed.exposed_at}")
+
+ # Wait a bit for the port to be ready
+ print("Waiting for port to be ready...")
+ time.sleep(2)
+
+ # Make a request to verify it's working
+ print("\nMaking HTTP request to verify port exposure...")
+ try:
+ response = requests.get(f"{exposed.exposed_at}/test.html", timeout=10)
+ response.raise_for_status()
+ print(f"✓ Request successful! Status: {response.status_code}")
+ print(f"✓ Response content: {response.text.strip()}")
+ except requests.RequestException as e:
+ print(f"⚠ Request failed: {e}")
+ print("Note: Port may still be propagating. Try again in a few seconds.")
+
+ # List processes to show the server is running
+ print("\nRunning processes:")
+ processes = sandbox.list_processes()
+ for process in processes:
+ if process.status == "running":
+ print(f" {process.id}: {process.command} - {process.status}")
+
+ # Switch to a different port (e.g., 8081)
+ print("\nSwitching to port 8081...")
+ # Create a different test file for port 8081
+ sandbox.filesystem.write_file(
+ "/tmp/test2.html", "Hello from Sandbox!
Port 8081
"
+ )
+ # Start a new server on 8081
+ sandbox.launch_process(
+ "python3 -m http.server 8081",
+ cwd="/tmp",
+ )
+ print("Waiting for server to start...")
+ time.sleep(3)
+
+ # Expose the new port (this will automatically unbind the previous port)
+ exposed_2 = sandbox.expose_port(8081)
+ print(f"Port exposed: {exposed_2.port}")
+ print(f"Exposed at: {exposed_2.exposed_at}")
+
+ # Wait a bit for the port to be ready
+ print("Waiting for port to be ready...")
+ time.sleep(2)
+
+ # Make a request to verify the new port is working
+ print("\nMaking HTTP request to verify port 8081...")
+ try:
+ response = requests.get(f"{exposed_2.exposed_at}/test2.html", timeout=10)
+ response.raise_for_status()
+ print(f"✓ Request successful! Status: {response.status_code}")
+ print(f"✓ Response content: {response.text.strip()}")
+ except requests.RequestException as e:
+ print(f"⚠ Request failed: {e}")
+ print("Note: Port may still be propagating. Try again in a few seconds.")
+
+ # Unexpose the port
+ print("\nUnexposing port...")
+ sandbox.unexpose_port()
+ print("Port unexposed")
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ sandbox.delete()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/14_expose_port_async.py b/examples/14_expose_port_async.py
new file mode 100755
index 00000000..50ab62f6
--- /dev/null
+++ b/examples/14_expose_port_async.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python3
+"""Port exposure via TCP proxy (async variant)"""
+
+import asyncio
+import functools
+import os
+
+import requests
+
+from koyeb import AsyncSandbox
+
+
+async def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ sandbox = None
+ try:
+ sandbox = await AsyncSandbox.create(
+ image="koyeb/sandbox",
+ name="expose-port",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ # Create a test file to serve
+ print("\nCreating test file...")
+ await sandbox.filesystem.write_file(
+ "/tmp/test.html", "Hello from Sandbox!
Port 8080
"
+ )
+ print("Test file created")
+
+ # Start a simple HTTP server on port 8080
+ print("\nStarting HTTP server on port 8080...")
+ process_id = await sandbox.launch_process(
+ "python3 -m http.server 8080",
+ cwd="/tmp",
+ )
+ print(f"Server started with process ID: {process_id}")
+
+ # Wait for server to start
+ print("Waiting for server to start...")
+ await asyncio.sleep(3)
+
+ # Expose port 8080
+ print("\nExposing port 8080...")
+ exposed = await sandbox.expose_port(8080)
+ print(f"Port exposed: {exposed.port}")
+ print(f"Exposed at: {exposed.exposed_at}")
+
+ # Wait a bit for the port to be ready
+ print("Waiting for port to be ready...")
+ await asyncio.sleep(2)
+
+ # Make a request to verify it's working
+ print("\nMaking HTTP request to verify port exposure...")
+ try:
+ loop = asyncio.get_running_loop()
+            response = await loop.run_in_executor(
+                None,
+                functools.partial(
+                    requests.get, f"{exposed.exposed_at}/test.html", timeout=10
+                ),
+            )
+ response.raise_for_status()
+ print(f"✓ Request successful! Status: {response.status_code}")
+ print(f"✓ Response content: {response.text.strip()}")
+ except Exception as e:
+ print(f"⚠ Request failed: {e}")
+ print("Note: Port may still be propagating. Try again in a few seconds.")
+
+ # List processes to show the server is running
+ print("\nRunning processes:")
+ processes = await sandbox.list_processes()
+ for process in processes:
+ if process.status == "running":
+ print(f" {process.id}: {process.command} - {process.status}")
+
+ # Switch to a different port (e.g., 8081)
+ print("\nSwitching to port 8081...")
+ # Create a different test file for port 8081
+ await sandbox.filesystem.write_file(
+ "/tmp/test2.html", "Hello from Sandbox!
Port 8081
"
+ )
+ # Start a new server on 8081
+ await sandbox.launch_process(
+ "python3 -m http.server 8081",
+ cwd="/tmp",
+ )
+ print("Waiting for server to start...")
+ await asyncio.sleep(3)
+
+ # Expose the new port (this will automatically unbind the previous port)
+ exposed_2 = await sandbox.expose_port(8081)
+ print(f"Port exposed: {exposed_2.port}")
+ print(f"Exposed at: {exposed_2.exposed_at}")
+
+ # Wait a bit for the port to be ready
+ print("Waiting for port to be ready...")
+ await asyncio.sleep(2)
+
+ # Make a request to verify the new port is working
+ print("\nMaking HTTP request to verify port 8081...")
+ try:
+ loop = asyncio.get_running_loop()
+            response = await loop.run_in_executor(
+                None,
+                functools.partial(
+                    requests.get, f"{exposed_2.exposed_at}/test2.html", timeout=10
+                ),
+            )
+ response.raise_for_status()
+ print(f"✓ Request successful! Status: {response.status_code}")
+ print(f"✓ Response content: {response.text.strip()}")
+ except Exception as e:
+ print(f"⚠ Request failed: {e}")
+ print("Note: Port may still be propagating. Try again in a few seconds.")
+
+ # Unexpose the port
+ print("\nUnexposing port...")
+ await sandbox.unexpose_port()
+ print("Port unexposed")
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ if sandbox:
+ await sandbox.delete()
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/examples/15_get_sandbox.py b/examples/15_get_sandbox.py
new file mode 100644
index 00000000..5eb161ae
--- /dev/null
+++ b/examples/15_get_sandbox.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+"""Create a sandbox and then retrieve it by service ID"""
+
+import os
+
+from koyeb import Sandbox
+
+
+def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ original_sandbox = None
+ retrieved_sandbox = None
+
+ try:
+ # Step 1: Create a new sandbox
+ print("Creating a new sandbox...")
+ original_sandbox = Sandbox.create(
+ image="koyeb/sandbox",
+ name="example-sandbox",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ print(f"✓ Created sandbox: {original_sandbox.name}")
+ print(f" Service ID: {original_sandbox.service_id}")
+ print(f" App ID: {original_sandbox.app_id}")
+ print(f" Instance ID: {original_sandbox.instance_id}")
+
+ # Execute a command with the original sandbox
+ result = original_sandbox.exec("echo 'Hello from original sandbox!'")
+ print(f" Original sandbox output: {result.stdout.strip()}")
+
+        # Step 2: Retrieve the same sandbox using its ID
+        print("\nRetrieving sandbox by ID...")
+ retrieved_sandbox = Sandbox.get_from_id(
+ id=original_sandbox.id,
+ api_token=api_token,
+ )
+
+ print(f"✓ Retrieved sandbox: {retrieved_sandbox.name}")
+ print(f" Service ID: {retrieved_sandbox.service_id}")
+ print(f" App ID: {retrieved_sandbox.app_id}")
+ print(f" Instance ID: {retrieved_sandbox.instance_id}")
+
+ # Verify it's the same sandbox
+ assert original_sandbox.id == retrieved_sandbox.id, "Sandbox IDs should match!"
+ print(" ✓ Confirmed: Same sandbox retrieved")
+
+ # Check health
+ is_healthy = retrieved_sandbox.is_healthy()
+ print(f" Healthy: {is_healthy}")
+
+ # Execute a command with the retrieved sandbox
+ if is_healthy:
+ result = retrieved_sandbox.exec("echo 'Hello from retrieved sandbox!'")
+ print(f" Retrieved sandbox output: {result.stdout.strip()}")
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ # Cleanup: delete the sandbox (works from either instance)
+ if original_sandbox:
+ print("\nCleaning up...")
+ original_sandbox.delete()
+ print("✓ Sandbox deleted")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/15_get_sandbox_async.py b/examples/15_get_sandbox_async.py
new file mode 100644
index 00000000..e80789c9
--- /dev/null
+++ b/examples/15_get_sandbox_async.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python3
+"""Create a sandbox and then retrieve it by service ID (async)"""
+
+import asyncio
+import os
+
+from koyeb import AsyncSandbox
+
+
+async def main():
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ print("Error: KOYEB_API_TOKEN not set")
+ return
+
+ original_sandbox = None
+ retrieved_sandbox = None
+
+ try:
+ # Step 1: Create a new sandbox
+ print("Creating a new sandbox...")
+ original_sandbox = await AsyncSandbox.create(
+ image="koyeb/sandbox",
+ name="example-sandbox",
+ wait_ready=True,
+ api_token=api_token,
+ )
+
+ print(f"✓ Created sandbox: {original_sandbox.name}")
+ print(f" Service ID: {original_sandbox.service_id}")
+ print(f" App ID: {original_sandbox.app_id}")
+ print(f" Instance ID: {original_sandbox.instance_id}")
+
+ # Execute a command with the original sandbox
+ result = await original_sandbox.exec("echo 'Hello from original sandbox!'")
+ print(f" Original sandbox output: {result.stdout.strip()}")
+
+        # Step 2: Retrieve the same sandbox using its ID
+        print("\nRetrieving sandbox by ID...")
+ retrieved_sandbox = await AsyncSandbox.get_from_id(
+ id=original_sandbox.id,
+ api_token=api_token,
+ )
+
+ print(f"✓ Retrieved sandbox: {retrieved_sandbox.name}")
+ print(f" Service ID: {retrieved_sandbox.service_id}")
+ print(f" App ID: {retrieved_sandbox.app_id}")
+ print(f" Instance ID: {retrieved_sandbox.instance_id}")
+
+ # Verify it's the same sandbox
+ assert original_sandbox.id == retrieved_sandbox.id, "Sandbox IDs should match!"
+ print(" ✓ Confirmed: Same sandbox retrieved")
+
+ # Check health
+ is_healthy = await retrieved_sandbox.is_healthy()
+ print(f" Healthy: {is_healthy}")
+
+ # Execute a command with the retrieved sandbox
+ if is_healthy:
+ result = await retrieved_sandbox.exec(
+ "echo 'Hello from retrieved sandbox!'"
+ )
+ print(f" Retrieved sandbox output: {result.stdout.strip()}")
+
+ except Exception as e:
+ print(f"Error: {e}")
+ finally:
+ # Cleanup: delete the sandbox (works from either instance)
+ if original_sandbox:
+ print("\nCleaning up...")
+ await original_sandbox.delete()
+ print("✓ Sandbox deleted")
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/examples/README.md b/examples/README.md
new file mode 100644
index 00000000..9dddf90a
--- /dev/null
+++ b/examples/README.md
@@ -0,0 +1,62 @@
+# Koyeb Sandbox Examples
+
+A collection of examples demonstrating the Koyeb Sandbox SDK capabilities.
+
+## Quick Start
+
+```bash
+# Set your API token
+export KOYEB_API_TOKEN=your_api_token_here
+
+# Run individual examples
+uv run python examples/01_create_sandbox.py
+```
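+
+Examples with an `_async.py` suffix are async variants of the same scripts and run the same way:
+
+```bash
+uv run python examples/07_file_operations_async.py
+```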
+
+## Examples
+
+- **01_create_sandbox.py** - Create and manage sandbox instances
+- **02_create_sandbox_with_timing.py** - Create sandbox with timing measurements
+- **03_basic_commands.py** - Basic command execution
+- **04_streaming_output.py** - Real-time streaming output
+- **05_environment_variables.py** - Environment variable configuration
+- **06_working_directory.py** - Working directory management
+- **07_file_operations.py** - File read/write operations
+- **08_directory_operations.py** - Directory management
+- **09_binary_files.py** - Binary file handling
+- **10_batch_operations.py** - Batch file operations
+- **11_upload_download.py** - File upload and download
+- **12_file_manipulation.py** - File manipulation operations
+- **13_background_processes.py** - Background process management (launch, list, kill)
+- **14_expose_port.py** - Port exposure via TCP proxy with HTTP verification
+- **15_get_sandbox.py** - Create a sandbox and then retrieve it by ID
+
+## Basic Usage
+
+```python
+import os
+
+from koyeb import Sandbox
+
+api_token = os.getenv("KOYEB_API_TOKEN")
+
+# Create a sandbox
+sandbox = Sandbox.create(
+    image="koyeb/sandbox",
+    name="my-sandbox",
+    wait_ready=True,
+    api_token=api_token,
+)
+
+# Execute commands
+result = sandbox.exec("echo 'Hello World'")
+print(result.stdout)
+
+# Use the filesystem
+sandbox.filesystem.write_file("/tmp/file.txt", "Hello!")
+content = sandbox.filesystem.read_file("/tmp/file.txt")
+print(content.content)
+
+# Cleanup
+sandbox.delete()
+```
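+
+The async variant follows the same flow with `AsyncSandbox` (see the `*_async.py` examples):
+
+```python
+import asyncio
+import os
+
+from koyeb import AsyncSandbox
+
+
+async def main():
+    api_token = os.getenv("KOYEB_API_TOKEN")
+
+    # Create a sandbox and wait until it is ready
+    sandbox = await AsyncSandbox.create(
+        image="koyeb/sandbox",
+        name="my-sandbox",
+        wait_ready=True,
+        api_token=api_token,
+    )
+
+    # Execute a command and use the filesystem
+    result = await sandbox.exec("echo 'Hello World'")
+    print(result.stdout)
+    await sandbox.filesystem.write_file("/tmp/file.txt", "Hello!")
+
+    # Cleanup
+    await sandbox.delete()
+
+
+asyncio.run(main())
+```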
+
+## Prerequisites
+
+- Koyeb API token from https://app.koyeb.com/account/api
+- Python 3.9+
+- `uv` package manager (or `pip`)
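+
+## Installation
+
+A minimal install sketch, assuming the SDK is published on PyPI as `koyeb`:
+
+```bash
+uv pip install koyeb  # or: pip install koyeb
+```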
diff --git a/koyeb/__init__.py b/koyeb/__init__.py
index dba7c501..ee580dbd 100644
--- a/koyeb/__init__.py
+++ b/koyeb/__init__.py
@@ -1,3 +1,8 @@
# coding: utf-8
__version__ = "1.0.3"
+
+# Make Sandbox available at package level
+from .sandbox import Sandbox, AsyncSandbox
+
+__all__ = ["Sandbox", "AsyncSandbox"]
diff --git a/koyeb/api/api/logs_api.py b/koyeb/api/api/logs_api.py
index df6433b0..cfc7de79 100644
--- a/koyeb/api/api/logs_api.py
+++ b/koyeb/api/api/logs_api.py
@@ -18,7 +18,7 @@
from datetime import datetime
from pydantic import Field, StrictStr
-from typing import Optional
+from typing import List, Optional
from typing_extensions import Annotated
from koyeb.api.models.query_logs_reply import QueryLogsReply
from koyeb.api.models.stream_result_of_log_entry import StreamResultOfLogEntry
@@ -44,19 +44,22 @@ def __init__(self, api_client=None) -> None:
@validate_call
def query_logs(
self,
- type: Optional[StrictStr] = None,
- app_id: Optional[StrictStr] = None,
- service_id: Optional[StrictStr] = None,
- deployment_id: Optional[StrictStr] = None,
- instance_id: Optional[StrictStr] = None,
- stream: Optional[StrictStr] = None,
- regional_deployment_id: Optional[StrictStr] = None,
+ type: Annotated[Optional[StrictStr], Field(description="Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\".")] = None,
+ app_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ service_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ deployment_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ regional_deployment_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ instance_id: Annotated[Optional[StrictStr], Field(description="Deprecated, prefer using instance_ids instead.")] = None,
+ instance_ids: Annotated[Optional[List[StrictStr]], Field(description="(Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ stream: Annotated[Optional[StrictStr], Field(description="Deprecated, prefer using streams instead.")] = None,
+ streams: Annotated[Optional[List[StrictStr]], Field(description="(Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs).")] = None,
start: Annotated[Optional[datetime], Field(description="(Optional) Must always be before `end`. Defaults to 15 minutes ago.")] = None,
end: Annotated[Optional[datetime], Field(description="(Optional) Must always be after `start`. Defaults to now.")] = None,
order: Annotated[Optional[StrictStr], Field(description="(Optional) `asc` or `desc`. Defaults to `desc`.")] = None,
limit: Annotated[Optional[StrictStr], Field(description="(Optional) Defaults to 100. Maximum of 1000.")] = None,
regex: Annotated[Optional[StrictStr], Field(description="(Optional) Apply a regex to filter logs. Can't be used with `text`.")] = None,
text: Annotated[Optional[StrictStr], Field(description="(Optional) Looks for this string in logs. Can't be used with `regex`.")] = None,
+ regions: Annotated[Optional[List[StrictStr]], Field(description="(Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"]).")] = None,
_request_timeout: Union[
None,
Annotated[StrictFloat, Field(gt=0)],
@@ -73,20 +76,24 @@ def query_logs(
"""Query logs
- :param type:
+ :param type: Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\".
:type type: str
- :param app_id:
+ :param app_id: (Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
:type app_id: str
- :param service_id:
+ :param service_id: (Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
:type service_id: str
- :param deployment_id:
+ :param deployment_id: (Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
:type deployment_id: str
- :param instance_id:
+ :param regional_deployment_id: (Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+ :type regional_deployment_id: str
+ :param instance_id: Deprecated, prefer using instance_ids instead.
:type instance_id: str
- :param stream:
+ :param instance_ids: (Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+ :type instance_ids: List[str]
+ :param stream: Deprecated, prefer using streams instead.
:type stream: str
- :param regional_deployment_id:
- :type regional_deployment_id: str
+ :param streams: (Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs).
+ :type streams: List[str]
:param start: (Optional) Must always be before `end`. Defaults to 15 minutes ago.
:type start: datetime
:param end: (Optional) Must always be after `start`. Defaults to now.
@@ -99,6 +106,8 @@ def query_logs(
:type regex: str
:param text: (Optional) Looks for this string in logs. Can't be used with `regex`.
:type text: str
+ :param regions: (Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"]).
+ :type regions: List[str]
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
@@ -126,15 +135,18 @@ def query_logs(
app_id=app_id,
service_id=service_id,
deployment_id=deployment_id,
+ regional_deployment_id=regional_deployment_id,
instance_id=instance_id,
+ instance_ids=instance_ids,
stream=stream,
- regional_deployment_id=regional_deployment_id,
+ streams=streams,
start=start,
end=end,
order=order,
limit=limit,
regex=regex,
text=text,
+ regions=regions,
_request_auth=_request_auth,
_content_type=_content_type,
_headers=_headers,
@@ -164,19 +176,22 @@ def query_logs(
@validate_call
def query_logs_with_http_info(
self,
- type: Optional[StrictStr] = None,
- app_id: Optional[StrictStr] = None,
- service_id: Optional[StrictStr] = None,
- deployment_id: Optional[StrictStr] = None,
- instance_id: Optional[StrictStr] = None,
- stream: Optional[StrictStr] = None,
- regional_deployment_id: Optional[StrictStr] = None,
+ type: Annotated[Optional[StrictStr], Field(description="Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\".")] = None,
+ app_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ service_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ deployment_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ regional_deployment_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ instance_id: Annotated[Optional[StrictStr], Field(description="Deprecated, prefer using instance_ids instead.")] = None,
+ instance_ids: Annotated[Optional[List[StrictStr]], Field(description="(Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ stream: Annotated[Optional[StrictStr], Field(description="Deprecated, prefer using streams instead.")] = None,
+ streams: Annotated[Optional[List[StrictStr]], Field(description="(Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs).")] = None,
start: Annotated[Optional[datetime], Field(description="(Optional) Must always be before `end`. Defaults to 15 minutes ago.")] = None,
end: Annotated[Optional[datetime], Field(description="(Optional) Must always be after `start`. Defaults to now.")] = None,
order: Annotated[Optional[StrictStr], Field(description="(Optional) `asc` or `desc`. Defaults to `desc`.")] = None,
limit: Annotated[Optional[StrictStr], Field(description="(Optional) Defaults to 100. Maximum of 1000.")] = None,
regex: Annotated[Optional[StrictStr], Field(description="(Optional) Apply a regex to filter logs. Can't be used with `text`.")] = None,
text: Annotated[Optional[StrictStr], Field(description="(Optional) Looks for this string in logs. Can't be used with `regex`.")] = None,
+ regions: Annotated[Optional[List[StrictStr]], Field(description="(Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"]).")] = None,
_request_timeout: Union[
None,
Annotated[StrictFloat, Field(gt=0)],
@@ -193,20 +208,24 @@ def query_logs_with_http_info(
"""Query logs
- :param type:
+ :param type: Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\".
:type type: str
- :param app_id:
+ :param app_id: (Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
:type app_id: str
- :param service_id:
+ :param service_id: (Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
:type service_id: str
- :param deployment_id:
+ :param deployment_id: (Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
:type deployment_id: str
- :param instance_id:
+ :param regional_deployment_id: (Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+ :type regional_deployment_id: str
+ :param instance_id: Deprecated, prefer using instance_ids instead.
:type instance_id: str
- :param stream:
+ :param instance_ids: (Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+ :type instance_ids: List[str]
+ :param stream: Deprecated, prefer using streams instead.
:type stream: str
- :param regional_deployment_id:
- :type regional_deployment_id: str
+ :param streams: (Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs).
+ :type streams: List[str]
:param start: (Optional) Must always be before `end`. Defaults to 15 minutes ago.
:type start: datetime
:param end: (Optional) Must always be after `start`. Defaults to now.
@@ -219,6 +238,8 @@ def query_logs_with_http_info(
:type regex: str
:param text: (Optional) Looks for this string in logs. Can't be used with `regex`.
:type text: str
+ :param regions: (Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"]).
+ :type regions: List[str]
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
@@ -246,15 +267,18 @@ def query_logs_with_http_info(
app_id=app_id,
service_id=service_id,
deployment_id=deployment_id,
+ regional_deployment_id=regional_deployment_id,
instance_id=instance_id,
+ instance_ids=instance_ids,
stream=stream,
- regional_deployment_id=regional_deployment_id,
+ streams=streams,
start=start,
end=end,
order=order,
limit=limit,
regex=regex,
text=text,
+ regions=regions,
_request_auth=_request_auth,
_content_type=_content_type,
_headers=_headers,
@@ -284,19 +308,22 @@ def query_logs_with_http_info(
@validate_call
def query_logs_without_preload_content(
self,
- type: Optional[StrictStr] = None,
- app_id: Optional[StrictStr] = None,
- service_id: Optional[StrictStr] = None,
- deployment_id: Optional[StrictStr] = None,
- instance_id: Optional[StrictStr] = None,
- stream: Optional[StrictStr] = None,
- regional_deployment_id: Optional[StrictStr] = None,
+ type: Annotated[Optional[StrictStr], Field(description="Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\".")] = None,
+ app_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ service_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ deployment_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ regional_deployment_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ instance_id: Annotated[Optional[StrictStr], Field(description="Deprecated, prefer using instance_ids instead.")] = None,
+ instance_ids: Annotated[Optional[List[StrictStr]], Field(description="(Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ stream: Annotated[Optional[StrictStr], Field(description="Deprecated, prefer using streams instead.")] = None,
+ streams: Annotated[Optional[List[StrictStr]], Field(description="(Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs).")] = None,
start: Annotated[Optional[datetime], Field(description="(Optional) Must always be before `end`. Defaults to 15 minutes ago.")] = None,
end: Annotated[Optional[datetime], Field(description="(Optional) Must always be after `start`. Defaults to now.")] = None,
order: Annotated[Optional[StrictStr], Field(description="(Optional) `asc` or `desc`. Defaults to `desc`.")] = None,
limit: Annotated[Optional[StrictStr], Field(description="(Optional) Defaults to 100. Maximum of 1000.")] = None,
regex: Annotated[Optional[StrictStr], Field(description="(Optional) Apply a regex to filter logs. Can't be used with `text`.")] = None,
text: Annotated[Optional[StrictStr], Field(description="(Optional) Looks for this string in logs. Can't be used with `regex`.")] = None,
+ regions: Annotated[Optional[List[StrictStr]], Field(description="(Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"]).")] = None,
_request_timeout: Union[
None,
Annotated[StrictFloat, Field(gt=0)],
@@ -313,20 +340,24 @@ def query_logs_without_preload_content(
"""Query logs
- :param type:
+ :param type: Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\".
:type type: str
- :param app_id:
+ :param app_id: (Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
:type app_id: str
- :param service_id:
+ :param service_id: (Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
:type service_id: str
- :param deployment_id:
+ :param deployment_id: (Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
:type deployment_id: str
- :param instance_id:
+ :param regional_deployment_id: (Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+ :type regional_deployment_id: str
+ :param instance_id: Deprecated, prefer using instance_ids instead.
:type instance_id: str
- :param stream:
+ :param instance_ids: (Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+ :type instance_ids: List[str]
+ :param stream: Deprecated, prefer using streams instead.
:type stream: str
- :param regional_deployment_id:
- :type regional_deployment_id: str
+ :param streams: (Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs).
+ :type streams: List[str]
:param start: (Optional) Must always be before `end`. Defaults to 15 minutes ago.
:type start: datetime
:param end: (Optional) Must always be after `start`. Defaults to now.
@@ -339,6 +370,8 @@ def query_logs_without_preload_content(
:type regex: str
:param text: (Optional) Looks for this string in logs. Can't be used with `regex`.
:type text: str
+ :param regions: (Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"]).
+ :type regions: List[str]
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
@@ -366,15 +399,18 @@ def query_logs_without_preload_content(
app_id=app_id,
service_id=service_id,
deployment_id=deployment_id,
+ regional_deployment_id=regional_deployment_id,
instance_id=instance_id,
+ instance_ids=instance_ids,
stream=stream,
- regional_deployment_id=regional_deployment_id,
+ streams=streams,
start=start,
end=end,
order=order,
limit=limit,
regex=regex,
text=text,
+ regions=regions,
_request_auth=_request_auth,
_content_type=_content_type,
_headers=_headers,
@@ -403,15 +439,18 @@ def _query_logs_serialize(
app_id,
service_id,
deployment_id,
+ regional_deployment_id,
instance_id,
+ instance_ids,
stream,
- regional_deployment_id,
+ streams,
start,
end,
order,
limit,
regex,
text,
+ regions,
_request_auth,
_content_type,
_headers,
@@ -421,6 +460,9 @@ def _query_logs_serialize(
_host = None
_collection_formats: Dict[str, str] = {
+ 'instance_ids': 'multi',
+ 'streams': 'multi',
+ 'regions': 'multi',
}
_path_params: Dict[str, str] = {}
@@ -450,17 +492,25 @@ def _query_logs_serialize(
_query_params.append(('deployment_id', deployment_id))
+ if regional_deployment_id is not None:
+
+ _query_params.append(('regional_deployment_id', regional_deployment_id))
+
if instance_id is not None:
_query_params.append(('instance_id', instance_id))
+ if instance_ids is not None:
+
+ _query_params.append(('instance_ids', instance_ids))
+
if stream is not None:
_query_params.append(('stream', stream))
- if regional_deployment_id is not None:
+ if streams is not None:
- _query_params.append(('regional_deployment_id', regional_deployment_id))
+ _query_params.append(('streams', streams))
if start is not None:
if isinstance(start, datetime):
@@ -504,6 +554,10 @@ def _query_logs_serialize(
_query_params.append(('text', text))
+ if regions is not None:
+
+ _query_params.append(('regions', regions))
+
# process the header parameters
# process the form parameters
# process the body parameter
@@ -544,17 +598,20 @@ def _query_logs_serialize(
@validate_call
def tail_logs(
self,
- type: Optional[StrictStr] = None,
- app_id: Optional[StrictStr] = None,
- service_id: Optional[StrictStr] = None,
- deployment_id: Optional[StrictStr] = None,
- regional_deployment_id: Optional[StrictStr] = None,
- instance_id: Optional[StrictStr] = None,
- stream: Optional[StrictStr] = None,
- start: Optional[datetime] = None,
- limit: Optional[StrictStr] = None,
+ type: Annotated[Optional[StrictStr], Field(description="Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\".")] = None,
+ app_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ service_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ deployment_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ regional_deployment_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ instance_id: Annotated[Optional[StrictStr], Field(description="Deprecated, prefer using instance_ids instead.")] = None,
+ instance_ids: Annotated[Optional[List[StrictStr]], Field(description="(Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ stream: Annotated[Optional[StrictStr], Field(description="Deprecated, prefer using streams instead.")] = None,
+ streams: Annotated[Optional[List[StrictStr]], Field(description="(Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs).")] = None,
+ start: Annotated[Optional[datetime], Field(description="(Optional) Defaults to 24 hours ago.")] = None,
+ limit: Annotated[Optional[StrictStr], Field(description="(Optional) Defaults to 1000. Maximum of 1000.")] = None,
regex: Annotated[Optional[StrictStr], Field(description="(Optional) Apply a regex to filter logs. Can't be used with `text`.")] = None,
text: Annotated[Optional[StrictStr], Field(description="(Optional) Looks for this string in logs. Can't be used with `regex`.")] = None,
+ regions: Annotated[Optional[List[StrictStr]], Field(description="(Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"]).")] = None,
_request_timeout: Union[
None,
Annotated[StrictFloat, Field(gt=0)],
@@ -571,28 +628,34 @@ def tail_logs(
"""Tails logs
- :param type:
+ :param type: Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\".
:type type: str
- :param app_id:
+ :param app_id: (Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
:type app_id: str
- :param service_id:
+ :param service_id: (Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
:type service_id: str
- :param deployment_id:
+ :param deployment_id: (Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
:type deployment_id: str
- :param regional_deployment_id:
+ :param regional_deployment_id: (Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
:type regional_deployment_id: str
- :param instance_id:
+ :param instance_id: Deprecated, prefer using instance_ids instead.
:type instance_id: str
- :param stream:
+ :param instance_ids: (Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+ :type instance_ids: List[str]
+ :param stream: Deprecated, prefer using streams instead.
:type stream: str
- :param start:
+ :param streams: (Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs).
+ :type streams: List[str]
+ :param start: (Optional) Defaults to 24 hours ago.
:type start: datetime
- :param limit:
+ :param limit: (Optional) Defaults to 1000. Maximum of 1000.
:type limit: str
:param regex: (Optional) Apply a regex to filter logs. Can't be used with `text`.
:type regex: str
:param text: (Optional) Looks for this string in logs. Can't be used with `regex`.
:type text: str
+ :param regions: (Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"]).
+ :type regions: List[str]
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
@@ -622,11 +685,14 @@ def tail_logs(
deployment_id=deployment_id,
regional_deployment_id=regional_deployment_id,
instance_id=instance_id,
+ instance_ids=instance_ids,
stream=stream,
+ streams=streams,
start=start,
limit=limit,
regex=regex,
text=text,
+ regions=regions,
_request_auth=_request_auth,
_content_type=_content_type,
_headers=_headers,
@@ -656,17 +722,20 @@ def tail_logs(
@validate_call
def tail_logs_with_http_info(
self,
- type: Optional[StrictStr] = None,
- app_id: Optional[StrictStr] = None,
- service_id: Optional[StrictStr] = None,
- deployment_id: Optional[StrictStr] = None,
- regional_deployment_id: Optional[StrictStr] = None,
- instance_id: Optional[StrictStr] = None,
- stream: Optional[StrictStr] = None,
- start: Optional[datetime] = None,
- limit: Optional[StrictStr] = None,
+ type: Annotated[Optional[StrictStr], Field(description="Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\".")] = None,
+ app_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ service_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ deployment_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ regional_deployment_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ instance_id: Annotated[Optional[StrictStr], Field(description="Deprecated, prefer using instance_ids instead.")] = None,
+ instance_ids: Annotated[Optional[List[StrictStr]], Field(description="(Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ stream: Annotated[Optional[StrictStr], Field(description="Deprecated, prefer using streams instead.")] = None,
+ streams: Annotated[Optional[List[StrictStr]], Field(description="(Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs).")] = None,
+ start: Annotated[Optional[datetime], Field(description="(Optional) Defaults to 24 hours ago.")] = None,
+ limit: Annotated[Optional[StrictStr], Field(description="(Optional) Defaults to 1000. Maximum of 1000.")] = None,
regex: Annotated[Optional[StrictStr], Field(description="(Optional) Apply a regex to filter logs. Can't be used with `text`.")] = None,
text: Annotated[Optional[StrictStr], Field(description="(Optional) Looks for this string in logs. Can't be used with `regex`.")] = None,
+ regions: Annotated[Optional[List[StrictStr]], Field(description="(Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"]).")] = None,
_request_timeout: Union[
None,
Annotated[StrictFloat, Field(gt=0)],
@@ -683,28 +752,34 @@ def tail_logs_with_http_info(
"""Tails logs
- :param type:
+ :param type: Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\".
:type type: str
- :param app_id:
+ :param app_id: (Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
:type app_id: str
- :param service_id:
+ :param service_id: (Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
:type service_id: str
- :param deployment_id:
+ :param deployment_id: (Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
:type deployment_id: str
- :param regional_deployment_id:
+ :param regional_deployment_id: (Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
:type regional_deployment_id: str
- :param instance_id:
+ :param instance_id: Deprecated, prefer using instance_ids instead.
:type instance_id: str
- :param stream:
+ :param instance_ids: (Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+ :type instance_ids: List[str]
+ :param stream: Deprecated, prefer using streams instead.
:type stream: str
- :param start:
+ :param streams: (Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs).
+ :type streams: List[str]
+ :param start: (Optional) Defaults to 24 hours ago.
:type start: datetime
- :param limit:
+ :param limit: (Optional) Defaults to 1000. Maximum of 1000.
:type limit: str
:param regex: (Optional) Apply a regex to filter logs. Can't be used with `text`.
:type regex: str
:param text: (Optional) Looks for this string in logs. Can't be used with `regex`.
:type text: str
+ :param regions: (Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"]).
+ :type regions: List[str]
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
@@ -734,11 +809,14 @@ def tail_logs_with_http_info(
deployment_id=deployment_id,
regional_deployment_id=regional_deployment_id,
instance_id=instance_id,
+ instance_ids=instance_ids,
stream=stream,
+ streams=streams,
start=start,
limit=limit,
regex=regex,
text=text,
+ regions=regions,
_request_auth=_request_auth,
_content_type=_content_type,
_headers=_headers,
@@ -768,17 +846,20 @@ def tail_logs_with_http_info(
@validate_call
def tail_logs_without_preload_content(
self,
- type: Optional[StrictStr] = None,
- app_id: Optional[StrictStr] = None,
- service_id: Optional[StrictStr] = None,
- deployment_id: Optional[StrictStr] = None,
- regional_deployment_id: Optional[StrictStr] = None,
- instance_id: Optional[StrictStr] = None,
- stream: Optional[StrictStr] = None,
- start: Optional[datetime] = None,
- limit: Optional[StrictStr] = None,
+ type: Annotated[Optional[StrictStr], Field(description="Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\".")] = None,
+ app_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ service_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ deployment_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ regional_deployment_id: Annotated[Optional[StrictStr], Field(description="(Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ instance_id: Annotated[Optional[StrictStr], Field(description="Deprecated, prefer using instance_ids instead.")] = None,
+ instance_ids: Annotated[Optional[List[StrictStr]], Field(description="(Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.")] = None,
+ stream: Annotated[Optional[StrictStr], Field(description="Deprecated, prefer using streams instead.")] = None,
+ streams: Annotated[Optional[List[StrictStr]], Field(description="(Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs).")] = None,
+ start: Annotated[Optional[datetime], Field(description="(Optional) Defaults to 24 hours ago.")] = None,
+ limit: Annotated[Optional[StrictStr], Field(description="(Optional) Defaults to 1000. Maximum of 1000.")] = None,
regex: Annotated[Optional[StrictStr], Field(description="(Optional) Apply a regex to filter logs. Can't be used with `text`.")] = None,
text: Annotated[Optional[StrictStr], Field(description="(Optional) Looks for this string in logs. Can't be used with `regex`.")] = None,
+ regions: Annotated[Optional[List[StrictStr]], Field(description="(Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"]).")] = None,
_request_timeout: Union[
None,
Annotated[StrictFloat, Field(gt=0)],
@@ -795,28 +876,34 @@ def tail_logs_without_preload_content(
"""Tails logs
- :param type:
+ :param type: Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\".
:type type: str
- :param app_id:
+ :param app_id: (Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
:type app_id: str
- :param service_id:
+ :param service_id: (Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
:type service_id: str
- :param deployment_id:
+ :param deployment_id: (Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
:type deployment_id: str
- :param regional_deployment_id:
+ :param regional_deployment_id: (Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
:type regional_deployment_id: str
- :param instance_id:
+ :param instance_id: Deprecated, prefer using instance_ids instead.
:type instance_id: str
- :param stream:
+ :param instance_ids: (Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.
+ :type instance_ids: List[str]
+ :param stream: Deprecated, prefer using streams instead.
:type stream: str
- :param start:
+ :param streams: (Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs).
+ :type streams: List[str]
+ :param start: (Optional) Defaults to 24 hours ago.
:type start: datetime
- :param limit:
+ :param limit: (Optional) Defaults to 1000. Maximum of 1000.
:type limit: str
:param regex: (Optional) Apply a regex to filter logs. Can't be used with `text`.
:type regex: str
:param text: (Optional) Looks for this string in logs. Can't be used with `regex`.
:type text: str
+ :param regions: (Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"]).
+ :type regions: List[str]
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
@@ -846,11 +933,14 @@ def tail_logs_without_preload_content(
deployment_id=deployment_id,
regional_deployment_id=regional_deployment_id,
instance_id=instance_id,
+ instance_ids=instance_ids,
stream=stream,
+ streams=streams,
start=start,
limit=limit,
regex=regex,
text=text,
+ regions=regions,
_request_auth=_request_auth,
_content_type=_content_type,
_headers=_headers,
@@ -881,11 +971,14 @@ def _tail_logs_serialize(
deployment_id,
regional_deployment_id,
instance_id,
+ instance_ids,
stream,
+ streams,
start,
limit,
regex,
text,
+ regions,
_request_auth,
_content_type,
_headers,
@@ -895,6 +988,9 @@ def _tail_logs_serialize(
_host = None
_collection_formats: Dict[str, str] = {
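+ # 'multi' repeats the query parameter once per list item,
+ # e.g. ?streams=stdout&streams=stderr (assumption based on the
+ # generated client's collection-format handling)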
+ 'instance_ids': 'multi',
+ 'streams': 'multi',
+ 'regions': 'multi',
}
_path_params: Dict[str, str] = {}
@@ -932,10 +1028,18 @@ def _tail_logs_serialize(
_query_params.append(('instance_id', instance_id))
+ if instance_ids is not None:
+
+ _query_params.append(('instance_ids', instance_ids))
+
if stream is not None:
_query_params.append(('stream', stream))
+ if streams is not None:
+
+ _query_params.append(('streams', streams))
+
if start is not None:
if isinstance(start, datetime):
_query_params.append(
@@ -961,6 +1065,10 @@ def _tail_logs_serialize(
_query_params.append(('text', text))
+ if regions is not None:
+
+ _query_params.append(('regions', regions))
+
# process the header parameters
# process the form parameters
# process the body parameter
diff --git a/koyeb/api/docs/DeploymentDefinitionType.md b/koyeb/api/docs/DeploymentDefinitionType.md
index 0a2d7a71..5e50e67b 100644
--- a/koyeb/api/docs/DeploymentDefinitionType.md
+++ b/koyeb/api/docs/DeploymentDefinitionType.md
@@ -11,6 +11,8 @@
* `DATABASE` (value: `'DATABASE'`)
+* `SANDBOX` (value: `'SANDBOX'`)
+
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
diff --git a/koyeb/api/docs/LogsApi.md b/koyeb/api/docs/LogsApi.md
index 18610da8..4e9d54ea 100644
--- a/koyeb/api/docs/LogsApi.md
+++ b/koyeb/api/docs/LogsApi.md
@@ -9,7 +9,7 @@ Method | HTTP request | Description
# **query_logs**
-> QueryLogsReply query_logs(type=type, app_id=app_id, service_id=service_id, deployment_id=deployment_id, instance_id=instance_id, stream=stream, regional_deployment_id=regional_deployment_id, start=start, end=end, order=order, limit=limit, regex=regex, text=text)
+> QueryLogsReply query_logs(type=type, app_id=app_id, service_id=service_id, deployment_id=deployment_id, regional_deployment_id=regional_deployment_id, instance_id=instance_id, instance_ids=instance_ids, stream=stream, streams=streams, start=start, end=end, order=order, limit=limit, regex=regex, text=text, regions=regions)
Query logs
@@ -44,23 +44,26 @@ configuration.api_key['Bearer'] = os.environ["API_KEY"]
with koyeb.api.ApiClient(configuration) as api_client:
# Create an instance of the API class
api_instance = koyeb.api.LogsApi(api_client)
- type = 'type_example' # str | (optional)
- app_id = 'app_id_example' # str | (optional)
- service_id = 'service_id_example' # str | (optional)
- deployment_id = 'deployment_id_example' # str | (optional)
- instance_id = 'instance_id_example' # str | (optional)
- stream = 'stream_example' # str | (optional)
- regional_deployment_id = 'regional_deployment_id_example' # str | (optional)
+ type = 'type_example' # str | Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\". (optional)
+ app_id = 'app_id_example' # str | (Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set. (optional)
+ service_id = 'service_id_example' # str | (Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set. (optional)
+ deployment_id = 'deployment_id_example' # str | (Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set. (optional)
+ regional_deployment_id = 'regional_deployment_id_example' # str | (Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set. (optional)
+ instance_id = 'instance_id_example' # str | Deprecated, prefer using instance_ids instead. (optional)
+ instance_ids = ['instance_ids_example'] # List[str] | (Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set. (optional)
+ stream = 'stream_example' # str | Deprecated, prefer using streams instead. (optional)
+ streams = ['streams_example'] # List[str] | (Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs). (optional)
start = '2013-10-20T19:20:30+01:00' # datetime | (Optional) Must always be before `end`. Defaults to 15 minutes ago. (optional)
end = '2013-10-20T19:20:30+01:00' # datetime | (Optional) Must always be after `start`. Defaults to now. (optional)
order = 'order_example' # str | (Optional) `asc` or `desc`. Defaults to `desc`. (optional)
limit = 'limit_example' # str | (Optional) Defaults to 100. Maximum of 1000. (optional)
regex = 'regex_example' # str | (Optional) Apply a regex to filter logs. Can't be used with `text`. (optional)
text = 'text_example' # str | (Optional) Looks for this string in logs. Can't be used with `regex`. (optional)
+ regions = ['regions_example'] # List[str] | (Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"]). (optional)
try:
# Query logs
- api_response = api_instance.query_logs(type=type, app_id=app_id, service_id=service_id, deployment_id=deployment_id, instance_id=instance_id, stream=stream, regional_deployment_id=regional_deployment_id, start=start, end=end, order=order, limit=limit, regex=regex, text=text)
+ api_response = api_instance.query_logs(type=type, app_id=app_id, service_id=service_id, deployment_id=deployment_id, regional_deployment_id=regional_deployment_id, instance_id=instance_id, instance_ids=instance_ids, stream=stream, streams=streams, start=start, end=end, order=order, limit=limit, regex=regex, text=text, regions=regions)
print("The response of LogsApi->query_logs:\n")
pprint(api_response)
except Exception as e:
@@ -74,19 +77,22 @@ with koyeb.api.ApiClient(configuration) as api_client:
Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
- **type** | **str**| | [optional]
- **app_id** | **str**| | [optional]
- **service_id** | **str**| | [optional]
- **deployment_id** | **str**| | [optional]
- **instance_id** | **str**| | [optional]
- **stream** | **str**| | [optional]
- **regional_deployment_id** | **str**| | [optional]
+ **type** | **str**| Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\". | [optional]
+ **app_id** | **str**| (Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set. | [optional]
+ **service_id** | **str**| (Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set. | [optional]
+ **deployment_id** | **str**| (Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set. | [optional]
+ **regional_deployment_id** | **str**| (Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set. | [optional]
+ **instance_id** | **str**| Deprecated, prefer using instance_ids instead. | [optional]
+ **instance_ids** | [**List[str]**](str.md)| (Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set. | [optional]
+ **stream** | **str**| Deprecated, prefer using streams instead. | [optional]
+ **streams** | [**List[str]**](str.md)| (Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs). | [optional]
**start** | **datetime**| (Optional) Must always be before `end`. Defaults to 15 minutes ago. | [optional]
**end** | **datetime**| (Optional) Must always be after `start`. Defaults to now. | [optional]
**order** | **str**| (Optional) `asc` or `desc`. Defaults to `desc`. | [optional]
**limit** | **str**| (Optional) Defaults to 100. Maximum of 1000. | [optional]
**regex** | **str**| (Optional) Apply a regex to filter logs. Can't be used with `text`. | [optional]
**text** | **str**| (Optional) Looks for this string in logs. Can't be used with `regex`. | [optional]
+ **regions** | [**List[str]**](str.md)| (Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"]). | [optional]
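+
+ A minimal call using only the new list filters (the values are illustrative):
+
+ ```python
+ api_response = api_instance.query_logs(service_id=service_id, streams=["stdout"], regions=["fra"])
+ ```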
### Return type
@@ -117,7 +123,7 @@ Name | Type | Description | Notes
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
# **tail_logs**
-> StreamResultOfLogEntry tail_logs(type=type, app_id=app_id, service_id=service_id, deployment_id=deployment_id, regional_deployment_id=regional_deployment_id, instance_id=instance_id, stream=stream, start=start, limit=limit, regex=regex, text=text)
+> StreamResultOfLogEntry tail_logs(type=type, app_id=app_id, service_id=service_id, deployment_id=deployment_id, regional_deployment_id=regional_deployment_id, instance_id=instance_id, instance_ids=instance_ids, stream=stream, streams=streams, start=start, limit=limit, regex=regex, text=text, regions=regions)
Tails logs
@@ -152,21 +158,24 @@ configuration.api_key['Bearer'] = os.environ["API_KEY"]
with koyeb.api.ApiClient(configuration) as api_client:
# Create an instance of the API class
api_instance = koyeb.api.LogsApi(api_client)
- type = 'type_example' # str | (optional)
- app_id = 'app_id_example' # str | (optional)
- service_id = 'service_id_example' # str | (optional)
- deployment_id = 'deployment_id_example' # str | (optional)
- regional_deployment_id = 'regional_deployment_id_example' # str | (optional)
- instance_id = 'instance_id_example' # str | (optional)
- stream = 'stream_example' # str | (optional)
- start = '2013-10-20T19:20:30+01:00' # datetime | (optional)
- limit = 'limit_example' # str | (optional)
+ type = 'type_example' # str | Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\". (optional)
+ app_id = 'app_id_example' # str | (Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set. (optional)
+ service_id = 'service_id_example' # str | (Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set. (optional)
+ deployment_id = 'deployment_id_example' # str | (Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set. (optional)
+ regional_deployment_id = 'regional_deployment_id_example' # str | (Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set. (optional)
+ instance_id = 'instance_id_example' # str | Deprecated, prefer using instance_ids instead. (optional)
+ instance_ids = ['instance_ids_example'] # List[str] | (Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set. (optional)
+ stream = 'stream_example' # str | Deprecated, prefer using streams instead. (optional)
+ streams = ['streams_example'] # List[str] | (Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs). (optional)
+ start = '2013-10-20T19:20:30+01:00' # datetime | (Optional) Defaults to 24 hours ago. (optional)
+ limit = 'limit_example' # str | (Optional) Defaults to 1000. Maximum of 1000. (optional)
regex = 'regex_example' # str | (Optional) Apply a regex to filter logs. Can't be used with `text`. (optional)
text = 'text_example' # str | (Optional) Looks for this string in logs. Can't be used with `regex`. (optional)
+ regions = ['regions_example'] # List[str] | (Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"]). (optional)
try:
# Tails logs
- api_response = api_instance.tail_logs(type=type, app_id=app_id, service_id=service_id, deployment_id=deployment_id, regional_deployment_id=regional_deployment_id, instance_id=instance_id, stream=stream, start=start, limit=limit, regex=regex, text=text)
+ api_response = api_instance.tail_logs(type=type, app_id=app_id, service_id=service_id, deployment_id=deployment_id, regional_deployment_id=regional_deployment_id, instance_id=instance_id, instance_ids=instance_ids, stream=stream, streams=streams, start=start, limit=limit, regex=regex, text=text, regions=regions)
print("The response of LogsApi->tail_logs:\n")
pprint(api_response)
except Exception as e:
@@ -180,17 +189,20 @@ with koyeb.api.ApiClient(configuration) as api_client:
Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
- **type** | **str**| | [optional]
- **app_id** | **str**| | [optional]
- **service_id** | **str**| | [optional]
- **deployment_id** | **str**| | [optional]
- **regional_deployment_id** | **str**| | [optional]
- **instance_id** | **str**| | [optional]
- **stream** | **str**| | [optional]
- **start** | **datetime**| | [optional]
- **limit** | **str**| | [optional]
+ **type** | **str**| Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\". | [optional]
+ **app_id** | **str**| (Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set. | [optional]
+ **service_id** | **str**| (Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set. | [optional]
+ **deployment_id** | **str**| (Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set. | [optional]
+ **regional_deployment_id** | **str**| (Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set. | [optional]
+ **instance_id** | **str**| Deprecated, prefer using instance_ids instead. | [optional]
+ **instance_ids** | [**List[str]**](str.md)| (Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set. | [optional]
+ **stream** | **str**| Deprecated, prefer using streams instead. | [optional]
+ **streams** | [**List[str]**](str.md)| (Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs). | [optional]
+ **start** | **datetime**| (Optional) Defaults to 24 hours ago. | [optional]
+ **limit** | **str**| (Optional) Defaults to 1000. Maximum of 1000. | [optional]
**regex** | **str**| (Optional) Apply a regex to filter logs. Can't be used with `text`. | [optional]
**text** | **str**| (Optional) Looks for this string in logs. Can't be used with `regex`. | [optional]
+ **regions** | [**List[str]**](str.md)| (Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"]). | [optional]
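+
+ For example, to follow only system logs from specific instances (the values are illustrative):
+
+ ```python
+ api_response = api_instance.tail_logs(instance_ids=instance_ids, streams=["koyeb"])
+ ```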
### Return type
diff --git a/koyeb/api/docs/Organization.md b/koyeb/api/docs/Organization.md
index b11b379c..5ddaf0fe 100644
--- a/koyeb/api/docs/Organization.md
+++ b/koyeb/api/docs/Organization.md
@@ -7,6 +7,7 @@ Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**id** | **str** | | [optional]
**external_id** | **str** | | [optional]
+**provisioning** | **bool** | | [optional]
**address1** | **str** | | [optional]
**address2** | **str** | | [optional]
**city** | **str** | | [optional]
diff --git a/koyeb/api/docs/RegionalDeploymentDefinitionType.md b/koyeb/api/docs/RegionalDeploymentDefinitionType.md
index fd038f8c..54dc149e 100644
--- a/koyeb/api/docs/RegionalDeploymentDefinitionType.md
+++ b/koyeb/api/docs/RegionalDeploymentDefinitionType.md
@@ -9,6 +9,8 @@
* `WORKER` (value: `'WORKER'`)
+* `SANDBOX` (value: `'SANDBOX'`)
+
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
diff --git a/koyeb/api/docs/ServiceType.md b/koyeb/api/docs/ServiceType.md
index e6dd167c..688b1198 100644
--- a/koyeb/api/docs/ServiceType.md
+++ b/koyeb/api/docs/ServiceType.md
@@ -11,6 +11,8 @@
* `DATABASE` (value: `'DATABASE'`)
+* `SANDBOX` (value: `'SANDBOX'`)
+
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
diff --git a/koyeb/api/models/deployment_definition_type.py b/koyeb/api/models/deployment_definition_type.py
index ab9668ba..4897cba4 100644
--- a/koyeb/api/models/deployment_definition_type.py
+++ b/koyeb/api/models/deployment_definition_type.py
@@ -30,6 +30,7 @@ class DeploymentDefinitionType(str, Enum):
WEB = 'WEB'
WORKER = 'WORKER'
DATABASE = 'DATABASE'
+ SANDBOX = 'SANDBOX'
@classmethod
def from_json(cls, json_str: str) -> Self:
diff --git a/koyeb/api/models/organization.py b/koyeb/api/models/organization.py
index 2bf33861..48114d79 100644
--- a/koyeb/api/models/organization.py
+++ b/koyeb/api/models/organization.py
@@ -33,6 +33,7 @@ class Organization(BaseModel):
""" # noqa: E501
id: Optional[StrictStr] = None
external_id: Optional[StrictStr] = None
+ provisioning: Optional[StrictBool] = None
address1: Optional[StrictStr] = None
address2: Optional[StrictStr] = None
city: Optional[StrictStr] = None
@@ -61,7 +62,7 @@ class Organization(BaseModel):
trial_starts_at: Optional[datetime] = None
trial_ends_at: Optional[datetime] = None
email_domain_allowlist: Optional[List[StrictStr]] = None
- __properties: ClassVar[List[str]] = ["id", "external_id", "address1", "address2", "city", "postal_code", "state", "country", "company", "vat_number", "billing_name", "billing_email", "name", "plan", "plan_updated_at", "has_payment_method", "subscription_id", "current_subscription_id", "latest_subscription_id", "signup_qualification", "status", "status_message", "deactivation_reason", "verified", "qualifies_for_hobby23", "reprocess_after", "trialing", "trial_starts_at", "trial_ends_at", "email_domain_allowlist"]
+ __properties: ClassVar[List[str]] = ["id", "external_id", "provisioning", "address1", "address2", "city", "postal_code", "state", "country", "company", "vat_number", "billing_name", "billing_email", "name", "plan", "plan_updated_at", "has_payment_method", "subscription_id", "current_subscription_id", "latest_subscription_id", "signup_qualification", "status", "status_message", "deactivation_reason", "verified", "qualifies_for_hobby23", "reprocess_after", "trialing", "trial_starts_at", "trial_ends_at", "email_domain_allowlist"]
model_config = ConfigDict(
populate_by_name=True,
@@ -116,6 +117,7 @@ def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
_obj = cls.model_validate({
"id": obj.get("id"),
"external_id": obj.get("external_id"),
+ "provisioning": obj.get("provisioning"),
"address1": obj.get("address1"),
"address2": obj.get("address2"),
"city": obj.get("city"),
diff --git a/koyeb/api/models/regional_deployment_definition_type.py b/koyeb/api/models/regional_deployment_definition_type.py
index c1624920..377ae05d 100644
--- a/koyeb/api/models/regional_deployment_definition_type.py
+++ b/koyeb/api/models/regional_deployment_definition_type.py
@@ -29,6 +29,7 @@ class RegionalDeploymentDefinitionType(str, Enum):
INVALID = 'INVALID'
WEB = 'WEB'
WORKER = 'WORKER'
+ SANDBOX = 'SANDBOX'
@classmethod
def from_json(cls, json_str: str) -> Self:
diff --git a/koyeb/api/models/service_type.py b/koyeb/api/models/service_type.py
index cade14be..684e867e 100644
--- a/koyeb/api/models/service_type.py
+++ b/koyeb/api/models/service_type.py
@@ -30,6 +30,7 @@ class ServiceType(str, Enum):
WEB = 'WEB'
WORKER = 'WORKER'
DATABASE = 'DATABASE'
+ SANDBOX = 'SANDBOX'
@classmethod
def from_json(cls, json_str: str) -> Self:
diff --git a/koyeb/sandbox/__init__.py b/koyeb/sandbox/__init__.py
new file mode 100644
index 00000000..41b6f352
--- /dev/null
+++ b/koyeb/sandbox/__init__.py
@@ -0,0 +1,37 @@
+# coding: utf-8
+
+"""
+Koyeb Sandbox - Interactive execution environment for running arbitrary code on Koyeb
+"""
+
+__version__ = "1.0.3"
+
+from koyeb.api.models.instance_status import InstanceStatus as SandboxStatus
+
+from .exec import (
+ AsyncSandboxExecutor,
+ CommandResult,
+ CommandStatus,
+ SandboxCommandError,
+ SandboxExecutor,
+)
+from .filesystem import FileInfo, SandboxFilesystem
+from .sandbox import AsyncSandbox, ExposedPort, ProcessInfo, Sandbox
+from .utils import SandboxError, SandboxTimeoutError
+
+__all__ = [
+ "Sandbox",
+ "AsyncSandbox",
+ "SandboxFilesystem",
+ "SandboxExecutor",
+ "AsyncSandboxExecutor",
+ "FileInfo",
+ "SandboxStatus",
+ "SandboxError",
+ "SandboxTimeoutError",
+ "CommandResult",
+ "CommandStatus",
+ "SandboxCommandError",
+ "ExposedPort",
+ "ProcessInfo",
+]
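+
+# Minimal usage sketch (assumes an already-provisioned `sandbox` object; see
+# docs/sandbox.md for how to create one):
+#
+#   from koyeb.sandbox import SandboxExecutor
+#
+#   run = SandboxExecutor(sandbox)
+#   result = run("echo hello")
+#   if result.success:
+#       print(result.stdout)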
diff --git a/koyeb/sandbox/exec.py b/koyeb/sandbox/exec.py
new file mode 100644
index 00000000..2ad4e786
--- /dev/null
+++ b/koyeb/sandbox/exec.py
@@ -0,0 +1,395 @@
+# coding: utf-8
+
+"""
+Command execution utilities for Koyeb Sandbox instances,
+built on top of the SandboxClient HTTP API
+"""
+
+from __future__ import annotations
+
+import asyncio
+import time
+from dataclasses import dataclass
+from enum import Enum
+from typing import TYPE_CHECKING, Any, AsyncIterator, Callable, Dict, List, Optional
+
+from .executor_client import SandboxClient
+from .utils import SandboxError, create_sandbox_client
+
+if TYPE_CHECKING:
+ from .sandbox import Sandbox
+
+
+class CommandStatus(str, Enum):
+ """Command execution status"""
+
+ RUNNING = "running"
+ FINISHED = "finished"
+ FAILED = "failed"
+
+
+@dataclass
+class CommandResult:
+ """Result of a command execution using Koyeb API models"""
+
+ stdout: str = ""
+ stderr: str = ""
+ exit_code: int = 0
+ status: CommandStatus = CommandStatus.FINISHED
+ duration: float = 0.0
+ command: str = ""
+ args: Optional[List[str]] = None
+
+ def __post_init__(self):
+ if self.args is None:
+ self.args = []
+
+ @property
+ def success(self) -> bool:
+ """Check if command executed successfully"""
+ return self.exit_code == 0 and self.status == CommandStatus.FINISHED
+
+ @property
+ def output(self) -> str:
+ """Get combined stdout and stderr output"""
+ return self.stdout + (f"\n{self.stderr}" if self.stderr else "")
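+
+ # Illustrative: CommandResult(stdout="4\n", command="python -c 'print(2+2)'")
+ # has success == True, since exit_code defaults to 0 and status to FINISHED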
+
+
+class SandboxCommandError(SandboxError):
+ """Raised when command execution fails"""
+
+
+class SandboxExecutor:
+ """
+ Synchronous command execution interface for Koyeb Sandbox instances.
+ Bound to a specific sandbox instance.
+
+ For async usage, use AsyncSandboxExecutor instead.
+ """
+
+ def __init__(self, sandbox: Sandbox) -> None:
+ self.sandbox = sandbox
+ self._client: Optional[SandboxClient] = None
+
+ def _get_client(self) -> SandboxClient:
+ """Get or create SandboxClient instance"""
+ if self._client is None:
+ sandbox_url = self.sandbox._get_sandbox_url()
+ self._client = create_sandbox_client(
+ sandbox_url, self.sandbox.sandbox_secret
+ )
+ return self._client
+
+ def __call__(
+ self,
+ command: str,
+ cwd: Optional[str] = None,
+ env: Optional[Dict[str, str]] = None,
+ timeout: int = 30,
+ on_stdout: Optional[Callable[[str], None]] = None,
+ on_stderr: Optional[Callable[[str], None]] = None,
+ ) -> CommandResult:
+ """
+ Execute a command in a shell synchronously. Supports streaming output via callbacks.
+
+ Args:
+ command: Command to execute as a string (e.g., "python -c 'print(2+2)'")
+ cwd: Working directory for the command
+ env: Environment variables for the command
+ timeout: Command timeout in seconds (enforced for HTTP requests)
+ on_stdout: Optional callback for streaming stdout chunks
+ on_stderr: Optional callback for streaming stderr chunks
+
+ Returns:
+ CommandResult: Result of the command execution
+
+ Example:
+ ```python
+ # Synchronous execution
+ result = sandbox.exec("echo hello")
+
+ # With streaming callbacks
+ result = sandbox.exec(
+ "echo hello; sleep 1; echo world",
+ on_stdout=lambda data: print(f"OUT: {data}"),
+ on_stderr=lambda data: print(f"ERR: {data}"),
+ )
+ ```
+ """
+ start_time = time.time()
+
+ # Use streaming if callbacks are provided
+ if on_stdout or on_stderr:
+ stdout_buffer = []
+ stderr_buffer = []
+ exit_code = 0
+
+ try:
+ client = self._get_client()
+ for event in client.run_streaming(
+ cmd=command, cwd=cwd, env=env, timeout=float(timeout)
+ ):
+ if "stream" in event:
+ stream_type = event["stream"]
+ data = event["data"]
+
+ if stream_type == "stdout":
+ stdout_buffer.append(data)
+ if on_stdout:
+ on_stdout(data)
+ elif stream_type == "stderr":
+ stderr_buffer.append(data)
+ if on_stderr:
+ on_stderr(data)
+ elif "code" in event:
+ exit_code = event["code"]
+ elif "error" in event and isinstance(event["error"], str):
+ # Error starting command
+ return CommandResult(
+ stdout="",
+ stderr=event["error"],
+ exit_code=1,
+ status=CommandStatus.FAILED,
+ duration=time.time() - start_time,
+ command=command,
+ )
+
+ return CommandResult(
+ stdout="".join(stdout_buffer),
+ stderr="".join(stderr_buffer),
+ exit_code=exit_code,
+ status=(
+ CommandStatus.FINISHED
+ if exit_code == 0
+ else CommandStatus.FAILED
+ ),
+ duration=time.time() - start_time,
+ command=command,
+ )
+ except Exception as e:
+ return CommandResult(
+ stdout="",
+ stderr=f"Command execution failed: {str(e)}",
+ exit_code=1,
+ status=CommandStatus.FAILED,
+ duration=time.time() - start_time,
+ command=command,
+ )
+
+ # Use regular run for non-streaming execution
+ try:
+ client = self._get_client()
+ response = client.run(cmd=command, cwd=cwd, env=env, timeout=float(timeout))
+
+ stdout = response.get("stdout", "")
+ stderr = response.get("stderr", "")
+ exit_code = response.get("exit_code", 0)
+
+ return CommandResult(
+ stdout=stdout,
+ stderr=stderr,
+ exit_code=exit_code,
+ status=(
+ CommandStatus.FINISHED if exit_code == 0 else CommandStatus.FAILED
+ ),
+ duration=time.time() - start_time,
+ command=command,
+ )
+ except Exception as e:
+ return CommandResult(
+ stdout="",
+ stderr=f"Command execution failed: {str(e)}",
+ exit_code=1,
+ status=CommandStatus.FAILED,
+ duration=time.time() - start_time,
+ command=command,
+ )
+
+
+class AsyncSandboxExecutor(SandboxExecutor):
+ """
+ Async command execution interface for Koyeb Sandbox instances.
+ Bound to a specific sandbox instance.
+
+ Inherits from SandboxExecutor and provides async command execution.
+ """
+
+ async def __call__(
+ self,
+ command: str,
+ cwd: Optional[str] = None,
+ env: Optional[Dict[str, str]] = None,
+ timeout: int = 30,
+ on_stdout: Optional[Callable[[str], None]] = None,
+ on_stderr: Optional[Callable[[str], None]] = None,
+ ) -> CommandResult:
+ """
+ Execute a command in a shell asynchronously. Supports streaming output via callbacks.
+
+ Args:
+ command: Command to execute as a string (e.g., "python -c 'print(2+2)'")
+ cwd: Working directory for the command
+ env: Environment variables for the command
+ timeout: Command timeout in seconds (enforced for HTTP requests)
+ on_stdout: Optional callback for streaming stdout chunks
+ on_stderr: Optional callback for streaming stderr chunks
+
+ Returns:
+ CommandResult: Result of the command execution
+
+ Example:
+ ```python
+ # Async execution
+ result = await sandbox.exec("echo hello")
+
+ # With streaming callbacks
+ result = await sandbox.exec(
+ "echo hello; sleep 1; echo world",
+ on_stdout=lambda data: print(f"OUT: {data}"),
+ on_stderr=lambda data: print(f"ERR: {data}"),
+ )
+ ```
+ """
+ start_time = time.time()
+
+ # Use streaming if callbacks are provided
+ if on_stdout or on_stderr:
+ stdout_buffer = []
+ stderr_buffer = []
+ exit_code = 0
+
+ try:
+ client = self._get_client()
+
+ # Create async generator for streaming events
+ async def stream_events() -> AsyncIterator[Dict[str, Any]]:
+ """Async generator that yields events as they arrive."""
+ import queue
+ from threading import Thread
+
+ event_queue: queue.Queue[Dict[str, Any] | None] = queue.Queue()
+ done = False
+
+ def sync_stream():
+ """Synchronous generator for streaming."""
+ nonlocal done
+ try:
+ for event in client.run_streaming(
+ cmd=command, cwd=cwd, env=env, timeout=float(timeout)
+ ):
+ event_queue.put(event)
+ event_queue.put(None) # Sentinel
+ except Exception as e:
+ event_queue.put({"error": str(e)})
+ event_queue.put(None)
+ finally:
+ done = True
+
+ # Start streaming in a thread
+ thread = Thread(target=sync_stream, daemon=True)
+ thread.start()
+
+ # Yield events as they arrive
+ while True:
+ try:
+ # Use get_nowait so the event loop is never blocked
+ event = event_queue.get_nowait()
+ if event is None:
+ # Sentinel received, streaming is complete
+ break
+ yield event
+ except queue.Empty:
+ # Check if thread is done and queue is empty
+ if done and event_queue.empty():
+ break
+ # Wait a bit before checking again
+ await asyncio.sleep(0.01)
+ continue
+
+ # Wait for thread to complete (should be done by now)
+ thread.join(timeout=1.0)
+
+ # Process events as they arrive
+ async for event in stream_events():
+ if "stream" in event:
+ stream_type = event["stream"]
+ data = event["data"]
+
+ if stream_type == "stdout":
+ stdout_buffer.append(data)
+ if on_stdout:
+ on_stdout(data)
+ elif stream_type == "stderr":
+ stderr_buffer.append(data)
+ if on_stderr:
+ on_stderr(data)
+ elif "code" in event:
+ exit_code = event["code"]
+ elif "error" in event and isinstance(event["error"], str):
+ # Error starting command
+ return CommandResult(
+ stdout="",
+ stderr=event["error"],
+ exit_code=1,
+ status=CommandStatus.FAILED,
+ duration=time.time() - start_time,
+ command=command,
+ )
+
+ return CommandResult(
+ stdout="".join(stdout_buffer),
+ stderr="".join(stderr_buffer),
+ exit_code=exit_code,
+ status=(
+ CommandStatus.FINISHED
+ if exit_code == 0
+ else CommandStatus.FAILED
+ ),
+ duration=time.time() - start_time,
+ command=command,
+ )
+ except Exception as e:
+ return CommandResult(
+ stdout="",
+ stderr=f"Command execution failed: {str(e)}",
+ exit_code=1,
+ status=CommandStatus.FAILED,
+ duration=time.time() - start_time,
+ command=command,
+ )
+
+ # Run in executor to avoid blocking
+ loop = asyncio.get_running_loop()
+
+ try:
+ client = self._get_client()
+ response = await loop.run_in_executor(
+ None,
+ lambda: client.run(
+ cmd=command, cwd=cwd, env=env, timeout=float(timeout)
+ ),
+ )
+
+ stdout = response.get("stdout", "")
+ stderr = response.get("stderr", "")
+ exit_code = response.get("exit_code", 0)
+
+ return CommandResult(
+ stdout=stdout,
+ stderr=stderr,
+ exit_code=exit_code,
+ status=(
+ CommandStatus.FINISHED if exit_code == 0 else CommandStatus.FAILED
+ ),
+ duration=time.time() - start_time,
+ command=command,
+ )
+ except Exception as e:
+ return CommandResult(
+ stdout="",
+ stderr=f"Command execution failed: {str(e)}",
+ exit_code=1,
+ status=CommandStatus.FAILED,
+ duration=time.time() - start_time,
+ command=command,
+ )
diff --git a/koyeb/sandbox/executor_client.py b/koyeb/sandbox/executor_client.py
new file mode 100644
index 00000000..95b6c0a5
--- /dev/null
+++ b/koyeb/sandbox/executor_client.py
@@ -0,0 +1,492 @@
+"""
+Sandbox Executor API Client
+
+A simple Python client for interacting with the Sandbox Executor API.
+"""
+
+import json
+import logging
+import time
+from typing import Any, Dict, Iterator, Optional
+
+import requests
+
+from .utils import DEFAULT_HTTP_TIMEOUT
+
+logger = logging.getLogger(__name__)
+
+
+class SandboxClient:
+ """Client for the Sandbox Executor API."""
+
+ def __init__(
+ self, base_url: str, secret: str, timeout: float = DEFAULT_HTTP_TIMEOUT
+ ):
+ """
+ Initialize the Sandbox Client.
+
+ Args:
+ base_url: The base URL of the sandbox server (e.g., 'http://localhost:8080')
+ secret: The authentication secret/token
+ timeout: Request timeout in seconds (default: 30)
+ """
+ self.base_url = base_url.rstrip("/")
+ self.secret = secret
+ self.timeout = timeout
+ self.headers = {
+ "Authorization": f"Bearer {secret}",
+ "Content-Type": "application/json",
+ }
+ # Use session for connection pooling
+ self._session = requests.Session()
+ self._session.headers.update(self.headers)
+ self._closed = False
+
+ def close(self) -> None:
+ """Close the HTTP session and release resources."""
+ if not self._closed and hasattr(self, "_session"):
+ self._session.close()
+ self._closed = True
+
+ def __enter__(self):
+ """Context manager entry - returns self."""
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb) -> None:
+ """Context manager exit - automatically closes the session."""
+ self.close()
+
+ def __del__(self):
+ """Clean up session on deletion (fallback, not guaranteed to run)."""
+ if not self._closed:
+ self.close()
+
+ def _request_with_retry(
+ self,
+ method: str,
+ url: str,
+ max_retries: int = 3,
+ initial_backoff: float = 1.0,
+ **kwargs,
+ ) -> requests.Response:
+ """
+ Make an HTTP request with retry logic for 503 errors.
+
+ Args:
+ method: HTTP method (e.g., 'GET', 'POST')
+ url: The URL to request
+ max_retries: Maximum number of retry attempts
+ initial_backoff: Initial backoff time in seconds (doubles each retry)
+ **kwargs: Additional arguments to pass to requests
+
+ Returns:
+ Response object
+
+ Raises:
+ requests.HTTPError: If the request fails after all retries
+ """
+ backoff = initial_backoff
+ last_exception = None
+
+ # Set default timeout if not provided
+ if "timeout" not in kwargs:
+ kwargs["timeout"] = self.timeout
+
+ for attempt in range(max_retries + 1):
+ try:
+ # Use session for connection pooling
+ response = self._session.request(method, url, **kwargs)
+
+ # If we get a 503, retry with backoff
+ if response.status_code == 503 and attempt < max_retries:
+ logger.debug(
+ f"Received 503 error, retrying... (attempt {attempt + 1}/{max_retries + 1})"
+ )
+ time.sleep(backoff)
+ backoff *= 2 # Exponential backoff
+ continue
+
+ response.raise_for_status()
+ return response
+
+ except requests.HTTPError as e:
+ if (
+ e.response
+ and e.response.status_code == 503
+ and attempt < max_retries
+ ):
+ logger.debug(
+ f"Received 503 error, retrying... (attempt {attempt + 1}/{max_retries + 1})"
+ )
+ time.sleep(backoff)
+ backoff *= 2
+ last_exception = e
+ continue
+ raise
+ except requests.Timeout as e:
+ logger.warning(f"Request timeout after {self.timeout}s: {e}")
+ raise
+ except requests.RequestException as e:
+ logger.warning(f"Request failed: {e}")
+ raise
+
+ # If we exhausted all retries, raise the last exception; the final raise
+ # is unreachable in practice but keeps the declared return type honest
+ if last_exception:
+ raise last_exception
+ raise requests.RequestException("request failed after all retries")
+
+ def health(self) -> Dict[str, str]:
+ """
+ Check the health status of the server.
+
+ Returns:
+ Dict with status information
+
+ Raises:
+ requests.HTTPError: If the health check fails
+ """
+ response = self._request_with_retry(
+ "GET", f"{self.base_url}/health", timeout=self.timeout
+ )
+ return response.json()
+
+ def run(
+ self,
+ cmd: str,
+ cwd: Optional[str] = None,
+ env: Optional[Dict[str, str]] = None,
+ timeout: Optional[float] = None,
+ ) -> Dict[str, Any]:
+ """
+ Execute a shell command in the sandbox.
+
+ Args:
+ cmd: The shell command to execute
+ cwd: Optional working directory for command execution
+ env: Optional environment variables to set/override
+ timeout: Optional timeout in seconds for the request
+
+ Returns:
+ Dict containing stdout, stderr, error (if any), and exit code
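+
+ Example:
+ >>> client = SandboxClient("http://localhost:8080", "secret")
+ >>> result = client.run("echo hello")
+ >>> print(result["stdout"]) # prints "hello"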
+ """
+ payload = {"cmd": cmd}
+ if cwd is not None:
+ payload["cwd"] = cwd
+ if env is not None:
+ payload["env"] = env
+
+ request_timeout = timeout if timeout is not None else self.timeout
+ response = self._request_with_retry(
+ "POST",
+ f"{self.base_url}/run",
+ json=payload,
+ headers=self.headers,
+ timeout=request_timeout,
+ )
+ return response.json()
+
+ def run_streaming(
+ self,
+ cmd: str,
+ cwd: Optional[str] = None,
+ env: Optional[Dict[str, str]] = None,
+ timeout: Optional[float] = None,
+ ) -> Iterator[Dict[str, Any]]:
+ """
+ Execute a shell command in the sandbox and stream the output in real-time.
+
+ This method uses Server-Sent Events (SSE) to stream command output line-by-line
+ as it's produced. Use this for long-running commands where you want real-time
+ output. For simple commands where buffered output is acceptable, use run() instead.
+
+ Args:
+ cmd: The shell command to execute
+ cwd: Optional working directory for command execution
+ env: Optional environment variables to set/override
+ timeout: Optional timeout in seconds for the streaming request
+
+ Yields:
+ Dict events with the following types:
+
+ - output events (as command produces output):
+ {"stream": "stdout"|"stderr", "data": "line of output"}
+
+ - complete event (when command finishes):
+ {"code": , "error": false}
+
+ - error event (if command fails to start):
+ {"error": "error message"}
+
+ Example:
+ >>> client = SandboxClient("http://localhost:8080", "secret")
+ >>> for event in client.run_streaming("echo 'Hello'; sleep 1; echo 'World'"):
+ ... if "stream" in event:
+ ... print(f"{event['stream']}: {event['data']}")
+ ... elif "code" in event:
+ ... print(f"Exit code: {event['code']}")
+ """
+ payload = {"cmd": cmd}
+ if cwd is not None:
+ payload["cwd"] = cwd
+ if env is not None:
+ payload["env"] = env
+
+ response = self._session.post(
+ f"{self.base_url}/run_streaming",
+ json=payload,
+ headers=self.headers,
+ stream=True,
+ timeout=timeout if timeout is not None else self.timeout,
+ )
+ response.raise_for_status()
+
+ # Parse Server-Sent Events stream
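+ # Each event arrives as a "data: {...}" line followed by a blank line;
+ # anything else on the stream is skipped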
+ for line in response.iter_lines(decode_unicode=True):
+ if not line:
+ continue
+
+ if line.startswith("data:"):
+ data = line[5:].strip()
+ try:
+ event_data = json.loads(data)
+ yield event_data
+ except json.JSONDecodeError:
+ # If we can't parse the JSON, yield the raw data
+ yield {"error": f"Failed to parse event data: {data}"}
+
+ def write_file(self, path: str, content: str) -> Dict[str, Any]:
+ """
+ Write content to a file.
+
+ Args:
+ path: The file path to write to
+ content: The content to write
+
+ Returns:
+ Dict with success status and error if any
+ """
+ payload = {"path": path, "content": content}
+ response = self._request_with_retry(
+ "POST", f"{self.base_url}/write_file", json=payload, headers=self.headers
+ )
+ return response.json()
+
+ def read_file(self, path: str) -> Dict[str, Any]:
+ """
+ Read content from a file.
+
+ Args:
+ path: The file path to read from
+
+ Returns:
+ Dict with file content and error if any
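+
+ Example:
+ >>> result = client.write_file("/tmp/hello.txt", "hi") # path is illustrative
+ >>> result = client.read_file("/tmp/hello.txt") # dict carrying the file content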
+ """
+ payload = {"path": path}
+ response = self._request_with_retry(
+ "POST", f"{self.base_url}/read_file", json=payload, headers=self.headers
+ )
+ return response.json()
+
+ def delete_file(self, path: str) -> Dict[str, Any]:
+ """
+ Delete a file.
+
+ Args:
+ path: The file path to delete
+
+ Returns:
+ Dict with success status and error if any
+ """
+ payload = {"path": path}
+ response = self._request_with_retry(
+ "POST", f"{self.base_url}/delete_file", json=payload, headers=self.headers
+ )
+ return response.json()
+
+ def make_dir(self, path: str) -> Dict[str, Any]:
+ """
+ Create a directory (including parent directories).
+
+ Args:
+ path: The directory path to create
+
+ Returns:
+ Dict with success status and error if any
+ """
+ payload = {"path": path}
+ response = self._request_with_retry(
+ "POST", f"{self.base_url}/make_dir", json=payload, headers=self.headers
+ )
+ return response.json()
+
+ def delete_dir(self, path: str) -> Dict[str, Any]:
+ """
+ Recursively delete a directory and all its contents.
+
+ Args:
+ path: The directory path to delete
+
+ Returns:
+ Dict with success status and error if any
+ """
+ payload = {"path": path}
+ response = self._request_with_retry(
+ "POST", f"{self.base_url}/delete_dir", json=payload, headers=self.headers
+ )
+ return response.json()
+
+ def list_dir(self, path: str) -> Dict[str, Any]:
+ """
+ List the contents of a directory.
+
+ Args:
+ path: The directory path to list
+
+ Returns:
+ Dict with entries list and error if any
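+
+ Example:
+ >>> result = client.list_dir("/workspace") # path is illustrative
+ >>> for entry in result.get("entries", []):
+ ... print(entry)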
+ """
+ payload = {"path": path}
+ response = self._request_with_retry(
+ "POST", f"{self.base_url}/list_dir", json=payload, headers=self.headers
+ )
+ return response.json()
+
+ def bind_port(self, port: int) -> Dict[str, Any]:
+ """
+ Bind a port to the TCP proxy for external access.
+
+ Configures the TCP proxy to forward traffic to the specified port inside the sandbox.
+ This allows you to expose services running inside the sandbox to external connections.
+
+ Args:
+ port: The port number to bind to (must be a valid port number)
+
+ Returns:
+ Dict with success status, message, and port information
+
+ Notes:
+ - Only one port can be bound at a time
+ - Binding a new port will override the previous binding
+ - The port must be available and accessible within the sandbox environment
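+
+ Example:
+ >>> client = SandboxClient("http://localhost:8080", "secret")
+ >>> result = client.bind_port(8000) # proxy now forwards traffic to :8000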
+ """
+ payload = {"port": str(port)}
+ response = self._request_with_retry(
+ "POST", f"{self.base_url}/bind_port", json=payload, headers=self.headers
+ )
+ return response.json()
+
+ def unbind_port(self, port: Optional[int] = None) -> Dict[str, Any]:
+ """
+ Unbind a port from the TCP proxy.
+
+ Removes the TCP proxy port binding, stopping traffic forwarding to the previously bound port.
+
+ Args:
+ port: Optional port number to unbind. If provided, it must match the currently bound port.
+ If not provided, any existing binding will be removed.
+
+ Returns:
+ Dict with success status and message
+
+ Notes:
+ - If a port is specified and doesn't match the currently bound port, the request will fail
+ - After unbinding, the TCP proxy will no longer forward traffic
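+
+ Example:
+ >>> result = client.unbind_port() # removes any existing binding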
+ """
+ payload = {}
+ if port is not None:
+ payload["port"] = str(port)
+ response = self._request_with_retry(
+ "POST", f"{self.base_url}/unbind_port", json=payload, headers=self.headers
+ )
+ return response.json()
+
+ def start_process(
+ self, cmd: str, cwd: Optional[str] = None, env: Optional[Dict[str, str]] = None
+ ) -> Dict[str, Any]:
+ """
+ Start a background process in the sandbox.
+
+ Starts a long-running background process that continues executing even after
+ the API call completes. Use this for servers, workers, or other long-running tasks.
+
+ Args:
+ cmd: The shell command to execute as a background process
+ cwd: Optional working directory for the process
+ env: Optional environment variables to set/override for the process
+
+ Returns:
+ Dict with process id and success status:
+ - id: The unique process ID (UUID string)
+ - success: True if the process was started successfully
+
+ Example:
+ >>> client = SandboxClient("http://localhost:8080", "secret")
+ >>> result = client.start_process("python -u server.py")
+ >>> process_id = result["id"]
+ >>> print(f"Started process: {process_id}")
+ """
+ payload = {"cmd": cmd}
+ if cwd is not None:
+ payload["cwd"] = cwd
+ if env is not None:
+ payload["env"] = env
+
+ response = self._request_with_retry(
+ "POST", f"{self.base_url}/start_process", json=payload, headers=self.headers
+ )
+ return response.json()
+
+ def kill_process(self, process_id: str) -> Dict[str, Any]:
+ """
+ Kill a background process by its ID.
+
+ Terminates a running background process. This sends a SIGTERM signal to the process,
+ allowing it to clean up gracefully. If the process doesn't terminate within a timeout,
+ it will be forcefully killed with SIGKILL.
+
+ Args:
+ process_id: The unique process ID (UUID string) to kill
+
+ Returns:
+ Dict with success status and error message if any
+
+ Example:
+ >>> client = SandboxClient("http://localhost:8080", "secret")
+ >>> result = client.kill_process("550e8400-e29b-41d4-a716-446655440000")
+ >>> if result.get("success"):
+ ... print("Process killed successfully")
+ """
+ payload = {"id": process_id}
+ response = self._request_with_retry(
+ "POST", f"{self.base_url}/kill_process", json=payload, headers=self.headers
+ )
+ return response.json()
+
+ def list_processes(self) -> Dict[str, Any]:
+ """
+ List all background processes.
+
+ Returns information about all currently running and recently completed background
+ processes. This includes both active processes and processes that have completed
+ (which remain in memory until server restart).
+
+ Returns:
+ Dict with a list of processes:
+ - processes: List of process objects, each containing:
+ - id: Process ID (UUID string)
+ - command: The command that was executed
+ - status: Process status (e.g., "running", "completed")
+ - pid: OS process ID (if running)
+ - exit_code: Exit code (if completed)
+ - started_at: ISO 8601 timestamp when process started
+ - completed_at: ISO 8601 timestamp when process completed (if applicable)
+
+ Example:
+ >>> client = SandboxClient("http://localhost:8080", "secret")
+ >>> result = client.list_processes()
+ >>> for process in result.get("processes", []):
+ ... print(f"{process['id']}: {process['command']} - {process['status']}")
+ """
+ response = self._request_with_retry(
+ "GET", f"{self.base_url}/list_processes", headers=self.headers
+ )
+ return response.json()
diff --git a/koyeb/sandbox/filesystem.py b/koyeb/sandbox/filesystem.py
new file mode 100644
index 00000000..ae072dae
--- /dev/null
+++ b/koyeb/sandbox/filesystem.py
@@ -0,0 +1,735 @@
+# coding: utf-8
+
+"""
+Filesystem operations for Koyeb Sandbox instances
+Using SandboxClient HTTP API
+"""
+
+from __future__ import annotations
+
+import os
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Dict, List, Union
+
+from .executor_client import SandboxClient
+from .utils import (
+ SandboxError,
+ async_wrapper,
+ check_error_message,
+ create_sandbox_client,
+ escape_shell_arg,
+ run_sync_in_executor,
+)
+
+if TYPE_CHECKING:
+ from .exec import SandboxExecutor
+ from .sandbox import Sandbox
+
+
+class SandboxFilesystemError(SandboxError):
+ """Base exception for filesystem operations"""
+
+
+class SandboxFileNotFoundError(SandboxFilesystemError):
+ """Raised when file or directory not found"""
+
+
+class SandboxFileExistsError(SandboxFilesystemError):
+ """Raised when file already exists"""
+
+
+@dataclass
+class FileInfo:
+ """File information"""
+
+ content: str
+ encoding: str
+
+
+class SandboxFilesystem:
+ """
+ Synchronous filesystem operations for Koyeb Sandbox instances.
+ Using SandboxClient HTTP API.
+
+ For async usage, use AsyncSandboxFilesystem instead.
+ """
+
+ def __init__(self, sandbox: Sandbox) -> None:
+ self.sandbox = sandbox
+ self._client = None
+ self._executor = None
+
+ def _get_client(self) -> SandboxClient:
+ """Get or create SandboxClient instance"""
+ if self._client is None:
+ sandbox_url = self.sandbox._get_sandbox_url()
+ self._client = create_sandbox_client(
+ sandbox_url, self.sandbox.sandbox_secret
+ )
+ return self._client
+
+ def _get_executor(self) -> "SandboxExecutor":
+ """Get or create SandboxExecutor instance"""
+ if self._executor is None:
+ from .exec import SandboxExecutor
+
+ self._executor = SandboxExecutor(self.sandbox)
+ return self._executor
+
+ def write_file(
+ self, path: str, content: Union[str, bytes], encoding: str = "utf-8"
+ ) -> None:
+ """
+ Write content to a file synchronously.
+
+ Args:
+ path: Absolute path to the file
+ content: Content to write (string or bytes)
+ encoding: File encoding (default: "utf-8"). Use "base64" for binary data.
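+
+        Example (illustrative; assumes an already-created Sandbox):
+            >>> fs = sandbox.filesystem
+            >>> fs.write_file("/tmp/hello.txt", "hello world")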
+ """
+ client = self._get_client()
+
+        if isinstance(content, bytes):
+            if encoding == "base64":
+                import base64
+                content_str = base64.b64encode(content).decode("ascii")
+            else:
+                content_str = content.decode(encoding)
+        else:
+            content_str = content
+
+ try:
+ response = client.write_file(path, content_str)
+ if response.get("error"):
+ error_msg = response.get("error", "Unknown error")
+ raise SandboxFilesystemError(f"Failed to write file: {error_msg}")
+ except Exception as e:
+ if isinstance(e, SandboxFilesystemError):
+ raise
+ raise SandboxFilesystemError(f"Failed to write file: {str(e)}") from e
+
+ def read_file(self, path: str, encoding: str = "utf-8") -> FileInfo:
+ """
+ Read a file from the sandbox synchronously.
+
+ Args:
+ path: Absolute path to the file
+ encoding: File encoding (default: "utf-8"). Use "base64" for binary data.
+
+ Returns:
+ FileInfo: Object with content and encoding
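+
+        Example (illustrative; assumes /tmp/hello.txt was written as above):
+            >>> info = fs.read_file("/tmp/hello.txt")
+            >>> print(info.content)
+            hello world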
+ """
+ client = self._get_client()
+
+ try:
+ response = client.read_file(path)
+ if response.get("error"):
+ error_msg = response.get("error", "Unknown error")
+ if check_error_message(error_msg, "NO_SUCH_FILE"):
+ raise SandboxFileNotFoundError(f"File not found: {path}")
+ raise SandboxFilesystemError(f"Failed to read file: {error_msg}")
+ content = response.get("content", "")
+ return FileInfo(content=content, encoding=encoding)
+ except (SandboxFileNotFoundError, SandboxFilesystemError):
+ raise
+ except Exception as e:
+ error_msg = str(e)
+ if check_error_message(error_msg, "NO_SUCH_FILE"):
+ raise SandboxFileNotFoundError(f"File not found: {path}") from e
+ raise SandboxFilesystemError(f"Failed to read file: {error_msg}") from e
+
+ def mkdir(self, path: str, recursive: bool = False) -> None:
+ """
+ Create a directory synchronously.
+
+ Args:
+ path: Absolute path to the directory
+            recursive: Accepted for compatibility but ignored; the sandbox API always creates parent directories.
+ """
+ client = self._get_client()
+
+ try:
+ response = client.make_dir(path)
+ if response.get("error"):
+ error_msg = response.get("error", "Unknown error")
+ if check_error_message(error_msg, "FILE_EXISTS"):
+ raise SandboxFileExistsError(f"Directory already exists: {path}")
+ raise SandboxFilesystemError(f"Failed to create directory: {error_msg}")
+ except (SandboxFileExistsError, SandboxFilesystemError):
+ raise
+ except Exception as e:
+ error_msg = str(e)
+ if check_error_message(error_msg, "FILE_EXISTS"):
+ raise SandboxFileExistsError(f"Directory already exists: {path}") from e
+ raise SandboxFilesystemError(
+ f"Failed to create directory: {error_msg}"
+ ) from e
+
+ def list_dir(self, path: str = ".") -> List[str]:
+ """
+ List contents of a directory synchronously.
+
+ Args:
+ path: Path to the directory (default: current directory)
+
+ Returns:
+ List[str]: Names of files and directories within the specified path.
+ """
+ client = self._get_client()
+
+ try:
+ response = client.list_dir(path)
+ if response.get("error"):
+ error_msg = response.get("error", "Unknown error")
+ if check_error_message(error_msg, "NO_SUCH_FILE"):
+ raise SandboxFileNotFoundError(f"Directory not found: {path}")
+ raise SandboxFilesystemError(f"Failed to list directory: {error_msg}")
+ entries = response.get("entries", [])
+ return entries
+ except (SandboxFileNotFoundError, SandboxFilesystemError):
+ raise
+ except Exception as e:
+ error_msg = str(e)
+ if check_error_message(error_msg, "NO_SUCH_FILE"):
+ raise SandboxFileNotFoundError(f"Directory not found: {path}") from e
+ raise SandboxFilesystemError(
+ f"Failed to list directory: {error_msg}"
+ ) from e
+
+ def delete_file(self, path: str) -> None:
+ """
+ Delete a file synchronously.
+
+ Args:
+ path: Absolute path to the file
+ """
+ client = self._get_client()
+
+ try:
+ response = client.delete_file(path)
+ if response.get("error"):
+ error_msg = response.get("error", "Unknown error")
+ if check_error_message(error_msg, "NO_SUCH_FILE"):
+ raise SandboxFileNotFoundError(f"File not found: {path}")
+ raise SandboxFilesystemError(f"Failed to delete file: {error_msg}")
+ except (SandboxFileNotFoundError, SandboxFilesystemError):
+ raise
+ except Exception as e:
+ error_msg = str(e)
+ if check_error_message(error_msg, "NO_SUCH_FILE"):
+ raise SandboxFileNotFoundError(f"File not found: {path}") from e
+ raise SandboxFilesystemError(f"Failed to delete file: {error_msg}") from e
+
+ def delete_dir(self, path: str) -> None:
+ """
+ Delete a directory synchronously.
+
+ Args:
+ path: Absolute path to the directory
+ """
+ client = self._get_client()
+
+ try:
+ response = client.delete_dir(path)
+ if response.get("error"):
+ error_msg = response.get("error", "Unknown error")
+ if check_error_message(error_msg, "NO_SUCH_FILE"):
+ raise SandboxFileNotFoundError(f"Directory not found: {path}")
+ if check_error_message(error_msg, "DIR_NOT_EMPTY"):
+ raise SandboxFilesystemError(f"Directory not empty: {path}")
+ raise SandboxFilesystemError(f"Failed to delete directory: {error_msg}")
+ except (SandboxFileNotFoundError, SandboxFilesystemError):
+ raise
+ except Exception as e:
+ error_msg = str(e)
+ if check_error_message(error_msg, "NO_SUCH_FILE"):
+ raise SandboxFileNotFoundError(f"Directory not found: {path}") from e
+ if check_error_message(error_msg, "DIR_NOT_EMPTY"):
+ raise SandboxFilesystemError(f"Directory not empty: {path}") from e
+ raise SandboxFilesystemError(
+ f"Failed to delete directory: {error_msg}"
+ ) from e
+
+ def rename_file(self, old_path: str, new_path: str) -> None:
+ """
+ Rename a file synchronously.
+
+ Args:
+ old_path: Current file path
+ new_path: New file path
+ """
+ # Use exec since there's no direct rename in SandboxClient
+ # Properly escape paths to prevent shell injection
+ executor = self._get_executor()
+ old_path_escaped = escape_shell_arg(old_path)
+ new_path_escaped = escape_shell_arg(new_path)
+ result = executor(f"mv {old_path_escaped} {new_path_escaped}")
+
+ if not result.success:
+ if check_error_message(result.stderr, "NO_SUCH_FILE"):
+ raise SandboxFileNotFoundError(f"File not found: {old_path}")
+ raise SandboxFilesystemError(f"Failed to rename file: {result.stderr}")
+
+ def move_file(self, source_path: str, destination_path: str) -> None:
+ """
+ Move a file to a different directory synchronously.
+
+ Args:
+ source_path: Current file path
+ destination_path: Destination path
+ """
+ # Use exec since there's no direct move in SandboxClient
+ # Properly escape paths to prevent shell injection
+ executor = self._get_executor()
+ source_path_escaped = escape_shell_arg(source_path)
+ destination_path_escaped = escape_shell_arg(destination_path)
+ result = executor(f"mv {source_path_escaped} {destination_path_escaped}")
+
+ if not result.success:
+ if check_error_message(result.stderr, "NO_SUCH_FILE"):
+ raise SandboxFileNotFoundError(f"File not found: {source_path}")
+ raise SandboxFilesystemError(f"Failed to move file: {result.stderr}")
+
+ def write_files(self, files: List[Dict[str, str]]) -> None:
+ """
+ Write multiple files in a single operation synchronously.
+
+ Args:
+ files: List of dictionaries, each with 'path', 'content', and optional 'encoding'.
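+
+        Example (illustrative):
+            >>> fs.write_files([
+            ...     {"path": "/tmp/a.txt", "content": "A"},
+            ...     {"path": "/tmp/b.txt", "content": "B"},
+            ... ])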
+ """
+ for file_info in files:
+ path = file_info["path"]
+ content = file_info["content"]
+ encoding = file_info.get("encoding", "utf-8")
+ self.write_file(path, content, encoding)
+
+ def exists(self, path: str) -> bool:
+ """Check if file/directory exists synchronously"""
+ executor = self._get_executor()
+ path_escaped = escape_shell_arg(path)
+ result = executor(f"test -e {path_escaped}")
+ return result.success
+
+ def is_file(self, path: str) -> bool:
+ """Check if path is a file synchronously"""
+ executor = self._get_executor()
+ path_escaped = escape_shell_arg(path)
+ result = executor(f"test -f {path_escaped}")
+ return result.success
+
+ def is_dir(self, path: str) -> bool:
+ """Check if path is a directory synchronously"""
+ executor = self._get_executor()
+ path_escaped = escape_shell_arg(path)
+ result = executor(f"test -d {path_escaped}")
+ return result.success
+
+ def upload_file(
+ self, local_path: str, remote_path: str, encoding: str = "utf-8"
+ ) -> None:
+ """
+ Upload a local file to the sandbox synchronously.
+
+ Args:
+ local_path: Path to the local file
+ remote_path: Destination path in the sandbox
+ encoding: File encoding (default: "utf-8"). Use "base64" for binary files.
+
+ Raises:
+ SandboxFileNotFoundError: If local file doesn't exist
+ UnicodeDecodeError: If file cannot be decoded with specified encoding
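+
+        Example (illustrative; "logo.png" is a hypothetical local file):
+            >>> fs.upload_file("logo.png", "/tmp/logo.png", encoding="base64")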
+ """
+ if not os.path.exists(local_path):
+ raise SandboxFileNotFoundError(f"Local file not found: {local_path}")
+
+ with open(local_path, "rb") as f:
+ content_bytes = f.read()
+
+ if encoding == "base64":
+ import base64
+
+ content = base64.b64encode(content_bytes).decode("ascii")
+ self.write_file(remote_path, content, encoding="base64")
+ else:
+ try:
+ content = content_bytes.decode(encoding)
+ self.write_file(remote_path, content, encoding=encoding)
+ except UnicodeDecodeError as e:
+ raise UnicodeDecodeError(
+ e.encoding,
+ e.object,
+ e.start,
+ e.end,
+ f"Cannot decode file as {encoding}. Use encoding='base64' for binary files.",
+ ) from e
+
+ def download_file(
+ self, remote_path: str, local_path: str, encoding: str = "utf-8"
+ ) -> None:
+ """
+ Download a file from the sandbox to a local path synchronously.
+
+ Args:
+ remote_path: Path to the file in the sandbox
+ local_path: Destination path on the local filesystem
+ encoding: File encoding (default: "utf-8"). Use "base64" for binary files.
+
+ Raises:
+ SandboxFileNotFoundError: If remote file doesn't exist
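+
+        Example (illustrative; mirrors the upload example above):
+            >>> fs.download_file("/tmp/logo.png", "logo-copy.png", encoding="base64")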
+ """
+ file_info = self.read_file(remote_path, encoding=encoding)
+
+ if encoding == "base64":
+ import base64
+
+ content_bytes = base64.b64decode(file_info.content)
+ else:
+ content_bytes = file_info.content.encode(encoding)
+
+ with open(local_path, "wb") as f:
+ f.write(content_bytes)
+
+ def ls(self, path: str = ".") -> List[str]:
+ """
+ List directory contents synchronously.
+
+ Args:
+ path: Path to list
+
+ Returns:
+ List of file/directory names
+ """
+ return self.list_dir(path)
+
+ def rm(self, path: str, recursive: bool = False) -> None:
+ """
+ Remove file or directory synchronously.
+
+ Args:
+ path: Path to remove
+ recursive: Remove recursively
+ """
+ executor = self._get_executor()
+ path_escaped = escape_shell_arg(path)
+
+ if recursive:
+ result = executor(f"rm -rf {path_escaped}")
+ else:
+ result = executor(f"rm {path_escaped}")
+
+ if not result.success:
+ if check_error_message(result.stderr, "NO_SUCH_FILE"):
+ raise SandboxFileNotFoundError(f"File not found: {path}")
+ raise SandboxFilesystemError(f"Failed to remove: {result.stderr}")
+
+ def open(self, path: str, mode: str = "r") -> SandboxFileIO:
+ """
+ Open a file in the sandbox synchronously.
+
+ Args:
+ path: Path to the file
+ mode: Open mode ('r', 'w', 'a', etc.)
+
+ Returns:
+ SandboxFileIO: File handle
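+
+        Example (illustrative):
+            >>> with fs.open("/tmp/notes.txt", "w") as f:
+            ...     f.write("first line")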
+ """
+ return SandboxFileIO(self, path, mode)
+
+
+class AsyncSandboxFilesystem(SandboxFilesystem):
+ """
+ Async filesystem operations for Koyeb Sandbox instances.
+ Inherits from SandboxFilesystem and provides async methods.
+ """
+
+ async def _run_sync(self, method, *args, **kwargs):
+ """
+ Helper method to run a synchronous method in an executor.
+
+ Args:
+ method: The sync method to run (from super())
+ *args: Positional arguments for the method
+ **kwargs: Keyword arguments for the method
+
+ Returns:
+ Result of the synchronous method call
+ """
+ return await run_sync_in_executor(method, *args, **kwargs)
+
+ @async_wrapper("write_file")
+ async def write_file(
+ self, path: str, content: Union[str, bytes], encoding: str = "utf-8"
+ ) -> None:
+ """
+ Write content to a file asynchronously.
+
+ Args:
+ path: Absolute path to the file
+ content: Content to write (string or bytes)
+ encoding: File encoding (default: "utf-8"). Use "base64" for binary data.
+ """
+ pass
+
+ @async_wrapper("read_file")
+ async def read_file(self, path: str, encoding: str = "utf-8") -> FileInfo:
+ """
+ Read a file from the sandbox asynchronously.
+
+ Args:
+ path: Absolute path to the file
+ encoding: File encoding (default: "utf-8"). Use "base64" for binary data.
+
+ Returns:
+ FileInfo: Object with content and encoding
+ """
+ pass
+
+ @async_wrapper("mkdir")
+ async def mkdir(self, path: str, recursive: bool = False) -> None:
+ """
+ Create a directory asynchronously.
+
+ Args:
+ path: Absolute path to the directory
+            recursive: Accepted for compatibility but ignored; the sandbox API always creates parent directories.
+ """
+ pass
+
+ @async_wrapper("list_dir")
+ async def list_dir(self, path: str = ".") -> List[str]:
+ """
+ List contents of a directory asynchronously.
+
+ Args:
+ path: Path to the directory (default: current directory)
+
+ Returns:
+ List[str]: Names of files and directories within the specified path.
+ """
+ pass
+
+ @async_wrapper("delete_file")
+ async def delete_file(self, path: str) -> None:
+ """
+ Delete a file asynchronously.
+
+ Args:
+ path: Absolute path to the file
+ """
+ pass
+
+ @async_wrapper("delete_dir")
+ async def delete_dir(self, path: str) -> None:
+ """
+ Delete a directory asynchronously.
+
+ Args:
+ path: Absolute path to the directory
+ """
+ pass
+
+ @async_wrapper("rename_file")
+ async def rename_file(self, old_path: str, new_path: str) -> None:
+ """
+ Rename a file asynchronously.
+
+ Args:
+ old_path: Current file path
+ new_path: New file path
+ """
+ pass
+
+ @async_wrapper("move_file")
+ async def move_file(self, source_path: str, destination_path: str) -> None:
+ """
+ Move a file to a different directory asynchronously.
+
+ Args:
+ source_path: Current file path
+ destination_path: Destination path
+ """
+ pass
+
+ async def write_files(self, files: List[Dict[str, str]]) -> None:
+ """
+ Write multiple files in a single operation asynchronously.
+
+ Args:
+ files: List of dictionaries, each with 'path', 'content', and optional 'encoding'.
+ """
+ for file_info in files:
+ path = file_info["path"]
+ content = file_info["content"]
+ encoding = file_info.get("encoding", "utf-8")
+ await self.write_file(path, content, encoding)
+
+ @async_wrapper("exists")
+ async def exists(self, path: str) -> bool:
+ """Check if file/directory exists asynchronously"""
+ pass
+
+ @async_wrapper("is_file")
+ async def is_file(self, path: str) -> bool:
+ """Check if path is a file asynchronously"""
+ pass
+
+ @async_wrapper("is_dir")
+ async def is_dir(self, path: str) -> bool:
+ """Check if path is a directory asynchronously"""
+ pass
+
+ @async_wrapper("upload_file")
+ async def upload_file(
+ self, local_path: str, remote_path: str, encoding: str = "utf-8"
+ ) -> None:
+ """
+ Upload a local file to the sandbox asynchronously.
+
+ Args:
+ local_path: Path to the local file
+ remote_path: Destination path in the sandbox
+ encoding: File encoding (default: "utf-8"). Use "base64" for binary files.
+ """
+ pass
+
+ @async_wrapper("download_file")
+ async def download_file(
+ self, remote_path: str, local_path: str, encoding: str = "utf-8"
+ ) -> None:
+ """
+ Download a file from the sandbox to a local path asynchronously.
+
+ Args:
+ remote_path: Path to the file in the sandbox
+ local_path: Destination path on the local filesystem
+ encoding: File encoding (default: "utf-8"). Use "base64" for binary files.
+ """
+ pass
+
+ async def ls(self, path: str = ".") -> List[str]:
+ """
+ List directory contents asynchronously.
+
+ Args:
+ path: Path to list
+
+ Returns:
+ List of file/directory names
+ """
+ return await self.list_dir(path)
+
+ @async_wrapper("rm")
+ async def rm(self, path: str, recursive: bool = False) -> None:
+ """
+ Remove file or directory asynchronously.
+
+ Args:
+ path: Path to remove
+ recursive: Remove recursively
+ """
+ pass
+
+ def open(self, path: str, mode: str = "r") -> AsyncSandboxFileIO:
+ """
+ Open a file in the sandbox asynchronously.
+
+ Args:
+ path: Path to the file
+ mode: Open mode ('r', 'w', 'a', etc.)
+
+ Returns:
+ AsyncSandboxFileIO: Async file handle
+ """
+ return AsyncSandboxFileIO(self, path, mode)
+
+
+class SandboxFileIO:
+ """Synchronous file I/O handle for sandbox files"""
+
+ def __init__(self, filesystem: SandboxFilesystem, path: str, mode: str):
+ self.filesystem = filesystem
+ self.path = path
+ self.mode = mode
+ self._closed = False
+
+ def read(self) -> str:
+ """Read file content synchronously"""
+ if "r" not in self.mode:
+ raise ValueError("File not opened for reading")
+
+ if self._closed:
+ raise ValueError("File is closed")
+
+ file_info = self.filesystem.read_file(self.path)
+ return file_info.content
+
+ def write(self, content: str) -> None:
+ """Write content to file synchronously"""
+ if "w" not in self.mode and "a" not in self.mode:
+ raise ValueError("File not opened for writing")
+
+ if self._closed:
+ raise ValueError("File is closed")
+
+ if "a" in self.mode:
+ try:
+ existing = self.filesystem.read_file(self.path)
+ content = existing.content + content
+ except SandboxFileNotFoundError:
+ pass
+
+ self.filesystem.write_file(self.path, content)
+
+ def close(self) -> None:
+ """Close the file"""
+ self._closed = True
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
+
+
+class AsyncSandboxFileIO:
+ """Async file I/O handle for sandbox files"""
+
+ def __init__(self, filesystem: AsyncSandboxFilesystem, path: str, mode: str):
+ self.filesystem = filesystem
+ self.path = path
+ self.mode = mode
+ self._closed = False
+
+ async def read(self) -> str:
+ """Read file content asynchronously"""
+ if "r" not in self.mode:
+ raise ValueError("File not opened for reading")
+
+ if self._closed:
+ raise ValueError("File is closed")
+
+ file_info = await self.filesystem.read_file(self.path)
+ return file_info.content
+
+ async def write(self, content: str) -> None:
+ """Write content to file asynchronously"""
+ if "w" not in self.mode and "a" not in self.mode:
+ raise ValueError("File not opened for writing")
+
+ if self._closed:
+ raise ValueError("File is closed")
+
+ if "a" in self.mode:
+ try:
+ existing = await self.filesystem.read_file(self.path)
+ content = existing.content + content
+ except SandboxFileNotFoundError:
+ pass
+
+ await self.filesystem.write_file(self.path, content)
+
+ def close(self) -> None:
+ """Close the file"""
+ self._closed = True
+
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+ self.close()
diff --git a/koyeb/sandbox/sandbox.py b/koyeb/sandbox/sandbox.py
new file mode 100644
index 00000000..8701fc1a
--- /dev/null
+++ b/koyeb/sandbox/sandbox.py
@@ -0,0 +1,1075 @@
+# coding: utf-8
+
+"""
+Koyeb Sandbox - Python SDK for creating and managing Koyeb sandboxes
+"""
+
+from __future__ import annotations
+
+import asyncio
+import os
+import secrets
+import time
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Dict, List, Optional
+
+from koyeb.api.api.deployments_api import DeploymentsApi
+from koyeb.api.exceptions import ApiException, NotFoundException
+from koyeb.api.models.create_app import CreateApp
+from koyeb.api.models.create_service import CreateService
+
+from .utils import (
+ DEFAULT_INSTANCE_WAIT_TIMEOUT,
+ DEFAULT_POLL_INTERVAL,
+ IdleTimeout,
+ SandboxError,
+ SandboxTimeoutError,
+ _is_light_sleep_enabled,
+ async_wrapper,
+ build_env_vars,
+ create_deployment_definition,
+ create_docker_source,
+ create_koyeb_sandbox_routes,
+ create_sandbox_client,
+ get_api_client,
+ logger,
+ run_sync_in_executor,
+ validate_port,
+)
+
+if TYPE_CHECKING:
+ from .exec import AsyncSandboxExecutor, SandboxExecutor
+ from .executor_client import SandboxClient
+ from .filesystem import AsyncSandboxFilesystem, SandboxFilesystem
+
+
+@dataclass
+class ProcessInfo:
+ """Type definition for process information returned by list_processes."""
+
+ id: str # Process ID (UUID string)
+ command: str # The command that was executed
+ status: str # Process status (e.g., "running", "completed")
+ pid: Optional[int] = None # OS process ID (if running)
+ exit_code: Optional[int] = None # Exit code (if completed)
+ started_at: Optional[str] = None # ISO 8601 timestamp when process started
+ completed_at: Optional[str] = (
+ None # ISO 8601 timestamp when process completed (if applicable)
+ )
+
+
+@dataclass
+class ExposedPort:
+ """Result of exposing a port via TCP proxy."""
+
+ port: int
+ exposed_at: str
+
+ def __str__(self) -> str:
+ return f"ExposedPort(port={self.port}, exposed_at='{self.exposed_at}')"
+
+
+class Sandbox:
+ """
+ Synchronous sandbox for running code on Koyeb infrastructure.
+ Provides creation and deletion functionality with proper health polling.
+ """
+
+ def __init__(
+ self,
+ sandbox_id: str,
+ app_id: str,
+ service_id: str,
+ name: Optional[str] = None,
+ api_token: Optional[str] = None,
+ sandbox_secret: Optional[str] = None,
+ ):
+ self.sandbox_id = sandbox_id
+ self.app_id = app_id
+ self.service_id = service_id
+ self.name = name
+ self.api_token = api_token
+ self.sandbox_secret = sandbox_secret
+ self._created_at = time.time()
+ self._sandbox_url = None
+ self._client = None
+
+ @property
+ def id(self) -> str:
+ """Get the service ID of the sandbox."""
+ return self.service_id
+
+ @classmethod
+ def create(
+ cls,
+ image: str = "koyeb/sandbox",
+ name: str = "quick-sandbox",
+ wait_ready: bool = True,
+ instance_type: str = "micro",
+ exposed_port_protocol: Optional[str] = None,
+ env: Optional[Dict[str, str]] = None,
+ region: Optional[str] = None,
+ api_token: Optional[str] = None,
+ timeout: int = 300,
+ idle_timeout: Optional[IdleTimeout] = None,
+ enable_tcp_proxy: bool = False,
+ privileged: bool = False,
+ ) -> Sandbox:
+ """
+ Create a new sandbox instance.
+
+ Args:
+ image: Docker image to use (default: koyeb/sandbox)
+ name: Name of the sandbox
+ wait_ready: Wait for sandbox to be ready (default: True)
+            instance_type: Instance type (default: micro)
+ exposed_port_protocol: Protocol to expose ports with ("http" or "http2").
+ If None, defaults to "http".
+ If provided, must be one of "http" or "http2".
+ env: Environment variables
+ region: Region to deploy to (default: "na")
+ api_token: Koyeb API token (if None, will try to get from KOYEB_API_TOKEN env var)
+ timeout: Timeout for sandbox creation in seconds
+ idle_timeout: Idle timeout configuration for scale-to-zero
+ - None: Auto-enable (light_sleep=300s, deep_sleep=600s)
+ - 0: Disable scale-to-zero (keep always-on)
+ - int > 0: Deep sleep only (e.g., 600 for 600s deep sleep)
+ - dict: Explicit configuration with {"light_sleep": 300, "deep_sleep": 600}
+ enable_tcp_proxy: If True, enables TCP proxy for direct TCP access to port 3031
+ privileged: If True, run the container in privileged mode (default: False)
+
+ Returns:
+ Sandbox: A new Sandbox instance
+
+ Raises:
+ ValueError: If API token is not provided
+ SandboxTimeoutError: If wait_ready is True and sandbox does not become ready within timeout
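+
+        Example (illustrative; assumes KOYEB_API_TOKEN is set):
+            >>> sandbox = Sandbox.create(
+            ...     name="demo",
+            ...     idle_timeout={"light_sleep": 300, "deep_sleep": 600},
+            ... )
+            >>> sandbox.delete()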
+ """
+ if api_token is None:
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ raise ValueError(
+ "API token is required. Set KOYEB_API_TOKEN environment variable or pass api_token parameter"
+ )
+
+ sandbox = cls._create_sync(
+ name=name,
+ image=image,
+ instance_type=instance_type,
+ exposed_port_protocol=exposed_port_protocol,
+ env=env,
+ region=region,
+ api_token=api_token,
+ timeout=timeout,
+ idle_timeout=idle_timeout,
+ enable_tcp_proxy=enable_tcp_proxy,
+ privileged=privileged,
+ )
+
+ if wait_ready:
+ is_ready = sandbox.wait_ready(timeout=timeout)
+ if not is_ready:
+ raise SandboxTimeoutError(
+ f"Sandbox '{sandbox.name}' did not become ready within {timeout} seconds. "
+ f"The sandbox was created but may not be ready yet. "
+ f"You can check its status with sandbox.is_healthy() or call sandbox.wait_ready() again."
+ )
+
+ return sandbox
+
+ @classmethod
+ def _create_sync(
+ cls,
+ name: str,
+ image: str = "koyeb/sandbox",
+ instance_type: str = "nano",
+ exposed_port_protocol: Optional[str] = None,
+ env: Optional[Dict[str, str]] = None,
+ region: Optional[str] = None,
+ api_token: Optional[str] = None,
+ timeout: int = 300,
+ idle_timeout: Optional[IdleTimeout] = None,
+ enable_tcp_proxy: bool = False,
+ privileged: bool = False,
+ ) -> Sandbox:
+ """
+ Synchronous creation method that returns creation parameters.
+ Subclasses can override to return their own type.
+ """
+ apps_api, services_api, _, catalog_instances_api = get_api_client(api_token)
+
+ # Always create routes (ports are always exposed, default to "http")
+ routes = create_koyeb_sandbox_routes()
+
+ # Generate secure sandbox secret
+ sandbox_secret = secrets.token_urlsafe(32)
+
+ # Add SANDBOX_SECRET to environment variables
+ if env is None:
+ env = {}
+ env["SANDBOX_SECRET"] = sandbox_secret
+
+ # Check if light sleep is enabled for this instance type
+ light_sleep_enabled = _is_light_sleep_enabled(
+ instance_type, catalog_instances_api
+ )
+
+ app_name = f"sandbox-app-{name}-{int(time.time())}"
+ app_response = apps_api.create_app(app=CreateApp(name=app_name))
+ app_id = app_response.app.id
+
+ env_vars = build_env_vars(env)
+ docker_source = create_docker_source(image, [], privileged=privileged)
+ deployment_definition = create_deployment_definition(
+ name=name,
+ docker_source=docker_source,
+ env_vars=env_vars,
+ instance_type=instance_type,
+ exposed_port_protocol=exposed_port_protocol,
+ region=region,
+ routes=routes,
+ idle_timeout=idle_timeout,
+ light_sleep_enabled=light_sleep_enabled,
+ enable_tcp_proxy=enable_tcp_proxy,
+ )
+
+ create_service = CreateService(app_id=app_id, definition=deployment_definition)
+ service_response = services_api.create_service(service=create_service)
+ service_id = service_response.service.id
+
+ return cls(
+ sandbox_id=name,
+ app_id=app_id,
+ service_id=service_id,
+ name=name,
+ api_token=api_token,
+ sandbox_secret=sandbox_secret,
+ )
+
+ @classmethod
+ def get_from_id(
+ cls,
+ id: str,
+ api_token: Optional[str] = None,
+ ) -> "Sandbox":
+ """
+ Get a sandbox by service ID.
+
+ Args:
+ id: Service ID of the sandbox
+ api_token: Koyeb API token (if None, will try to get from KOYEB_API_TOKEN env var)
+
+ Returns:
+ Sandbox: The Sandbox instance
+
+ Raises:
+ ValueError: If API token is not provided or id is invalid
+ SandboxError: If sandbox is not found or retrieval fails
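+
+        Example (illustrative; "svc-1234" is a placeholder service ID):
+            >>> sandbox = Sandbox.get_from_id("svc-1234")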
+ """
+ if api_token is None:
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ raise ValueError(
+ "API token is required. Set KOYEB_API_TOKEN environment variable or pass api_token parameter"
+ )
+
+ if not id:
+ raise ValueError("id is required")
+
+ _, services_api, _, _ = get_api_client(api_token)
+ deployments_api = DeploymentsApi(services_api.api_client)
+
+ # Get service by ID
+ try:
+ service_response = services_api.get_service(id=id)
+ service = service_response.service
+ except NotFoundException as e:
+ raise SandboxError(f"Sandbox not found with id: {id}") from e
+ except ApiException as e:
+ raise SandboxError(f"Failed to retrieve sandbox with id: {id}: {e}") from e
+
+ if service is None:
+ raise SandboxError(f"Sandbox not found with id: {id}")
+
+ sandbox_name = service.name
+
+ # Get deployment to extract sandbox_secret from env vars
+ deployment_id = service.active_deployment_id or service.latest_deployment_id
+ sandbox_secret = None
+
+ if deployment_id:
+ try:
+ deployment_response = deployments_api.get_deployment(id=deployment_id)
+ if (
+ deployment_response.deployment
+ and deployment_response.deployment.definition
+ and deployment_response.deployment.definition.env
+ ):
+ # Find SANDBOX_SECRET in env vars
+ for env_var in deployment_response.deployment.definition.env:
+ if env_var.key == "SANDBOX_SECRET":
+ sandbox_secret = env_var.value
+ break
+ except Exception as e:
+ logger.debug(f"Could not get deployment {deployment_id}: {e}")
+
+ return cls(
+ sandbox_id=service.id,
+ app_id=service.app_id,
+ service_id=service.id,
+ name=sandbox_name,
+ api_token=api_token,
+ sandbox_secret=sandbox_secret,
+ )
+
+ def wait_ready(
+ self,
+ timeout: int = DEFAULT_INSTANCE_WAIT_TIMEOUT,
+ poll_interval: float = DEFAULT_POLL_INTERVAL,
+ ) -> bool:
+ """
+ Wait for sandbox to become ready with proper polling.
+
+ Args:
+ timeout: Maximum time to wait in seconds
+ poll_interval: Time between health checks in seconds
+
+ Returns:
+ bool: True if sandbox became ready, False if timeout
+ """
+ start_time = time.time()
+ sandbox_url = None
+
+ while time.time() - start_time < timeout:
+ # Get sandbox URL on first iteration or if not yet retrieved
+ if sandbox_url is None:
+ sandbox_url = self._get_sandbox_url()
+ # If URL is not available yet, wait and retry
+ if sandbox_url is None:
+ time.sleep(poll_interval)
+ continue
+
+ is_healthy = self.is_healthy()
+
+ if is_healthy:
+ return True
+
+ time.sleep(poll_interval)
+
+ return False
+
+ def wait_tcp_proxy_ready(
+ self,
+ timeout: int = DEFAULT_INSTANCE_WAIT_TIMEOUT,
+ poll_interval: float = DEFAULT_POLL_INTERVAL,
+ ) -> bool:
+ """
+ Wait for TCP proxy to become ready and available.
+
+ Polls the deployment metadata until the TCP proxy information is available.
+ This is useful when enable_tcp_proxy=True was set during sandbox creation,
+ as the proxy information may not be immediately available.
+
+ Args:
+ timeout: Maximum time to wait in seconds
+ poll_interval: Time between checks in seconds
+
+ Returns:
+ bool: True if TCP proxy became ready, False if timeout
+ """
+ start_time = time.time()
+
+ while time.time() - start_time < timeout:
+ tcp_proxy_info = self.get_tcp_proxy_info()
+ if tcp_proxy_info is not None:
+ return True
+
+ time.sleep(poll_interval)
+
+ return False
+
+ def delete(self) -> None:
+ """Delete the sandbox instance."""
+ apps_api, services_api, _, _ = get_api_client(self.api_token)
+ services_api.delete_service(self.service_id)
+ apps_api.delete_app(self.app_id)
+
+ def get_domain(self) -> Optional[str]:
+ """
+ Get the public domain of the sandbox.
+
+ Returns the domain name (e.g., "app-name-org.koyeb.app") without protocol or path.
+ To construct the URL, use: f"https://{sandbox.get_domain()}"
+
+ Returns:
+ Optional[str]: The domain name or None if unavailable
+ """
+        try:
+            _, services_api, _, _ = get_api_client(self.api_token)
+            service_response = services_api.get_service(self.service_id)
+            service = service_response.service
+
+            if service.app_id:
+                apps_api, _, _, _ = get_api_client(self.api_token)
+                app_response = apps_api.get_app(service.app_id)
+                app = app_response.app
+                if hasattr(app, "domains") and app.domains:
+                    # Use the first public domain
+                    return app.domains[0].name
+            return None
+        except Exception:
+            # Any lookup failure (including NotFoundException and ApiException)
+            # means the domain is not available
+            return None
+
+ def get_tcp_proxy_info(self) -> Optional[tuple[str, int]]:
+ """
+ Get the TCP proxy host and port for the sandbox.
+
+ Returns the TCP proxy host and port as a tuple (host, port) for direct TCP access to port 3031.
+ This is only available if enable_tcp_proxy=True was set when creating the sandbox.
+
+ Returns:
+ Optional[tuple[str, int]]: A tuple of (host, port) or None if unavailable
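+
+        Example (illustrative; assumes enable_tcp_proxy=True at creation):
+            >>> info = sandbox.get_tcp_proxy_info()
+            >>> if info is not None:
+            ...     host, port = info
+            ...     print(f"TCP proxy at {host}:{port}")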
+ """
+        try:
+            _, services_api, _, _ = get_api_client(self.api_token)
+            service_response = services_api.get_service(self.service_id)
+            service = service_response.service
+
+            if not service.active_deployment_id:
+                return None
+
+            # Get the active deployment, reusing the existing API client
+            deployments_api = DeploymentsApi(services_api.api_client)
+            deployment_response = deployments_api.get_deployment(
+                service.active_deployment_id
+            )
+            deployment = deployment_response.deployment
+
+            if not deployment.metadata or not deployment.metadata.proxy_ports:
+                return None
+
+            # Find the proxy port for port 3031
+            for proxy_port in deployment.metadata.proxy_ports:
+                if (
+                    proxy_port.port == 3031
+                    and proxy_port.host
+                    and proxy_port.public_port
+                ):
+                    return (proxy_port.host, proxy_port.public_port)
+
+            return None
+        except Exception:
+            # Any lookup failure means the proxy info is not available yet
+            return None
+
+ def _get_sandbox_url(self) -> Optional[str]:
+ """
+ Internal method to get the sandbox URL for health checks and client initialization.
+ Caches the URL after first retrieval.
+
+ Returns:
+ Optional[str]: The sandbox URL or None if unavailable
+ """
+ if self._sandbox_url is None:
+ domain = self.get_domain()
+ if domain:
+ self._sandbox_url = f"https://{domain}/koyeb-sandbox"
+ return self._sandbox_url
+
+ def _get_client(self) -> "SandboxClient": # type: ignore[name-defined]
+ """
+ Get or create SandboxClient instance with validation.
+
+ Returns:
+ SandboxClient: Configured client instance
+
+ Raises:
+ SandboxError: If sandbox URL or secret is not available
+ """
+ if self._client is None:
+ sandbox_url = self._get_sandbox_url()
+ self._client = create_sandbox_client(sandbox_url, self.sandbox_secret)
+ return self._client
+
+ def _check_response_error(self, response: Dict, operation: str) -> None:
+ """
+ Check if a response indicates an error and raise SandboxError if so.
+
+ Args:
+ response: The response dictionary to check
+ operation: Description of the operation (e.g., "expose port 8080")
+
+ Raises:
+ SandboxError: If response indicates failure
+ """
+ if not response.get("success", False):
+ error_msg = response.get("error", "Unknown error")
+ raise SandboxError(f"Failed to {operation}: {error_msg}")
+
+ def is_healthy(self) -> bool:
+ """Check if sandbox is healthy and ready for operations"""
+ sandbox_url = self._get_sandbox_url()
+ if not sandbox_url or not self.sandbox_secret:
+ return False
+
+ # Check executor health directly - this is what matters for operations
+ # If executor is healthy, the sandbox is usable (will wake up service if needed)
+ try:
+ from .executor_client import SandboxClient
+
+ client = SandboxClient(sandbox_url, self.sandbox_secret)
+ health_response = client.health()
+ if isinstance(health_response, dict):
+ status = health_response.get("status", "").lower()
+ return status in ["ok", "healthy", "ready"]
+ return True # If we got a response, consider it healthy
+ except Exception:
+ return False
+
+ @property
+ def filesystem(self) -> "SandboxFilesystem":
+ """Get filesystem operations interface"""
+ from .filesystem import SandboxFilesystem
+
+ return SandboxFilesystem(self)
+
+ @property
+ def exec(self) -> "SandboxExecutor":
+ """Get command execution interface"""
+ from .exec import SandboxExecutor
+
+ return SandboxExecutor(self)
+
+ def expose_port(self, port: int) -> ExposedPort:
+ """
+ Expose a port to external connections via TCP proxy.
+
+ Binds the specified internal port to the TCP proxy, allowing external
+ connections to reach services running on that port inside the sandbox.
+ Automatically unbinds any existing port before binding the new one.
+
+ Args:
+            port: The internal port number to expose (1-65535)
+
+ Returns:
+ ExposedPort: An object with `port` and `exposed_at` attributes:
+ - port: The exposed port number
+ - exposed_at: The full URL with https:// protocol (e.g., "https://app-name-org.koyeb.app")
+
+ Raises:
+ ValueError: If port is not in valid range [1, 65535]
+ SandboxError: If the port binding operation fails
+
+ Notes:
+ - Only one port can be exposed at a time
+ - Any existing port binding is automatically unbound before binding the new port
+ - The port must be available and accessible within the sandbox environment
+ - The TCP proxy is accessed via get_tcp_proxy_info() which returns (host, port)
+
+ Example:
+ >>> result = sandbox.expose_port(8080)
+ >>> result.port
+ 8080
+ >>> result.exposed_at
+ 'https://app-name-org.koyeb.app'
+ """
+ validate_port(port)
+ client = self._get_client()
+ try:
+ # Always unbind any existing port first
+ try:
+ client.unbind_port()
+ except Exception as e:
+ # Ignore errors when unbinding - it's okay if no port was bound
+ logger.debug(f"Error unbinding existing port (this is okay): {e}")
+
+ # Now bind the new port
+ response = client.bind_port(port)
+ self._check_response_error(response, f"expose port {port}")
+
+ # Get domain for exposed_at
+ domain = self.get_domain()
+ if not domain:
+ raise SandboxError("Domain not available for exposed port")
+
+ # Return the port from response if available, otherwise use the requested port
+ exposed_port = int(response.get("port", port))
+ exposed_at = f"https://{domain}"
+ return ExposedPort(port=exposed_port, exposed_at=exposed_at)
+ except Exception as e:
+ if isinstance(e, SandboxError):
+ raise
+ raise SandboxError(f"Failed to expose port {port}: {str(e)}") from e
+
+ def unexpose_port(self) -> None:
+ """
+ Unexpose a port from external connections.
+
+ Removes the TCP proxy port binding, stopping traffic forwarding to the
+ previously bound port.
+
+ Raises:
+ SandboxError: If the port unbinding operation fails
+
+ Notes:
+ - After unexposing, the TCP proxy will no longer forward traffic
+ - Safe to call even if no port is currently bound
+ """
+ client = self._get_client()
+ try:
+ response = client.unbind_port()
+ self._check_response_error(response, "unexpose port")
+ except Exception as e:
+ if isinstance(e, SandboxError):
+ raise
+ raise SandboxError(f"Failed to unexpose port: {str(e)}") from e
+
+ def launch_process(
+ self, cmd: str, cwd: Optional[str] = None, env: Optional[Dict[str, str]] = None
+ ) -> str:
+ """
+ Launch a background process in the sandbox.
+
+ Starts a long-running background process that continues executing even after
+ the method returns. Use this for servers, workers, or other long-running tasks.
+
+ Args:
+ cmd: The shell command to execute as a background process
+ cwd: Optional working directory for the process
+ env: Optional environment variables to set/override for the process
+
+ Returns:
+ str: The unique process ID (UUID string) that can be used to manage the process
+
+ Raises:
+ SandboxError: If the process launch fails
+
+ Example:
+ >>> process_id = sandbox.launch_process("python -u server.py")
+ >>> print(f"Started process: {process_id}")
+ """
+ client = self._get_client()
+ try:
+ response = client.start_process(cmd, cwd, env)
+ # Check for process ID - if it exists, the process was launched successfully
+ process_id = response.get("id")
+ if process_id:
+ return process_id
+ # If no ID, check for explicit error
+ error_msg = response.get("error", response.get("message", "Unknown error"))
+ raise SandboxError(f"Failed to launch process: {error_msg}")
+ except Exception as e:
+ if isinstance(e, SandboxError):
+ raise
+ raise SandboxError(f"Failed to launch process: {str(e)}") from e
+
+ def kill_process(self, process_id: str) -> None:
+ """
+ Kill a background process by its ID.
+
+ Terminates a running background process. This sends a SIGTERM signal to the process,
+ allowing it to clean up gracefully. If the process doesn't terminate within a timeout,
+ it will be forcefully killed with SIGKILL.
+
+ Args:
+ process_id: The unique process ID (UUID string) to kill
+
+ Raises:
+ SandboxError: If the process kill operation fails
+
+ Example:
+ >>> sandbox.kill_process("550e8400-e29b-41d4-a716-446655440000")
+ """
+ client = self._get_client()
+ try:
+ response = client.kill_process(process_id)
+ self._check_response_error(response, f"kill process {process_id}")
+ except Exception as e:
+ if isinstance(e, SandboxError):
+ raise
+ raise SandboxError(f"Failed to kill process {process_id}: {str(e)}") from e
+
+ def list_processes(self) -> List[ProcessInfo]:
+ """
+ List all background processes.
+
+ Returns information about all currently running and recently completed background
+ processes. This includes both active processes and processes that have completed
+ (which remain in memory until server restart).
+
+ Returns:
+ List[ProcessInfo]: List of process objects, each containing:
+ - id: Process ID (UUID string)
+ - command: The command that was executed
+ - status: Process status (e.g., "running", "completed")
+ - pid: OS process ID (if running)
+ - exit_code: Exit code (if completed)
+ - started_at: ISO 8601 timestamp when process started
+ - completed_at: ISO 8601 timestamp when process completed (if applicable)
+
+ Raises:
+ SandboxError: If listing processes fails
+
+ Example:
+ >>> processes = sandbox.list_processes()
+ >>> for process in processes:
+ ... print(f"{process.id}: {process.command} - {process.status}")
+ """
+ client = self._get_client()
+ try:
+ response = client.list_processes()
+ processes_data = response.get("processes", [])
+ return [ProcessInfo(**process) for process in processes_data]
+ except Exception as e:
+ if isinstance(e, SandboxError):
+ raise
+ raise SandboxError(f"Failed to list processes: {str(e)}") from e
+
+ def kill_all_processes(self) -> int:
+ """
+ Kill all running background processes.
+
+ Convenience method that lists all processes and kills them all. This is useful
+ for cleanup operations.
+
+ Returns:
+ int: The number of processes that were killed
+
+ Raises:
+ SandboxError: If listing or killing processes fails
+
+ Example:
+ >>> count = sandbox.kill_all_processes()
+ >>> print(f"Killed {count} processes")
+ """
+ processes = self.list_processes()
+ killed_count = 0
+ for process in processes:
+ process_id = process.id
+ status = process.status
+ # Only kill running processes
+ if process_id and status == "running":
+ try:
+ self.kill_process(process_id)
+ killed_count += 1
+ except SandboxError:
+ # Continue killing other processes even if one fails
+ pass
+ return killed_count
+
+ def __enter__(self) -> "Sandbox":
+ """Context manager entry - returns self."""
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb) -> None:
+ """Context manager exit - automatically deletes the sandbox."""
+ try:
+ # Clean up client if it exists
+ if self._client is not None:
+ self._client.close()
+ self.delete()
+ except Exception as e:
+ logger.warning(f"Error during sandbox cleanup: {e}")
+
+
+class AsyncSandbox(Sandbox):
+ """
+ Async sandbox for running code on Koyeb infrastructure.
+ Inherits from Sandbox and provides async wrappers for all operations.
+ """
+
+ async def _run_sync(self, method, *args, **kwargs):
+ """
+ Helper method to run a synchronous method in an executor.
+
+ Args:
+ method: The sync method to run (from super())
+ *args: Positional arguments for the method
+ **kwargs: Keyword arguments for the method
+
+ Returns:
+ Result of the synchronous method call
+ """
+ return await run_sync_in_executor(method, *args, **kwargs)
+
+ @classmethod
+ async def get_from_id(
+ cls,
+ id: str,
+ api_token: Optional[str] = None,
+ ) -> "AsyncSandbox":
+ """
+ Get a sandbox by service ID asynchronously.
+
+ Args:
+ id: Service ID of the sandbox
+ api_token: Koyeb API token (if None, will try to get from KOYEB_API_TOKEN env var)
+
+ Returns:
+ AsyncSandbox: The AsyncSandbox instance
+
+ Raises:
+ ValueError: If API token is not provided or id is invalid
+ SandboxError: If sandbox is not found or retrieval fails
+ """
+ sync_sandbox = await run_sync_in_executor(
+ Sandbox.get_from_id, id=id, api_token=api_token
+ )
+
+ # Convert Sandbox instance to AsyncSandbox instance
+ async_sandbox = cls(
+ sandbox_id=sync_sandbox.sandbox_id,
+ app_id=sync_sandbox.app_id,
+ service_id=sync_sandbox.service_id,
+ name=sync_sandbox.name,
+ api_token=sync_sandbox.api_token,
+ sandbox_secret=sync_sandbox.sandbox_secret,
+ )
+ async_sandbox._created_at = sync_sandbox._created_at
+
+ return async_sandbox
+
+ @classmethod
+ async def create(
+ cls,
+ image: str = "koyeb/sandbox",
+ name: str = "quick-sandbox",
+ wait_ready: bool = True,
+ instance_type: str = "nano",
+ exposed_port_protocol: Optional[str] = None,
+ env: Optional[Dict[str, str]] = None,
+ region: Optional[str] = None,
+ api_token: Optional[str] = None,
+ timeout: int = 300,
+ idle_timeout: Optional[IdleTimeout] = None,
+ enable_tcp_proxy: bool = False,
+ privileged: bool = False,
+ ) -> AsyncSandbox:
+ """
+ Create a new sandbox instance with async support.
+
+ Args:
+ image: Docker image to use (default: koyeb/sandbox)
+ name: Name of the sandbox
+ wait_ready: Wait for sandbox to be ready (default: True)
+ instance_type: Instance type (default: nano)
+ exposed_port_protocol: Protocol to expose ports with ("http" or "http2").
+ If None, defaults to "http".
+ If provided, must be one of "http" or "http2".
+ env: Environment variables
+ region: Region to deploy to (default: "na")
+ api_token: Koyeb API token (if None, will try to get from KOYEB_API_TOKEN env var)
+ timeout: Timeout for sandbox creation in seconds
+ idle_timeout: Idle timeout configuration for scale-to-zero
+ - None: Auto-enable (light_sleep=300s, deep_sleep=600s)
+ - 0: Disable scale-to-zero (keep always-on)
+ - int > 0: Deep sleep only (e.g., 600 for 600s deep sleep)
+ - dict: Explicit configuration with {"light_sleep": 300, "deep_sleep": 600}
+ enable_tcp_proxy: If True, enables TCP proxy for direct TCP access to port 3031
+ privileged: If True, run the container in privileged mode (default: False)
+
+ Returns:
+ AsyncSandbox: A new AsyncSandbox instance
+
+ Raises:
+ ValueError: If API token is not provided
+ SandboxTimeoutError: If wait_ready is True and sandbox does not become ready within timeout
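+
+        Example (illustrative; assumes KOYEB_API_TOKEN is set):
+            >>> async with await AsyncSandbox.create(name="demo") as sandbox:
+            ...     healthy = await sandbox.is_healthy()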
+ """
+ if api_token is None:
+ api_token = os.getenv("KOYEB_API_TOKEN")
+ if not api_token:
+ raise ValueError(
+ "API token is required. Set KOYEB_API_TOKEN environment variable or pass api_token parameter"
+ )
+
+ loop = asyncio.get_running_loop()
+ sync_result = await loop.run_in_executor(
+ None,
+ lambda: Sandbox._create_sync(
+ name=name,
+ image=image,
+ instance_type=instance_type,
+ exposed_port_protocol=exposed_port_protocol,
+ env=env,
+ region=region,
+ api_token=api_token,
+ timeout=timeout,
+ idle_timeout=idle_timeout,
+ enable_tcp_proxy=enable_tcp_proxy,
+ privileged=privileged,
+ ),
+ )
+
+ # Convert Sandbox instance to AsyncSandbox instance
+ sandbox = cls(
+ sandbox_id=sync_result.sandbox_id,
+ app_id=sync_result.app_id,
+ service_id=sync_result.service_id,
+ name=sync_result.name,
+ api_token=sync_result.api_token,
+ sandbox_secret=sync_result.sandbox_secret,
+ )
+ sandbox._created_at = sync_result._created_at
+
+ if wait_ready:
+ is_ready = await sandbox.wait_ready(timeout=timeout)
+ if not is_ready:
+ raise SandboxTimeoutError(
+ f"Sandbox '{sandbox.name}' did not become ready within {timeout} seconds. "
+ f"The sandbox was created but may not be ready yet. "
+ f"You can check its status with sandbox.is_healthy() or call sandbox.wait_ready() again."
+ )
+
+ return sandbox
+
+ async def wait_ready(
+ self,
+ timeout: int = DEFAULT_INSTANCE_WAIT_TIMEOUT,
+ poll_interval: float = DEFAULT_POLL_INTERVAL,
+ ) -> bool:
+ """
+ Wait for sandbox to become ready with proper async polling.
+
+ Args:
+ timeout: Maximum time to wait in seconds
+ poll_interval: Time between health checks in seconds
+
+ Returns:
+ bool: True if sandbox became ready, False if timeout
+ """
+ start_time = time.time()
+
+ while time.time() - start_time < timeout:
+ loop = asyncio.get_running_loop()
+ is_healthy = await loop.run_in_executor(None, super().is_healthy)
+
+ if is_healthy:
+ return True
+
+ await asyncio.sleep(poll_interval)
+
+ return False
+
+ async def wait_tcp_proxy_ready(
+ self,
+ timeout: int = DEFAULT_INSTANCE_WAIT_TIMEOUT,
+ poll_interval: float = DEFAULT_POLL_INTERVAL,
+ ) -> bool:
+ """
+ Wait for TCP proxy to become ready and available asynchronously.
+
+ Polls the deployment metadata until the TCP proxy information is available.
+ This is useful when enable_tcp_proxy=True was set during sandbox creation,
+ as the proxy information may not be immediately available.
+
+ Args:
+ timeout: Maximum time to wait in seconds
+ poll_interval: Time between checks in seconds
+
+ Returns:
+ bool: True if TCP proxy became ready, False if timeout
+ """
+ start_time = time.time()
+
+ while time.time() - start_time < timeout:
+ loop = asyncio.get_running_loop()
+ tcp_proxy_info = await loop.run_in_executor(
+ None, super().get_tcp_proxy_info
+ )
+ if tcp_proxy_info is not None:
+ return True
+
+ await asyncio.sleep(poll_interval)
+
+ return False
+
+ @async_wrapper("delete")
+ async def delete(self) -> None:
+ """Delete the sandbox instance asynchronously."""
+ pass
+
+ @async_wrapper("is_healthy")
+ async def is_healthy(self) -> bool:
+ """Check if sandbox is healthy and ready for operations asynchronously"""
+ pass
+
+ @property
+ def exec(self) -> "AsyncSandboxExecutor":
+ """Get async command execution interface"""
+ from .exec import AsyncSandboxExecutor
+
+ return AsyncSandboxExecutor(self)
+
+ @property
+ def filesystem(self) -> "AsyncSandboxFilesystem":
+ """Get filesystem operations interface"""
+ from .filesystem import AsyncSandboxFilesystem
+
+ return AsyncSandboxFilesystem(self)
+
+ @async_wrapper("expose_port")
+ async def expose_port(self, port: int) -> ExposedPort:
+ """Expose a port to external connections via TCP proxy asynchronously."""
+ pass
+
+ @async_wrapper("unexpose_port")
+ async def unexpose_port(self) -> None:
+ """Unexpose a port from external connections asynchronously."""
+ pass
+
+ @async_wrapper("launch_process")
+ async def launch_process(
+ self, cmd: str, cwd: Optional[str] = None, env: Optional[Dict[str, str]] = None
+ ) -> str:
+ """Launch a background process in the sandbox asynchronously."""
+ pass
+
+ @async_wrapper("kill_process")
+ async def kill_process(self, process_id: str) -> None:
+ """Kill a background process by its ID asynchronously."""
+ pass
+
+ @async_wrapper("list_processes")
+ async def list_processes(self) -> List[ProcessInfo]:
+ """List all background processes asynchronously."""
+ pass
+
+ async def kill_all_processes(self) -> int:
+ """Kill all running background processes asynchronously."""
+ processes = await self.list_processes()
+ killed_count = 0
+ for process in processes:
+ process_id = process.id
+ status = process.status
+ # Only kill running processes
+ if process_id and status == "running":
+ try:
+ await self.kill_process(process_id)
+ killed_count += 1
+ except SandboxError:
+ # Continue killing other processes even if one fails
+ pass
+ return killed_count
+
+ async def __aenter__(self) -> "AsyncSandbox":
+ """Async context manager entry - returns self."""
+ return self
+
+ async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
+ """Async context manager exit - automatically deletes the sandbox."""
+ try:
+ # Clean up client if it exists
+ if self._client is not None:
+ self._client.close()
+ await self.delete()
+ except Exception as e:
+ logger.warning(f"Error during sandbox cleanup: {e}")
diff --git a/koyeb/sandbox/utils.py b/koyeb/sandbox/utils.py
new file mode 100644
index 00000000..cf07edbe
--- /dev/null
+++ b/koyeb/sandbox/utils.py
@@ -0,0 +1,687 @@
+# coding: utf-8
+
+"""
+Utility functions for Koyeb Sandbox
+"""
+
+import asyncio
+import logging
+import os
+import shlex
+from typing import Any, Callable, Dict, List, Literal, Optional, TypedDict, Union
+
+from koyeb.api import ApiClient, Configuration
+from koyeb.api.api import AppsApi, CatalogInstancesApi, InstancesApi, ServicesApi
+from koyeb.api.exceptions import ApiException, NotFoundException
+from koyeb.api.models.deployment_definition import DeploymentDefinition
+from koyeb.api.models.deployment_definition_type import DeploymentDefinitionType
+from koyeb.api.models.deployment_env import DeploymentEnv
+from koyeb.api.models.deployment_instance_type import DeploymentInstanceType
+from koyeb.api.models.deployment_port import DeploymentPort
+from koyeb.api.models.deployment_proxy_port import DeploymentProxyPort
+from koyeb.api.models.deployment_route import DeploymentRoute
+from koyeb.api.models.deployment_scaling import DeploymentScaling
+from koyeb.api.models.deployment_scaling_target import DeploymentScalingTarget
+from koyeb.api.models.deployment_scaling_target_sleep_idle_delay import (
+ DeploymentScalingTargetSleepIdleDelay,
+)
+from koyeb.api.models.docker_source import DockerSource
+from koyeb.api.models.instance_status import InstanceStatus
+from koyeb.api.models.proxy_port_protocol import ProxyPortProtocol
+
+# Set up logging
+logger = logging.getLogger(__name__)
+
+# Constants
+MIN_PORT = 1
+MAX_PORT = 65535
+DEFAULT_INSTANCE_WAIT_TIMEOUT = 60 # seconds
+DEFAULT_POLL_INTERVAL = 2.0 # seconds
+DEFAULT_COMMAND_TIMEOUT = 30 # seconds
+DEFAULT_HTTP_TIMEOUT = 30 # seconds for HTTP requests
+
+# Error messages
+ERROR_MESSAGES = {
+ "NO_SUCH_FILE": ["No such file", "not found", "No such file or directory"],
+ "FILE_EXISTS": ["exists", "already exists"],
+ "DIR_NOT_EMPTY": ["not empty", "Directory not empty"],
+}
+
+# Type definitions for idle timeout
+IdleTimeoutSeconds = int
+
+
+class IdleTimeoutConfig(TypedDict, total=False):
+ """Configuration for idle timeout with light and deep sleep."""
+
+ light_sleep: IdleTimeoutSeconds # Optional, but if provided, deep_sleep is required
+ deep_sleep: IdleTimeoutSeconds # Required
+
+
+IdleTimeout = Union[
+ Literal[0], # Disable scale-to-zero
+ IdleTimeoutSeconds, # Deep sleep only (standard and GPU instances)
+ IdleTimeoutConfig, # Explicit light_sleep/deep_sleep configuration
+]
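+
+# Illustrative IdleTimeout values:
+#   0                                        -> disable scale-to-zero (always-on)
+#   600                                      -> deep sleep only, after 600s idle
+#   {"light_sleep": 300, "deep_sleep": 600}  -> explicit two-stage configuration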
+
+# Valid protocols for DeploymentPort (from OpenAPI spec: http, http2, tcp)
+# For sandboxes, we only support http and http2
+VALID_DEPLOYMENT_PORT_PROTOCOLS = ("http", "http2")
+
+
+def _validate_port_protocol(protocol: str) -> str:
+ """
+ Validate port protocol using API model structure.
+
+ Args:
+ protocol: Protocol string to validate
+
+ Returns:
+ Validated protocol string
+
+ Raises:
+ ValueError: If protocol is invalid
+ """
+    # Check against the allowed values first so the error message is clear
+    if protocol not in VALID_DEPLOYMENT_PORT_PROTOCOLS:
+        raise ValueError(
+            f"Invalid protocol '{protocol}'. Must be one of {VALID_DEPLOYMENT_PORT_PROTOCOLS}"
+        )
+    # Then validate by instantiating a DeploymentPort, so the API model's own
+    # validation is exercised too (the port number is an arbitrary placeholder)
+    try:
+        port = DeploymentPort(port=3030, protocol=protocol)
+        return port.protocol or "http"
+    except Exception as e:
+        raise ValueError(
+            f"Invalid protocol '{protocol}'. Must be one of {VALID_DEPLOYMENT_PORT_PROTOCOLS}"
+        ) from e
+
+
+def get_api_client(
+ api_token: Optional[str] = None, host: Optional[str] = None
+) -> tuple[AppsApi, ServicesApi, InstancesApi, CatalogInstancesApi]:
+ """
+ Get configured API clients for Koyeb operations.
+
+ Args:
+ api_token: Koyeb API token. If not provided, will try to get from KOYEB_API_TOKEN env var
+ host: Koyeb API host URL. If not provided, will try to get from KOYEB_API_HOST env var (defaults to https://app.koyeb.com)
+
+ Returns:
+ Tuple of (AppsApi, ServicesApi, InstancesApi, CatalogInstancesApi) instances
+
+ Raises:
+ ValueError: If API token is not provided
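+
+    Example (illustrative; assumes KOYEB_API_TOKEN is set in the environment):
+        apps_api, services_api, instances_api, catalog_api = get_api_client()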
+ """
+ token = api_token or os.getenv("KOYEB_API_TOKEN")
+ if not token:
+ raise ValueError(
+ "API token is required. Set KOYEB_API_TOKEN environment variable or pass api_token parameter"
+ )
+
+ api_host = host or os.getenv("KOYEB_API_HOST", "https://app.koyeb.com")
+ configuration = Configuration(host=api_host)
+ configuration.api_key["Bearer"] = token
+ configuration.api_key_prefix["Bearer"] = "Bearer"
+
+ api_client = ApiClient(configuration)
+ return (
+ AppsApi(api_client),
+ ServicesApi(api_client),
+ InstancesApi(api_client),
+ CatalogInstancesApi(api_client),
+ )
+
+
+def build_env_vars(env: Optional[Dict[str, str]]) -> List[DeploymentEnv]:
+ """
+ Build environment variables list from dictionary.
+
+ Args:
+ env: Dictionary of environment variables
+
+ Returns:
+ List of DeploymentEnv objects
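+
+    Example (illustrative):
+        env_vars = build_env_vars({"DEBUG": "1", "PORT": "3031"})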
+ """
+ env_vars = []
+ if env:
+ for key, value in env.items():
+ env_vars.append(DeploymentEnv(key=key, value=value))
+ return env_vars
+
+
+def create_docker_source(
+ image: str, command_args: List[str], privileged: Optional[bool] = None
+) -> DockerSource:
+ """
+ Create Docker source configuration.
+
+ Args:
+ image: Docker image name
+        command_args: Command and arguments to run (an empty list means the image default is used)
+ privileged: If True, run the container in privileged mode (default: None/False)
+
+ Returns:
+ DockerSource object
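+
+    Example (illustrative; the image tag is a placeholder):
+        source = create_docker_source("koyeb/sandbox:latest", ["python3", "-m", "http.server"])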
+ """
+ return DockerSource(
+ image=image,
+ command=command_args[0] if command_args else None,
+ args=list(command_args[1:]) if len(command_args) > 1 else None,
+ privileged=privileged,
+ )
+
+
+def create_koyeb_sandbox_ports(protocol: str = "http") -> List[DeploymentPort]:
+ """
+ Create port configuration for koyeb/sandbox image.
+
+ Creates two ports:
+ - Port 3030 exposed on HTTP, mounted on /koyeb-sandbox/
+ - Port 3031 exposed with the specified protocol, mounted on /
+
+ Args:
+ protocol: Protocol to use for port 3031 ("http" or "http2"), defaults to "http"
+
+ Returns:
+ List of DeploymentPort objects configured for koyeb/sandbox
+ """
+ return [
+ DeploymentPort(
+ port=3030,
+ protocol="http",
+ ),
+ DeploymentPort(
+ port=3031,
+ protocol=protocol,
+ ),
+ ]
+
+
+def create_koyeb_sandbox_proxy_ports() -> List[DeploymentProxyPort]:
+ """
+ Create TCP proxy port configuration for koyeb/sandbox image.
+
+ Creates proxy port for direct TCP access:
+ - Port 3031 exposed via TCP proxy
+
+ Returns:
+ List of DeploymentProxyPort objects configured for TCP proxy access
+ """
+ return [
+ DeploymentProxyPort(
+ port=3031,
+ protocol=ProxyPortProtocol.TCP,
+ ),
+ ]
+
+
+def create_koyeb_sandbox_routes() -> List[DeploymentRoute]:
+ """
+ Create route configuration for koyeb/sandbox image to make it publicly accessible.
+
+ Creates two routes:
+ - Port 3030 accessible at /koyeb-sandbox/
+ - Port 3031 accessible at /
+
+ Returns:
+ List of DeploymentRoute objects configured for koyeb/sandbox
+ """
+ return [
+ DeploymentRoute(port=3030, path="/koyeb-sandbox/"),
+ DeploymentRoute(port=3031, path="/"),
+ ]
+
+
+def _validate_idle_timeout(idle_timeout: Optional[IdleTimeout]) -> None:
+ """
+ Validate idle_timeout parameter according to spec.
+
+ Raises:
+ ValueError: If validation fails
+ """
+ if idle_timeout is None:
+ return
+
+ if isinstance(idle_timeout, int):
+ if idle_timeout < 0:
+ raise ValueError("idle_timeout must be >= 0")
+ if idle_timeout > 0:
+ # Deep sleep only - valid
+ return
+ # idle_timeout == 0 means disable scale-to-zero - valid
+ return
+
+ if isinstance(idle_timeout, dict):
+ if "deep_sleep" not in idle_timeout:
+ raise ValueError(
+ "idle_timeout dict must contain 'deep_sleep' key (at minimum)"
+ )
+
+ deep_sleep = idle_timeout.get("deep_sleep")
+ if deep_sleep is None or not isinstance(deep_sleep, int) or deep_sleep <= 0:
+ raise ValueError("deep_sleep must be a positive integer")
+
+ if "light_sleep" in idle_timeout:
+ light_sleep = idle_timeout.get("light_sleep")
+ if (
+ light_sleep is None
+ or not isinstance(light_sleep, int)
+ or light_sleep <= 0
+ ):
+ raise ValueError("light_sleep must be a positive integer")
+
+            if deep_sleep < light_sleep:
+                raise ValueError(
+                    "deep_sleep must be >= light_sleep when both are provided"
+                )
+        return
+
+    raise TypeError(
+        f"idle_timeout must be an int or a dict, got {type(idle_timeout).__name__}"
+    )
+
+
+def _is_light_sleep_enabled(
+ instance_type: str,
+ catalog_instances_api: Optional[CatalogInstancesApi] = None,
+) -> bool:
+ """
+ Check if light sleep is enabled for the instance type using API or fallback.
+
+ Args:
+ instance_type: Instance type string
+ catalog_instances_api: Optional CatalogInstancesApi client (if None, will try to create one)
+
+ Returns:
+        True if light sleep is enabled for the instance type; defaults to True when the API lookup fails or is inconclusive
+ """
+ try:
+ if catalog_instances_api is None:
+ _, _, _, catalog_instances_api = get_api_client(None)
+ response = catalog_instances_api.get_catalog_instance(id=instance_type)
+ if response and response.instance:
+ return response.instance.light_sleep_enabled or False
+ except (ApiException, NotFoundException):
+ # If API call fails, default to True (assume light sleep is enabled)
+ pass
+ except Exception:
+ # Any other error, default to True (assume light sleep is enabled)
+ pass
+ # Default to True if we can't determine from API
+ return True
+
+
+def _process_idle_timeout(
+ idle_timeout: Optional[IdleTimeout],
+ light_sleep_enabled: bool = True,
+) -> Optional[DeploymentScalingTargetSleepIdleDelay]:
+ """
+ Process idle_timeout parameter and convert to DeploymentScalingTargetSleepIdleDelay.
+
+ According to spec:
+ - If unsupported instance type: idle_timeout is silently ignored (returns None)
+ - None (default): Auto-enable light_sleep=300s, deep_sleep=600s
+ - 0: Explicitly disable scale-to-zero (returns None)
+ - int > 0: Deep sleep only
+ - dict: Explicit configuration
+ - If light_sleep_enabled is False for the instance type, light_sleep is ignored
+
+ Args:
+ idle_timeout: Idle timeout configuration
+ light_sleep_enabled: Whether light sleep is enabled for the instance type (default: True)
+
+ Returns:
+ DeploymentScalingTargetSleepIdleDelay or None if disabled/ignored
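+
+    Example (illustrative):
+        _process_idle_timeout(600)  # deep sleep only, after 600s idle
+        _process_idle_timeout(0)    # returns None: scale-to-zero disabled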
+ """
+ # Validate the parameter
+ _validate_idle_timeout(idle_timeout)
+
+ # Process according to spec
+ if idle_timeout is None:
+ # Default: Auto-enable light_sleep=300s, deep_sleep=600s
+ # If light sleep is not enabled, only use deep_sleep
+ if not light_sleep_enabled:
+ return DeploymentScalingTargetSleepIdleDelay(
+ deep_sleep_value=600,
+ )
+ return DeploymentScalingTargetSleepIdleDelay(
+ light_sleep_value=300,
+ deep_sleep_value=600,
+ )
+
+ if isinstance(idle_timeout, int):
+ if idle_timeout == 0:
+ # Explicitly disable scale-to-zero
+ return None
+ # Deep sleep only
+ return DeploymentScalingTargetSleepIdleDelay(
+ deep_sleep_value=idle_timeout,
+ )
+
+ if isinstance(idle_timeout, dict):
+ deep_sleep = idle_timeout.get("deep_sleep")
+ light_sleep = idle_timeout.get("light_sleep")
+
+ # If light sleep is not enabled, ignore light_sleep if provided
+ if not light_sleep_enabled:
+ return DeploymentScalingTargetSleepIdleDelay(
+ deep_sleep_value=deep_sleep,
+ )
+
+ if light_sleep is not None:
+ # Both light_sleep and deep_sleep provided
+ return DeploymentScalingTargetSleepIdleDelay(
+ light_sleep_value=light_sleep,
+ deep_sleep_value=deep_sleep,
+ )
+ else:
+ # Deep sleep only
+ return DeploymentScalingTargetSleepIdleDelay(
+ deep_sleep_value=deep_sleep,
+ )
+
+
+def create_deployment_definition(
+ name: str,
+ docker_source: DockerSource,
+ env_vars: List[DeploymentEnv],
+ instance_type: str,
+ exposed_port_protocol: Optional[str] = None,
+ region: Optional[str] = None,
+ routes: Optional[List[DeploymentRoute]] = None,
+ idle_timeout: Optional[IdleTimeout] = None,
+ light_sleep_enabled: bool = True,
+ enable_tcp_proxy: bool = False,
+) -> DeploymentDefinition:
+ """
+ Create deployment definition for a sandbox service.
+
+ Args:
+ name: Service name
+ docker_source: Docker configuration
+ env_vars: Environment variables
+ instance_type: Instance type
+ exposed_port_protocol: Protocol to expose ports with ("http" or "http2").
+ If None, defaults to "http".
+ If provided, must be one of "http" or "http2".
+ region: Region to deploy to (defaults to "na")
+ routes: List of routes for public access
+ idle_timeout: Idle timeout configuration (see IdleTimeout type)
+ light_sleep_enabled: Whether light sleep is enabled for the instance type (default: True)
+ enable_tcp_proxy: If True, enables TCP proxy for direct TCP access to port 3031
+
+ Returns:
+ DeploymentDefinition object
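+
+    Example (a minimal sketch; the name, image and instance type are placeholders):
+        definition = create_deployment_definition(
+            name="my-sandbox",
+            docker_source=create_docker_source("koyeb/sandbox:latest", []),
+            env_vars=[],
+            instance_type="nano",
+        )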
+ """
+ if region is None:
+ region = "na"
+
+ # Convert single region string to list for API
+ regions_list = [region]
+
+ # Always create ports with protocol (default to "http" if not specified)
+ protocol = exposed_port_protocol if exposed_port_protocol is not None else "http"
+ # Validate protocol using API model structure
+ protocol = _validate_port_protocol(protocol)
+ ports = create_koyeb_sandbox_ports(protocol)
+
+ # Create TCP proxy ports if enabled
+ proxy_ports = None
+ if enable_tcp_proxy:
+ proxy_ports = create_koyeb_sandbox_proxy_ports()
+
+ # Always use SANDBOX type
+ deployment_type = DeploymentDefinitionType.SANDBOX
+
+ # Process idle_timeout
+ sleep_idle_delay = _process_idle_timeout(idle_timeout, light_sleep_enabled)
+
+ # Create scaling configuration
+ # If idle_timeout is 0, explicitly disable scale-to-zero (min=1, always-on)
+ # Otherwise (None, int > 0, or dict), enable scale-to-zero (min=0)
+ min_scale = 1 if idle_timeout == 0 else 0
+ targets = None
+ if sleep_idle_delay is not None:
+ scaling_target = DeploymentScalingTarget(sleep_idle_delay=sleep_idle_delay)
+ targets = [scaling_target]
+
+ scalings = [DeploymentScaling(min=min_scale, max=1, targets=targets)]
+
+ return DeploymentDefinition(
+ name=name,
+ type=deployment_type,
+ docker=docker_source,
+ env=env_vars,
+ ports=ports,
+ proxy_ports=proxy_ports,
+ routes=routes,
+ instance_types=[DeploymentInstanceType(type=instance_type)],
+ scalings=scalings,
+ regions=regions_list,
+ )
+
+
+def get_sandbox_status(
+ instance_id: str, api_token: Optional[str] = None
+) -> InstanceStatus:
+ """Get the current status of a sandbox instance."""
+ try:
+ _, _, instances_api, _ = get_api_client(api_token)
+ instance_response = instances_api.get_instance(instance_id)
+ return instance_response.instance.status
+ except (NotFoundException, ApiException) as e:
+ logger.debug(f"Failed to get sandbox status: {e}")
+ return InstanceStatus.ERROR
+ except Exception as e:
+ logger.warning(f"Unexpected error getting sandbox status: {e}")
+ return InstanceStatus.ERROR
+
+
+def is_sandbox_healthy(
+ instance_id: str,
+ sandbox_url: str,
+ sandbox_secret: str,
+ api_token: Optional[str] = None,
+) -> bool:
+ """
+ Check if sandbox is healthy and ready for operations.
+
+ This function requires both sandbox_url and sandbox_secret to properly check:
+ 1. The Koyeb instance status (via API) - using instance_id and api_token
+ 2. The sandbox executor health endpoint (via SandboxClient) - using sandbox_url and sandbox_secret
+
+ Args:
+        instance_id: The Koyeb instance ID
+        sandbox_url: URL of the sandbox executor API (required)
+        sandbox_secret: Secret for sandbox executor authentication (required)
+        api_token: Koyeb API token (optional; falls back to KOYEB_API_TOKEN)
+
+ Returns:
+ bool: True if sandbox is healthy, False otherwise
+
+ Raises:
+ ValueError: If sandbox_url or sandbox_secret are not provided
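+
+    Example (illustrative; the ID, URL and secret are placeholders):
+        healthy = is_sandbox_healthy(
+            instance_id="inst-123",
+            sandbox_url="https://my-sandbox.example.koyeb.app",
+            sandbox_secret="s3cret",
+        )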
+ """
+ if not sandbox_url:
+ raise ValueError("sandbox_url is required for health check")
+ if not sandbox_secret:
+ raise ValueError("sandbox_secret is required for health check")
+
+ # Check Koyeb instance status
+ instance_healthy = (
+ get_sandbox_status(instance_id, api_token) == InstanceStatus.HEALTHY
+ )
+
+ # If instance is not healthy, no need to check executor
+ if not instance_healthy:
+ return False
+
+ # Check executor health
+ try:
+ from .executor_client import SandboxClient
+
+ client = SandboxClient(sandbox_url, sandbox_secret)
+ health_response = client.health()
+ # Check if health response indicates the server is healthy
+ # The exact response format may vary, but typically has a "status" field
+ if isinstance(health_response, dict):
+ status = health_response.get("status", "").lower()
+ is_healthy = status in ["ok", "healthy", "ready"]
+ if not is_healthy:
+ logger.debug(f"Sandbox executor health check returned status: {status}")
+ return is_healthy
+ return True # If we got a response, consider it healthy
+ except Exception as e:
+ # If we can't reach the executor API, consider it unhealthy
+ logger.debug(f"Sandbox executor health check failed: {e}")
+ return False
+
+
+def escape_shell_arg(arg: str) -> str:
+ """
+ Escape a shell argument for safe use in shell commands.
+
+ Args:
+ arg: The argument to escape
+
+ Returns:
+ Properly escaped shell argument
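+
+    Example:
+        escape_shell_arg("file with spaces.txt")  # -> "'file with spaces.txt'"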
+ """
+ return shlex.quote(arg)
+
+
+def validate_port(port: int) -> None:
+ """
+ Validate that a port number is in the valid range.
+
+ Args:
+ port: Port number to validate
+
+ Raises:
+ ValueError: If port is not in valid range [1, 65535]
+ """
+ if not isinstance(port, int) or port < MIN_PORT or port > MAX_PORT:
+ raise ValueError(
+ f"Port must be an integer between {MIN_PORT} and {MAX_PORT}, got {port}"
+ )
+
+
+def check_error_message(error_msg: str, error_type: str) -> bool:
+ """
+ Check if an error message matches a specific error type.
+ Uses case-insensitive matching against known error patterns.
+
+ Args:
+ error_msg: The error message to check
+ error_type: The type of error to check for (key in ERROR_MESSAGES)
+
+ Returns:
+ True if error message matches the error type
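+
+    Example (illustrative):
+        check_error_message("rm: cannot remove 'x': No such file or directory", "NO_SUCH_FILE")  # True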
+ """
+ if error_type not in ERROR_MESSAGES:
+ return False
+
+ error_msg_lower = error_msg.lower()
+ patterns = ERROR_MESSAGES[error_type]
+ return any(pattern.lower() in error_msg_lower for pattern in patterns)
+
+
+async def run_sync_in_executor(
+ method: Callable[..., Any], *args: Any, **kwargs: Any
+) -> Any:
+ """
+ Run a synchronous method in an async executor.
+
+ Helper function to wrap synchronous methods for async execution.
+ Used by AsyncSandbox and AsyncSandboxFilesystem to wrap sync parent methods.
+
+ Args:
+ method: The synchronous method to run
+ *args: Positional arguments for the method
+ **kwargs: Keyword arguments for the method
+
+ Returns:
+ Result of the synchronous method call
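+
+    Example (illustrative; assumes a sandbox object with a hypothetical sync read_file method):
+        content = await run_sync_in_executor(sandbox.read_file, "/etc/hostname")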
+ """
+ loop = asyncio.get_running_loop()
+ return await loop.run_in_executor(None, lambda: method(*args, **kwargs))
+
+
+def async_wrapper(method_name: str):
+ """
+ Decorator to automatically create async wrapper for sync methods.
+
+ This decorator creates an async method that wraps a sync method from the parent class.
+ The sync method is called via super() and executed in an executor.
+
+ Args:
+ method_name: Name of the sync method to wrap (from parent class)
+
+ Usage:
+ @async_wrapper("delete")
+ async def delete(self) -> None:
+ \"\"\"Delete the sandbox instance asynchronously.\"\"\"
+ pass # Implementation is handled by decorator
+ """
+
+ def decorator(func):
+ async def wrapper(self, *args, **kwargs):
+ # Get the parent class from MRO (Method Resolution Order)
+ # __mro__[0] is the current class, __mro__[1] is the parent
+ parent_class = self.__class__.__mro__[1]
+ # Get the unbound method from parent class
+ sync_method = getattr(parent_class, method_name)
+ # Bind it to self (equivalent to super().method_name)
+ bound_method = sync_method.__get__(self, parent_class)
+ return await self._run_sync(bound_method, *args, **kwargs)
+
+ # Preserve function metadata
+ wrapper.__name__ = func.__name__
+ wrapper.__qualname__ = func.__qualname__
+ wrapper.__doc__ = func.__doc__ or f"{method_name} (async version)"
+ wrapper.__annotations__ = func.__annotations__
+ return wrapper
+
+ return decorator
+
+
+def create_sandbox_client(
+ sandbox_url: Optional[str],
+ sandbox_secret: Optional[str],
+ existing_client: Optional[Any] = None,
+) -> Any:
+ """
+ Create or return existing SandboxClient instance with validation.
+
+ Helper function to create SandboxClient instances with consistent validation.
+ Used by Sandbox, SandboxExecutor, and SandboxFilesystem to avoid duplication.
+
+ Args:
+ sandbox_url: The sandbox URL (from _get_sandbox_url() or sandbox._get_sandbox_url())
+ sandbox_secret: The sandbox secret
+ existing_client: Existing client instance to return if not None
+
+ Returns:
+ SandboxClient: Configured client instance
+
+ Raises:
+ SandboxError: If sandbox URL or secret is not available
+ """
+ if existing_client is not None:
+ return existing_client
+
+ if not sandbox_url:
+ raise SandboxError("Unable to get sandbox URL")
+ if not sandbox_secret:
+ raise SandboxError("Sandbox secret not available")
+
+ from .executor_client import SandboxClient
+
+ return SandboxClient(sandbox_url, sandbox_secret)
+
+
+class SandboxError(Exception):
+ """Base exception for sandbox operations"""
+
+
+class SandboxTimeoutError(SandboxError):
+ """Raised when a sandbox operation times out"""
diff --git a/pyproject.toml b/pyproject.toml
index 128bd60c..b523edd0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -13,6 +13,8 @@ dependencies = [
"python-dateutil (>=2.8.2)",
"pydantic (>=2)",
"typing-extensions (>=4.7.1)",
+ "websockets>=15.0.1",
+ "requests>=2.32.5",
]
[project.urls]
@@ -39,9 +41,11 @@ build-backend = "setuptools.build_meta"
[dependency-groups]
dev = [
+ "autopep8>=2.3.2",
"flake8>=7.3.0",
"pydoc-markdown>=4.8.2",
"pytest>=8.4.2",
+ "tqdm>=4.67.1",
]
[tool.pylint.'MESSAGES CONTROL']
diff --git a/scripts/generate_docs.sh b/scripts/generate_docs.sh
index 93f91719..edf1a23b 100755
--- a/scripts/generate_docs.sh
+++ b/scripts/generate_docs.sh
@@ -1 +1,2 @@
uv run pydoc-markdown -p koyeb/api >docs/api.md
+uv run pydoc-markdown -p koyeb/sandbox >docs/sandbox.md
diff --git a/spec/openapi.json b/spec/openapi.json
index a20d2265..1f0f43c4 100644
--- a/spec/openapi.json
+++ b/spec/openapi.json
@@ -3900,7 +3900,8 @@
"INVALID_TYPE",
"WEB",
"WORKER",
- "DATABASE"
+ "DATABASE",
+ "SANDBOX"
]
},
"collectionFormat": "multi"
@@ -6285,45 +6286,74 @@
"parameters": [
{
"name": "type",
+ "description": "Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\".",
"in": "query",
"required": false,
"type": "string"
},
{
"name": "app_id",
+ "description": "(Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.",
"in": "query",
"required": false,
"type": "string"
},
{
"name": "service_id",
+ "description": "(Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.",
"in": "query",
"required": false,
"type": "string"
},
{
"name": "deployment_id",
+ "description": "(Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.",
+ "in": "query",
+ "required": false,
+ "type": "string"
+ },
+ {
+ "name": "regional_deployment_id",
+ "description": "(Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.",
"in": "query",
"required": false,
"type": "string"
},
{
"name": "instance_id",
+ "description": "Deprecated, prefer using instance_ids instead.",
"in": "query",
"required": false,
"type": "string"
},
+ {
+ "name": "instance_ids",
+ "description": "(Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.",
+ "in": "query",
+ "required": false,
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "collectionFormat": "multi"
+ },
{
"name": "stream",
+ "description": "Deprecated, prefer using streams instead.",
"in": "query",
"required": false,
"type": "string"
},
{
- "name": "regional_deployment_id",
+ "name": "streams",
+ "description": "(Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs).",
"in": "query",
"required": false,
- "type": "string"
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "collectionFormat": "multi"
},
{
"name": "start",
@@ -6369,6 +6399,17 @@
"in": "query",
"required": false,
"type": "string"
+ },
+ {
+ "name": "regions",
+ "description": "(Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"]).",
+ "in": "query",
+ "required": false,
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "collectionFormat": "multi"
}
],
"tags": [
@@ -6442,48 +6483,78 @@
"parameters": [
{
"name": "type",
+ "description": "Type of logs to retrieve, either \"build\" or \"runtime\". Defaults to \"runtime\".",
"in": "query",
"required": false,
"type": "string"
},
{
"name": "app_id",
+ "description": "(Optional) Filter on the provided app_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.",
"in": "query",
"required": false,
"type": "string"
},
{
"name": "service_id",
+ "description": "(Optional) Filter on the provided service_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.",
"in": "query",
"required": false,
"type": "string"
},
{
"name": "deployment_id",
+ "description": "(Optional) Filter on the provided deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.",
"in": "query",
"required": false,
"type": "string"
},
{
"name": "regional_deployment_id",
+ "description": "(Optional) Filter on the provided regional_deployment_id. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.",
"in": "query",
"required": false,
"type": "string"
},
{
"name": "instance_id",
+ "description": "Deprecated, prefer using instance_ids instead.",
"in": "query",
"required": false,
"type": "string"
},
+ {
+ "name": "instance_ids",
+ "description": "(Optional) Filter on the provided instance_ids. At least one of app_id, service_id, deployment_id, regional_deployment_id or instance_ids must be set.",
+ "in": "query",
+ "required": false,
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "collectionFormat": "multi"
+ },
{
"name": "stream",
+ "description": "Deprecated, prefer using streams instead.",
"in": "query",
"required": false,
"type": "string"
},
+ {
+ "name": "streams",
+ "description": "(Optional) Filter on stream: either \"stdout\", \"stderr\" or \"koyeb\" (for system logs).",
+ "in": "query",
+ "required": false,
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "collectionFormat": "multi"
+ },
{
"name": "start",
+ "description": "(Optional) Defaults to 24 hours ago.",
"in": "query",
"required": false,
"type": "string",
@@ -6491,6 +6562,7 @@
},
{
"name": "limit",
+ "description": "(Optional) Defaults to 1000. Maximum of 1000.",
"in": "query",
"required": false,
"type": "string",
@@ -6509,6 +6581,17 @@
"in": "query",
"required": false,
"type": "string"
+ },
+ {
+ "name": "regions",
+ "description": "(Optional) Filter on the provided regions (e.g. [\"fra\", \"was\"]).",
+ "in": "query",
+ "required": false,
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "collectionFormat": "multi"
}
],
"tags": [
@@ -13385,7 +13468,8 @@
"enum": [
"INVALID",
"WEB",
- "WORKER"
+ "WORKER",
+ "SANDBOX"
],
"default": "INVALID"
},
@@ -13766,7 +13850,8 @@
"INVALID",
"WEB",
"WORKER",
- "DATABASE"
+ "DATABASE",
+ "SANDBOX"
],
"default": "INVALID"
},
@@ -14157,7 +14242,8 @@
"INVALID_TYPE",
"WEB",
"WORKER",
- "DATABASE"
+ "DATABASE",
+ "SANDBOX"
],
"default": "INVALID_TYPE"
},
@@ -18906,6 +18992,9 @@
"external_id": {
"type": "string"
},
+ "provisioning": {
+ "type": "boolean"
+ },
"address1": {
"type": "string"
},
diff --git a/uv.lock b/uv.lock
index 9fab16bb..2b1c4dff 100644
--- a/uv.lock
+++ b/uv.lock
@@ -15,6 +15,19 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" },
]
+[[package]]
+name = "autopep8"
+version = "2.3.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "pycodestyle" },
+ { name = "tomli", marker = "python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/50/d8/30873d2b7b57dee9263e53d142da044c4600a46f2d28374b3e38b023df16/autopep8-2.3.2.tar.gz", hash = "sha256:89440a4f969197b69a995e4ce0661b031f455a9f776d2c5ba3dbd83466931758", size = 92210, upload-time = "2025-01-14T14:46:18.454Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/9e/43/53afb8ba17218f19b77c7834128566c5bbb100a0ad9ba2e8e89d089d7079/autopep8-2.3.2-py2.py3-none-any.whl", hash = "sha256:ce8ad498672c845a0c3de2629c15b635ec2b05ef8177a6e7c91c74f3e9b51128", size = 45807, upload-time = "2025-01-14T14:46:15.466Z" },
+]
+
[[package]]
name = "black"
version = "23.12.1"
@@ -363,35 +376,43 @@ wheels = [
[[package]]
name = "koyeb-sdk"
-version = "1.0.2"
+version = "1.0.3"
source = { editable = "." }
dependencies = [
{ name = "pydantic" },
{ name = "python-dateutil" },
+ { name = "requests" },
{ name = "typing-extensions" },
{ name = "urllib3" },
+ { name = "websockets" },
]
[package.dev-dependencies]
dev = [
+ { name = "autopep8" },
{ name = "flake8" },
{ name = "pydoc-markdown" },
{ name = "pytest" },
+ { name = "tqdm" },
]
[package.metadata]
requires-dist = [
{ name = "pydantic", specifier = ">=2" },
{ name = "python-dateutil", specifier = ">=2.8.2" },
+ { name = "requests", specifier = ">=2.32.5" },
{ name = "typing-extensions", specifier = ">=4.7.1" },
{ name = "urllib3", specifier = ">=2.1.0,<3.0.0" },
+ { name = "websockets", specifier = ">=15.0.1" },
]
[package.metadata.requires-dev]
dev = [
+ { name = "autopep8", specifier = ">=2.3.2" },
{ name = "flake8", specifier = ">=7.3.0" },
{ name = "pydoc-markdown", specifier = ">=4.8.2" },
{ name = "pytest", specifier = ">=8.4.2" },
+ { name = "tqdm", specifier = ">=4.67.1" },
]
[[package]]
@@ -980,6 +1001,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/c7/18/c86eb8e0202e32dd3df50d43d7ff9854f8e0603945ff398974c1d91ac1ef/tomli_w-1.2.0-py3-none-any.whl", hash = "sha256:188306098d013b691fcadc011abd66727d3c414c571bb01b1a174ba8c983cf90", size = 6675, upload-time = "2025-01-15T12:07:22.074Z" },
]
+[[package]]
+name = "tqdm"
+version = "4.67.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "colorama", marker = "sys_platform == 'win32'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" },
+]
+
[[package]]
name = "typeapi"
version = "2.3.0"
@@ -1059,6 +1092,82 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067, upload-time = "2024-11-01T14:07:11.845Z" },
]
+[[package]]
+name = "websockets"
+version = "15.0.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/21/e6/26d09fab466b7ca9c7737474c52be4f76a40301b08362eb2dbc19dcc16c1/websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee", size = 177016, upload-time = "2025-03-05T20:03:41.606Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/1e/da/6462a9f510c0c49837bbc9345aca92d767a56c1fb2939e1579df1e1cdcf7/websockets-15.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d63efaa0cd96cf0c5fe4d581521d9fa87744540d4bc999ae6e08595a1014b45b", size = 175423, upload-time = "2025-03-05T20:01:35.363Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/9f/9d11c1a4eb046a9e106483b9ff69bce7ac880443f00e5ce64261b47b07e7/websockets-15.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac60e3b188ec7574cb761b08d50fcedf9d77f1530352db4eef1707fe9dee7205", size = 173080, upload-time = "2025-03-05T20:01:37.304Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/4f/b462242432d93ea45f297b6179c7333dd0402b855a912a04e7fc61c0d71f/websockets-15.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5756779642579d902eed757b21b0164cd6fe338506a8083eb58af5c372e39d9a", size = 173329, upload-time = "2025-03-05T20:01:39.668Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/0c/6afa1f4644d7ed50284ac59cc70ef8abd44ccf7d45850d989ea7310538d0/websockets-15.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fdfe3e2a29e4db3659dbd5bbf04560cea53dd9610273917799f1cde46aa725e", size = 182312, upload-time = "2025-03-05T20:01:41.815Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/d4/ffc8bd1350b229ca7a4db2a3e1c482cf87cea1baccd0ef3e72bc720caeec/websockets-15.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c2529b320eb9e35af0fa3016c187dffb84a3ecc572bcee7c3ce302bfeba52bf", size = 181319, upload-time = "2025-03-05T20:01:43.967Z" },
+ { url = "https://files.pythonhosted.org/packages/97/3a/5323a6bb94917af13bbb34009fac01e55c51dfde354f63692bf2533ffbc2/websockets-15.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac1e5c9054fe23226fb11e05a6e630837f074174c4c2f0fe442996112a6de4fb", size = 181631, upload-time = "2025-03-05T20:01:46.104Z" },
+ { url = "https://files.pythonhosted.org/packages/a6/cc/1aeb0f7cee59ef065724041bb7ed667b6ab1eeffe5141696cccec2687b66/websockets-15.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5df592cd503496351d6dc14f7cdad49f268d8e618f80dce0cd5a36b93c3fc08d", size = 182016, upload-time = "2025-03-05T20:01:47.603Z" },
+ { url = "https://files.pythonhosted.org/packages/79/f9/c86f8f7af208e4161a7f7e02774e9d0a81c632ae76db2ff22549e1718a51/websockets-15.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0a34631031a8f05657e8e90903e656959234f3a04552259458aac0b0f9ae6fd9", size = 181426, upload-time = "2025-03-05T20:01:48.949Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/b9/828b0bc6753db905b91df6ae477c0b14a141090df64fb17f8a9d7e3516cf/websockets-15.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3d00075aa65772e7ce9e990cab3ff1de702aa09be3940d1dc88d5abf1ab8a09c", size = 181360, upload-time = "2025-03-05T20:01:50.938Z" },
+ { url = "https://files.pythonhosted.org/packages/89/fb/250f5533ec468ba6327055b7d98b9df056fb1ce623b8b6aaafb30b55d02e/websockets-15.0.1-cp310-cp310-win32.whl", hash = "sha256:1234d4ef35db82f5446dca8e35a7da7964d02c127b095e172e54397fb6a6c256", size = 176388, upload-time = "2025-03-05T20:01:52.213Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/46/aca7082012768bb98e5608f01658ff3ac8437e563eca41cf068bd5849a5e/websockets-15.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:39c1fec2c11dc8d89bba6b2bf1556af381611a173ac2b511cf7231622058af41", size = 176830, upload-time = "2025-03-05T20:01:53.922Z" },
+ { url = "https://files.pythonhosted.org/packages/9f/32/18fcd5919c293a398db67443acd33fde142f283853076049824fc58e6f75/websockets-15.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:823c248b690b2fd9303ba00c4f66cd5e2d8c3ba4aa968b2779be9532a4dad431", size = 175423, upload-time = "2025-03-05T20:01:56.276Z" },
+ { url = "https://files.pythonhosted.org/packages/76/70/ba1ad96b07869275ef42e2ce21f07a5b0148936688c2baf7e4a1f60d5058/websockets-15.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678999709e68425ae2593acf2e3ebcbcf2e69885a5ee78f9eb80e6e371f1bf57", size = 173082, upload-time = "2025-03-05T20:01:57.563Z" },
+ { url = "https://files.pythonhosted.org/packages/86/f2/10b55821dd40eb696ce4704a87d57774696f9451108cff0d2824c97e0f97/websockets-15.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d50fd1ee42388dcfb2b3676132c78116490976f1300da28eb629272d5d93e905", size = 173330, upload-time = "2025-03-05T20:01:59.063Z" },
+ { url = "https://files.pythonhosted.org/packages/a5/90/1c37ae8b8a113d3daf1065222b6af61cc44102da95388ac0018fcb7d93d9/websockets-15.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d99e5546bf73dbad5bf3547174cd6cb8ba7273062a23808ffea025ecb1cf8562", size = 182878, upload-time = "2025-03-05T20:02:00.305Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/8d/96e8e288b2a41dffafb78e8904ea7367ee4f891dafc2ab8d87e2124cb3d3/websockets-15.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66dd88c918e3287efc22409d426c8f729688d89a0c587c88971a0faa2c2f3792", size = 181883, upload-time = "2025-03-05T20:02:03.148Z" },
+ { url = "https://files.pythonhosted.org/packages/93/1f/5d6dbf551766308f6f50f8baf8e9860be6182911e8106da7a7f73785f4c4/websockets-15.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8dd8327c795b3e3f219760fa603dcae1dcc148172290a8ab15158cf85a953413", size = 182252, upload-time = "2025-03-05T20:02:05.29Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/78/2d4fed9123e6620cbf1706c0de8a1632e1a28e7774d94346d7de1bba2ca3/websockets-15.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fdc51055e6ff4adeb88d58a11042ec9a5eae317a0a53d12c062c8a8865909e8", size = 182521, upload-time = "2025-03-05T20:02:07.458Z" },
+ { url = "https://files.pythonhosted.org/packages/e7/3b/66d4c1b444dd1a9823c4a81f50231b921bab54eee2f69e70319b4e21f1ca/websockets-15.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:693f0192126df6c2327cce3baa7c06f2a117575e32ab2308f7f8216c29d9e2e3", size = 181958, upload-time = "2025-03-05T20:02:09.842Z" },
+ { url = "https://files.pythonhosted.org/packages/08/ff/e9eed2ee5fed6f76fdd6032ca5cd38c57ca9661430bb3d5fb2872dc8703c/websockets-15.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:54479983bd5fb469c38f2f5c7e3a24f9a4e70594cd68cd1fa6b9340dadaff7cf", size = 181918, upload-time = "2025-03-05T20:02:11.968Z" },
+ { url = "https://files.pythonhosted.org/packages/d8/75/994634a49b7e12532be6a42103597b71098fd25900f7437d6055ed39930a/websockets-15.0.1-cp311-cp311-win32.whl", hash = "sha256:16b6c1b3e57799b9d38427dda63edcbe4926352c47cf88588c0be4ace18dac85", size = 176388, upload-time = "2025-03-05T20:02:13.32Z" },
+ { url = "https://files.pythonhosted.org/packages/98/93/e36c73f78400a65f5e236cd376713c34182e6663f6889cd45a4a04d8f203/websockets-15.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:27ccee0071a0e75d22cb35849b1db43f2ecd3e161041ac1ee9d2352ddf72f065", size = 176828, upload-time = "2025-03-05T20:02:14.585Z" },
+ { url = "https://files.pythonhosted.org/packages/51/6b/4545a0d843594f5d0771e86463606a3988b5a09ca5123136f8a76580dd63/websockets-15.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3", size = 175437, upload-time = "2025-03-05T20:02:16.706Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/71/809a0f5f6a06522af902e0f2ea2757f71ead94610010cf570ab5c98e99ed/websockets-15.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665", size = 173096, upload-time = "2025-03-05T20:02:18.832Z" },
+ { url = "https://files.pythonhosted.org/packages/3d/69/1a681dd6f02180916f116894181eab8b2e25b31e484c5d0eae637ec01f7c/websockets-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2", size = 173332, upload-time = "2025-03-05T20:02:20.187Z" },
+ { url = "https://files.pythonhosted.org/packages/a6/02/0073b3952f5bce97eafbb35757f8d0d54812b6174ed8dd952aa08429bcc3/websockets-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215", size = 183152, upload-time = "2025-03-05T20:02:22.286Z" },
+ { url = "https://files.pythonhosted.org/packages/74/45/c205c8480eafd114b428284840da0b1be9ffd0e4f87338dc95dc6ff961a1/websockets-15.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5", size = 182096, upload-time = "2025-03-05T20:02:24.368Z" },
+ { url = "https://files.pythonhosted.org/packages/14/8f/aa61f528fba38578ec553c145857a181384c72b98156f858ca5c8e82d9d3/websockets-15.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65", size = 182523, upload-time = "2025-03-05T20:02:25.669Z" },
+ { url = "https://files.pythonhosted.org/packages/ec/6d/0267396610add5bc0d0d3e77f546d4cd287200804fe02323797de77dbce9/websockets-15.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe", size = 182790, upload-time = "2025-03-05T20:02:26.99Z" },
+ { url = "https://files.pythonhosted.org/packages/02/05/c68c5adbf679cf610ae2f74a9b871ae84564462955d991178f95a1ddb7dd/websockets-15.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4", size = 182165, upload-time = "2025-03-05T20:02:30.291Z" },
+ { url = "https://files.pythonhosted.org/packages/29/93/bb672df7b2f5faac89761cb5fa34f5cec45a4026c383a4b5761c6cea5c16/websockets-15.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597", size = 182160, upload-time = "2025-03-05T20:02:31.634Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/83/de1f7709376dc3ca9b7eeb4b9a07b4526b14876b6d372a4dc62312bebee0/websockets-15.0.1-cp312-cp312-win32.whl", hash = "sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9", size = 176395, upload-time = "2025-03-05T20:02:33.017Z" },
+ { url = "https://files.pythonhosted.org/packages/7d/71/abf2ebc3bbfa40f391ce1428c7168fb20582d0ff57019b69ea20fa698043/websockets-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7", size = 176841, upload-time = "2025-03-05T20:02:34.498Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/9f/51f0cf64471a9d2b4d0fc6c534f323b664e7095640c34562f5182e5a7195/websockets-15.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931", size = 175440, upload-time = "2025-03-05T20:02:36.695Z" },
+ { url = "https://files.pythonhosted.org/packages/8a/05/aa116ec9943c718905997412c5989f7ed671bc0188ee2ba89520e8765d7b/websockets-15.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675", size = 173098, upload-time = "2025-03-05T20:02:37.985Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/0b/33cef55ff24f2d92924923c99926dcce78e7bd922d649467f0eda8368923/websockets-15.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151", size = 173329, upload-time = "2025-03-05T20:02:39.298Z" },
+ { url = "https://files.pythonhosted.org/packages/31/1d/063b25dcc01faa8fada1469bdf769de3768b7044eac9d41f734fd7b6ad6d/websockets-15.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22", size = 183111, upload-time = "2025-03-05T20:02:40.595Z" },
+ { url = "https://files.pythonhosted.org/packages/93/53/9a87ee494a51bf63e4ec9241c1ccc4f7c2f45fff85d5bde2ff74fcb68b9e/websockets-15.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f", size = 182054, upload-time = "2025-03-05T20:02:41.926Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/b2/83a6ddf56cdcbad4e3d841fcc55d6ba7d19aeb89c50f24dd7e859ec0805f/websockets-15.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8", size = 182496, upload-time = "2025-03-05T20:02:43.304Z" },
+ { url = "https://files.pythonhosted.org/packages/98/41/e7038944ed0abf34c45aa4635ba28136f06052e08fc2168520bb8b25149f/websockets-15.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375", size = 182829, upload-time = "2025-03-05T20:02:48.812Z" },
+ { url = "https://files.pythonhosted.org/packages/e0/17/de15b6158680c7623c6ef0db361da965ab25d813ae54fcfeae2e5b9ef910/websockets-15.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d", size = 182217, upload-time = "2025-03-05T20:02:50.14Z" },
+ { url = "https://files.pythonhosted.org/packages/33/2b/1f168cb6041853eef0362fb9554c3824367c5560cbdaad89ac40f8c2edfc/websockets-15.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4", size = 182195, upload-time = "2025-03-05T20:02:51.561Z" },
+ { url = "https://files.pythonhosted.org/packages/86/eb/20b6cdf273913d0ad05a6a14aed4b9a85591c18a987a3d47f20fa13dcc47/websockets-15.0.1-cp313-cp313-win32.whl", hash = "sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa", size = 176393, upload-time = "2025-03-05T20:02:53.814Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/6c/c65773d6cab416a64d191d6ee8a8b1c68a09970ea6909d16965d26bfed1e/websockets-15.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561", size = 176837, upload-time = "2025-03-05T20:02:55.237Z" },
+ { url = "https://files.pythonhosted.org/packages/36/db/3fff0bcbe339a6fa6a3b9e3fbc2bfb321ec2f4cd233692272c5a8d6cf801/websockets-15.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5f4c04ead5aed67c8a1a20491d54cdfba5884507a48dd798ecaf13c74c4489f5", size = 175424, upload-time = "2025-03-05T20:02:56.505Z" },
+ { url = "https://files.pythonhosted.org/packages/46/e6/519054c2f477def4165b0ec060ad664ed174e140b0d1cbb9fafa4a54f6db/websockets-15.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abdc0c6c8c648b4805c5eacd131910d2a7f6455dfd3becab248ef108e89ab16a", size = 173077, upload-time = "2025-03-05T20:02:58.37Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/21/c0712e382df64c93a0d16449ecbf87b647163485ca1cc3f6cbadb36d2b03/websockets-15.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a625e06551975f4b7ea7102bc43895b90742746797e2e14b70ed61c43a90f09b", size = 173324, upload-time = "2025-03-05T20:02:59.773Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/cb/51ba82e59b3a664df54beed8ad95517c1b4dc1a913730e7a7db778f21291/websockets-15.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d591f8de75824cbb7acad4e05d2d710484f15f29d4a915092675ad3456f11770", size = 182094, upload-time = "2025-03-05T20:03:01.827Z" },
+ { url = "https://files.pythonhosted.org/packages/fb/0f/bf3788c03fec679bcdaef787518dbe60d12fe5615a544a6d4cf82f045193/websockets-15.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47819cea040f31d670cc8d324bb6435c6f133b8c7a19ec3d61634e62f8d8f9eb", size = 181094, upload-time = "2025-03-05T20:03:03.123Z" },
+ { url = "https://files.pythonhosted.org/packages/5e/da/9fb8c21edbc719b66763a571afbaf206cb6d3736d28255a46fc2fe20f902/websockets-15.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac017dd64572e5c3bd01939121e4d16cf30e5d7e110a119399cf3133b63ad054", size = 181397, upload-time = "2025-03-05T20:03:04.443Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/65/65f379525a2719e91d9d90c38fe8b8bc62bd3c702ac651b7278609b696c4/websockets-15.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4a9fac8e469d04ce6c25bb2610dc535235bd4aa14996b4e6dbebf5e007eba5ee", size = 181794, upload-time = "2025-03-05T20:03:06.708Z" },
+ { url = "https://files.pythonhosted.org/packages/d9/26/31ac2d08f8e9304d81a1a7ed2851c0300f636019a57cbaa91342015c72cc/websockets-15.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363c6f671b761efcb30608d24925a382497c12c506b51661883c3e22337265ed", size = 181194, upload-time = "2025-03-05T20:03:08.844Z" },
+ { url = "https://files.pythonhosted.org/packages/98/72/1090de20d6c91994cd4b357c3f75a4f25ee231b63e03adea89671cc12a3f/websockets-15.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2034693ad3097d5355bfdacfffcbd3ef5694f9718ab7f29c29689a9eae841880", size = 181164, upload-time = "2025-03-05T20:03:10.242Z" },
+ { url = "https://files.pythonhosted.org/packages/2d/37/098f2e1c103ae8ed79b0e77f08d83b0ec0b241cf4b7f2f10edd0126472e1/websockets-15.0.1-cp39-cp39-win32.whl", hash = "sha256:3b1ac0d3e594bf121308112697cf4b32be538fb1444468fb0a6ae4feebc83411", size = 176381, upload-time = "2025-03-05T20:03:12.77Z" },
+ { url = "https://files.pythonhosted.org/packages/75/8b/a32978a3ab42cebb2ebdd5b05df0696a09f4d436ce69def11893afa301f0/websockets-15.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:b7643a03db5c95c799b89b31c036d5f27eeb4d259c798e878d6937d71832b1e4", size = 176841, upload-time = "2025-03-05T20:03:14.367Z" },
+ { url = "https://files.pythonhosted.org/packages/02/9e/d40f779fa16f74d3468357197af8d6ad07e7c5a27ea1ca74ceb38986f77a/websockets-15.0.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0c9e74d766f2818bb95f84c25be4dea09841ac0f734d1966f415e4edfc4ef1c3", size = 173109, upload-time = "2025-03-05T20:03:17.769Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/cd/5b887b8585a593073fd92f7c23ecd3985cd2c3175025a91b0d69b0551372/websockets-15.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1009ee0c7739c08a0cd59de430d6de452a55e42d6b522de7aa15e6f67db0b8e1", size = 173343, upload-time = "2025-03-05T20:03:19.094Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/ae/d34f7556890341e900a95acf4886833646306269f899d58ad62f588bf410/websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76d1f20b1c7a2fa82367e04982e708723ba0e7b8d43aa643d3dcd404d74f1475", size = 174599, upload-time = "2025-03-05T20:03:21.1Z" },
+ { url = "https://files.pythonhosted.org/packages/71/e6/5fd43993a87db364ec60fc1d608273a1a465c0caba69176dd160e197ce42/websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f29d80eb9a9263b8d109135351caf568cc3f80b9928bccde535c235de55c22d9", size = 174207, upload-time = "2025-03-05T20:03:23.221Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/fb/c492d6daa5ec067c2988ac80c61359ace5c4c674c532985ac5a123436cec/websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b359ed09954d7c18bbc1680f380c7301f92c60bf924171629c5db97febb12f04", size = 174155, upload-time = "2025-03-05T20:03:25.321Z" },
+ { url = "https://files.pythonhosted.org/packages/68/a1/dcb68430b1d00b698ae7a7e0194433bce4f07ded185f0ee5fb21e2a2e91e/websockets-15.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:cad21560da69f4ce7658ca2cb83138fb4cf695a2ba3e475e0559e05991aa8122", size = 176884, upload-time = "2025-03-05T20:03:27.934Z" },
+ { url = "https://files.pythonhosted.org/packages/b7/48/4b67623bac4d79beb3a6bb27b803ba75c1bdedc06bd827e465803690a4b2/websockets-15.0.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7f493881579c90fc262d9cdbaa05a6b54b3811c2f300766748db79f098db9940", size = 173106, upload-time = "2025-03-05T20:03:29.404Z" },
+ { url = "https://files.pythonhosted.org/packages/ed/f0/adb07514a49fe5728192764e04295be78859e4a537ab8fcc518a3dbb3281/websockets-15.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:47b099e1f4fbc95b701b6e85768e1fcdaf1630f3cbe4765fa216596f12310e2e", size = 173339, upload-time = "2025-03-05T20:03:30.755Z" },
+ { url = "https://files.pythonhosted.org/packages/87/28/bd23c6344b18fb43df40d0700f6d3fffcd7cef14a6995b4f976978b52e62/websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67f2b6de947f8c757db2db9c71527933ad0019737ec374a8a6be9a956786aaf9", size = 174597, upload-time = "2025-03-05T20:03:32.247Z" },
+ { url = "https://files.pythonhosted.org/packages/6d/79/ca288495863d0f23a60f546f0905ae8f3ed467ad87f8b6aceb65f4c013e4/websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d08eb4c2b7d6c41da6ca0600c077e93f5adcfd979cd777d747e9ee624556da4b", size = 174205, upload-time = "2025-03-05T20:03:33.731Z" },
+ { url = "https://files.pythonhosted.org/packages/04/e4/120ff3180b0872b1fe6637f6f995bcb009fb5c87d597c1fc21456f50c848/websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b826973a4a2ae47ba357e4e82fa44a463b8f168e1ca775ac64521442b19e87f", size = 174150, upload-time = "2025-03-05T20:03:35.757Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/c3/30e2f9c539b8da8b1d76f64012f3b19253271a63413b2d3adb94b143407f/websockets-15.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:21c1fa28a6a7e3cbdc171c694398b6df4744613ce9b36b1a498e816787e28123", size = 176877, upload-time = "2025-03-05T20:03:37.199Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743, upload-time = "2025-03-05T20:03:39.41Z" },
+]
+
[[package]]
name = "wrapt"
version = "2.0.0"