diff --git a/.claude/agents/sdk-sync-checker.md b/.claude/agents/sdk-sync-checker.md new file mode 100644 index 00000000..41dc8fcb --- /dev/null +++ b/.claude/agents/sdk-sync-checker.md @@ -0,0 +1,76 @@ +--- +name: protobuf-sdk-validator +description: Validates that SDK implementations are synchronized with Protocol Buffer schema definitions. Use when protobuf files change, SDKs need verification, or you suspect schema drift. +tools: Bash, Glob, Grep, Read, TodoWrite +model: sonnet +color: yellow +--- + +You validate SDK implementations against protobuf schemas to ensure synchronization. + +## Process + +### 1. Discovery +- Find all `.proto` files in `guest-agent/rpc/proto/` +- Identify SDK implementations in `sdk/` (python, go, rust, js, curl docs) +- Extract services, RPCs, and message types from proto files + +### 2. Extract Schema +For each message type, extract: +- Field names, types, and numbers +- Required/optional/repeated modifiers +- Nested types and enums +- Service method signatures + +### 3. Compare SDKs +For each SDK, verify message/response types contain: +- All proto fields (accounting for naming conventions) +- Correct type mappings (bytes→hex string, string→string, repeated→array) +- Proper optionality markers + +### 4. Report + +```markdown +# SDK Sync Report + +## Summary +Status: ✅/❌ | Protos: X | SDKs: Y | Issues: Z + +## Findings + +### [SDK Name] (path/to/file.ext) +| Proto Message | Status | Missing Fields | +|---------------|--------|----------------| +| MessageName | ❌ | field1, field2 | + +Details: +- ❌ MessageName.field1: missing (proto line X, expected in SDK) +- ❌ MessageName.field2: missing (proto line Y, expected in SDK) + +## Action Items +1. [SDK]: Add field X to MessageY (file.ext:lineN) +2. [SDK]: Fix type mismatch for field Z +``` + +## Type Mappings +- `bytes` → hex `string` (Python/Go/JS/Rust), `string` JSON (cURL docs) +- `string` → `string` (all) +- `repeated X` → array/list/vec (language-specific) +- `int32/uint32` → number/int types + +## Naming Conventions +- Python: `snake_case` +- Go: `PascalCase` (exported fields) +- Rust: `snake_case` +- JavaScript: `camelCase` +- cURL docs: `snake_case` (JSON wire format) + +## Locations +- Protos: `guest-agent/rpc/proto/*.proto` +- Python: `sdk/python/src/dstack_sdk/dstack_client.py` +- Go: `sdk/go/dstack/client.go` +- Rust: `sdk/rust/types/src/dstack.rs` +- JS: `sdk/js/src/index.ts` +- Docs: `sdk/curl/api.md`, `sdk/curl/api-tappd.md` + +Focus on API surface differences. Provide specific file paths and line numbers. 
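To make the mapping and naming rules above concrete: given a hypothetical proto message (names invented for illustration; the real definitions live under `guest-agent/rpc/proto/`), the validator would expect the Rust SDK in `sdk/rust/types/src/dstack.rs` to expose a shape roughly like this sketch:

```rust
// Illustrative sketch only — message and field names are hypothetical.
//
// Given a proto definition such as:
//
//   message QuoteResponse {
//     bytes quote = 1;
//     repeated string event_log = 2;
//     optional bytes report_data = 3;
//   }
//
// the type-mapping rules above imply this Rust-side shape:
use serde::Deserialize;

#[derive(Debug, Deserialize)]
pub struct QuoteResponse {
    /// proto `bytes` -> hex-encoded string on the SDK side
    pub quote: String,
    /// proto `repeated string` -> Vec<String>; Rust keeps snake_case names
    pub event_log: Vec<String>,
    /// proto `optional bytes` -> Option<hex string>
    pub report_data: Option<String>,
}
```

A missing `event_log` field or a non-optional `report_data: String` here is exactly the kind of drift the report format above is designed to surface.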
diff --git a/.github/workflows/gateway-release.yml b/.github/workflows/gateway-release.yml index 493a7a41..e983b89a 100644 --- a/.github/workflows/gateway-release.yml +++ b/.github/workflows/gateway-release.yml @@ -51,7 +51,7 @@ jobs: with: context: gateway/dstack-app/builder push: true - tags: ${{ vars.DOCKERHUB_USERNAME }}/gateway:${{ env.VERSION }} + tags: ${{ vars.DOCKERHUB_ORG }}/dstack-gateway:${{ env.VERSION }} platforms: linux/amd64 provenance: false build-args: | @@ -61,7 +61,7 @@ jobs: - name: Generate artifact attestation uses: actions/attest-build-provenance@v1 with: - subject-name: "docker.io/${{ vars.DOCKERHUB_USERNAME }}/gateway" + subject-name: "docker.io/${{ vars.DOCKERHUB_ORG }}/dstack-gateway" subject-digest: ${{ steps.build-and-push.outputs.digest }} push-to-registry: true @@ -72,7 +72,7 @@ jobs: body: | ## Docker Image Information - **Image**: `docker.io/${{ vars.DOCKERHUB_USERNAME }}/gateway:${{ env.VERSION }}` + **Image**: `docker.io/${{ vars.DOCKERHUB_ORG }}/dstack-gateway:${{ env.VERSION }}` **Digest (SHA256)**: `${{ steps.build-and-push.outputs.digest }}` diff --git a/.github/workflows/kms-release.yml b/.github/workflows/kms-release.yml index f3f45e4f..b5384372 100644 --- a/.github/workflows/kms-release.yml +++ b/.github/workflows/kms-release.yml @@ -54,7 +54,7 @@ jobs: with: context: kms/dstack-app/builder push: true - tags: ${{ vars.DOCKERHUB_USERNAME }}/kms:${{ env.VERSION }} + tags: ${{ vars.DOCKERHUB_ORG }}/dstack-kms:${{ env.VERSION }} platforms: linux/amd64 provenance: false build-args: | @@ -65,7 +65,7 @@ jobs: - name: Generate artifact attestation uses: actions/attest-build-provenance@v1 with: - subject-name: "docker.io/${{ vars.DOCKERHUB_USERNAME }}/kms" + subject-name: "docker.io/${{ vars.DOCKERHUB_ORG }}/dstack-kms" subject-digest: ${{ steps.build-and-push.outputs.digest }} push-to-registry: true @@ -92,7 +92,7 @@ jobs: body: | ## Docker Image Information - **Image**: `docker.io/${{ vars.DOCKERHUB_USERNAME }}/kms:${{ env.VERSION }}` + **Image**: `docker.io/${{ vars.DOCKERHUB_ORG }}/dstack-kms:${{ env.VERSION }}` **Digest (SHA256)**: `${{ steps.build-and-push.outputs.digest }}` diff --git a/.github/workflows/rust-sdk-release.yml b/.github/workflows/rust-sdk-release.yml new file mode 100644 index 00000000..9e85c6b2 --- /dev/null +++ b/.github/workflows/rust-sdk-release.yml @@ -0,0 +1,25 @@ +# SPDX-FileCopyrightText: © 2025 Phala Network +# +# SPDX-License-Identifier: Apache-2.0 + +name: Publish SDK to crates.io +on: + push: + tags: ['rust-sdk-v*'] +jobs: + publish: + runs-on: ubuntu-latest + environment: sdk-release + permissions: + id-token: write + steps: + - uses: actions/checkout@v5 + - uses: rust-lang/crates-io-auth-action@v1 + id: auth + - run: cargo publish -p dstack-sdk-types + env: + CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }} + - run: cargo publish -p dstack-sdk + env: + CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }} + diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 93ee1828..006c7808 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -25,7 +25,7 @@ jobs: components: clippy, rustfmt - name: Run Clippy - run: cargo clippy -- -D warnings --allow unused_variables + run: cargo clippy -- -D warnings -D clippy::expect_used -D clippy::unwrap_used --allow unused_variables - name: Cargo fmt check run: cargo fmt --check --all diff --git a/.github/workflows/verifier-release.yml b/.github/workflows/verifier-release.yml new file mode 100644 index 00000000..a7a4d28d --- /dev/null +++ 
b/.github/workflows/verifier-release.yml @@ -0,0 +1,80 @@ +# SPDX-FileCopyrightText: © 2025 Phala Network +# +# SPDX-License-Identifier: Apache-2.0 + +name: Verifier Release + +on: + workflow_dispatch: + push: + tags: + - 'verifier-v*' +permissions: + attestations: write + id-token: write + contents: write + packages: write + +jobs: + build-and-release: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Parse version from tag + run: | + VERSION=${GITHUB_REF#refs/tags/verifier-v} + echo "VERSION=$VERSION" >> $GITHUB_ENV + echo "Parsed version: $VERSION" + + - name: Log in to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ vars.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Get Git commit timestamps + run: | + echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV + echo "GIT_REV=$(git rev-parse HEAD)" >> $GITHUB_ENV + + - name: Build and push Docker image + id: build-and-push + uses: docker/build-push-action@v5 + env: + SOURCE_DATE_EPOCH: ${{ env.TIMESTAMP }} + with: + context: verifier + file: verifier/builder/Dockerfile + push: true + tags: ${{ vars.DOCKERHUB_ORG }}/dstack-verifier:${{ env.VERSION }} + platforms: linux/amd64 + provenance: false + build-args: | + DSTACK_REV=${{ env.GIT_REV }} + DSTACK_SRC_URL=${{ github.server_url }}/${{ github.repository }}.git + SOURCE_DATE_EPOCH=${{ env.TIMESTAMP }} + + - name: Generate artifact attestation + uses: actions/attest-build-provenance@v1 + with: + subject-name: "docker.io/${{ vars.DOCKERHUB_ORG }}/dstack-verifier" + subject-digest: ${{ steps.build-and-push.outputs.digest }} + push-to-registry: true + + - name: GitHub Release + uses: softprops/action-gh-release@v1 + with: + name: "Verifier Release v${{ env.VERSION }}" + body: | + ## Docker Image Information + + **Image**: `docker.io/${{ vars.DOCKERHUB_ORG }}/dstack-verifier:${{ env.VERSION }}` + + **Digest (SHA256)**: `${{ steps.build-and-push.outputs.digest }}` + + **Verification**: [Verify on Sigstore](https://search.sigstore.dev/?hash=${{ steps.build-and-push.outputs.digest }}) diff --git a/.gitignore b/.gitignore index 8d90929a..c8b7bba0 100644 --- a/.gitignore +++ b/.gitignore @@ -7,4 +7,5 @@ node_modules/ /.cargo .venv /tmp -.claude +.claude/settings.local.json +__pycache__ diff --git a/CHANGELOG.md b/CHANGELOG.md index ef79457d..c88d0365 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,39 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+## [0.5.5] - 2025-10-20 + +### Added +- SDK sync agent for automated protobuf schema synchronization (#366) +- dstack-verifier CLI tool with OS image hash verification (#341) +- built-in swap configuration support for CVMs (#348, #357, #358) +- support for ext4 filesystem type on storage (#348) +- size-parser crate for handling size configurations (#355) +- init_script support in app-compose.json (#337) +- cache for verifier (#341) +- QEMU version and image name in VmConfig (#340) +- documentation for minimum version of each compose field (#363) + +### Changed +- max app compose size increased to 256K (#349) +- default timeout increased to 3 seconds for python SDK (#339) +- auto reconnect when WireGuard gets stuck (#350) +- put filesystem type in RTMR3 event log (#348) +- read QEMU path from /etc/dstack/client.conf (#332) +- refactor sys-config generation code (#351) +- when formatting app_url, skip port if it's 443 (#326) +- update docker organization references (#342, #343) +- RA-TLS: add KeyCertSign and CrlSign usages for CA cert (#320) + +### Fixed +- guest-agent: request demo cert lazily +- VmConfig decode error (#347) +- potential panic due to int overflow in dstack-mr (#345) +- SDK issues - marked rootfs_hash optional (#339) + +### Removed +- docker_config field from app-compose.json (#374) + ## [0.5.4] - 2025-09-01 ### Security @@ -1237,7 +1270,8 @@ New contributors in this release: * @Leechael made their first contribution * @nanometerzhu made their first contribution * @h4x3rotab made their first contribution -[unreleased]: https://github.com/Dstack-TEE/dstack/compare/v0.5.3..HEAD +[unreleased]: https://github.com/Dstack-TEE/dstack/compare/v0.5.5..HEAD +[0.5.5]: https://github.com/Dstack-TEE/dstack/compare/v0.5.4..v0.5.5 [0.5.4]: https://github.com/Dstack-TEE/dstack/compare/v0.5.3..v0.5.4 [0.5.3]: https://github.com/Dstack-TEE/dstack/compare/v0.5.2..v0.5.3 [0.5.2]: https://github.com/Dstack-TEE/dstack/compare/v0.5.1..v0.5.2 diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 00000000..502564d0 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,226 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## Project Overview + +dstack is a developer-friendly, security-first framework for deploying containerized applications into Intel TDX (Trust Domain Extensions) Trusted Execution Environments (TEEs). The system provides end-to-end security through hardware-rooted attestation, automated key management, and zero-trust networking. + +## Architecture + +dstack consists of several core components that interact to provide TEE-based container deployment: + +### Core Components + +- **`dstack-vmm`** (`vmm/`): Virtual Machine Manager that runs on bare-metal TDX hosts. Orchestrates CVM lifecycle, manages QEMU processes, allocates resources, parses docker-compose files, and provides a web UI (port 9080) for deployment. + +- **`dstack-kms`** (`kms/`): Key Management System that handles cryptographic key provisioning after TDX quote verification. Derives keys deterministically per application identity and enforces authorization policies defined in smart contracts on Ethereum. + +- **`dstack-gateway`** (`gateway/`): Reverse proxy providing zero-trust network access. Handles TLS termination, automated ACME certificate provisioning, and traffic routing via ingress mapping rules.
+ +- **`dstack-guest-agent`** (`guest-agent/`): Runs inside each CVM to provide runtime services including Docker Compose lifecycle management, TDX quote generation, key provisioning from KMS, and log aggregation. Exposes API via Unix socket at `/var/run/dstack.sock`. + +### Communication Protocols + +- **RA-TLS**: Remote Attestation TLS used for all inter-CVM communication, embedding TDX quotes in X.509 certificates for mutual authentication +- **`prpc`**: Protocol Buffers-based RPC framework used across all service APIs +- **`vsock`**: Host-guest communication channel for metadata and configuration +- **Unix Domain Sockets**: Used for local management (e.g., `vmm.sock`) + +### Additional Components + +- **`certbot`** (`certbot/`): Automated ACME DNS-01 certificate management +- **`ct_monitor`** (`ct_monitor/`): Certificate Transparency log monitoring +- **`verifier`** (`verifier/`): TDX quote verification service using `dcap-qvl` +- **`supervisor`** (`supervisor/`): Process supervision inside CVMs +- **SDKs** (`sdk/`): Client SDKs in Rust, Python, Go, and JavaScript for interacting with guest-agent APIs + +## Build Commands + +### Rust Components + +```bash +# Build all components +cargo build --release + +# Build specific components +cargo build --release -p dstack-vmm +cargo build --release -p dstack-kms +cargo build --release -p dstack-gateway +cargo build --release -p dstack-guest-agent + +# Check code +cargo check --all-features + +# Format code +cargo fmt --all + +# Lint with Clippy +cargo clippy -- -D warnings --allow unused_variables +``` + +### Ethereum Smart Contracts (KMS Auth) + +```bash +cd kms/auth-eth +npm install +npm run build # Compile TypeScript +npm test # Run tests +npm run test:coverage # Run tests with coverage + +# Hardhat commands +npx hardhat compile +npx hardhat test +npx hardhat node # Start local node +``` + +### Python SDK + +```bash +cd sdk/python +make install # Install dependencies +make test # Run tests +``` + +## Test Commands + +### Running All Tests + +```bash +# Run all Rust tests (requires simulator) +./run-tests.sh +``` + +This script: +1. Builds the SDK simulator (`sdk/simulator/`) +2. Starts the simulator in background +3. Sets `DSTACK_SIMULATOR_ENDPOINT` and `TAPPD_SIMULATOR_ENDPOINT` +4. Runs `cargo test --all-features -- --show-output` + +### Running Specific Tests + +```bash +# Run tests for a specific package +cargo test -p dstack-kms --all-features + +# Run a specific test +cargo test --all-features test_name + +# Run tests with output +cargo test --all-features -- --show-output --test-threads=1 +``` + +### Foundry Tests (Ethereum Contracts) + +```bash +cd kms/auth-eth + +# Run all Foundry tests +forge test + +# Run with verbosity +forge test -vv + +# Run specific test contract +forge test --match-contract UpgradesWithPluginTest -vv + +# Clean build artifacts +forge clean +``` + +## Code Style Guidelines + +### Logging and Error Messages + +- **Never capitalize** the first letter of log messages and error messages +- Example: `log::info!("starting server on port {}", port);` +- Example: `anyhow::bail!("failed to connect to server");` + +This rule is enforced in `.cursorrules`. + +## Key Security Concepts + +### Attestation Flow + +1. **Quote Generation**: Applications request TDX quotes via `getQuote()` with reportData (up to 64 bytes) +2. **Quote Verification**: `dstack-verifier` validates quotes using `dcap-qvl`, verifies OS image hash, and replays RTMRs from event logs +3. 
**RTMR Replay**: Compute Runtime Measurement Register values by applying SHA-384 hashing to event log entries + +### Key Management + +- **Deterministic Keys**: `getKey(path, purpose)` derives secp256k1 keys using HKDF, with signature chains proving TEE origin +- **TLS Keys**: `getTlsKey()` generates fresh X.509 certificates with optional RA-TLS support +- **Environment Encryption**: Client-side encryption using X25519 ECDH + AES-256-GCM, decrypted only in TEE + +### Smart Contract Integration + +- **DstackKms**: Main KMS contract managing OS image whitelist and app registration +- **DstackApp**: Per-app authorization contract controlling device IDs and compose hash whitelist +- Deployed on Ethereum-compatible networks (Phala Network) + +## Development Workflow + +### Local Development Setup + +1. Build meta-dstack artifacts (see README.md section "Build and Run") +2. Download or build guest OS image +3. Run components in separate terminals: + - KMS: `./dstack-kms -c kms.toml` + - Gateway: `sudo ./dstack-gateway -c gateway.toml` + - VMM: `./dstack-vmm -c vmm.toml` + +### Deploying Apps + +- Via Web UI: `http://localhost:9080` (or configured port) + - Via CLI: `./vmm-cli.py` (see `docs/vmm-cli-user-guide.md`) +- Requires: + 1. On-chain app registration (`npx hardhat kms:create-app`) + 2. Adding compose hash to whitelist (`npx hardhat app:add-hash`) + 3. Deploying via VMM with App ID + +### Accessing Deployed Apps + +Ingress mapping pattern: `<id>[-<port>[s|g]].<base domain>` +- Default: TLS termination to TCP +- `s` suffix: TLS passthrough +- `g` suffix: HTTP/2 with TLS termination (gRPC) + +## Important Files + +- `Cargo.toml`: Workspace configuration with all Rust crates +- `vmm.toml`: VMM configuration (CID pool, port mapping, KMS/gateway URLs) +- `kms.toml`: KMS configuration (contract addresses, RPC endpoints) +- `gateway.toml`: Gateway configuration (domain, certificates, WireGuard) +- `docker-compose.yaml`: App deployment format (normalized to `.app-compose.json`) + +## Common Tasks + +### Adding a New Rust Crate + +1. Create crate directory and `Cargo.toml` +2. Add to workspace members in root `Cargo.toml` +3. Add workspace dependency if it will be used by other crates + +### Modifying RPC APIs + +RPC definitions use the `prpc` framework with Protocol Buffers: +- Define `.proto` files in `*/rpc/proto/` +- Use `prpc-build` in `build.rs` to generate Rust code +- Implement service traits in main crate + +### Working with TDX Quotes + +- Low-level bindings: `tdx-attest-sys/` (FFI to libtdx-attest) +- High-level API: `tdx-attest/` +- Verification: `verifier/` using `dcap-qvl` +- Event log parsing: `cc-eventlog/` + +## Documentation + +- Main README: `README.md` +- Deployment guide: `docs/deployment.md` +- VMM CLI guide: `docs/vmm-cli-user-guide.md` +- Security guide: `docs/security-guide/security-guide.md` +- Design decisions: `docs/design-and-hardening-decisions.md` + +When you need more detailed info, try the deepwiki MCP.
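As a quick illustration of the guest-agent API that CLAUDE.md describes (mirroring the README's `curl --unix-socket /var/run/dstack.sock http://localhost/GetQuote?...` example), a minimal Rust sketch could talk to the socket directly — assumptions: the agent listens on `/var/run/dstack.sock` and serves `GetQuote` over plain HTTP; error handling and response parsing are elided:

```rust
// Minimal sketch: fetch a TDX quote from the guest agent by hand-writing
// an HTTP/1.0 request over its Unix socket. Real code would use an HTTP
// client or one of the SDKs under sdk/ instead.
use std::io::{Read, Write};
use std::os::unix::net::UnixStream;

fn main() -> std::io::Result<()> {
    let mut stream = UnixStream::connect("/var/run/dstack.sock")?;
    // report_data is caller-chosen (up to 64 bytes), hex-encoded here.
    let request = "GET /GetQuote?report_data=0x1234 HTTP/1.0\r\nHost: localhost\r\n\r\n";
    stream.write_all(request.as_bytes())?;
    let mut response = String::new();
    stream.read_to_string(&mut response)?;
    // The body is JSON carrying the quote and event log for verification.
    println!("{response}");
    Ok(())
}
```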
diff --git a/Cargo.lock b/Cargo.lock index b1c8f5da..348a9493 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -93,9 +93,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy" -version = "0.15.11" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b064bd1cea105e70557a258cd2b317731896753ec08edf51da2d1fced587b05" +checksum = "36f63701831729cb154cf0b6945256af46c426074646c98b9d123148ba1d8bde" dependencies = [ "alloy-core", "alloy-signer", @@ -104,15 +104,16 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.15.11" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32c3f3bc4f2a6b725970cd354e78e9738ea1e8961a91898f57bf6317970b1915" +checksum = "64a3bd0305a44fb457cae77de1e82856eadd42ea3cdf0dae29df32eb3b592979" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-serde", "alloy-trie", + "alloy-tx-macros", "auto_impl", "c-kzg", "derive_more 2.0.1", @@ -128,9 +129,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "0.15.11" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dda014fb5591b8d8d24cab30f52690117d238e52254c6fb40658e91ea2ccd6c3" +checksum = "7a842b4023f571835e62ac39fb8d523d19fcdbacfa70bf796ff96e7e19586f50" dependencies = [ "alloy-consensus", "alloy-eips", @@ -187,9 +188,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.15.11" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f7b2f7010581f29bcace81776cf2f0e022008d05a7d326884763f16f3044620" +checksum = "5cd749c57f38f8cbf433e651179fc5a676255e6b95044f467d49255d2b81725a" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -202,7 +203,9 @@ dependencies = [ "derive_more 2.0.1", "either", "serde", + "serde_with", "sha2 0.10.9", + "thiserror 2.0.15", ] [[package]] @@ -219,12 +222,13 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.15.11" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca1e31b50f4ed9a83689ae97263d366b15b935a67c4acb5dd46d5b1c3b27e8e6" +checksum = "f614019a029c8fec14ae661aa7d4302e6e66bdbfb869dab40e78dcfba935fc97" dependencies = [ "alloy-primitives", "alloy-sol-types", + "http", "serde", "serde_json", "thiserror 2.0.15", @@ -233,9 +237,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.15.11" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879afc0f4a528908c8fe6935b2ab0bc07f77221a989186f71583f7592831689e" +checksum = "be8b6d58e98803017bbfea01dde96c4d270a29e7aed3beb65c8d28b5ab464e0e" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -259,9 +263,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "0.15.11" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec185bac9d32df79c1132558a450d48f6db0bfb5adef417dbb1a0258153f879b" +checksum = "db489617bffe14847bf89f175b1c183e5dd7563ef84713936e2c34255cfbd845" dependencies = [ "alloy-consensus", "alloy-eips", @@ -321,9 +325,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "0.15.11" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5a8f1efd77116915dad61092f9ef9295accd0b0b251062390d9c4e81599344" +checksum = "18f27c0c41a16cd0af4f5dbf791f7be2a60502ca8b0e840e0ad29803fac2d587" dependencies = [ 
"alloy-consensus-any", "alloy-rpc-types-eth", @@ -332,9 +336,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "0.15.11" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc1323310d87f9d950fb3ff58d943fdf832f5e10e6f902f405c0eaa954ffbaf1" +checksum = "7f5812f81c3131abc2cd8953dc03c41999e180cff7252abbccaba68676e15027" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -347,14 +351,15 @@ dependencies = [ "itertools 0.14.0", "serde", "serde_json", + "serde_with", "thiserror 2.0.15", ] [[package]] name = "alloy-serde" -version = "0.15.11" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d05ace2ef3da874544c3ffacfd73261cdb1405d8631765deb991436a53ec6069" +checksum = "04dfe41a47805a34b848c83448946ca96f3d36842e8c074bcf8fa0870e337d12" dependencies = [ "alloy-primitives", "serde", @@ -363,9 +368,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "0.15.11" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67fdabad99ad3c71384867374c60bcd311fc1bb90ea87f5f9c779fd8c7ec36aa" +checksum = "f79237b4c1b0934d5869deea4a54e6f0a7425a8cd943a739d6293afdf893d847" dependencies = [ "alloy-primitives", "async-trait", @@ -378,9 +383,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "0.15.11" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acb3f4e72378566b189624d54618c8adf07afbcf39d5f368f4486e35a66725b3" +checksum = "d6e90a3858da59d1941f496c17db8d505f643260f7e97cdcdd33823ddca48fc1" dependencies = [ "alloy-consensus", "alloy-network", @@ -464,9 +469,9 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.8.1" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "983d99aa81f586cef9dae38443245e585840fcf0fc58b09aee0b1f27aed1d500" +checksum = "e3412d52bb97c6c6cc27ccc28d4e6e8cf605469101193b50b0bd5813b1f990b5" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -478,6 +483,19 @@ dependencies = [ "tracing", ] +[[package]] +name = "alloy-tx-macros" +version = "1.0.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e434e0917dce890f755ea774f59d6f12557bc8c7dd9fa06456af80cfe0f0181e" +dependencies = [ + "alloy-primitives", + "darling 0.21.2", + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -1254,7 +1272,7 @@ dependencies = [ [[package]] name = "cc-eventlog" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "fs-err", @@ -1269,7 +1287,7 @@ dependencies = [ [[package]] name = "cert-client" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "dstack-kms-rpc", @@ -1282,7 +1300,7 @@ dependencies = [ [[package]] name = "certbot" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "bon", @@ -1305,13 +1323,14 @@ dependencies = [ [[package]] name = "certbot-cli" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "certbot", "clap", "documented", "fs-err", + "or-panic", "rustls", "serde", "tokio", @@ -1708,7 +1727,7 @@ dependencies = [ [[package]] name = "ct_monitor" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "clap", @@ -1816,6 +1835,7 @@ dependencies = [ "ident_case", "proc-macro2", "quote", + "serde", "strsim", "syn 2.0.106", ] @@ -2149,7 +2169,7 @@ dependencies = [ [[package]] name = "dstack-gateway" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", 
"bytes", @@ -2173,6 +2193,7 @@ dependencies = [ "jemallocator", "load_config", "nix", + "or-panic", "parcelona", "pin-project", "ra-rpc", @@ -2197,7 +2218,7 @@ dependencies = [ [[package]] name = "dstack-gateway-rpc" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "parity-scale-codec", @@ -2210,7 +2231,7 @@ dependencies = [ [[package]] name = "dstack-guest-agent" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "base64 0.22.1", @@ -2222,6 +2243,7 @@ dependencies = [ "default-net", "dstack-guest-agent-rpc", "dstack-types", + "ed25519-dalek", "figment", "fs-err", "git-version", @@ -2230,8 +2252,10 @@ dependencies = [ "host-api", "k256", "load_config", + "or-panic", "ra-rpc", "ra-tls", + "rand 0.8.5", "rcgen", "reqwest", "ring", @@ -2246,6 +2270,7 @@ dependencies = [ "strip-ansi-escapes", "sysinfo", "tdx-attest", + "tempfile", "tokio", "tracing", "tracing-subscriber", @@ -2253,7 +2278,7 @@ dependencies = [ [[package]] name = "dstack-guest-agent-rpc" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "parity-scale-codec", @@ -2266,7 +2291,7 @@ dependencies = [ [[package]] name = "dstack-kms" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "chrono", @@ -2307,7 +2332,7 @@ dependencies = [ [[package]] name = "dstack-kms-rpc" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "fs-err", @@ -2321,19 +2346,24 @@ dependencies = [ [[package]] name = "dstack-mr" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "bon", + "dstack-types", + "flate2", "fs-err", "hex", "hex-literal 1.0.0", "log", "object", + "parity-scale-codec", + "reqwest", "serde", "serde-human-bytes", "serde_json", "sha2 0.10.9", + "tar", "thiserror 2.0.15", ] @@ -2348,12 +2378,13 @@ dependencies = [ "fs-err", "hex", "serde_json", + "size-parser", "tracing-subscriber", ] [[package]] name = "dstack-sdk" -version = "0.1.0" +version = "0.1.2" dependencies = [ "alloy", "anyhow", @@ -2373,7 +2404,7 @@ dependencies = [ [[package]] name = "dstack-sdk-types" -version = "0.1.0" +version = "0.1.2" dependencies = [ "anyhow", "bon", @@ -2389,16 +2420,17 @@ dependencies = [ [[package]] name = "dstack-types" -version = "0.5.4" +version = "0.5.5" dependencies = [ "serde", "serde-human-bytes", "sha3", + "size-parser", ] [[package]] name = "dstack-util" -version = "0.5.4" +version = "0.5.5" dependencies = [ "aes-gcm", "anyhow", @@ -2442,9 +2474,34 @@ dependencies = [ "x509-parser", ] +[[package]] +name = "dstack-verifier" +version = "0.5.5" +dependencies = [ + "anyhow", + "cc-eventlog", + "clap", + "dcap-qvl", + "dstack-mr", + "dstack-types", + "figment", + "fs-err", + "hex", + "ra-tls", + "reqwest", + "rocket", + "serde", + "serde_json", + "sha2 0.10.9", + "tempfile", + "tokio", + "tracing", + "tracing-subscriber", +] + [[package]] name = "dstack-vmm" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "base64 0.22.1", @@ -2465,6 +2522,7 @@ dependencies = [ "key-provider-client", "load_config", "lspci", + "or-panic", "path-absolutize", "ra-rpc", "rocket", @@ -2473,9 +2531,11 @@ dependencies = [ "safe-write", "serde", "serde-human-bytes", + "serde_ini", "serde_json", "sha2 0.10.9", "shared_child", + "size-parser", "strip-ansi-escapes", "supervisor-client", "tailf", @@ -2488,7 +2548,7 @@ dependencies = [ [[package]] name = "dstack-vmm-rpc" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "parity-scale-codec", @@ -2526,6 +2586,31 @@ dependencies = [ "spki", ] +[[package]] +name = "ed25519" +version = "2.2.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +dependencies = [ + "pkcs8", + "signature", +] + +[[package]] +name = "ed25519-dalek" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" +dependencies = [ + "curve25519-dalek", + "ed25519", + "rand_core 0.6.4", + "serde", + "sha2 0.10.9", + "subtle", + "zeroize", +] + [[package]] name = "either" version = "1.15.0" @@ -2700,6 +2785,18 @@ dependencies = [ "version_check", ] +[[package]] +name = "filetime" +version = "0.2.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc0505cd1b6fa6580283f6bdf70a73fcf4aba1184038c90902b92b3dd0df63ed" +dependencies = [ + "cfg-if", + "libc", + "libredox", + "windows-sys 0.60.2", +] + [[package]] name = "fixed-hash" version = "0.8.0" @@ -3027,7 +3124,7 @@ dependencies = [ [[package]] name = "guest-api" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "http-client", @@ -3301,7 +3398,7 @@ dependencies = [ [[package]] name = "host-api" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "http-client", @@ -3348,7 +3445,7 @@ dependencies = [ [[package]] name = "http-client" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "http-body-util", @@ -3779,7 +3876,7 @@ dependencies = [ [[package]] name = "iohash" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "blake2", @@ -3956,7 +4053,7 @@ dependencies = [ [[package]] name = "key-provider-client" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "serde", @@ -4026,6 +4123,7 @@ checksum = "391290121bad3d37fbddad76d8f5d1c1c314cfc646d143d7e07a3086ddff0ce3" dependencies = [ "bitflags 2.9.2", "libc", + "redox_syscall 0.5.17", ] [[package]] @@ -4054,7 +4152,7 @@ checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] name = "load_config" -version = "0.5.4" +version = "0.5.5" dependencies = [ "figment", "rocket", @@ -4123,7 +4221,7 @@ checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" [[package]] name = "lspci" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "insta", @@ -4538,13 +4636,14 @@ dependencies = [ [[package]] name = "nybbles" -version = "0.3.4" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" +checksum = "f0418987d1aaed324d95b4beffc93635e19be965ed5d63ec07a35980fe3b71a4" dependencies = [ "alloy-rlp", - "const-hex", + "cfg-if", "proptest", + "ruint", "serde", "smallvec", ] @@ -4633,6 +4732,12 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" +[[package]] +name = "or-panic" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "596a79faf55e869e7bc0c2162cf2f18a54d4d1112876bceae587ad954fcbd574" + [[package]] name = "os_pipe" version = "1.2.2" @@ -5369,10 +5474,12 @@ checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] name = "ra-rpc" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "bon", + "or-panic", + "prost-types 0.13.5", "prpc", "ra-tls", "reqwest", @@ -5386,7 +5493,7 @@ dependencies = [ [[package]] name = "ra-tls" -version = "0.5.4" +version = "0.5.5" dependencies = [ 
"anyhow", "bon", @@ -5396,6 +5503,7 @@ dependencies = [ "fs-err", "hex", "hkdf", + "or-panic", "p256", "parity-scale-codec", "rcgen", @@ -5693,6 +5801,12 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95325155c684b1c89f7765e30bc1c42e4a6da51ca513615660cb8a62ef9a88e3" +[[package]] +name = "result" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "194d8e591e405d1eecf28819740abed6d719d1a2db87fc0bcdedee9a26d55560" + [[package]] name = "rfc6979" version = "0.4.0" @@ -5828,7 +5942,7 @@ dependencies = [ [[package]] name = "rocket-vsock-listener" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "derive_more 2.0.1", @@ -6450,10 +6564,11 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.219" +version = "1.0.225" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "fd6c24dee235d0da097043389623fb913daddf92c76e9f5a1db88607a0bcbd1d" dependencies = [ + "serde_core", "serde_derive", ] @@ -6468,7 +6583,7 @@ dependencies = [ [[package]] name = "serde-duration" -version = "0.5.4" +version = "0.5.5" dependencies = [ "serde", ] @@ -6492,17 +6607,37 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_core" +version = "1.0.225" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "659356f9a0cb1e529b24c01e43ad2bdf520ec4ceaf83047b83ddcc2251f96383" +dependencies = [ + "serde_derive", +] + [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.225" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "0ea936adf78b1f766949a4977b91d2f5595825bd6ec079aa9543ad2685fc4516" dependencies = [ "proc-macro2", "quote", "syn 2.0.106", ] +[[package]] +name = "serde_ini" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb236687e2bb073a7521c021949be944641e671b8505a94069ca37b656c81139" +dependencies = [ + "result", + "serde", + "void", +] + [[package]] name = "serde_json" version = "1.0.142" @@ -6745,6 +6880,16 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" +[[package]] +name = "size-parser" +version = "0.5.5" +dependencies = [ + "anyhow", + "serde", + "serde_json", + "thiserror 2.0.15", +] + [[package]] name = "slab" version = "0.4.11" @@ -6785,6 +6930,7 @@ name = "sodiumbox" version = "0.1.0" dependencies = [ "blake2", + "or-panic", "rand_core 0.6.4", "salsa20", "x25519-dalek", @@ -6881,7 +7027,7 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "supervisor" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "bon", @@ -6893,6 +7039,7 @@ dependencies = [ "load_config", "nix", "notify", + "or-panic", "rocket", "serde", "serde_json", @@ -6903,7 +7050,7 @@ dependencies = [ [[package]] name = "supervisor-client" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "clap", @@ -7034,9 +7181,20 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" +[[package]] +name = "tar" +version = "0.4.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1d863878d212c87a19c1a610eb53bb01fe12951c0501cf5a0d65f724914a667a" +dependencies = [ + "filetime", + "libc", + "xattr", +] + [[package]] name = "tdx-attest" -version = "0.5.4" +version = "0.5.5" dependencies = [ "anyhow", "cc-eventlog", @@ -7055,7 +7213,7 @@ dependencies = [ [[package]] name = "tdx-attest-sys" -version = "0.5.4" +version = "0.5.5" dependencies = [ "bindgen 0.71.1", "cc", @@ -7617,6 +7775,12 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" + [[package]] name = "vsock" version = "0.5.1" @@ -8278,6 +8442,16 @@ dependencies = [ "time", ] +[[package]] +name = "xattr" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32e45ad4206f6d2479085147f02bc2ef834ac85886624a23575ae137c8aa8156" +dependencies = [ + "libc", + "rustix 1.0.8", +] + [[package]] name = "xsalsa20poly1305" version = "0.9.1" diff --git a/Cargo.toml b/Cargo.toml index baa7012a..98bc4551 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ # SPDX-License-Identifier: Apache-2.0 [workspace.package] -version = "0.5.4" +version = "0.5.5" authors = ["Kevin Wang ", "Leechael "] edition = "2021" license = "MIT" @@ -49,7 +49,9 @@ members = [ "serde-duration", "dstack-mr", "dstack-mr/cli", + "verifier", "no_std_check", + "size-parser", ] resolver = "2" @@ -57,7 +59,7 @@ resolver = "2" # Internal dependencies ra-rpc = { path = "ra-rpc", default-features = false } ra-tls = { path = "ra-tls" } -dstack-sdk-types = { path = "sdk/rust/types", default-features = false } +dstack-sdk-types = { path = "sdk/rust/types", version = "0.1.1", default-features = false } dstack-gateway-rpc = { path = "gateway/rpc" } dstack-kms-rpc = { path = "kms/rpc" } dstack-guest-agent-rpc = { path = "guest-agent/rpc" } @@ -80,9 +82,11 @@ lspci = { path = "lspci" } sodiumbox = { path = "sodiumbox" } serde-duration = { path = "serde-duration" } dstack-mr = { path = "dstack-mr" } +size-parser = { path = "size-parser" } # Core dependencies anyhow = { version = "1.0.97", default-features = false } +or-panic = { version = "1.0", default-features = false } chrono = "0.4.40" clap = { version = "4.5.32", features = ["derive", "string"] } dashmap = "6.1.0" @@ -109,12 +113,14 @@ hex = { version = "0.4.3", default-features = false } hex_fmt = "0.3.0" hex-literal = "1.0.0" prost = "0.13.5" +prost-types = "0.13.5" scale = { version = "3.7.4", package = "parity-scale-codec", features = [ "derive", ] } serde = { version = "1.0.219", features = ["derive"], default-features = false } serde-human-bytes = "0.1.0" serde_json = { version = "1.0.140", default-features = false } +serde_ini = "0.2.0" toml = "0.8.20" toml_edit = { version = "0.22.24", features = ["serde"] } yasna = "0.5.2" @@ -167,11 +173,12 @@ blake2 = "0.10.6" tokio-rustls = { version = "0.26.2", features = ["ring"] } x25519-dalek = { version = "2.0.1", features = ["static_secrets"] } k256 = "0.13.4" +ed25519-dalek = { version = "2.2.0", features = ["rand_core"] } # Additional RustCrypto dependencies for sealed box xsalsa20poly1305 = "0.9.0" salsa20 = "0.10" rand_core = "0.6.4" -alloy = { version = "0.15", default-features = false } +alloy = { version = "1.0.32", default-features = false } # Certificate/DNS hickory-resolver = "0.24.4" @@ -214,3 +221,6 @@ 
serde_yaml2 = "0.1.2" luks2 = "0.5.0" scopeguard = "1.2.0" + +[profile.release] +panic = "abort" diff --git a/README.md b/README.md index a68edb4e..ac18dd98 100644 --- a/README.md +++ b/README.md @@ -212,9 +212,9 @@ Once your app is deployed and listening on an HTTP port, you can access it throu **Examples**: -- `3327603e03f5bd1f830812ca4a789277fc31f577-8080.app.kvin.wang` → port `8080` (TLS termination to any TCP) -- `3327603e03f5bd1f830812ca4a789277fc31f577-8080g.app.kvin.wang` → port `8080` (TLS termination with HTTP/2 negotiation) -- `3327603e03f5bd1f830812ca4a789277fc31f577-8080s.app.kvin.wang` → port `8080` (TLS passthrough to any TCP) +- `3327603e03f5bd1f830812ca4a789277fc31f577-8080.test0.dstack.org` → port `8080` (TLS termination to any TCP) +- `3327603e03f5bd1f830812ca4a789277fc31f577-8080g.test0.dstack.org` → port `8080` (TLS termination with HTTP/2 negotiation) +- `3327603e03f5bd1f830812ca4a789277fc31f577-8080s.test0.dstack.org` → port `8080` (TLS passthrough to any TCP) The `` can be either the app ID or instance ID. When using the app ID, the load balancer will select one of the available instances. Adding an `s` suffix enables TLS passthrough to the app instead of terminating at dstack-gateway. Adding a `g` suffix enables HTTPS/2 with TLS termination for gRPC applications. @@ -258,7 +258,7 @@ curl --unix-socket /var/run/dstack.sock http://localhost/GetQuote?report_data=0x Container logs can be obtained from the CVM's `dashboard` page or by curl: ```bash -curl 'http://.app.kvin.wang:9090/logs/?since=0&until=0&follow=true&text=true×tamps=true&bare=true' +curl 'http://.:9090/logs/?since=0&until=0&follow=true&text=true×tamps=true&bare=true' ``` Replace `` and `` with actual values. Available parameters: @@ -314,7 +314,6 @@ GATEWAY_CERT=${CERBOT_WORKDIR}/live/cert.pem GATEWAY_KEY=${CERBOT_WORKDIR}/live/key.pem # For certbot -CF_ZONE_ID=cc0a40... CF_API_TOKEN=g-DwMH... 
# ACME_URL=https://acme-v02.api.letsencrypt.org/directory ACME_URL=https://acme-staging-v02.api.letsencrypt.org/directory @@ -334,24 +333,7 @@ Then run the certbot in the `build/` directory and you will see the following log: ```text $ RUST_LOG=info,certbot=debug ./certbot renew -c certbot.toml 2024-10-25T07:41:00.682990Z INFO certbot::bot: creating new ACME account 2024-10-25T07:41:00.869246Z INFO certbot::bot: created new ACME account: https://acme-staging-v02.api.letsencrypt.org/acme/acct/168601853 -2024-10-25T07:41:00.869270Z INFO certbot::bot: setting CAA records -2024-10-25T07:41:00.869276Z DEBUG certbot::acme_client: setting guard CAA records for app.kvin.wang -2024-10-25T07:41:01.740767Z DEBUG certbot::acme_client: removing existing CAA record app.kvin.wang 0 issuewild "letsencrypt.org;validationmethods=dns-01;accounturi=https://acme-staging-v02.api.letsencrypt.org/acme/acct/168578683" -2024-10-25T07:41:01.991298Z DEBUG certbot::acme_client: removing existing CAA record app.kvin.wang 0 issue "letsencrypt.org;validationmethods=dns-01;accounturi=https://acme-staging-v02.api.letsencrypt.org/acme/acct/168578683" -2024-10-25T07:41:02.216751Z DEBUG certbot::acme_client: setting CAA records for app.kvin.wang, 0 issue "letsencrypt.org;validationmethods=dns-01;accounturi=https://acme-staging-v02.api.letsencrypt.org/acme/acct/168601853" -2024-10-25T07:41:02.424217Z DEBUG certbot::acme_client: setting CAA records for app.kvin.wang, 0 issuewild "letsencrypt.org;validationmethods=dns-01;accounturi=https://acme-staging-v02.api.letsencrypt.org/acme/acct/168601853" -2024-10-25T07:41:02.663824Z DEBUG certbot::acme_client: removing guard CAA records for app.kvin.wang -2024-10-25T07:41:03.095564Z DEBUG certbot::acme_client: generating new cert key pair -2024-10-25T07:41:03.095678Z DEBUG certbot::acme_client: requesting new certificates for *.app.kvin.wang -2024-10-25T07:41:03.095699Z DEBUG certbot::acme_client: creating new order -2024-10-25T07:41:03.250382Z DEBUG certbot::acme_client: order is pending, waiting for authorization -2024-10-25T07:41:03.283600Z DEBUG certbot::acme_client: creating dns record for app.kvin.wang -2024-10-25T07:41:04.027882Z DEBUG certbot::acme_client: challenge not found, waiting 500ms tries=2 domain="_acme-challenge.app.kvin.wang" -2024-10-25T07:41:04.600711Z DEBUG certbot::acme_client: challenge not found, waiting 1s tries=3 domain="_acme-challenge.app.kvin.wang" -2024-10-25T07:41:05.642300Z DEBUG certbot::acme_client: challenge not found, waiting 2s tries=4 domain="_acme-challenge.app.kvin.wang" -2024-10-25T07:41:07.715947Z DEBUG certbot::acme_client: challenge not found, waiting 4s tries=5 domain="_acme-challenge.app.kvin.wang" -2024-10-25T07:41:11.724831Z DEBUG certbot::acme_client: challenge not found, waiting 8s tries=6 domain="_acme-challenge.app.kvin.wang" -2024-10-25T07:41:19.815990Z DEBUG certbot::acme_client: challenge not found, waiting 16s tries=7 domain="_acme-challenge.app.kvin.wang" +... 2024-10-25T07:41:35.852790Z DEBUG certbot::acme_client: setting challenge ready for https://acme-staging-v02.api.letsencrypt.org/acme/chall-v3/14584884443/mQ-I2A 2024-10-25T07:41:35.934425Z DEBUG certbot::acme_client: challenges are ready, waiting for order to be ready 2024-10-25T07:41:37.972434Z DEBUG certbot::acme_client: order is ready, uploading csr @@ -391,16 +373,16 @@ Execute dstack-gateway with `sudo ./dstack-gateway -c gateway.toml`, then access To enhance security, we've limited TLS certificate issuance to dstack-gateway via CAA records.
However, since these records can be modified through Cloudflare's domain management, we need to implement global CA certificate monitoring to maintain security oversight. -`ct_monitor` tracks Certificate Transparency logs via [https://crt.sh](https://crt.sh/?q=app.kvin.wang), comparing their public key with the ones got from dstack-gateway RPC. It immediately alerts when detecting unauthorized certificates not issued through dstack-gateway: +`ct_monitor` tracks Certificate Transparency logs via https://crt.sh, comparing their public keys with the ones obtained from dstack-gateway RPC. It immediately alerts when detecting unauthorized certificates not issued through dstack-gateway: ```text -$ ./ct_monitor -t https://localhost:9010/prpc -d app.kvin.wang -2024-10-25T08:12:11.366463Z INFO ct_monitor: monitoring app.kvin.wang... +$ ./ct_monitor -t https://localhost:9010/prpc -d <base domain> +2024-10-25T08:12:11.366463Z INFO ct_monitor: monitoring <base domain>... 2024-10-25T08:12:11.366488Z INFO ct_monitor: fetching known public keys from https://localhost:9010/prpc 2024-10-25T08:12:11.566222Z INFO ct_monitor: got 2 known public keys 2024-10-25T08:12:13.142122Z INFO ct_monitor: ✅ checked log id=14705660685 2024-10-25T08:12:13.802573Z INFO ct_monitor: ✅ checked log id=14705656674 -2024-10-25T08:12:14.494944Z ERROR ct_monitor: ❌ error in CTLog { id: 14666084839, issuer_ca_id: 295815, issuer_name: "C=US, O=Let's Encrypt, CN=R11", common_name: "kvin.wang", name_value: "*.app.kvin.wang", not_before: "2024-09-24T02:23:15", not_after: "2024-12-23T02:23:14", serial_number: "03ae796f56a933c8ff7e32c7c0d662a253d4", result_count: 1, entry_timestamp: "2024-09-24T03:21:45.825" } +2024-10-25T08:12:14.494944Z ERROR ct_monitor: ❌ error in CTLog { id: 14666084839, issuer_ca_id: 295815, issuer_name: "C=US, O=Let's Encrypt, CN=R11", common_name: "<base domain>", name_value: "*.<base domain>", not_before: "2024-09-24T02:23:15", not_after: "2024-12-23T02:23:14", serial_number: "03ae796f56a933c8ff7e32c7c0d662a253d4", result_count: 1, entry_timestamp: "2024-09-24T03:21:45.825" } 2024-10-25T08:12:14.494998Z ERROR ct_monitor: error: certificate has issued to unknown pubkey: 30820122300d06092a864886f70d01010105000382010f003082010a02820101009de65c767caf117880626d1acc1ee78f3c6a992e3fe458f34066f92812ac550190a67e49ebf4f537003c393c000a8ec3e114da088c0cb02ffd0881fd39a2b32cc60d2e9989f0efab3345bee418262e0179d307d8d361fd0837f85d17eab92ec6f4126247e614aa01f4efcc05bc6303a8be68230f04326c9e85406fc4d234e9ce92089253b11d002cdf325582df45d5da42981cd546cbd2e9e49f0fa6636e747a345aaf8cefa02556aa258e1f7f90906be8fe51567ac9626f35bc46837e4f3203387fee59c71cea400000007c24e7537debc1941b36ff1612990233e4c219632e35858b1771f17a71944adf6c657dd7303583e3aeed199bd36a3152f49980f4f30203010001 ``` diff --git a/REUSE.toml b/REUSE.toml index e48336ed..8f345c27 100644 --- a/REUSE.toml +++ b/REUSE.toml @@ -64,6 +64,7 @@ SPDX-License-Identifier = "Apache-2.0" [[annotations]] path = [ "docs/security/dstack-audit.pdf", + "dstack_Technical_Charter_Final_10-17-2025.pdf", "sdk/simulator/quote.hex", "ra-tls/assets/tdx_quote", "cc-eventlog/samples/ccel.bin", @@ -157,3 +158,18 @@ SPDX-License-Identifier = "CC0-1.0" path = "guest-api/src/generated/*" SPDX-FileCopyrightText = "NONE" SPDX-License-Identifier = "CC0-1.0" + +[[annotations]] +path = "dstack-util/tests/fixtures/*" +SPDX-FileCopyrightText = "NONE" +SPDX-License-Identifier = "CC0-1.0" + +[[annotations]] +path = "verifier/fixtures/*" +SPDX-FileCopyrightText = "NONE" +SPDX-License-Identifier = "CC0-1.0" + +[[annotations]] +path = "verifier/builder/shared/*.txt"
+SPDX-FileCopyrightText = "NONE" +SPDX-License-Identifier = "CC0-1.0" diff --git a/attestation.md b/attestation.md index af7e7837..c10232cd 100644 --- a/attestation.md +++ b/attestation.md @@ -25,7 +25,7 @@ The MR register values indicate the following: - RTMR0: OVMF records CVM's virtual hardware setup, including CPU count, memory size, and device configuration. While dstack uses fixed devices, CPU and memory specifications can vary. RTMR0 can be computed from these specifications. - RTMR1: OVMF records the Linux kernel measurement. - RTMR2: Linux kernel records kernel cmdline (including rootfs hash) and initrd measurements. - - RTMR3: initrd records dstack App details, including compose hash, instance id, app id, rootfs hash, and key provider. + - RTMR3: initrd records dstack App details, including compose hash, instance id, app id, and key provider. MRTD, RTMR0, RTMR1, and RTMR2 can be pre-calculated from the built image (given CPU+RAM specifications). Compare these with the verified quote's MRs to confirm correct base image code execution. @@ -33,26 +33,26 @@ RTMR3 differs as it contains runtime information like compose hash and instance ### 2.2. Determining expected MRs MRTD, RTMR0, RTMR1, and RTMR2 correspond to the image. dstack OS builds all related software from source. -Build version v0.4.0 using these commands: +Build version v0.5.4 using these commands: ```bash git clone https://github.com/Dstack-TEE/meta-dstack.git cd meta-dstack/ -git checkout 15189bcb5397083b5c650a438243ce3f29e705f4 +git checkout f7c795b76faa693f218e1c255007e3a68c541d79 git submodule update --init --recursive cd repro-build && ./repro-build.sh -n ``` -The resulting dstack-v0.4.0.tar.gz contains: +The resulting dstack-0.5.4.tar.gz contains: - ovmf.fd: virtual firmware - bzImage: kernel image - initramfs.cpio.gz: initrd -- rootfs.cpio: root filesystem +- rootfs.img.verity: root filesystem - metadata.json: image metadata, including kernel boot cmdline -Calculate image MRs using [dstack-mr](https://github.com/kvinwang/dstack-mr): +Calculate image MRs using [dstack-mr](dstack-mr/): ```bash -dstack-mr -cpu 4 -ram 4096 -metadata dstack-v0.4.0/metadata.json +cargo run --manifest-path ../dstack/Cargo.toml --bin dstack-mr measure -c 4 -m 4G dstack-0.5.4/metadata.json ``` Once these verification steps are completed successfully, the report_data contained in the verified quote can be considered authentic and trustworthy. diff --git a/basefiles/docker.service.d/dstack-guest-agent.conf b/basefiles/docker.service.d/dstack-guest-agent.conf new file mode 100644 index 00000000..2ca01dce --- /dev/null +++ b/basefiles/docker.service.d/dstack-guest-agent.conf @@ -0,0 +1,11 @@ +# SPDX-FileCopyrightText: 2024-2025 Phala Network +# +# SPDX-License-Identifier: Apache-2.0 + +[Unit] +Wants=dstack-guest-agent.service +After=dstack-guest-agent.service + +[Service] +TimeoutStartSec=0 +ExecStartPre=/bin/sh -c 'while ! systemctl is-active --quiet dstack-guest-agent.service || [ ! -S /var/run/dstack.sock ] || [ ! 
-S /var/run/tappd.sock ]; do sleep 1; done' diff --git a/basefiles/dstack-prepare.sh b/basefiles/dstack-prepare.sh index cd64f79e..dfa92b9b 100755 --- a/basefiles/dstack-prepare.sh +++ b/basefiles/dstack-prepare.sh @@ -45,3 +45,11 @@ mkdir -p $DATA_MNT/var/lib/docker mount --rbind $DATA_MNT/var/lib/docker /var/lib/docker mount --rbind $WORK_DIR /dstack mount_overlay /etc/users $OVERLAY_PERSIST + +cd /dstack + +if [ $(jq 'has("init_script")' app-compose.json) == true ]; then + echo "Running init script" + dstack-util notify-host -e "boot.progress" -d "init-script" || true + source <(jq -r '.init_script' app-compose.json) +fi diff --git a/basefiles/wg-checker.service b/basefiles/wg-checker.service index eac7b607..d6009b54 100644 --- a/basefiles/wg-checker.service +++ b/basefiles/wg-checker.service @@ -1,6 +1,6 @@ [Unit] Description=WireGuard Endpoint Checker Service -After=network-online.target tboot.service +After=network-online.target dstack-prepare.service Wants=network-online.target [Service] diff --git a/basefiles/wg-checker.sh b/basefiles/wg-checker.sh old mode 100644 new mode 100755 index 4b71fdf6..d9763528 --- a/basefiles/wg-checker.sh +++ b/basefiles/wg-checker.sh @@ -4,27 +4,73 @@ # # SPDX-License-Identifier: Apache-2.0 -get_conf_endpoint() { - grep "Endpoint" /etc/wireguard/wg0.conf | awk "{print \$3}" +HANDSHAKE_TIMEOUT=180 +LAST_REFRESH=0 +STALE_SINCE=0 +DSTACK_WORK_DIR=${DSTACK_WORK_DIR:-/dstack} +IFNAME=dstack-wg0 + +get_latest_handshake() { + wg show $IFNAME latest-handshakes 2>/dev/null | awk 'BEGIN { max = 0 } NF >= 2 { if ($2 > max) max = $2 } END { print max }' } -get_current_endpoint() { - wg show wg0 endpoints | awk "{print \$2}" +maybe_refresh() { + now=$1 + + if [ "$LAST_REFRESH" -ne 0 ] && [ $((now - LAST_REFRESH)) -lt $HANDSHAKE_TIMEOUT ]; then + return + fi + + if ! command -v dstack-util >/dev/null 2>&1; then + printf 'dstack-util not found; cannot refresh gateway.\n' >&2 + LAST_REFRESH=$now + return + fi + + printf 'WireGuard handshake stale; refreshing dstack gateway...\n' + if dstack-util gateway-refresh --work-dir "$DSTACK_WORK_DIR"; then + printf 'dstack gateway refresh succeeded.\n' + else + printf 'dstack gateway refresh failed.\n' >&2 + fi + + LAST_REFRESH=$now + STALE_SINCE=$now } -check_endpoint() { - CONF_ENDPOINT=$(get_conf_endpoint) - CURRENT_ENDPOINT=$(get_current_endpoint) +check_handshake() { + if ! command -v wg >/dev/null 2>&1; then + return + fi + + now=$(date +%s) + latest=$(get_latest_handshake) + + if [ -z "$latest" ]; then + latest=0 + fi - if [ "$CURRENT_ENDPOINT" != "$CONF_ENDPOINT" ]; then - echo "Wg endpoint changed from $CONF_ENDPOINT to $CURRENT_ENDPOINT." - wg syncconf wg0 <(wg-quick strip wg0) + if [ "$latest" -gt 0 ]; then + if [ $((now - latest)) -ge $HANDSHAKE_TIMEOUT ]; then + maybe_refresh "$now" + else + STALE_SINCE=0 + fi + else + if [ "$STALE_SINCE" -eq 0 ]; then + STALE_SINCE=$now + fi + if [ $((now - STALE_SINCE)) -ge $HANDSHAKE_TIMEOUT ]; then + maybe_refresh "$now" + fi fi } while true; do - if [ -f /etc/wireguard/wg0.conf ]; then - check_endpoint + if [ -f /etc/wireguard/$IFNAME.conf ]; then + check_handshake + else + STALE_SINCE=0 fi sleep 10 done diff --git a/cert-client/src/lib.rs b/cert-client/src/lib.rs index ec6cb670..3be6bb47 100644 --- a/cert-client/src/lib.rs +++ b/cert-client/src/lib.rs @@ -68,15 +68,13 @@ impl CertRequestClient { .context("Failed to create CA")?; Ok(CertRequestClient::Local { ca }) } - KeyProvider::Kms { url, .. 
} => { - let tmp_client = - RaClient::new(url.into(), true).context("Failed to create RA client")?; - let tmp_client = KmsClient::new(tmp_client); - let tmp_ca = tmp_client - .get_temp_ca_cert() - .await - .context("Failed to get temp CA cert")?; - let client_cert = generate_ra_cert(tmp_ca.temp_ca_cert, tmp_ca.temp_ca_key) + KeyProvider::Kms { + url, + tmp_ca_key, + tmp_ca_cert, + .. + } => { + let client_cert = generate_ra_cert(tmp_ca_cert.clone(), tmp_ca_key.clone()) .context("Failed to generate RA cert")?; let ra_client = RaClientConfig::builder() .remote_uri(url.clone()) diff --git a/certbot/cli/Cargo.toml b/certbot/cli/Cargo.toml index a3f21a10..cd415c08 100644 --- a/certbot/cli/Cargo.toml +++ b/certbot/cli/Cargo.toml @@ -24,3 +24,4 @@ tokio = { workspace = true, features = ["full"] } toml_edit.workspace = true tracing-subscriber.workspace = true rustls.workspace = true +or-panic.workspace = true diff --git a/certbot/cli/src/main.rs b/certbot/cli/src/main.rs index 19fed2e1..b22d3246 100644 --- a/certbot/cli/src/main.rs +++ b/certbot/cli/src/main.rs @@ -10,6 +10,7 @@ use certbot::{CertBotConfig, WorkDir}; use clap::Parser; use documented::DocumentedFields; use fs_err as fs; +use or_panic::ResultOrPanic; use serde::{Deserialize, Serialize}; use toml_edit::ser::to_document; @@ -61,8 +62,6 @@ struct Config { acme_url: String, /// Cloudflare API token cf_api_token: String, - /// Cloudflare zone ID - cf_zone_id: String, /// Auto set CAA record auto_set_caa: bool, /// List of domains to issue certificates for @@ -73,6 +72,8 @@ struct Config { renew_days_before: u64, /// Renew timeout in seconds renew_timeout: u64, + /// Maximum time to wait for DNS propagation in seconds + max_dns_wait: u64, /// Command to run after renewal #[serde(default)] renewed_hook: Option, @@ -84,12 +85,12 @@ impl Default for Config { workdir: ".".into(), acme_url: "https://acme-staging-v02.api.letsencrypt.org/directory".into(), cf_api_token: "".into(), - cf_zone_id: "".into(), auto_set_caa: true, domains: vec!["example.com".into()], renew_interval: 3600, renew_days_before: 10, renew_timeout: 120, + max_dns_wait: 300, renewed_hook: None, } } @@ -124,6 +125,7 @@ fn load_config(config: &PathBuf) -> Result { let renew_interval = Duration::from_secs(config.renew_interval); let renew_expires_in = Duration::from_secs(config.renew_days_before * 24 * 60 * 60); let renew_timeout = Duration::from_secs(config.renew_timeout); + let max_dns_wait = Duration::from_secs(config.max_dns_wait); let bot_config = CertBotConfig::builder() .acme_url(config.acme_url) .cert_dir(workdir.backup_dir()) @@ -131,11 +133,11 @@ fn load_config(config: &PathBuf) -> Result { .key_file(workdir.key_path()) .auto_create_account(true) .cert_subject_alt_names(config.domains) - .cf_zone_id(config.cf_zone_id) .cf_api_token(config.cf_api_token) .renew_interval(renew_interval) .renew_timeout(renew_timeout) .renew_expires_in(renew_expires_in) + .max_dns_wait(max_dns_wait) .credentials_file(workdir.account_credentials_path()) .auto_set_caa(config.auto_set_caa) .maybe_renewed_hook(config.renewed_hook) @@ -166,7 +168,7 @@ async fn main() -> Result<()> { } rustls::crypto::ring::default_provider() .install_default() - .expect("Failed to install default crypto provider"); + .or_panic("Failed to install default crypto provider"); let args = Args::parse(); match args.command { diff --git a/certbot/src/acme_client.rs b/certbot/src/acme_client.rs index f5ae739f..d4ebcf51 100644 --- a/certbot/src/acme_client.rs +++ b/certbot/src/acme_client.rs @@ -27,6 +27,7 @@ pub struct 
AcmeClient { account: Account, credentials: Credentials, dns01_client: Dns01Client, + max_dns_wait: Duration, } #[derive(Debug, Clone)] @@ -53,7 +54,11 @@ pub(crate) fn acme_matches(encoded_credentials: &str, acme_url: &str) -> bool { } impl AcmeClient { - pub async fn load(dns01_client: Dns01Client, encoded_credentials: &str) -> Result { + pub async fn load( + dns01_client: Dns01Client, + encoded_credentials: &str, + max_dns_wait: Duration, + ) -> Result { let credentials: Credentials = serde_json::from_str(encoded_credentials)?; let account = Account::from_credentials(credentials.credentials).await?; let credentials: Credentials = serde_json::from_str(encoded_credentials)?; @@ -61,11 +66,16 @@ impl AcmeClient { account, dns01_client, credentials, + max_dns_wait, }) } /// Create a new account. - pub async fn new_account(acme_url: &str, dns01_client: Dns01Client) -> Result { + pub async fn new_account( + acme_url: &str, + dns01_client: Dns01Client, + max_dns_wait: Duration, + ) -> Result { let (account, credentials) = Account::create( &NewAccount { contact: &[], @@ -86,6 +96,7 @@ impl AcmeClient { account, dns01_client, credentials, + max_dns_wait, }) } @@ -310,14 +321,14 @@ impl AcmeClient { let Identifier::Dns(identifier) = &authz.identifier; let dns_value = order.key_authorization(challenge).dns_value(); - debug!("creating dns record for {}", identifier); + debug!("creating dns record for {identifier}"); let acme_domain = format!("_acme-challenge.{identifier}"); - debug!("removing existing dns record for {}", acme_domain); + debug!("removing existing TXT record for {acme_domain}"); self.dns01_client .remove_txt_records(&acme_domain) .await .context("failed to remove existing dns record")?; - debug!("creating dns record for {}", acme_domain); + debug!("creating TXT record for {acme_domain}"); let id = self .dns01_client .add_txt_record(&acme_domain, &dns_value) @@ -335,6 +346,8 @@ impl AcmeClient { /// Self check the TXT records for the given challenges. async fn check_dns(&self, challenges: &[Challenge]) -> Result<()> { + use tracing::warn; + let mut delay = Duration::from_millis(250); let mut tries = 1u8; @@ -342,11 +355,22 @@ impl AcmeClient { debug!("Unsettled challenges: {unsettled_challenges:#?}"); + let start_time = std::time::Instant::now(); + 'outer: loop { use hickory_resolver::AsyncResolver; sleep(delay).await; + let elapsed = start_time.elapsed(); + if elapsed >= self.max_dns_wait { + warn!( + "DNS propagation timeout after {elapsed:?}, max wait time is {max:?}. 
proceeding anyway as ACME server may have different DNS view", + max = self.max_dns_wait + ); + break; + } + let dns_resolver = AsyncResolver::tokio_from_system_conf().context("failed to create dns resolver")?; @@ -374,6 +398,8 @@ impl AcmeClient { debug!( tries, domain = &challenge.acme_domain, + elapsed = ?elapsed, + max_wait = ?self.max_dns_wait, "challenge not found, waiting for {delay:?}" ); unsettled_challenges.push(challenge); diff --git a/certbot/src/acme_client/tests.rs b/certbot/src/acme_client/tests.rs index 54eefac0..d77504a6 100644 --- a/certbot/src/acme_client/tests.rs +++ b/certbot/src/acme_client/tests.rs @@ -13,7 +13,7 @@ async fn new_acme_client() -> Result { ); let credentials = std::env::var("LETSENCRYPT_CREDENTIAL").expect("LETSENCRYPT_CREDENTIAL not set"); - AcmeClient::load(dns01_client, &credentials).await + AcmeClient::load(dns01_client, &credentials, Duration::from_secs(300)).await } #[tokio::test] diff --git a/certbot/src/bot.rs b/certbot/src/bot.rs index 7df25499..5a9c775f 100644 --- a/certbot/src/bot.rs +++ b/certbot/src/bot.rs @@ -27,7 +27,6 @@ pub struct CertBotConfig { auto_set_caa: bool, credentials_file: PathBuf, auto_create_account: bool, - cf_zone_id: String, cf_api_token: String, cert_file: PathBuf, key_file: PathBuf, @@ -37,6 +36,7 @@ pub struct CertBotConfig { renew_timeout: Duration, renew_expires_in: Duration, renewed_hook: Option, + max_dns_wait: Duration, } impl CertBotConfig { @@ -55,7 +55,7 @@ async fn create_new_account( dns01_client: Dns01Client, ) -> Result { info!("creating new ACME account"); - let client = AcmeClient::new_account(&config.acme_url, dns01_client) + let client = AcmeClient::new_account(&config.acme_url, dns01_client, config.max_dns_wait) .await .context("failed to create new account")?; let credentials = client @@ -77,12 +77,20 @@ async fn create_new_account( impl CertBot { /// Build a new `CertBot` from a `CertBotConfig`. pub async fn build(config: CertBotConfig) -> Result { + let base_domain = config + .cert_subject_alt_names + .first() + .context("cert_subject_alt_names is empty")? + .trim() + .trim_start_matches("*.") + .trim_end_matches('.') + .to_string(); let dns01_client = - Dns01Client::new_cloudflare(config.cf_zone_id.clone(), config.cf_api_token.clone()); + Dns01Client::new_cloudflare(config.cf_api_token.clone(), base_domain).await?; let acme_client = match fs::read_to_string(&config.credentials_file) { Ok(credentials) => { if acme_matches(&credentials, &config.acme_url) { - AcmeClient::load(dns01_client, &credentials).await? + AcmeClient::load(dns01_client, &credentials, config.max_dns_wait).await? } else { create_new_account(&config, dns01_client).await? 
} diff --git a/certbot/src/bot/tests.rs b/certbot/src/bot/tests.rs index e7c65e6d..ce8b16f9 100644 --- a/certbot/src/bot/tests.rs +++ b/certbot/src/bot/tests.rs @@ -9,14 +9,12 @@ use instant_acme::LetsEncrypt; use super::*; async fn new_certbot() -> Result { - let cf_zone_id = std::env::var("CLOUDFLARE_ZONE_ID").expect("CLOUDFLARE_ZONE_ID not set"); let cf_api_token = std::env::var("CLOUDFLARE_API_TOKEN").expect("CLOUDFLARE_API_TOKEN not set"); let domains = vec![std::env::var("TEST_DOMAIN").expect("TEST_DOMAIN not set")]; let config = CertBotConfig::builder() .acme_url(LetsEncrypt::Staging.url()) .auto_create_account(true) .credentials_file("./test-workdir/credentials.json") - .cf_zone_id(cf_zone_id) .cf_api_token(cf_api_token) .cert_dir("./test-workdir/backup") .cert_file("./test-workdir/live/cert.pem") @@ -25,6 +23,7 @@ async fn new_certbot() -> Result { .renew_interval(Duration::from_secs(30)) .renew_timeout(Duration::from_secs(120)) .renew_expires_in(Duration::from_secs(7772187)) + .max_dns_wait(Duration::from_secs(300)) .auto_set_caa(false) .build(); config.build_bot().await diff --git a/certbot/src/dns01_client.rs b/certbot/src/dns01_client.rs index 27b1bb6d..701d5ba9 100644 --- a/certbot/src/dns01_client.rs +++ b/certbot/src/dns01_client.rs @@ -6,6 +6,7 @@ use anyhow::Result; use cloudflare::CloudflareClient; use enum_dispatch::enum_dispatch; use serde::{Deserialize, Serialize}; +use tracing::debug; mod cloudflare; @@ -51,9 +52,11 @@ pub(crate) trait Dns01Api { /// Deletes all TXT DNS records matching the given domain. async fn remove_txt_records(&self, domain: &str) -> Result<()> { for record in self.get_records(domain).await? { - if record.r#type == "TXT" { - self.remove_record(&record.id).await?; + if record.r#type != "TXT" { + continue; } + debug!(domain = %domain, id = %record.id, "removing txt record"); + self.remove_record(&record.id).await?; } Ok(()) } @@ -68,7 +71,9 @@ pub enum Dns01Client { } impl Dns01Client { - pub fn new_cloudflare(zone_id: String, api_token: String) -> Self { - Self::Cloudflare(CloudflareClient::new(zone_id, api_token)) + pub async fn new_cloudflare(api_token: String, base_domain: String) -> Result { + Ok(Self::Cloudflare( + CloudflareClient::new(api_token, base_domain).await?, + )) } } diff --git a/certbot/src/dns01_client/cloudflare.rs b/certbot/src/dns01_client/cloudflare.rs index 408f181a..222028da 100644 --- a/certbot/src/dns01_client/cloudflare.rs +++ b/certbot/src/dns01_client/cloudflare.rs @@ -2,10 +2,13 @@ // // SPDX-License-Identifier: Apache-2.0 -use anyhow::{Context, Result}; +use std::collections::HashMap; + +use anyhow::{bail, Context, Result}; use reqwest::Client; use serde::{Deserialize, Serialize}; use serde_json::json; +use tracing::debug; use crate::dns01_client::Record; @@ -29,51 +32,241 @@ struct ApiResult { id: String, } +#[derive(Deserialize, Debug)] +struct CloudflareListResponse { + result: Vec, + result_info: ResultInfo, +} + +#[derive(Deserialize, Debug)] +struct ResultInfo { + total_pages: u32, +} + +#[derive(Deserialize, Debug)] +struct ZoneInfo { + id: String, + name: String, +} + +#[derive(Deserialize, Debug)] +struct ZonesResultInfo { + page: u32, + per_page: u32, + total_pages: u32, + count: u32, + total_count: u32, +} + impl CloudflareClient { - pub fn new(zone_id: String, api_token: String) -> Self { - Self { zone_id, api_token } + pub async fn new(api_token: String, base_domain: String) -> Result { + let zone_id = Self::resolve_zone_id(&api_token, &base_domain).await?; + Ok(Self { api_token, zone_id }) + } + + async 
fn resolve_zone_id(api_token: &str, base_domain: &str) -> Result<String> {
+        let base = base_domain
+            .trim()
+            .trim_start_matches("*.")
+            .trim_end_matches('.')
+            .to_lowercase();
+
+        let client = Client::new();
+        let url = format!("{CLOUDFLARE_API_URL}/zones");
+
+        let per_page = 50u32;
+        let mut page = 1u32;
+        let mut zones: HashMap<String, String> = HashMap::new();
+        let mut total_pages = 1u32;
+
+        while page <= total_pages {
+            debug!(url = %url, base_domain = %base, page, per_page, "cloudflare list zones request");
+
+            let response = client
+                .get(&url)
+                .header("Authorization", format!("Bearer {api_token}"))
+                .query(&[
+                    ("page", page.to_string()),
+                    ("per_page", per_page.to_string()),
+                ])
+                .send()
+                .await
+                .context("failed to list zones")?;
+
+            let status = response.status();
+            let body = response
+                .text()
+                .await
+                .context("failed to read zones response body")?;
+            if !status.is_success() {
+                bail!("failed to list zones: {body}");
+            }
+
+            #[derive(Deserialize, Debug)]
+            struct ZonesPageResponse {
+                result: Vec<ZoneInfo>,
+                result_info: ZonesResultInfo,
+            }
+
+            let zones_response: ZonesPageResponse =
+                serde_json::from_str(&body).context("failed to parse zones response")?;
+
+            let zone_names = zones_response
+                .result
+                .iter()
+                .map(|z| z.name.as_str())
+                .collect::<Vec<_>>();
+            debug!(
+                url = %url,
+                status = %status,
+                page = zones_response.result_info.page,
+                per_page = zones_response.result_info.per_page,
+                count = zones_response.result_info.count,
+                total_count = zones_response.result_info.total_count,
+                total_pages = zones_response.result_info.total_pages,
+                zones = ?zone_names,
+                "cloudflare list zones response"
+            );
+
+            total_pages = zones_response.result_info.total_pages;
+            for z in zones_response.result {
+                zones.insert(z.name.to_lowercase(), z.id);
+            }
+
+            page += 1;
+        }
+
+        let parts: Vec<&str> = base.split('.').collect();
+        for i in 0..parts.len() {
+            let candidate = parts[i..].join(".");
+            if let Some(zone_id) = zones.get(&candidate) {
+                debug!(base_domain = %base, zone = %candidate, zone_id = %zone_id, "resolved cloudflare zone");
+                return Ok(zone_id.clone());
+            }
+        }
+
+        bail!("no matching zone found for base_domain: {base_domain}")
     }

     async fn add_record(&self, record: &impl Serialize) -> Result {
         let client = Client::new();
-        let url = format!("{}/zones/{}/dns_records", CLOUDFLARE_API_URL, self.zone_id);
+        let url = format!("{CLOUDFLARE_API_URL}/zones/{}/dns_records", self.zone_id);
+
         let response = client
             .post(&url)
             .header("Authorization", format!("Bearer {}", self.api_token))
             .header("Content-Type", "application/json")
-            .json(&record)
+            .json(record)
             .send()
             .await
             .context("failed to send add_record request")?;
-        if !response.status().is_success() {
-            anyhow::bail!("failed to add record: {}", response.text().await?);
+
+        let status = response.status();
+        let body = response
+            .text()
+            .await
+            .context("failed to read add_record response body")?;
+        if !status.is_success() {
+            anyhow::bail!("failed to add record: {body}");
         }
-        let response = response.json().await.context("failed to parse response")?;
+        let response = serde_json::from_str(&body).context("failed to parse response")?;
         Ok(response)
     }
-}

-impl Dns01Api for CloudflareClient {
-    async fn remove_record(&self, record_id: &str) -> Result<()> {
+    async fn remove_record_inner(&self, record_id: &str) -> Result<()> {
         let client = Client::new();
         let url = format!(
-            "{}/zones/{}/dns_records/{}",
-            CLOUDFLARE_API_URL, self.zone_id, record_id
+            "{CLOUDFLARE_API_URL}/zones/{zone_id}/dns_records/{record_id}",
+            zone_id = self.zone_id
         );
+
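+        // Read the response body before checking the status, so a failed DELETE
+        // surfaces Cloudflare's error payload in the bail! message below.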
debug!(url = %url, "cloudflare remove_record request"); + let response = client .delete(&url) .header("Authorization", format!("Bearer {}", self.api_token)) .send() .await?; - if !response.status().is_success() { - anyhow::bail!( - "failed to remove acme challenge: {}", - response.text().await? - ); + let status = response.status(); + let body = response + .text() + .await + .context("failed to read remove_record response body")?; + if !status.is_success() { + anyhow::bail!("failed to remove acme challenge: {body}"); } + Ok(()) + } + + async fn get_records_inner(&self, domain: &str) -> Result> { + let client = Client::new(); + let url = format!("{CLOUDFLARE_API_URL}/zones/{}/dns_records", self.zone_id); + + let per_page = 100u32; + let mut records = Vec::new(); + let target = domain.trim_end_matches('.'); + + for page in 1..20 { + // Safety limit to prevent infinite loops + let response = client + .get(&url) + .header("Authorization", format!("Bearer {}", self.api_token)) + .query(&[ + ("name", domain), + ("page", &page.to_string()), + ("per_page", &per_page.to_string()), + ]) + .send() + .await?; + + let status = response.status(); + let body = response + .text() + .await + .context("failed to read get_records response body")?; + + if !status.is_success() { + anyhow::bail!("failed to get dns records: {body}"); + } + + let response: CloudflareListResponse = + serde_json::from_str(&body).context("failed to parse response")?; + + records.extend(response.result.into_iter().filter(|record| { + record + .name + .trim_end_matches('.') + .eq_ignore_ascii_case(target) + })); + + if page >= response.result_info.total_pages { + break; + } + } + + Ok(records) + } +} + +impl Dns01Api for CloudflareClient { + async fn remove_record(&self, record_id: &str) -> Result<()> { + self.remove_record_inner(record_id).await + } + async fn remove_txt_records(&self, domain: &str) -> Result<()> { + let records = self.get_records_inner(domain).await?; + let txt_records = records + .into_iter() + .filter(|r| r.r#type == "TXT") + .collect::>(); + let ids = txt_records.iter().map(|r| r.id.clone()).collect::>(); + debug!(domain = %domain, zone_id = %self.zone_id, count = txt_records.len(), ids = ?ids, "removing txt records"); + + for record in txt_records { + debug!(domain = %domain, id = %record.id, "removing txt record"); + self.remove_record_inner(&record.id).await?; + } Ok(()) } @@ -110,33 +303,7 @@ impl Dns01Api for CloudflareClient { } async fn get_records(&self, domain: &str) -> Result> { - let client = Client::new(); - let url = format!("{}/zones/{}/dns_records", CLOUDFLARE_API_URL, self.zone_id); - - let response = client - .get(&url) - .header("Authorization", format!("Bearer {}", self.api_token)) - .send() - .await?; - - if !response.status().is_success() { - anyhow::bail!("failed to get dns records: {}", response.text().await?); - } - - #[derive(Deserialize, Debug)] - struct CloudflareResponse { - result: Vec, - } - - let response: CloudflareResponse = - response.json().await.context("failed to parse response")?; - - let records = response - .result - .into_iter() - .filter(|record| record.name == domain) - .collect(); - Ok(records) + self.get_records_inner(domain).await } } @@ -168,11 +335,13 @@ mod tests { } } - fn create_client() -> CloudflareClient { + async fn create_client() -> CloudflareClient { CloudflareClient::new( - std::env::var("CLOUDFLARE_ZONE_ID").expect("CLOUDFLARE_ZONE_ID not set"), std::env::var("CLOUDFLARE_API_TOKEN").expect("CLOUDFLARE_API_TOKEN not set"), + 
std::env::var("TEST_DOMAIN").expect("TEST_DOMAIN not set"), ) + .await + .unwrap() } fn random_subdomain() -> String { @@ -185,7 +354,7 @@ mod tests { #[tokio::test] async fn can_add_txt_record() { - let client = create_client(); + let client = create_client().await; let subdomain = random_subdomain(); println!("subdomain: {}", subdomain); let record_id = client @@ -202,7 +371,7 @@ mod tests { #[tokio::test] async fn can_remove_txt_record() { - let client = create_client(); + let client = create_client().await; let subdomain = random_subdomain(); println!("subdomain: {}", subdomain); let record_id = client @@ -219,7 +388,7 @@ mod tests { #[tokio::test] async fn can_add_caa_record() { - let client = create_client(); + let client = create_client().await; let subdomain = random_subdomain(); let record_id = client .add_caa_record(&subdomain, 0, "issue", "letsencrypt.org;") diff --git a/ct_monitor/src/main.rs b/ct_monitor/src/main.rs index a060ede3..6a168cce 100644 --- a/ct_monitor/src/main.rs +++ b/ct_monitor/src/main.rs @@ -49,7 +49,7 @@ impl Monitor { async fn refresh_known_keys(&mut self) -> Result<()> { info!("fetching known public keys from {}", self.gateway_uri); - let todo = "Use RA-TLS"; + // TODO: Use RA-TLS let tls_no_check = true; let rpc = GatewayClient::new(RaClient::new(self.gateway_uri.clone(), tls_no_check)?); let info = rpc.acme_info().await?; @@ -140,7 +140,8 @@ impl Monitor { fn validate_domain(domain: &str) -> Result<()> { let domain_regex = - Regex::new(r"^(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,}$").unwrap(); + Regex::new(r"^(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,}$") + .context("invalid regex")?; if !domain_regex.is_match(domain) { bail!("invalid domain name"); } diff --git a/docs/deployment.md b/docs/deployment.md index f2b7d017..e8466b52 100644 --- a/docs/deployment.md +++ b/docs/deployment.md @@ -104,7 +104,7 @@ GUEST_AGENT_ADDR=127.0.0.1:9205 ETH_RPC_URL=https://rpc.phala.network GIT_REV=HEAD OS_IMAGE=dstack-0.5.2 -IMAGE_DOWNLOAD_URL=https://files.kvin.wang/images/mr_{OS_IMAGE_HASH}.tar.gz +IMAGE_DOWNLOAD_URL=https://download.dstack.org/os-images/mr_{OS_IMAGE_HASH}.tar.gz ``` Then run the script again. @@ -165,7 +165,7 @@ After you get the `os_image_hash`, you can register it to the KMS whitelist by r ```bash cd dstack/kms/auth-eth -npx hardhat kms:add-image --network phala --mr +npx hardhat kms:add-image --network phala 0x ``` ### Register dstack-gateway in KMS @@ -192,6 +192,11 @@ Transaction hash: 0x46cf1959abf309fcde86bcab2518dcf28dd9eec70c74214f0562e7bf847c Deployed with 1 initial device and 0 initial compose hash ``` +Register the app ID to the kms as the gateway app ID: +```bash +npx hardhat kms:set-gateway --network phala 0x32467b43BFa67273FC7dDda0999Ee9A12F2AaA08 +``` + Now go to the `gateway/dstack-app/` directory and run the following command: ```bash cd ../../gateway/dstack-app/ @@ -206,7 +211,6 @@ VMM_RPC=unix:../../vmm-data/vmm.sock # Cloudflare API token for DNS challenge used to get the SSL certificate. CF_API_TOKEN=your_cloudflare_api_token -CF_ZONE_ID=your_zone_id # Service domain SRV_DOMAIN=test2.dstack.phala.network diff --git a/docs/dstack-gateway.md b/docs/dstack-gateway.md index d42aa687..be1e2cf4 100644 --- a/docs/dstack-gateway.md +++ b/docs/dstack-gateway.md @@ -12,17 +12,10 @@ Set up a second-level wildcard domain using Cloudflare; make sure to disable pro You need to get a Cloudflare API Key and ensure the API can manage this domain. 
-You can check your Cloudflare API key and get `cf_zone_id` using this command:
-
-```shell
-curl -X GET "https://api.cloudflare.com/client/v4/zones" -H "Authorization: Bearer " -H "Content-Type: application/json" | jq .
-```
-
 Open your `certbot.toml`, and update these fields:

 - `acme_url`: change to `https://acme-v02.api.letsencrypt.org/directory`
 - `cf_api_token`: Obtain from Cloudflare
-- `cf_zone_id`: Obtain from the API call above

 ## Step 3: Run Certbot Manually and Get First SSL Certificates
diff --git a/docs/security-guide/cvm-boundaries.md b/docs/security-guide/cvm-boundaries.md
index 433dee40..b15c844c 100644
--- a/docs/security-guide/cvm-boundaries.md
+++ b/docs/security-guide/cvm-boundaries.md
@@ -23,23 +23,27 @@ dstack OS requires a host shared folder to be attached to the CVM. It copies the
 ### app-compose.json

 This is the main configuration file for the application in JSON format:

-| Field | Type | Description |
-|-------|------|-------------|
-| manifest_version | integer | Schema version (currently defaults to "2") |
-| name | string | Name of the instance |
-| runner | string | Name of the runner (currently defaults to "docker-compose") |
-| docker_compose_file | string | YAML string representing docker-compose config |
-| docker_config | object | Additional docker settings (currently empty) |
-| kms_enabled | boolean | Enable/disable KMS |
-| gateway_enabled | boolean | Enable/disable gateway |
-| public_logs | boolean | Whether logs are publicly visible |
-| public_sysinfo | boolean | Whether system info is public |
-| public_tcbinfo | boolean | Whether TCB info is public |
-| local_key_provider_enabled | boolean | Use a local key provider |
-| allowed_envs | array of string | List of allowed environment variable names |
-| no_instance_id | boolean | Disable instance ID generation |
-| secure_time | boolean | Whether secure time is enabled |
-| pre_launch_script | string | Prelaunch bash script that runs before starting containers |
+| Field | Since | Type | Description |
+|-------|-------|------|-------------|
+| manifest_version | 0.3.1 | integer | Schema version (currently defaults to "2") |
+| name | 0.3.1 | string | Name of the instance |
+| runner | 0.3.1 | string | Name of the runner (currently defaults to "docker-compose") |
+| docker_compose_file | 0.3.1 | string | YAML string representing docker-compose config |
+| docker_config | 0.3.1 | object | (Removed since 0.5.5) Additional docker settings (currently empty) |
+| kms_enabled | 0.3.1 | boolean | Enable/disable KMS |
+| gateway_enabled | 0.3.1 | boolean | Enable/disable gateway |
+| local_key_provider_enabled | 0.3.1 | boolean | Use a local key provider |
+| public_logs | 0.3.3 | boolean | Whether logs are publicly visible |
+| public_sysinfo | 0.3.3 | boolean | Whether system info is public |
+| public_tcbinfo | 0.5.1 | boolean | Whether TCB info is public |
+| allowed_envs | 0.4.2 | array of string | List of allowed environment variable names |
+| no_instance_id | 0.4.2 | boolean | Disable instance ID generation |
+| secure_time | 0.5.0 | boolean | Whether secure time is enabled |
+| pre_launch_script | 0.4.0 | string | Prelaunch bash script that runs before executing `docker compose up` |
+| init_script | 0.5.5 | string | Bash script executed prior to dockerd startup |
+| storage_fs | 0.5.5 | string | Filesystem type for the data disk of the CVM. Supported values: "zfs", "ext4". Defaults to "zfs". **ZFS:** Ensures filesystem integrity with built-in data protection features. **ext4:** Provides better performance for database applications with lower overhead and faster I/O operations, but no strong integrity protection. |
+| swap_size | 0.5.5 | string/integer | The Linux swap size. Defaults to 0. Can be given in bytes or in human-readable format (e.g., "1G", "256M"). |
+
 The hash of this file content is extended to RTMR3 as event name `compose-hash`. Remote verifier can extract the compose-hash during remote attestation.
diff --git a/docs/vmm-cli-user-guide.md b/docs/vmm-cli-user-guide.md
index 49fde1b6..242d024b 100644
--- a/docs/vmm-cli-user-guide.md
+++ b/docs/vmm-cli-user-guide.md
@@ -503,6 +503,79 @@ The whitelist is stored in `~/.dstack-vmm/kms-whitelist.json`.
 ./vmm-cli.py update-user-config <vm-id> ./new-config.json
 ```

+#### Update Port Mapping
+
+Update port mappings for an existing VM:
+
+```bash
+./vmm-cli.py update-ports <vm-id> --port tcp:8080:80 --port tcp:8443:443
+```
+
+#### Update Multiple Aspects at Once
+
+Use the all-in-one `update` command to update multiple VM aspects in a single operation:
+
+```bash
+# Update resources (requires VM to be stopped)
+./vmm-cli.py update <vm-id> \
+  --vcpu 4 \
+  --memory 8G \
+  --disk 100G \
+  --image "dstack-0.5.4"
+
+# Update application configuration
+./vmm-cli.py update <vm-id> \
+  --compose ./new-docker-compose.yml \
+  --prelaunch-script ./setup.sh \
+  --swap 4G \
+  --env-file ./new-secrets.env \
+  --user-config ./new-config.json
+
+# Update networking and GPU
+./vmm-cli.py update <vm-id> \
+  --port tcp:8080:80 \
+  --port tcp:8443:443 \
+  --gpu "18:00.0" --gpu "2a:00.0"
+
+# Detach all GPUs from a VM
+./vmm-cli.py update <vm-id> --no-gpus
+
+# Remove all port mappings from a VM
+./vmm-cli.py update <vm-id> --no-ports
+
+# Update everything at once
+./vmm-cli.py update <vm-id> \
+  --vcpu 8 \
+  --memory 16G \
+  --disk 200G \
+  --compose ./new-docker-compose.yml \
+  --prelaunch-script ./init.sh \
+  --swap 8G \
+  --env-file ./new-secrets.env \
+  --port tcp:8080:80 \
+  --ppcie
+```
+
+**Available Options:**
+- **Resource changes** (require the VM to be stopped): `--vcpu`, `--memory`, `--disk`, `--image`
+- **Application updates**: `--compose` (docker-compose file), `--prelaunch-script`, `--swap`, `--env-file`, `--user-config`
+- **Networking** (mutually exclusive):
+  - `--port <mapping>` (can be used multiple times)
+  - `--no-ports` (remove all port mappings)
+  - _No port flag: port configuration remains unchanged_
+- **GPU** (mutually exclusive):
+  - `--gpu <slot>` (can be used multiple times for specific GPUs)
+  - `--ppcie` (attach all available GPUs)
+  - `--no-gpus` (detach all GPUs)
+  - _No GPU flag: GPU configuration remains unchanged_
+- **KMS**: `--kms-url` (for environment encryption)
+
+**Notes:**
+- Resource changes (vCPU, memory, disk, image) require the VM to be stopped
+- Application updates can be applied to running VMs
+- Port and GPU options are mutually exclusive within their groups
+- If no flag is specified for ports or GPUs, those configurations remain unchanged
+
 ### Performance Optimization

 #### NUMA Pinning
diff --git a/dstack-mr/Cargo.toml b/dstack-mr/Cargo.toml
index f695d6b6..32a96f96 100644
--- a/dstack-mr/Cargo.toml
+++ b/dstack-mr/Cargo.toml
@@ -24,3 +24,10 @@ hex-literal.workspace = true
 fs-err.workspace = true
 bon.workspace = true
 log.workspace = true
+scale.workspace = true
+
+[dev-dependencies]
+dstack-types.workspace = true
+reqwest = { version = "0.12", default-features = false, features = ["blocking", "rustls-tls"] }
+flate2 = "1.0"
+tar = "0.4"
diff --git a/dstack-mr/cli/Cargo.toml b/dstack-mr/cli/Cargo.toml
index 4b890b3c..336b0a79 100644
---
a/dstack-mr/cli/Cargo.toml +++ b/dstack-mr/cli/Cargo.toml @@ -21,3 +21,4 @@ dstack-types.workspace = true fs-err.workspace = true serde_json = { workspace = true, features = ["alloc"] } tracing-subscriber.workspace = true +size-parser.workspace = true diff --git a/dstack-mr/cli/src/main.rs b/dstack-mr/cli/src/main.rs index 74acc07a..f0fc2596 100644 --- a/dstack-mr/cli/src/main.rs +++ b/dstack-mr/cli/src/main.rs @@ -2,11 +2,12 @@ // // SPDX-License-Identifier: Apache-2.0 -use anyhow::{Context, Result, anyhow}; +use anyhow::{Context, Result}; use clap::{Parser, Subcommand}; use dstack_mr::Machine; use dstack_types::ImageInfo; use fs_err as fs; +use size_parser::parse_memory_size; use std::path::PathBuf; #[derive(Parser)] @@ -117,7 +118,7 @@ fn main() -> Result<()> { .context("Failed to measure machine configuration")?; if config.json { - println!("{}", serde_json::to_string_pretty(&measurements).unwrap()); + println!("{}", serde_json::to_string_pretty(&measurements)?); } else { println!("Machine measurements:"); println!("MRTD: {}", hex::encode(measurements.mrtd)); @@ -130,32 +131,3 @@ fn main() -> Result<()> { Ok(()) } - -/// Parse a memory size value that can be decimal or hexadecimal (with 0x prefix) -fn parse_memory_size(s: &str) -> Result { - let s = s.trim(); - - if s.is_empty() { - return Err(anyhow!("Empty memory size")); - } - if s.starts_with("0x") || s.starts_with("0X") { - let hex_str = &s[2..]; - return u64::from_str_radix(hex_str, 16) - .map_err(|e| anyhow!("Invalid hexadecimal value: {}", e)); - } - - if s.chars().all(|c| c.is_ascii_digit()) { - return Ok(s.parse::()?); - } - let len = s.len(); - let (num_part, suffix) = match s.chars().last().unwrap() { - 'k' | 'K' => (&s[0..len - 1], 1024u64), - 'm' | 'M' => (&s[0..len - 1], 1024u64 * 1024), - 'g' | 'G' => (&s[0..len - 1], 1024u64 * 1024 * 1024), - 't' | 'T' => (&s[0..len - 1], 1024u64 * 1024 * 1024 * 1024), - _ => return Err(anyhow!("Unknown memory size suffix")), - }; - - let num = num_part.parse::()?; - Ok(num * suffix) -} diff --git a/dstack-mr/src/acpi.rs b/dstack-mr/src/acpi.rs index b79a6301..5976c10f 100644 --- a/dstack-mr/src/acpi.rs +++ b/dstack-mr/src/acpi.rs @@ -7,12 +7,14 @@ use anyhow::{bail, Context, Result}; use log::debug; +use scale::Decode; use crate::Machine; const LDR_LENGTH: usize = 4096; const FIXED_STRING_LEN: usize = 56; +#[derive(Debug, Clone)] pub struct Tables { pub tables: Vec, pub rsdp: Vec, @@ -85,7 +87,12 @@ impl Machine<'_> { } else { machine.push_str(",smm=off"); } - if self.pic { + + let vopt = self + .versioned_options() + .context("Failed to get versioned options")?; + + if vopt.pic { machine.push_str(",pic=on"); } else { machine.push_str(",pic=off"); @@ -148,8 +155,13 @@ impl Machine<'_> { debug!("qemu command: {cmd:?}"); + let ver = vopt.version; // Execute the command and capture output let output = cmd + .env( + "QEMU_ACPI_COMPAT_VER", + format!("{}.{}.{}", ver.0, ver.1, ver.2), + ) .output() .context("failed to execute dstack-acpi-tables")?; @@ -381,6 +393,13 @@ fn qemu_loader_append(data: &mut Vec, cmd: LoaderCmd) { } } +/// ACPI table header (first 8 bytes of every ACPI table) +#[derive(Debug, Decode)] +struct AcpiTableHeader { + signature: [u8; 4], + length: u32, +} + /// Searches for an ACPI table with the given signature and returns its offset, /// checksum offset, and length. 
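+/// Each ACPI table starts with the standard header: a 4-byte signature followed
+/// by a 4-byte little-endian length (see `AcpiTableHeader` above), with the
+/// checksum byte at offset 9 within the table (hence the `offset + 9` returned
+/// below), so the blob can be walked table by table.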
fn find_acpi_table(tables: &[u8], signature: &str) -> Result<(u32, u32, u32)> { @@ -396,22 +415,21 @@ fn find_acpi_table(tables: &[u8], signature: &str) -> Result<(u32, u32, u32)> { bail!("Table not found: {signature}"); } - let tbl_sig = &tables[offset..offset + 4]; - let tbl_len_bytes: [u8; 4] = tables[offset + 4..offset + 8].try_into().unwrap(); - let tbl_len = u32::from_le_bytes(tbl_len_bytes) as usize; + let header = AcpiTableHeader::decode(&mut &tables[offset..]) + .context("failed to decode ACPI table header")?; - if tbl_sig == sig_bytes { + if header.signature == sig_bytes { // Found the table - return Ok((offset as u32, (offset + 9) as u32, tbl_len as u32)); + return Ok((offset as u32, (offset + 9) as u32, header.length)); } - if tbl_len == 0 { + if header.length == 0 { // Invalid table length, stop searching - bail!("Found table with zero length at offset {offset}"); + bail!("found table with zero length at offset {offset}"); } // Move to the next table - offset += tbl_len; + offset += header.length as usize; } - bail!("Table not found: {signature}"); + bail!("table not found: {signature}"); } diff --git a/dstack-mr/src/kernel.rs b/dstack-mr/src/kernel.rs index 1f714ee0..878a2b01 100644 --- a/dstack-mr/src/kernel.rs +++ b/dstack-mr/src/kernel.rs @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: Apache-2.0 -use crate::{measure_log, measure_sha384, num::read_le, utf16_encode, util::debug_print_log}; +use crate::{measure_sha384, num::read_le, utf16_encode}; use anyhow::{bail, Context, Result}; use object::pe; use sha2::{Digest, Sha384}; @@ -99,7 +99,7 @@ fn authenticode_sha384_hash(data: &[u8]) -> Result> { let trailing_data_len = file_size - sum_of_bytes_hashed; if trailing_data_len > cert_table_size { - let hashed_trailing_len = trailing_data_len - cert_table_size; + let hashed_trailing_len = trailing_data_len.saturating_sub(cert_table_size); let trailing_start = sum_of_bytes_hashed; if trailing_start + hashed_trailing_len <= data.len() { @@ -129,7 +129,7 @@ fn patch_kernel( let mut kd = kernel_data.to_vec(); - let protocol = u16::from_le_bytes(kd[0x206..0x208].try_into().unwrap()); + let protocol = u16::from_le_bytes(kd[0x206..0x208].try_into().context("impossible failure")?); let (real_addr, cmdline_addr) = if protocol < 0x200 || (kd[0x211] & 0x01) == 0 { (0x90000_u32, 0x9a000_u32) @@ -142,14 +142,14 @@ fn patch_kernel( } if protocol >= 0x201 { kd[0x211] |= 0x80; // loadflags |= CAN_USE_HEAP - let heap_end_ptr = cmdline_addr - real_addr - 0x200; + let heap_end_ptr = cmdline_addr.saturating_sub(real_addr).saturating_sub(0x200); kd[0x224..0x228].copy_from_slice(&heap_end_ptr.to_le_bytes()); } if protocol >= 0x202 { kd[0x228..0x22C].copy_from_slice(&cmdline_addr.to_le_bytes()); } else { kd[0x20..0x22].copy_from_slice(&0xa33f_u16.to_le_bytes()); - let offset = (cmdline_addr - real_addr) as u16; + let offset = cmdline_addr.saturating_sub(real_addr) as u16; kd[0x22..0x24].copy_from_slice(&offset.to_le_bytes()); } @@ -158,14 +158,16 @@ fn patch_kernel( bail!("the kernel image is too old for ramdisk"); } let mut initrd_max = if protocol >= 0x20c { - let xlf = u16::from_le_bytes(kd[0x236..0x238].try_into().unwrap()); + let xlf = + u16::from_le_bytes(kd[0x236..0x238].try_into().context("impossible failure")?); if (xlf & 0x40) != 0 { u32::MAX } else { 0x37ffffff } } else if protocol >= 0x203 { - let max = u32::from_le_bytes(kd[0x22c..0x230].try_into().unwrap()); + let max = + u32::from_le_bytes(kd[0x22c..0x230].try_into().context("impossible failure")?); if max == 0 { 0x37ffffff } else { @@ 
-186,14 +188,23 @@ fn patch_kernel(
         mem_size as u32
     };

-    if initrd_max >= below_4g_mem_size - acpi_data_size {
-        initrd_max = below_4g_mem_size - acpi_data_size - 1;
+    if let Some(available_mem) = below_4g_mem_size.checked_sub(acpi_data_size) {
+        if initrd_max >= available_mem {
+            initrd_max = available_mem.saturating_sub(1);
+        }
+    } else {
+        // If acpi_data_size >= below_4g_mem_size, we have no memory available
+        bail!(
+            "ACPI data size ({}) exceeds available memory ({})",
+            acpi_data_size,
+            below_4g_mem_size
+        );
     }
     if initrd_size >= initrd_max {
         bail!("initrd is too large");
     }
-    let initrd_addr = (initrd_max - initrd_size) & !4095;
+    let initrd_addr = initrd_max.saturating_sub(initrd_size) & !4095;
     kd[0x218..0x21C].copy_from_slice(&initrd_addr.to_le_bytes());
     kd[0x21C..0x220].copy_from_slice(&initrd_size.to_le_bytes());
     }
@@ -201,24 +212,22 @@
 }

 /// Measures a QEMU-patched TDX kernel image.
-pub(crate) fn measure_kernel(
+pub(crate) fn rtmr1_log(
     kernel_data: &[u8],
     initrd_size: u32,
     mem_size: u64,
     acpi_data_size: u32,
-) -> Result<Vec<u8>> {
+) -> Result<Vec<Vec<u8>>> {
     let kd = patch_kernel(kernel_data, initrd_size, mem_size, acpi_data_size)
         .context("Failed to patch kernel")?;
     let kernel_hash = authenticode_sha384_hash(&kd).context("Failed to compute kernel hash")?;
-    let rtmr1_log = vec![
+    Ok(vec![
         kernel_hash,
         measure_sha384(b"Calling EFI Application from Boot Option"),
         measure_sha384(&[0x00, 0x00, 0x00, 0x00]), // Separator
         measure_sha384(b"Exit Boot Services Invocation"),
         measure_sha384(b"Exit Boot Services Returned with Success"),
-    ];
-    debug_print_log("RTMR1", &rtmr1_log);
-    Ok(measure_log(&rtmr1_log))
+    ])
 }

 /// Measures the kernel command line by converting to UTF-16LE and hashing.
diff --git a/dstack-mr/src/lib.rs b/dstack-mr/src/lib.rs
index 936e283f..a8d5825e 100644
--- a/dstack-mr/src/lib.rs
+++ b/dstack-mr/src/lib.rs
@@ -5,10 +5,13 @@
 use serde::{Deserialize, Serialize};
 use serde_human_bytes as hex_bytes;

-pub use machine::Machine;
+pub use machine::{Machine, TdxMeasurementDetails};
 use util::{measure_log, measure_sha384, utf16_encode};

+pub type RtmrLog = Vec<Vec<u8>>;
+pub type RtmrLogs = [RtmrLog; 3];
+
 mod acpi;
 mod kernel;
 mod machine;
diff --git a/dstack-mr/src/machine.rs b/dstack-mr/src/machine.rs
index 27a63a0f..c08e6cdf 100644
--- a/dstack-mr/src/machine.rs
+++ b/dstack-mr/src/machine.rs
@@ -2,11 +2,12 @@
 //
 // SPDX-License-Identifier: Apache-2.0

+use crate::acpi::Tables;
 use crate::tdvf::Tdvf;
 use crate::util::debug_print_log;
-use crate::{kernel, TdxMeasurements};
+use crate::{kernel, RtmrLogs, TdxMeasurements};
 use crate::{measure_log, measure_sha384};
-use anyhow::{Context, Result};
+use anyhow::{bail, Context, Result};
 use fs_err as fs;
 use log::debug;
@@ -18,8 +19,9 @@ pub struct Machine<'a> {
     pub kernel: &'a str,
     pub initrd: &'a str,
     pub kernel_cmdline: &'a str,
-    pub two_pass_add_pages: bool,
-    pub pic: bool,
+    pub two_pass_add_pages: Option<bool>,
+    pub pic: Option<bool>,
+    pub qemu_version: Option<String>,
     #[builder(default = false)]
     pub smm: bool,
     pub pci_hole64_size: Option<u64>,
@@ -30,21 +32,88 @@
     pub root_verity: bool,
 }

+fn parse_version_tuple(v: &str) -> Result<(u32, u32, u32)> {
+    let parts: Vec<u32> = v
+        .split('.')
+        .map(|p| p.parse::<u32>().context("Invalid version number"))
+        .collect::<Result<Vec<_>, _>>()?;
+    if parts.len() != 3 {
+        bail!(
+            "Version string must have exactly 3 parts (major.minor.patch), got {}",
+            parts.len()
+        );
+    }
+    Ok((parts[0], parts[1], parts[2]))
+}
+
+impl Machine<'_> {
+    pub fn versioned_options(&self) -> Result<VersionedOptions> {
+        let version = match
&self.qemu_version {
+            Some(v) => Some(parse_version_tuple(v).context("Failed to parse QEMU version")?),
+            None => None,
+        };
+        let default_pic;
+        let default_two_pass;
+        let version = version.unwrap_or((9, 1, 0));
+        if version < (8, 0, 0) {
+            bail!("Unsupported QEMU version: {version:?}");
+        }
+        if ((8, 0, 0)..(9, 0, 0)).contains(&version) {
+            default_pic = true;
+            default_two_pass = true;
+        } else {
+            default_pic = false;
+            default_two_pass = false;
+        };
+        Ok(VersionedOptions {
+            version,
+            pic: self.pic.unwrap_or(default_pic),
+            two_pass_add_pages: self.two_pass_add_pages.unwrap_or(default_two_pass),
+        })
+    }
+}
+
+pub struct VersionedOptions {
+    pub version: (u32, u32, u32),
+    pub pic: bool,
+    pub two_pass_add_pages: bool,
+}
+
+#[derive(Debug, Clone)]
+pub struct TdxMeasurementDetails {
+    pub measurements: TdxMeasurements,
+    pub rtmr_logs: RtmrLogs,
+    pub acpi_tables: Tables,
+}
+
 impl Machine<'_> {
     pub fn measure(&self) -> Result<TdxMeasurements> {
+        self.measure_with_logs().map(|details| details.measurements)
+    }
+
+    pub fn measure_with_logs(&self) -> Result<TdxMeasurementDetails> {
         debug!("measuring machine: {self:#?}");
         let fw_data = fs::read(self.firmware)?;
         let kernel_data = fs::read(self.kernel)?;
         let initrd_data = fs::read(self.initrd)?;
         let tdvf = Tdvf::parse(&fw_data).context("Failed to parse TDVF metadata")?;
+
         let mrtd = tdvf.mrtd(self).context("Failed to compute MR TD")?;
-        let rtmr0 = tdvf.rtmr0(self).context("Failed to compute RTMR0")?;
-        let rtmr1 = kernel::measure_kernel(
+
+        let (rtmr0_log, acpi_tables) = tdvf
+            .rtmr0_log(self)
+            .context("Failed to compute RTMR0 log")?;
+        debug_print_log("RTMR0", &rtmr0_log);
+        let rtmr0 = measure_log(&rtmr0_log);
+
+        let rtmr1_log = kernel::rtmr1_log(
             &kernel_data,
             initrd_data.len() as u32,
             self.memory_size,
             0x28000,
         )?;
+        debug_print_log("RTMR1", &rtmr1_log);
+        let rtmr1 = measure_log(&rtmr1_log);

         let rtmr2_log = vec![
             kernel::measure_cmdline(self.kernel_cmdline),
@@ -53,11 +122,15 @@
         debug_print_log("RTMR2", &rtmr2_log);
         let rtmr2 = measure_log(&rtmr2_log);

-        Ok(TdxMeasurements {
-            mrtd,
-            rtmr0,
-            rtmr1,
-            rtmr2,
+        Ok(TdxMeasurementDetails {
+            measurements: TdxMeasurements {
+                mrtd,
+                rtmr0,
+                rtmr1,
+                rtmr2,
+            },
+            rtmr_logs: [rtmr0_log, rtmr1_log, rtmr2_log],
+            acpi_tables,
         })
     }
 }
diff --git a/dstack-mr/src/tdvf.rs b/dstack-mr/src/tdvf.rs
index e04edcb3..b41dee76 100644
--- a/dstack-mr/src/tdvf.rs
+++ b/dstack-mr/src/tdvf.rs
@@ -4,11 +4,12 @@
 use anyhow::{anyhow, bail, Context, Result};
 use hex_literal::hex;
+use scale::Decode;
 use sha2::{Digest, Sha384};

+use crate::acpi::Tables;
 use crate::num::read_le;
-use crate::util::debug_print_log;
-use crate::{measure_log, measure_sha384, utf16_encode, Machine};
+use crate::{measure_log, measure_sha384, utf16_encode, Machine, RtmrLog};

 const PAGE_SIZE: u64 = 0x1000;
 const MR_EXTEND_GRANULARITY: usize = 0x100;
@@ -24,7 +25,13 @@
 pub enum PageAddOrder {
     SinglePass,
 }

-#[derive(Debug)]
+/// Helper to decode little-endian integers from byte slice using scale codec
+fn decode_le<T: Decode>(data: &[u8], context: &str) -> Result<T> {
+    T::decode(&mut &data[..])
+        .with_context(|| format!("failed to decode {} as little-endian", context))
+}
+
+#[derive(Debug, Decode)]
 struct TdvfSection {
     data_offset: u32,
     raw_data_size: u32,
@@ -34,6 +41,14 @@
     attributes: u32,
 }

+#[derive(Debug, Decode)]
+struct TdvfDescriptor {
+    signature: [u8; 4], // "TDVF"
+    _length: u32,
+    version: u32,
+    num_sections: u32,
+}
+
 #[derive(Debug)]
 pub(crate) struct Tdvf<'a> {
     fw: &'a [u8],
@@ -77,25 +92,39 @@ fn
measure_tdx_efi_variable(vendor_guid: &str, var_name: &str) -> Result } impl<'a> Tdvf<'a> { + /// Parse TDVF firmware metadata + /// + /// This function uses scale codec for clean, panic-free parsing. + /// Correctness is verified by integration test in tests/tdvf_parse.rs + /// which ensures identical measurements to the original implementation. pub fn parse(fw: &'a [u8]) -> Result> { const TDX_METADATA_OFFSET_GUID: &str = "e47a6535-984a-4798-865e-4685a7bf8ec2"; const TABLE_FOOTER_GUID: &str = "96b582de-1fb2-45f7-baea-a366c55a082d"; const BYTES_AFTER_TABLE_FOOTER: usize = 32; + if fw.len() < BYTES_AFTER_TABLE_FOOTER { + bail!("TDVF firmware too small"); + } let offset = fw.len() - BYTES_AFTER_TABLE_FOOTER; let encoded_footer_guid = encode_guid(TABLE_FOOTER_GUID)?; + if offset < 16 { + bail!("TDVF firmware offset too small for GUID"); + } let guid = &fw[offset - 16..offset]; if guid != encoded_footer_guid { bail!("Failed to parse TDVF metadata: Invalid footer GUID"); } - let tables_len = - u16::from_le_bytes(fw[offset - 18..offset - 16].try_into().unwrap()) as usize; - if tables_len == 0 || tables_len > offset - 18 { + if offset < 18 { + bail!("TDVF firmware offset too small for tables length"); + } + let tables_len = decode_le::(&fw[offset - 18..offset - 16], "tables length")? as usize; + if tables_len == 0 || tables_len > offset.saturating_sub(18) { bail!("Failed to parse TDVF metadata: Invalid tables length"); } - let tables = &fw[offset - 18 - tables_len..offset - 18]; + let table_start = offset.saturating_sub(18).saturating_sub(tables_len); + let tables = &fw[table_start..offset - 18]; let mut offset = tables.len(); let mut data: Option<&[u8]> = None; @@ -106,47 +135,51 @@ impl<'a> Tdvf<'a> { } let guid = &tables[offset - 16..offset]; let entry_len = read_le::(tables, offset - 18, "entry length")? as usize; - if entry_len > offset - 18 { + if entry_len > offset.saturating_sub(18) { bail!("Failed to parse TDVF metadata: Invalid entry length"); } if guid == encoded_guid { - data = Some(&tables[offset - 18 - entry_len..offset - 18]); + let entry_start = offset.saturating_sub(18).saturating_sub(entry_len); + data = Some(&tables[entry_start..offset - 18]); break; } - offset -= entry_len; + offset = offset.saturating_sub(entry_len); } let data = data.context("Failed to parse TDVF metadata: Missing TDVF metadata")?; - let tdvf_meta_offset = - u32::from_le_bytes(data[data.len() - 4..].try_into().unwrap()) as usize; - let tdvf_meta_offset = fw.len() - tdvf_meta_offset; - let tdvf_meta_desc = &fw[tdvf_meta_offset..tdvf_meta_offset + 16]; + if data.len() < 4 { + bail!("TDVF metadata data too small"); + } + let tdvf_meta_offset_raw = + decode_le::(&data[data.len() - 4..], "TDVF metadata offset")? 
as usize; + if tdvf_meta_offset_raw > fw.len() { + bail!("TDVF metadata offset exceeds firmware size"); + } + let tdvf_meta_offset = fw.len() - tdvf_meta_offset_raw; + + // Decode TDVF descriptor using scale codec + let descriptor = TdvfDescriptor::decode(&mut &fw[tdvf_meta_offset..]) + .context("failed to decode TDVF descriptor")?; - if &tdvf_meta_desc[..4] != b"TDVF" { + if &descriptor.signature != b"TDVF" { bail!("Failed to parse TDVF metadata: Invalid TDVF descriptor"); } - let tdvf_version = u32::from_le_bytes(tdvf_meta_desc[8..12].try_into().unwrap()); - if tdvf_version != 1 { + if descriptor.version != 1 { bail!("Failed to parse TDVF metadata: Unsupported TDVF version"); } - let num_sections = u32::from_le_bytes(tdvf_meta_desc[12..16].try_into().unwrap()) as usize; + let num_sections = descriptor.num_sections as usize; let mut meta = Tdvf { fw, sections: Vec::new(), }; + + // Decode all sections using scale codec for i in 0..num_sections { let sec_offset = tdvf_meta_offset + 16 + 32 * i; - let sec_data = &fw[sec_offset..sec_offset + 32]; - let s = TdvfSection { - data_offset: u32::from_le_bytes(sec_data[0..4].try_into().unwrap()), - raw_data_size: u32::from_le_bytes(sec_data[4..8].try_into().unwrap()), - memory_address: u64::from_le_bytes(sec_data[8..16].try_into().unwrap()), - memory_data_size: u64::from_le_bytes(sec_data[16..24].try_into().unwrap()), - sec_type: u32::from_le_bytes(sec_data[24..28].try_into().unwrap()), - attributes: u32::from_le_bytes(sec_data[28..32].try_into().unwrap()), - }; + let s = TdvfSection::decode(&mut &fw[sec_offset..]) + .with_context(|| format!("failed to decode TDVF section {}", i))?; if s.memory_address % PAGE_SIZE != 0 { bail!("Failed to parse TDVF metadata: Section memory address not aligned"); @@ -223,14 +256,23 @@ impl<'a> Tdvf<'a> { } pub fn mrtd(&self, machine: &Machine) -> Result> { - self.compute_mrtd(if machine.two_pass_add_pages { + let opts = machine + .versioned_options() + .context("Failed to get versioned options")?; + self.compute_mrtd(if opts.two_pass_add_pages { PageAddOrder::TwoPass } else { PageAddOrder::SinglePass }) } + #[allow(dead_code)] pub fn rtmr0(&self, machine: &Machine) -> Result> { + let (rtmr0_log, _) = self.rtmr0_log(machine)?; + Ok(measure_log(&rtmr0_log)) + } + + pub fn rtmr0_log(&self, machine: &Machine) -> Result<(RtmrLog, Tables)> { let td_hob_hash = self.measure_td_hob(machine.memory_size)?; let cfv_image_hash = hex!("344BC51C980BA621AAA00DA3ED7436F7D6E549197DFE699515DFA2C6583D95E6412AF21C097D473155875FFD561D6790"); let boot000_hash = hex!("23ADA07F5261F12F34A0BD8E46760962D6B4D576A416F1FEA1C64BC656B1D28EACF7047AE6E967C58FD2A98BFA74C298"); @@ -242,23 +284,24 @@ impl<'a> Tdvf<'a> { // RTMR0 calculation - let rtmr0_log = vec![ - td_hob_hash, - cfv_image_hash.to_vec(), - measure_tdx_efi_variable("8BE4DF61-93CA-11D2-AA0D-00E098032B8C", "SecureBoot")?, - measure_tdx_efi_variable("8BE4DF61-93CA-11D2-AA0D-00E098032B8C", "PK")?, - measure_tdx_efi_variable("8BE4DF61-93CA-11D2-AA0D-00E098032B8C", "KEK")?, - measure_tdx_efi_variable("D719B2CB-3D3A-4596-A3BC-DAD00E67656F", "db")?, - measure_tdx_efi_variable("D719B2CB-3D3A-4596-A3BC-DAD00E67656F", "dbx")?, - measure_sha384(&[0x00, 0x00, 0x00, 0x00]), // Separator - acpi_loader_hash, - acpi_rsdp_hash, - acpi_tables_hash, - measure_sha384(&[0x00, 0x00]), // BootOrder - boot000_hash.to_vec(), - ]; - debug_print_log("RTMR0", &rtmr0_log); - Ok(measure_log(&rtmr0_log)) + Ok(( + vec![ + td_hob_hash, + cfv_image_hash.to_vec(), + 
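+                // The EFI variable measurements that follow mirror the firmware's
+                // boot-time measurement order: SecureBoot, PK, KEK, db, dbx.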
measure_tdx_efi_variable("8BE4DF61-93CA-11D2-AA0D-00E098032B8C", "SecureBoot")?, + measure_tdx_efi_variable("8BE4DF61-93CA-11D2-AA0D-00E098032B8C", "PK")?, + measure_tdx_efi_variable("8BE4DF61-93CA-11D2-AA0D-00E098032B8C", "KEK")?, + measure_tdx_efi_variable("D719B2CB-3D3A-4596-A3BC-DAD00E67656F", "db")?, + measure_tdx_efi_variable("D719B2CB-3D3A-4596-A3BC-DAD00E67656F", "dbx")?, + measure_sha384(&[0x00, 0x00, 0x00, 0x00]), // Separator + acpi_loader_hash, + acpi_rsdp_hash, + acpi_tables_hash, + measure_sha384(&[0x00, 0x00]), // BootOrder + boot000_hash.to_vec(), + ], + tables, + )) } fn measure_td_hob(&self, memory_size: u64) -> Result> { @@ -298,19 +341,30 @@ impl<'a> Tdvf<'a> { td_hob.extend_from_slice(&length.to_le_bytes()); }; - let (_, last_start, last_end) = memory_acceptor.ranges.pop().expect("No ranges"); + let (_, last_start, last_end) = memory_acceptor.ranges.pop().context("No ranges")?; for (accepted, start, end) in memory_acceptor.ranges { + if end < start { + bail!("Invalid memory range: end < start"); + } + let size = end - start; if accepted { - add_memory_resource_hob(0x00, start, end - start); + add_memory_resource_hob(0x00, start, size); } else { - add_memory_resource_hob(0x07, start, end - start); + add_memory_resource_hob(0x07, start, size); } } + if last_end < last_start { + bail!("Invalid last memory range: end < start"); + } if memory_size >= 0xB0000000 { - add_memory_resource_hob(0x07, last_start, 0x80000000u64 - last_start); - add_memory_resource_hob(0x07, 0x100000000, last_end - 0x80000000u64); + if last_start < 0x80000000u64 { + add_memory_resource_hob(0x07, last_start, 0x80000000u64 - last_start); + } + if last_end > 0x80000000u64 { + add_memory_resource_hob(0x07, 0x100000000, last_end - 0x80000000u64); + } } else { add_memory_resource_hob(0x07, last_start, last_end - last_start); } diff --git a/dstack-mr/tests/tdvf_parse.rs b/dstack-mr/tests/tdvf_parse.rs new file mode 100644 index 00000000..6c7e9382 --- /dev/null +++ b/dstack-mr/tests/tdvf_parse.rs @@ -0,0 +1,141 @@ +// SPDX-FileCopyrightText: © 2025 Phala Network +// +// SPDX-License-Identifier: Apache-2.0 + +//! Integration test to verify TDVF firmware parsing correctness +//! +//! This test ensures that the scale codec-based parsing produces +//! identical measurements to the original implementation. +//! +//! The test downloads a real dstack release from GitHub and verifies +//! that the measurements remain consistent with the baseline. 
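+//!
+//! The test is marked `#[ignore]` because it needs network access to fetch the
+//! release tarball; run it explicitly with `cargo test --release -- --ignored`.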
+ +use anyhow::{Context, Result}; +use dstack_mr::Machine; +use std::path::PathBuf; + +// dstack release to download for testing +const DSTACK_VERSION: &str = "v0.5.5"; +const DSTACK_RELEASE_URL: &str = + "https://github.com/Dstack-TEE/meta-dstack/releases/download/v0.5.5/dstack-0.5.5.tar.gz"; + +// Expected measurements from baseline (verified with original implementation) +// These are the measurements for dstack v0.5.5 with default configuration +// Generated with: dstack-mr measure /path/to/dstack-0.5.5/metadata.json --json +const EXPECTED_MRTD: &str = "f06dfda6dce1cf904d4e2bab1dc370634cf95cefa2ceb2de2eee127c9382698090d7a4a13e14c536ec6c9c3c8fa87077"; +const EXPECTED_RTMR0: &str = "68102e7b524af310f7b7d426ce75481e36c40f5d513a9009c046e9d37e31551f0134d954b496a3357fd61d03f07ffe96"; +const EXPECTED_RTMR1: &str = "daa9380dc33b14728a9adb222437cf14db2d40ffc4d7061d8f3c329f6c6b339f71486d33521287e8faeae22301f4d815"; +const EXPECTED_RTMR2: &str = "1c41080c9c74be158e55b92f2958129fc1265647324c4a0dc403292cfa41d4c529f39093900347a11c8c1b82ed8c5edf"; + +/// Download and extract dstack release tarball if not already cached +fn get_test_image_dir() -> Result { + let cache_dir = std::env::temp_dir().join("dstack-mr-test-cache"); + let version_dir = cache_dir.join(DSTACK_VERSION); + let image_dir = version_dir.join("dstack-0.5.5"); + let metadata_path = image_dir.join("metadata.json"); + + // Return cached version if it exists + if metadata_path.exists() { + return Ok(image_dir); + } + + eprintln!("Downloading dstack {DSTACK_VERSION} release for testing...",); + std::fs::create_dir_all(&version_dir)?; + + // Download tarball + let tarball_path = version_dir.join("dstack.tar.gz"); + let response = + reqwest::blocking::get(DSTACK_RELEASE_URL).context("failed to download dstack release")?; + + if !response.status().is_success() { + anyhow::bail!("failed to download: HTTP {}", response.status()); + } + + let bytes = response.bytes().context("failed to read response")?; + std::fs::write(&tarball_path, bytes).context("failed to write tarball")?; + + eprintln!("Extracting tarball..."); + + // Extract tarball + let tarball = std::fs::File::open(&tarball_path)?; + let decoder = flate2::read::GzDecoder::new(tarball); + let mut archive = tar::Archive::new(decoder); + archive + .unpack(&version_dir) + .context("failed to extract tarball")?; + + // Verify extraction + if !metadata_path.exists() { + anyhow::bail!("metadata.json not found after extraction"); + } + + eprintln!("Test image ready at: {}", image_dir.display()); + + Ok(image_dir) +} + +#[test] +#[ignore] // Run with: cargo test --release -- --ignored +fn test_tdvf_parse_produces_correct_measurements() -> Result<()> { + // Get or download test image + let image_dir = get_test_image_dir()?; + let metadata_path = image_dir.join("metadata.json"); + + let metadata = std::fs::read_to_string(&metadata_path) + .with_context(|| format!("failed to read {}", metadata_path.display()))?; + let image_info: dstack_types::ImageInfo = serde_json::from_str(&metadata)?; + + let firmware_path = image_dir.join(&image_info.bios).display().to_string(); + let kernel_path = image_dir.join(&image_info.kernel).display().to_string(); + let initrd_path = image_dir.join(&image_info.initrd).display().to_string(); + let cmdline = image_info.cmdline + " initrd=initrd"; + + eprintln!("Building machine configuration..."); + let machine = Machine::builder() + .cpu_count(1) + .memory_size(2 * 1024 * 1024 * 1024) // 2GB + .firmware(&firmware_path) + .kernel(&kernel_path) + .initrd(&initrd_path) + 
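+            // Pin the QEMU 8.x-era behavior (two-pass page add, PIC on) that the
+            // baseline measurements above appear to have been generated with.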
.kernel_cmdline(&cmdline) + .two_pass_add_pages(true) + .pic(true) + .smm(false) + .hugepages(false) + .num_gpus(0) + .num_nvswitches(0) + .hotplug_off(false) + .root_verity(true) + .build(); + + eprintln!("Computing measurements (this parses TDVF firmware)..."); + let measurements = machine.measure()?; + + eprintln!("Verifying measurements against baseline..."); + + // Verify measurements match expected values + assert_eq!( + hex::encode(&measurements.mrtd), + EXPECTED_MRTD, + "MRTD mismatch - TDVF parsing may have regressed" + ); + assert_eq!( + hex::encode(&measurements.rtmr0), + EXPECTED_RTMR0, + "RTMR0 mismatch - TDVF parsing may have regressed" + ); + assert_eq!( + hex::encode(&measurements.rtmr1), + EXPECTED_RTMR1, + "RTMR1 mismatch - TDVF parsing may have regressed" + ); + assert_eq!( + hex::encode(&measurements.rtmr2), + EXPECTED_RTMR2, + "RTMR2 mismatch - TDVF parsing may have regressed" + ); + + eprintln!("✅ All measurements match baseline - TDVF parsing is correct!"); + + Ok(()) +} diff --git a/dstack-types/Cargo.toml b/dstack-types/Cargo.toml index ca3f22cd..d0b0ae64 100644 --- a/dstack-types/Cargo.toml +++ b/dstack-types/Cargo.toml @@ -13,3 +13,4 @@ license.workspace = true serde = { workspace = true, features = ["derive"] } serde-human-bytes.workspace = true sha3.workspace = true +size-parser = { workspace = true, features = ["serde"] } diff --git a/dstack-types/src/lib.rs b/dstack-types/src/lib.rs index 15183248..1188a1f0 100644 --- a/dstack-types/src/lib.rs +++ b/dstack-types/src/lib.rs @@ -4,6 +4,7 @@ use serde::{Deserialize, Serialize}; use serde_human_bytes as hex_bytes; +use size_parser::human_size; #[derive(Deserialize, Serialize, Debug, Clone)] pub struct AppCompose { @@ -16,8 +17,6 @@ pub struct AppCompose { #[serde(default)] pub docker_compose_file: Option, #[serde(default)] - pub docker_config: DockerConfig, - #[serde(default)] pub public_logs: bool, #[serde(default)] pub public_sysinfo: bool, @@ -39,6 +38,10 @@ pub struct AppCompose { pub no_instance_id: bool, #[serde(default = "default_true")] pub secure_time: bool, + #[serde(default)] + pub storage_fs: Option, + #[serde(default, with = "human_size")] + pub swap_size: u64, } fn default_true() -> bool { @@ -138,10 +141,12 @@ pub struct VmConfig { pub cpu_count: u32, pub memory_size: u64, // https://github.com/intel-staging/qemu-tdx/issues/1 - #[serde(default)] - pub qemu_single_pass_add_pages: bool, - #[serde(default)] - pub pic: bool, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub qemu_single_pass_add_pages: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub pic: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub qemu_version: Option, #[serde(default)] pub pci_hole64_size: u64, #[serde(default)] @@ -152,6 +157,8 @@ pub struct VmConfig { pub num_nvswitches: u32, #[serde(default)] pub hotplug_off: bool, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub image: Option, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -183,6 +190,8 @@ pub enum KeyProvider { url: String, #[serde(with = "hex_bytes")] pubkey: Vec, + tmp_ca_key: String, + tmp_ca_cert: String, }, } diff --git a/dstack-util/src/main.rs b/dstack-util/src/main.rs index 62dbbc3e..f5f2f730 100644 --- a/dstack-util/src/main.rs +++ b/dstack-util/src/main.rs @@ -24,7 +24,7 @@ use std::{ io::{self, Read, Write}, path::PathBuf, }; -use system_setup::{cmd_sys_setup, SetupArgs}; +use system_setup::{cmd_gateway_refresh, cmd_sys_setup, GatewayRefreshArgs, SetupArgs}; use tdx_attest 
as att; use utils::AppKeys; @@ -64,6 +64,8 @@ enum Commands { Rand(RandArgs), /// Prepare dstack system. Setup(SetupArgs), + /// Refresh the dstack gateway configuration + GatewayRefresh(GatewayRefreshArgs), /// Notify the host about the dstack app NotifyHost(HostNotifyArgs), /// Remove orphaned containers @@ -533,6 +535,9 @@ async fn main() -> Result<()> { Commands::Setup(args) => { cmd_sys_setup(args).await?; } + Commands::GatewayRefresh(args) => { + cmd_gateway_refresh(args).await?; + } Commands::NotifyHost(args) => { cmd_notify_host(args).await?; } diff --git a/dstack-util/src/system_setup.rs b/dstack-util/src/system_setup.rs index 0470b1de..6e568702 100644 --- a/dstack-util/src/system_setup.rs +++ b/dstack-util/src/system_setup.rs @@ -4,8 +4,12 @@ use std::{ collections::{BTreeMap, BTreeSet}, + fmt::Display, ops::Deref, path::{Path, PathBuf}, + process::Command, + str::FromStr, + time::Duration, }; use anyhow::{anyhow, bail, Context, Result}; @@ -67,6 +71,14 @@ pub struct SetupArgs { mount_point: PathBuf, } +#[derive(clap::Parser)] +/// Refresh dstack gateway configuration +pub struct GatewayRefreshArgs { + /// dstack work directory + #[arg(long)] + work_dir: PathBuf, +} + #[derive(Deserialize, Serialize, Clone, Default)] struct InstanceInfo { #[serde(with = "hex_bytes", default)] @@ -77,6 +89,67 @@ struct InstanceInfo { app_id: Vec, } +#[derive(Debug, Clone, Copy, PartialEq, Default)] +enum FsType { + #[default] + Zfs, + Ext4, +} + +impl Display for FsType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + FsType::Zfs => write!(f, "zfs"), + FsType::Ext4 => write!(f, "ext4"), + } + } +} + +impl FromStr for FsType { + type Err = anyhow::Error; + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "zfs" => Ok(FsType::Zfs), + "ext4" => Ok(FsType::Ext4), + _ => bail!("Invalid filesystem type: {s}, supported types: zfs, ext4"), + } + } +} + +#[derive(Debug, Clone, Default)] +struct DstackOptions { + storage_encrypted: bool, + storage_fs: FsType, +} + +fn parse_dstack_options(shared: &HostShared) -> Result { + let cmdline = fs::read_to_string("/proc/cmdline").context("Failed to read /proc/cmdline")?; + + let mut options = DstackOptions { + storage_encrypted: true, // Default to encryption enabled + storage_fs: FsType::Zfs, // Default to ZFS + }; + + for param in cmdline.split_whitespace() { + if let Some(value) = param.strip_prefix("dstack.storage_encrypted=") { + match value { + "0" | "false" | "no" | "off" => options.storage_encrypted = false, + "1" | "true" | "yes" | "on" => options.storage_encrypted = true, + _ => { + bail!("Invalid value for dstack.storage_encrypted: {value}"); + } + } + } else if let Some(value) = param.strip_prefix("dstack.storage_fs=") { + options.storage_fs = value.parse().context("Failed to parse dstack.storage_fs")?; + } + } + + if let Some(fs) = &shared.app_compose.storage_fs { + options.storage_fs = fs.parse().context("Failed to parse storage_fs")?; + } + Ok(options) +} + impl InstanceInfo { fn is_initialized(&self) -> bool { !self.instance_id_seed.is_empty() @@ -190,8 +263,8 @@ impl HostShared { mkdir -p $host_shared_copy_dir; info "Copying host-shared files"; }?; - copy(APP_COMPOSE, SZ_1KB * 128, false)?; - copy(SYS_CONFIG, SZ_1KB * 10, false)?; + copy(APP_COMPOSE, SZ_1KB * 256, false)?; + copy(SYS_CONFIG, SZ_1KB * 32, false)?; copy(INSTANCE_INFO, SZ_1KB * 10, true)?; copy(ENCRYPTED_ENV, SZ_1KB * 256, true)?; copy(USER_CONFIG, SZ_1MB, true)?; @@ -203,6 +276,171 @@ impl HostShared { } } +struct 
GatewayContext<'a> { + shared: &'a HostShared, + keys: &'a AppKeys, +} + +impl<'a> GatewayContext<'a> { + fn new(shared: &'a HostShared, keys: &'a AppKeys) -> Self { + Self { shared, keys } + } + + async fn register_cvm( + &self, + gateway_url: &str, + client_key: String, + client_cert: String, + wg_pk: String, + ) -> Result { + let url = format!("{}/prpc", gateway_url); + let ca_cert = self.keys.ca_cert.clone(); + let cert_validator = AppIdValidator { + allowed_app_id: self.keys.gateway_app_id.clone(), + }; + let client = RaClientConfig::builder() + .remote_uri(url) + .maybe_pccs_url(self.shared.sys_config.pccs_url.clone()) + .tls_client_cert(client_cert) + .tls_client_key(client_key) + .tls_ca_cert(ca_cert) + .tls_built_in_root_certs(false) + .tls_no_check(self.keys.gateway_app_id == "any") + .verify_server_attestation(false) + .cert_validator(Box::new(move |cert| cert_validator.validate(cert))) + .build() + .into_client() + .context("Failed to create RA client")?; + let client = GatewayClient::new(client); + client + .register_cvm(RegisterCvmRequest { + client_public_key: wg_pk, + }) + .await + .context("Failed to register CVM") + } + + async fn setup(&self) -> Result<()> { + if !self.shared.app_compose.gateway_enabled() { + info!("dstack-gateway is not enabled"); + return Ok(()); + } + if self.keys.gateway_app_id.is_empty() { + bail!("Missing allowed dstack-gateway app id"); + } + + info!("Setting up dstack-gateway"); + // Generate WireGuard keys + let sk = cmd!(wg genkey)?; + let pk = cmd!(echo $sk | wg pubkey).or(Err(anyhow!("Failed to generate public key")))?; + + let config = CertConfig { + org_name: None, + subject: "dstack-guest-agent".to_string(), + subject_alt_names: vec![], + usage_server_auth: false, + usage_client_auth: true, + ext_quote: true, + }; + let cert_client = CertRequestClient::create( + self.keys, + self.shared.sys_config.pccs_url.as_deref(), + self.shared.sys_config.vm_config.clone(), + ) + .await + .context("Failed to create cert client")?; + let client_key = + KeyPair::generate_for(&PKCS_ECDSA_P256_SHA256).context("Failed to generate key")?; + let client_certs = cert_client + .request_cert(&client_key, config, false) + .await + .context("Failed to request cert")?; + let client_cert = client_certs.join("\n"); + let client_key = client_key.serialize_pem(); + + if self.shared.sys_config.gateway_urls.is_empty() { + bail!("Missing gateway urls"); + } + // Read config and make API call + let response = 'out: { + for url in self.shared.sys_config.gateway_urls.iter() { + let response = self + .register_cvm(url, client_key.clone(), client_cert.clone(), pk.clone()) + .await; + match response { + Ok(response) => { + break 'out response; + } + Err(err) => { + warn!("Failed to register CVM: {err:?}, retrying with next dstack-gateway"); + } + } + } + bail!("Failed to register CVM, all dstack-gateway urls are down"); + }; + let wg_info = response.wg.context("Missing wg info")?; + + let client_ip = &wg_info.client_ip; + + // Create WireGuard config + let wg_listen_port = "9182"; + let mut config = format!( + "[Interface]\n\ + PrivateKey = {sk}\n\ + ListenPort = {wg_listen_port}\n\ + Address = {client_ip}/32\n\n" + ); + for WireGuardPeer { pk, ip, endpoint } in &wg_info.servers { + let ip = ip.split('/').next().unwrap_or_default(); + config.push_str(&format!( + "[Peer]\n\ + PublicKey = {pk}\n\ + AllowedIPs = {ip}/32\n\ + Endpoint = {endpoint}\n\ + PersistentKeepalive = 25\n", + )); + } + + let wg_dir = Path::new("/etc/wireguard"); + fs::create_dir_all(wg_dir)?; + 
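+        // For reference, the rendered dstack-wg0.conf looks like this
+        // (illustrative addresses; the real PrivateKey comes from `wg genkey`
+        // above and the peer list from the gateway's RegisterCvm response):
+        //
+        //   [Interface]
+        //   PrivateKey = <generated key>
+        //   ListenPort = 9182
+        //   Address = 10.4.0.2/32
+        //
+        //   [Peer]
+        //   PublicKey = <gateway wg public key>
+        //   AllowedIPs = 10.4.0.1/32
+        //   Endpoint = 203.0.113.7:9182
+        //   PersistentKeepalive = 25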
fs::write(wg_dir.join("dstack-wg0.conf"), config)?; + + cmd! { + chmod 600 $wg_dir/dstack-wg0.conf; + ignore wg-quick down dstack-wg0; + }?; + + // Setup WireGuard iptables rules + cmd! { + // Create the chain if it doesn't exist + ignore iptables -N DSTACK_WG 2>/dev/null; + // Flush the chain + iptables -F DSTACK_WG; + // Remove any existing jump rule + ignore iptables -D INPUT -p udp --dport $wg_listen_port -j DSTACK_WG 2>/dev/null; + // Insert the new jump rule at the beginning of the INPUT chain + iptables -I INPUT -p udp --dport $wg_listen_port -j DSTACK_WG + }?; + + for peer in &wg_info.servers { + // Avoid issues with field-access in the macro by binding the IP to a local variable. + let endpoint_ip = peer + .endpoint + .split(':') + .next() + .context("Invalid wireguard endpoint")?; + cmd!(iptables -A DSTACK_WG -s $endpoint_ip -j ACCEPT)?; + } + + // Drop any UDP packets that don't come from an allowed IP. + cmd!(iptables -A DSTACK_WG -j DROP)?; + + info!("Starting WireGuard"); + cmd!(wg-quick up dstack-wg0)?; + Ok(()) + } +} + fn truncate(s: &[u8], len: usize) -> &[u8] { if s.len() > len { &s[..len] @@ -233,6 +471,21 @@ pub async fn cmd_sys_setup(args: SetupArgs) -> Result<()> { stage1.setup().await } +pub async fn cmd_gateway_refresh(args: GatewayRefreshArgs) -> Result<()> { + let host_shared_dir = args.work_dir.join(HOST_SHARED_DIR_NAME); + let shared = HostShared::load(host_shared_dir.as_path()).with_context(|| { + format!( + "Failed to load host-shared dir: {}", + host_shared_dir.display() + ) + })?; + let keys_path = shared.dir.join(APP_KEYS); + let keys: AppKeys = deserialize_json_file(&keys_path) + .with_context(|| format!("Failed to load app keys from {}", keys_path.display()))?; + + GatewayContext::new(&shared, &keys).setup().await +} + struct AppIdValidator { allowed_app_id: String, } @@ -308,7 +561,7 @@ impl<'a> Stage0<'a> { .await .context("Failed to get temp ca cert")? }; - let cert_pair = generate_ra_cert(tmp_ca.temp_ca_cert, tmp_ca.temp_ca_key)?; + let cert_pair = generate_ra_cert(tmp_ca.temp_ca_cert.clone(), tmp_ca.temp_ca_key.clone())?; let ra_client = RaClientConfig::builder() .tls_no_check(false) .tls_built_in_root_certs(false) @@ -366,6 +619,8 @@ impl<'a> Stage0<'a> { key_provider: KeyProvider::Kms { url: kms_url, pubkey: root_pubkey, + tmp_ca_key: tmp_ca.temp_ca_key, + tmp_ca_cert: tmp_ca.temp_ca_cert, }, }; Ok(keys) @@ -433,41 +688,204 @@ impl<'a> Stage0<'a> { } } - async fn mount_data_disk(&self, initialized: bool, disk_crypt_key: &str) -> Result<()> { + async fn setup_swap(&self, swap_size: u64, opts: &DstackOptions) -> Result<()> { + match opts.storage_fs { + FsType::Zfs => self.setup_swap_zvol(swap_size).await, + FsType::Ext4 => self.setup_swapfile(swap_size).await, + } + } + + async fn setup_swapfile(&self, swap_size: u64) -> Result<()> { + let swapfile = self.args.mount_point.join("swapfile"); + if swapfile.exists() { + fs::remove_file(&swapfile).context("Failed to remove swapfile")?; + info!("Removed existing swapfile"); + } + if swap_size == 0 { + return Ok(()); + } + let swapfile = swapfile.display().to_string(); + info!("Creating swapfile at {swapfile} (size {swap_size} bytes)"); + let size_str = swap_size.to_string(); + cmd! 
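+        // What this block does, step by step: fallocate reserves the file at
+        // its full size up front, chmod 600 keeps it root-only (swapon warns
+        // on looser permissions), mkswap writes the swap signature, swapon
+        // activates it, and `swapon --show` just logs the result for debugging.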
{ + fallocate -l $size_str $swapfile; + chmod 600 $swapfile; + mkswap $swapfile; + swapon $swapfile; + swapon --show; + } + .context("Failed to enable swap on swapfile")?; + Ok(()) + } + + async fn setup_swap_zvol(&self, swap_size: u64) -> Result<()> { + let swapvol_path = "dstack/swap"; + let swapvol_device_path = format!("/dev/zvol/{swapvol_path}"); + + if Path::new(&swapvol_device_path).exists() { + cmd! { + zfs set volmode=none $swapvol_path; + zfs destroy $swapvol_path; + } + .context("Failed to destroy swap zvol")?; + } + + if swap_size == 0 { + return Ok(()); + } + + info!("Creating swap zvol at {swapvol_device_path} (size {swap_size} bytes)"); + + let size_str = swap_size.to_string(); + cmd! { + zfs create -V $size_str + -o compression=zle + -o logbias=throughput + -o sync=always + -o primarycache=metadata + -o com.sun:auto-snapshot=false + $swapvol_path + } + .with_context(|| format!("Failed to create swap zvol {swapvol_path}"))?; + + let mut count = 0u32; + while !Path::new(&swapvol_device_path).exists() && count < 10 { + std::thread::sleep(Duration::from_secs(1)); + count += 1; + } + if !Path::new(&swapvol_device_path).exists() { + bail!("Device {swapvol_device_path} did not appear after 10 seconds"); + } + + cmd! { + mkswap $swapvol_device_path; + swapon $swapvol_device_path; + swapon --show; + } + .context("Failed to enable swap on zvol")?; + + Ok(()) + } + + async fn mount_data_disk( + &self, + initialized: bool, + disk_crypt_key: &str, + opts: &DstackOptions, + ) -> Result<()> { let name = "dstack_data_disk"; - let fs_dev = "/dev/mapper/".to_string() + name; let mount_point = &self.args.mount_point; + + // Determine the device to use based on encryption settings + let fs_dev = if opts.storage_encrypted { + format!("/dev/mapper/{name}") + } else { + self.args.device.to_string_lossy().to_string() + }; + + cmd!(mkdir -p $mount_point).context("Failed to create mount point")?; + if !initialized { self.vmm .notify_q("boot.progress", "initializing data disk") .await; - info!("Setting up disk encryption"); - self.luks_setup(disk_crypt_key, name)?; - cmd! { - mkdir -p $mount_point; - zpool create -o autoexpand=on dstack $fs_dev; - zfs create -o mountpoint=$mount_point -o atime=off -o checksum=blake3 dstack/data; + + if opts.storage_encrypted { + info!("Setting up disk encryption"); + self.luks_setup(disk_crypt_key, name)?; + } else { + info!("Skipping disk encryption as requested by kernel cmdline"); + } + + match opts.storage_fs { + FsType::Zfs => { + info!("Creating ZFS filesystem"); + cmd! { + zpool create -o autoexpand=on dstack $fs_dev; + zfs create -o mountpoint=$mount_point -o atime=off -o checksum=blake3 dstack/data; + } + .context("Failed to create zpool")?; + } + FsType::Ext4 => { + info!("Creating ext4 filesystem"); + cmd! { + mkfs.ext4 -F $fs_dev; + mount $fs_dev $mount_point; + } + .context("Failed to create ext4 filesystem")?; + } } - .context("Failed to create zpool")?; } else { self.vmm .notify_q("boot.progress", "mounting data disk") .await; - info!("Mounting encrypted data disk"); - self.open_encrypted_volume(disk_crypt_key, name)?; - cmd! 
{ - zpool import dstack; - zpool status dstack; - zpool online -e dstack $fs_dev; // triggers autoexpand + + if opts.storage_encrypted { + info!("Mounting encrypted data disk"); + self.open_encrypted_volume(disk_crypt_key, name)?; + } else { + info!("Mounting unencrypted data disk"); } - .context("Failed to import zpool")?; - if cmd!(mountpoint -q $mount_point).is_err() { - cmd!(zfs mount dstack/data).context("Failed to mount zpool")?; + + match opts.storage_fs { + FsType::Zfs => { + cmd! { + zpool import dstack; + zpool status dstack; + zpool online -e dstack $fs_dev; // triggers autoexpand + } + .context("Failed to import zpool")?; + if cmd!(mountpoint -q $mount_point).is_err() { + cmd!(zfs mount dstack/data).context("Failed to mount zpool")?; + } + } + FsType::Ext4 => { + Self::mount_e2fs(&fs_dev, mount_point) + .context("Failed to mount ext4 filesystem")?; + } } } Ok(()) } + fn mount_e2fs(dev: &impl AsRef, mount_point: &impl AsRef) -> Result<()> { + let dev = dev.as_ref(); + let mount_point = mount_point.as_ref(); + info!("Checking filesystem"); + + let e2fsck_status = Command::new("e2fsck") + .arg("-f") + .arg("-p") + .arg(dev) + .status() + .with_context(|| format!("Failed to run e2fsck on {}", dev.display()))?; + + match e2fsck_status.code() { + Some(0 | 1) => {} + Some(code) => { + bail!( + "e2fsck exited with status {code} while checking {}", + dev.display() + ); + } + None => { + bail!( + "e2fsck terminated by signal while checking {}", + dev.display() + ); + } + } + + cmd! { + info "Trying to resize filesystem if needed"; + resize2fs $dev; + info "Mounting filesystem"; + mount $dev $mount_point; + } + .context("Failed to prepare ext4 filesystem")?; + Ok(()) + } + fn luks_setup(&self, disk_crypt_key: &str, name: &str) -> Result<()> { let root_hd = &self.args.device; let sector_offset = PAYLOAD_OFFSET / 512; @@ -614,8 +1032,21 @@ impl<'a> Stage0<'a> { let keys_json = serde_json::to_string(&app_keys).context("Failed to serialize app keys")?; fs::write(self.app_keys_file(), keys_json).context("Failed to write app keys")?; - self.vmm.notify_q("boot.progress", "unsealing env").await; - self.mount_data_disk(is_initialized, &hex::encode(&app_keys.disk_crypt_key)) + // Parse kernel command line options + let opts = parse_dstack_options(&self.shared).context("Failed to parse kernel cmdline")?; + extend_rtmr3("storage-fs", opts.storage_fs.to_string().as_bytes())?; + info!( + "Filesystem options: encryption={}, filesystem={:?}", + opts.storage_encrypted, opts.storage_fs + ); + + self.mount_data_disk( + is_initialized, + &hex::encode(&app_keys.disk_crypt_key), + &opts, + ) + .await?; + self.setup_swap(self.shared.app_compose.swap_size, &opts) .await?; self.vmm .notify_q( @@ -639,10 +1070,6 @@ impl<'a> Stage0<'a> { } impl Stage1<'_> { - fn resolve(&self, path: &str) -> String { - path.to_string() - } - fn decrypt_env_vars( &self, key: &[u8], @@ -697,165 +1124,19 @@ impl Stage1<'_> { } async fn setup(&self) -> Result<()> { - let envs = self.unseal_env_vars()?; + let _envs = self.unseal_env_vars()?; self.link_files()?; self.setup_guest_agent_config()?; self.vmm .notify_q("boot.progress", "setting up dstack-gateway") .await; - self.setup_dstack_gateway().await?; + GatewayContext::new(&self.shared, &self.keys) + .setup() + .await?; self.vmm .notify_q("boot.progress", "setting up docker") .await; self.setup_docker_registry()?; - self.setup_docker_account(&envs)?; - Ok(()) - } - - async fn register_cvm( - &self, - gateway_url: &str, - client_key: String, - client_cert: String, - wg_pk: String, - ) 
-> Result { - let url = format!("{}/prpc", gateway_url); - let ca_cert = self.keys.ca_cert.clone(); - let cert_validator = AppIdValidator { - allowed_app_id: self.keys.gateway_app_id.clone(), - }; - let client = RaClientConfig::builder() - .remote_uri(url) - .maybe_pccs_url(self.shared.sys_config.pccs_url.clone()) - .tls_client_cert(client_cert) - .tls_client_key(client_key) - .tls_ca_cert(ca_cert) - .tls_built_in_root_certs(false) - .tls_no_check(self.keys.gateway_app_id == "any") - .verify_server_attestation(false) - .cert_validator(Box::new(move |cert| cert_validator.validate(cert))) - .build() - .into_client() - .context("Failed to create RA client")?; - let client = GatewayClient::new(client); - client - .register_cvm(RegisterCvmRequest { - client_public_key: wg_pk, - }) - .await - .context("Failed to register CVM") - } - - async fn setup_dstack_gateway(&self) -> Result<()> { - if !self.shared.app_compose.gateway_enabled() { - info!("dstack-gateway is not enabled"); - return Ok(()); - } - if self.keys.gateway_app_id.is_empty() { - bail!("Missing allowed dstack-gateway app id"); - } - - info!("Setting up dstack-gateway"); - // Generate WireGuard keys - let sk = cmd!(wg genkey)?; - let pk = cmd!(echo $sk | wg pubkey).or(Err(anyhow!("Failed to generate public key")))?; - - let config = CertConfig { - org_name: None, - subject: "dstack-guest-agent".to_string(), - subject_alt_names: vec![], - usage_server_auth: false, - usage_client_auth: true, - ext_quote: true, - }; - let cert_client = CertRequestClient::create( - &self.keys, - self.shared.sys_config.pccs_url.as_deref(), - self.shared.sys_config.vm_config.clone(), - ) - .await - .context("Failed to create cert client")?; - let client_key = - KeyPair::generate_for(&PKCS_ECDSA_P256_SHA256).context("Failed to generate key")?; - let client_certs = cert_client - .request_cert(&client_key, config, false) - .await - .context("Failed to request cert")?; - let client_cert = client_certs.join("\n"); - let client_key = client_key.serialize_pem(); - - if self.shared.sys_config.gateway_urls.is_empty() { - bail!("Missing gateway urls"); - } - // Read config and make API call - let response = 'out: { - for url in self.shared.sys_config.gateway_urls.iter() { - let response = self - .register_cvm(url, client_key.clone(), client_cert.clone(), pk.clone()) - .await; - match response { - Ok(response) => { - break 'out response; - } - Err(err) => { - warn!("Failed to register CVM: {err:?}, retrying with next dstack-gateway"); - } - } - } - bail!("Failed to register CVM, all dstack-gateway urls are down"); - }; - let wg_info = response.wg.context("Missing wg info")?; - - let client_ip = &wg_info.client_ip; - - // Create WireGuard config - let wg_listen_port = "9182"; - let mut config = format!( - "[Interface]\n\ - PrivateKey = {sk}\n\ - ListenPort = {wg_listen_port}\n\ - Address = {client_ip}/32\n\n" - ); - for WireGuardPeer { pk, ip, endpoint } in &wg_info.servers { - let ip = ip.split('/').next().unwrap_or_default(); - config.push_str(&format!( - "[Peer]\n\ - PublicKey = {pk}\n\ - AllowedIPs = {ip}/32\n\ - Endpoint = {endpoint}\n\ - PersistentKeepalive = 25\n", - )); - } - fs::create_dir_all(self.resolve("/etc/wireguard"))?; - fs::write(self.resolve("/etc/wireguard/wg0.conf"), config)?; - - // Setup WireGuard iptables rules - cmd! 
{ - // Create the chain if it doesn't exist - ignore iptables -N DSTACK_WG 2>/dev/null; - // Flush the chain - iptables -F DSTACK_WG; - // Remove any existing jump rule - ignore iptables -D INPUT -p udp --dport $wg_listen_port -j DSTACK_WG 2>/dev/null; - // Insert the new jump rule at the beginning of the INPUT chain - iptables -I INPUT -p udp --dport $wg_listen_port -j DSTACK_WG - }?; - - for peer in &wg_info.servers { - // Avoid issues with field-access in the macro by binding the IP to a local variable. - let endpoint_ip = peer - .endpoint - .split(':') - .next() - .context("Invalid wireguard endpoint")?; - cmd!(iptables -A DSTACK_WG -s $endpoint_ip -j ACCEPT)?; - } - - // Drop any UDP packets that don't come from an allowed IP. - cmd!(iptables -A DSTACK_WG -j DROP)?; - - info!("Starting WireGuard"); - cmd!(wg-quick up wg0)?; Ok(()) } @@ -890,20 +1171,10 @@ impl Stage1<'_> { info!("Setting up docker registry"); let registry_url = self .shared - .app_compose - .docker_config - .registry + .sys_config + .docker_registry .as_deref() .unwrap_or_default(); - let registry_url = if registry_url.is_empty() { - self.shared - .sys_config - .docker_registry - .as_deref() - .unwrap_or_default() - } else { - registry_url - }; if registry_url.is_empty() { return Ok(()); } @@ -923,38 +1194,6 @@ impl Stage1<'_> { fs::write(DAEMON_ENV_FILE, serde_json::to_string(&daemon_env)?)?; Ok(()) } - - fn setup_docker_account(&self, envs: &BTreeMap) -> Result<()> { - info!("Setting up docker account"); - let username = self - .shared - .app_compose - .docker_config - .username - .as_deref() - .unwrap_or_default(); - if username.is_empty() { - return Ok(()); - } - let token_key = self - .shared - .app_compose - .docker_config - .token_key - .as_deref() - .unwrap_or_default(); - if token_key.is_empty() { - return Ok(()); - } - let token = envs - .get(token_key) - .with_context(|| format!("Missing token for {username}"))?; - if token.is_empty() { - bail!("Missing token for {username}"); - } - cmd!(docker login -u $username -p $token)?; - Ok(()) - } } macro_rules! 
const_pad { diff --git a/dstack_Technical_Charter_Final_10-17-2025.pdf b/dstack_Technical_Charter_Final_10-17-2025.pdf new file mode 100644 index 00000000..e73d6e12 Binary files /dev/null and b/dstack_Technical_Charter_Final_10-17-2025.pdf differ diff --git a/gateway/Cargo.toml b/gateway/Cargo.toml index d150126c..8a30c6da 100644 --- a/gateway/Cargo.toml +++ b/gateway/Cargo.toml @@ -50,6 +50,7 @@ reqwest = { workspace = true, features = ["json"] } hyper = { workspace = true, features = ["server", "http1"] } hyper-util = { version = "0.1", features = ["tokio"] } jemallocator.workspace = true +or-panic.workspace = true [target.'cfg(unix)'.dependencies] nix = { workspace = true, features = ["resource"] } diff --git a/gateway/dstack-app/Dockerfile b/gateway/dstack-app/Dockerfile deleted file mode 100644 index 90aa8406..00000000 --- a/gateway/dstack-app/Dockerfile +++ /dev/null @@ -1,53 +0,0 @@ -# SPDX-FileCopyrightText: © 2025 Phala Network -# -# SPDX-License-Identifier: Apache-2.0 - -FROM rust:1.86.0@sha256:300ec56abce8cc9448ddea2172747d048ed902a3090e6b57babb2bf19f754081 AS gateway-builder -ARG DSTACK_REV -WORKDIR /src - -# Install build dependencies -RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - git \ - build-essential \ - libssl-dev \ - protobuf-compiler \ - libprotobuf-dev \ - libclang-dev \ - && rm -rf /var/lib/apt/lists/* - -# Clone and checkout specific revision -RUN git clone https://github.com/Dstack-TEE/dstack.git && \ - cd dstack && \ - git checkout ${DSTACK_REV} - -# Build the gateway binary -WORKDIR /src/dstack -RUN cargo build --release -p dstack-gateway - -# Runtime stage -FROM debian:bookworm@sha256:ced9eb5eca0a3ba2e29d0045513863b3baaee71cd8c2eed403c9f7d3eaccfd2b -WORKDIR /app - -# Install runtime dependencies -RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - wireguard-tools \ - iproute2 \ - jq \ - && rm -rf /var/lib/apt/lists/* - -# Copy the built binary -COPY --from=gateway-builder /src/dstack/target/release/dstack-gateway /usr/local/bin/dstack-gateway - -# Copy entrypoint script -COPY entrypoint.sh /app/entrypoint.sh -RUN chmod +x /app/entrypoint.sh - -# Store git revision for reproducibility -ARG DSTACK_REV -RUN echo "${DSTACK_REV}" > /etc/.GIT_REV - -ENTRYPOINT ["/app/entrypoint.sh"] -CMD ["dstack-gateway"] diff --git a/gateway/dstack-app/builder/Dockerfile b/gateway/dstack-app/builder/Dockerfile index 3e5b9c06..ba5bedb0 100644 --- a/gateway/dstack-app/builder/Dockerfile +++ b/gateway/dstack-app/builder/Dockerfile @@ -34,6 +34,7 @@ RUN ./pin-packages.sh ./pinned-packages.txt && \ wireguard-tools \ iproute2 \ jq \ + ca-certificates \ && rm -rf /var/lib/apt/lists/* /var/log/* /var/cache/ldconfig/aux-cache COPY --from=gateway-builder /build/dstack/target/x86_64-unknown-linux-musl/release/dstack-gateway /usr/local/bin/dstack-gateway COPY --from=gateway-builder /build/.GIT_REV /etc/ @@ -41,4 +42,4 @@ WORKDIR /app COPY entrypoint.sh /app/entrypoint.sh RUN chmod +x /app/entrypoint.sh ENTRYPOINT ["/app/entrypoint.sh"] -CMD ["dstack-gateway"] +CMD ["dstack-gateway", "-c", "/data/gateway/gateway.toml"] diff --git a/gateway/dstack-app/builder/README.md b/gateway/dstack-app/builder/README.md index 40f376a3..b5387fa9 100644 --- a/gateway/dstack-app/builder/README.md +++ b/gateway/dstack-app/builder/README.md @@ -44,7 +44,7 @@ services: environment: - IMAGE_DOWNLOAD_URL=${IMAGE_DOWNLOAD_URL:-http://localhost:8001/mr_{OS_IMAGE_HASH}.tar.gz} - AUTH_TYPE=dev - - DEV_DOMAIN=kms.1022.kvin.wang + - DEV_DOMAIN=kms.1022.dstack.org - 
QUOTE_ENABLED=false ``` diff --git a/gateway/dstack-app/builder/entrypoint.sh b/gateway/dstack-app/builder/entrypoint.sh index 7696b95d..cd25da1f 100755 --- a/gateway/dstack-app/builder/entrypoint.sh +++ b/gateway/dstack-app/builder/entrypoint.sh @@ -48,7 +48,6 @@ validate_env() { validate_env "$MY_URL" validate_env "$BOOTNODE_URL" validate_env "$CF_API_TOKEN" -validate_env "$CF_ZONE_ID" validate_env "$SRV_DOMAIN" validate_env "$WG_ENDPOINT" @@ -113,12 +112,12 @@ enabled = true workdir = "$CERTBOT_WORKDIR" acme_url = "$ACME_URL" cf_api_token = "$CF_API_TOKEN" -cf_zone_id = "$CF_ZONE_ID" auto_set_caa = true domain = "*.$SRV_DOMAIN" renew_interval = "1h" renew_before_expiration = "10d" renew_timeout = "5m" +max_dns_wait = "${CERTBOT_MAX_DNS_WAIT:-5m}" [core.wg] public_key = "$PUBLIC_KEY" diff --git a/gateway/dstack-app/deploy-to-vmm.sh b/gateway/dstack-app/deploy-to-vmm.sh index b40696c9..2584d450 100755 --- a/gateway/dstack-app/deploy-to-vmm.sh +++ b/gateway/dstack-app/deploy-to-vmm.sh @@ -47,9 +47,6 @@ else # Cloudflare API token for DNS challenge # CF_API_TOKEN=your_cloudflare_api_token -# Cloudflare Zone ID -# CF_ZONE_ID=your_zone_id - # Service domain # SRV_DOMAIN=test5.dstack.phala.network @@ -60,7 +57,7 @@ PUBLIC_IP=$(curl -s4 ifconfig.me) # GATEWAY_APP_ID=31884c4b7775affe4c99735f6c2aff7d7bc6cfcd # Whether to use ACME staging (yes/no) -ACME_STAGING=yes +ACME_STAGING=no # Subnet index. 0~15 SUBNET_INDEX=0 @@ -72,10 +69,10 @@ SUBNET_INDEX=0 # BOOTNODE_URL=https://gateway.test2.dstack.phala.network:9202 # dstack OS image name -OS_IMAGE=dstack-0.5.0 +OS_IMAGE=dstack-0.5.5 # Set defaults for variables that might not be in .env -GIT_REV=HEAD +GATEWAY_IMAGE=dstacktee/dstack-gateway@sha256:a7b7e3144371b053ba21d6ac18141afd49e3cd767ca2715599aa0e2703b3a11a # Port configurations GATEWAY_RPC_ADDR=0.0.0.0:9202 @@ -84,6 +81,8 @@ GATEWAY_SERVING_ADDR=0.0.0.0:9204 GUEST_AGENT_ADDR=127.0.0.1:9206 WG_ADDR=0.0.0.0:9202 +CERTBOT_MAX_DNS_WAIT=5m + # The token used to launch the App APP_LAUNCH_TOKEN=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1) @@ -96,7 +95,6 @@ fi required_env_vars=( "VMM_RPC" "CF_API_TOKEN" - "CF_ZONE_ID" "SRV_DOMAIN" "PUBLIC_IP" "WG_ADDR" @@ -120,15 +118,13 @@ CLI="../../vmm/src/vmm-cli.py --url $VMM_RPC" WG_PORT=$(echo $WG_ADDR | cut -d':' -f2) COMPOSE_TMP=$(mktemp) -GIT_REV=$(git rev-parse $GIT_REV) - cp docker-compose.yaml "$COMPOSE_TMP" subvar() { sed -i "s|\${$1}|${!1}|g" "$COMPOSE_TMP" } -subvar GIT_REV +subvar GATEWAY_IMAGE subvar ACME_STAGING echo "Docker compose file:" @@ -137,7 +133,6 @@ cat "$COMPOSE_TMP" # Update .env file with current values cat <.app_env CF_API_TOKEN=$CF_API_TOKEN -CF_ZONE_ID=$CF_ZONE_ID SRV_DOMAIN=$SRV_DOMAIN WG_ENDPOINT=$PUBLIC_IP:$WG_PORT MY_URL=$MY_URL @@ -145,6 +140,7 @@ BOOTNODE_URL=$BOOTNODE_URL SUBNET_INDEX=$SUBNET_INDEX APP_LAUNCH_TOKEN=$APP_LAUNCH_TOKEN RPC_DOMAIN=$RPC_DOMAIN +CERTBOT_MAX_DNS_WAIT=$CERTBOT_MAX_DNS_WAIT EOF if [ -n "$APP_COMPOSE_FILE" ]; then @@ -178,7 +174,7 @@ EOF --no-instance-id \ --secure-time \ --prelaunch-script .prelaunch.sh \ - --output .app-compose.json + --output .app-compose.json > /dev/null fi # Set launch_token_hash in app-compose.json @@ -188,6 +184,9 @@ jq \ '.launch_token_hash = $token_hash' \ .app-compose.json.tmp > .app-compose.json +COMPOSE_HASH=$(sha256sum .app-compose.json | cut -d' ' -f1) +echo "Compose hash: 0x$COMPOSE_HASH" + # Remove the temporary file as it is no longer needed rm "$COMPOSE_TMP" diff --git a/gateway/dstack-app/docker-compose.yaml b/gateway/dstack-app/docker-compose.yaml 
index 40ec0d56..6fdc1d8b 100644 --- a/gateway/dstack-app/docker-compose.yaml +++ b/gateway/dstack-app/docker-compose.yaml @@ -4,7 +4,7 @@ services: gateway: - image: kvin/gateway@sha256:27fc6872836712c640d22397e4cf86c3e771ebed0116e075dca0eb170d9a9081 + image: ${GATEWAY_IMAGE} volumes: - /var/run/dstack.sock:/var/run/dstack.sock - /dstack:/dstack @@ -15,7 +15,6 @@ services: - WG_ENDPOINT=${WG_ENDPOINT} - SRV_DOMAIN=${SRV_DOMAIN} - CF_API_TOKEN=${CF_API_TOKEN} - - CF_ZONE_ID=${CF_ZONE_ID} - MY_URL=${MY_URL} - BOOTNODE_URL=${BOOTNODE_URL} - ACME_STAGING=${ACME_STAGING} diff --git a/gateway/gateway.toml b/gateway/gateway.toml index a89ff348..78446b0e 100644 --- a/gateway/gateway.toml +++ b/gateway/gateway.toml @@ -32,12 +32,12 @@ enabled = false workdir = "/etc/certbot" acme_url = "https://acme-staging-v02.api.letsencrypt.org/directory" cf_api_token = "" -cf_zone_id = "" auto_set_caa = true domain = "*.example.com" renew_interval = "1h" renew_before_expiration = "10d" renew_timeout = "120s" +max_dns_wait = "5m" [core.wg] public_key = "" diff --git a/gateway/rpc/build.rs b/gateway/rpc/build.rs index 77e6a9e8..fe19530a 100644 --- a/gateway/rpc/build.rs +++ b/gateway/rpc/build.rs @@ -2,9 +2,11 @@ // // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::expect_used)] + fn main() { prpc_build::configure() - .out_dir(std::env::var_os("OUT_DIR").unwrap()) + .out_dir(std::env::var_os("OUT_DIR").expect("OUT_DIR not set")) .mod_prefix("super::") .build_scale_ext(false) .disable_package_emission() diff --git a/gateway/rpc/src/generated.rs b/gateway/rpc/src/generated.rs index 4ef6ed31..6e700ea4 100644 --- a/gateway/rpc/src/generated.rs +++ b/gateway/rpc/src/generated.rs @@ -1,3 +1,6 @@ #![allow(async_fn_in_trait)] +pub const FILE_DESCRIPTOR_SET: &[u8] = + include_bytes!(concat!(env!("OUT_DIR"), "/file_descriptor_set.bin")); + include!(concat!(env!("OUT_DIR"), "/gateway.rs")); diff --git a/gateway/src/config.rs b/gateway/src/config.rs index 3a3d88db..4809aef8 100644 --- a/gateway/src/config.rs +++ b/gateway/src/config.rs @@ -193,8 +193,6 @@ pub struct CertbotConfig { pub acme_url: String, /// Cloudflare API token pub cf_api_token: String, - /// Cloudflare zone ID - pub cf_zone_id: String, /// Auto set CAA record pub auto_set_caa: bool, /// Domain to issue certificates for @@ -208,6 +206,9 @@ pub struct CertbotConfig { /// Renew timeout #[serde(with = "serde_duration")] pub renew_timeout: Duration, + /// Maximum time to wait for DNS propagation + #[serde(with = "serde_duration")] + pub max_dns_wait: Duration, } impl CertbotConfig { @@ -221,12 +222,12 @@ impl CertbotConfig { .credentials_file(workdir.account_credentials_path()) .acme_url(self.acme_url.clone()) .cert_subject_alt_names(vec![self.domain.clone()]) - .cf_zone_id(self.cf_zone_id.clone()) .cf_api_token(self.cf_api_token.clone()) .renew_interval(self.renew_interval) .renew_timeout(self.renew_timeout) .renew_expires_in(self.renew_before_expiration) .auto_set_caa(self.auto_set_caa) + .max_dns_wait(self.max_dns_wait) .build() } diff --git a/gateway/src/main.rs b/gateway/src/main.rs index 61d25632..d4544ffd 100644 --- a/gateway/src/main.rs +++ b/gateway/src/main.rs @@ -167,7 +167,8 @@ async fn main() -> Result<()> { info!("Starting background tasks"); state.start_bg_tasks().await?; state.lock().reconfigure()?; - proxy::start(proxy_config, state.clone()); + + proxy::start(proxy_config, state.clone()).context("failed to start the proxy")?; let admin_figment = Figment::new() diff --git a/gateway/src/main_service.rs b/gateway/src/main_service.rs index 
9162ff40..c85d501c 100644 --- a/gateway/src/main_service.rs +++ b/gateway/src/main_service.rs @@ -23,6 +23,7 @@ use dstack_gateway_rpc::{ use dstack_guest_agent_rpc::{dstack_guest_client::DstackGuestClient, RawQuoteArgs}; use fs_err as fs; use http_client::prpc::PrpcClient; +use or_panic::ResultOrPanic; use ra_rpc::{CallContext, RpcCall, VerifiedAttestation}; use ra_tls::attestation::QuoteContentType; use rand::seq::IteratorRandom; @@ -100,7 +101,7 @@ impl Proxy { impl ProxyInner { pub(crate) fn lock(&self) -> MutexGuard { - self.state.lock().expect("Failed to lock AppState") + self.state.lock().or_panic("Failed to lock AppState") } pub async fn new(config: Config, my_app_id: Option>) -> Result { diff --git a/gateway/src/proxy.rs b/gateway/src/proxy.rs index 75cc286e..73b947cc 100644 --- a/gateway/src/proxy.rs +++ b/gateway/src/proxy.rs @@ -166,7 +166,7 @@ pub async fn proxy_main(config: &ProxyConfig, proxy: Proxy) -> Result<()> { .enable_all() .worker_threads(config.workers) .build() - .expect("Failed to build Tokio runtime"); + .context("Failed to build Tokio runtime")?; let dotted_base_domain = { let base_domain = config.base_domain.as_str(); @@ -232,16 +232,16 @@ fn next_connection_id() -> usize { COUNTER.fetch_add(1, Ordering::Relaxed) } -pub fn start(config: ProxyConfig, app_state: Proxy) { +pub fn start(config: ProxyConfig, app_state: Proxy) -> Result<()> { + // Create a new single-threaded runtime + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .context("Failed to build Tokio runtime")?; + std::thread::Builder::new() .name("proxy-main".to_string()) .spawn(move || { - // Create a new single-threaded runtime - let rt = tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .expect("Failed to build Tokio runtime"); - // Run the proxy_main function in this runtime if let Err(err) = rt.block_on(proxy_main(&config, app_state)) { error!( @@ -250,7 +250,8 @@ pub fn start(config: ProxyConfig, app_state: Proxy) { ); } }) - .expect("Failed to spawn proxy-main thread"); + .context("Failed to spawn proxy-main thread")?; + Ok(()) } #[cfg(test)] diff --git a/gateway/src/proxy/tls_passthough.rs b/gateway/src/proxy/tls_passthough.rs index 1131eb01..e2cea9d0 100644 --- a/gateway/src/proxy/tls_passthough.rs +++ b/gateway/src/proxy/tls_passthough.rs @@ -150,7 +150,7 @@ mod tests { async fn test_resolve_app_address() { let app_addr = resolve_app_address( "_dstack-app-address", - "3327603e03f5bd1f830812ca4a789277fc31f577.app.kvin.wang", + "3327603e03f5bd1f830812ca4a789277fc31f577.app.dstack.org", false, ) .await diff --git a/gateway/src/proxy/tls_terminate.rs b/gateway/src/proxy/tls_terminate.rs index 9c159492..ad19ebf4 100644 --- a/gateway/src/proxy/tls_terminate.rs +++ b/gateway/src/proxy/tls_terminate.rs @@ -24,6 +24,8 @@ use tokio::time::timeout; use tokio_rustls::{rustls, server::TlsStream, TlsAcceptor}; use tracing::{debug, info}; +use or_panic::ResultOrPanic; + use crate::config::{CryptoProvider, ProxyConfig, TlsVersion}; use crate::main_service::Proxy; @@ -278,12 +280,12 @@ impl Proxy { let acceptor = if h2 { self.h2_acceptor .read() - .expect("Failed to acquire read lock for TLS acceptor") + .or_panic("lock should never fail") .clone() } else { self.acceptor .read() - .expect("Failed to acquire read lock for TLS acceptor") + .or_panic("lock should never fail") .clone() }; let tls_stream = timeout( diff --git a/guest-agent/Cargo.toml b/guest-agent/Cargo.toml index d17ec3b5..b8e0c881 100644 --- a/guest-agent/Cargo.toml +++ 
b/guest-agent/Cargo.toml @@ -47,3 +47,7 @@ sha3.workspace = true strip-ansi-escapes.workspace = true cert-client.workspace = true ring.workspace = true +ed25519-dalek.workspace = true +tempfile.workspace = true +rand.workspace = true +or-panic.workspace = true diff --git a/guest-agent/rpc/build.rs b/guest-agent/rpc/build.rs index 77e6a9e8..fe19530a 100644 --- a/guest-agent/rpc/build.rs +++ b/guest-agent/rpc/build.rs @@ -2,9 +2,11 @@ // // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::expect_used)] + fn main() { prpc_build::configure() - .out_dir(std::env::var_os("OUT_DIR").unwrap()) + .out_dir(std::env::var_os("OUT_DIR").expect("OUT_DIR not set")) .mod_prefix("super::") .build_scale_ext(false) .disable_package_emission() diff --git a/guest-agent/rpc/proto/agent_rpc.proto b/guest-agent/rpc/proto/agent_rpc.proto index 15606dbc..4d728dc3 100644 --- a/guest-agent/rpc/proto/agent_rpc.proto +++ b/guest-agent/rpc/proto/agent_rpc.proto @@ -37,7 +37,7 @@ service DstackGuest { // Returns the derived key along with its TLS certificate chain. rpc GetTlsKey(GetTlsKeyArgs) returns (GetTlsKeyResponse) {} - // Derives a new ECDSA key with k256 EC curve. + // Derives a new key. rpc GetKey(GetKeyArgs) returns (GetKeyResponse) {} // Generates a TDX quote with given report data. @@ -48,6 +48,12 @@ service DstackGuest { // Get app info rpc Info(google.protobuf.Empty) returns (AppInfo) {} + + // Sign a payload + rpc Sign(SignRequest) returns (SignResponse) {} + + // Verify a signature + rpc Verify(VerifyRequest) returns (VerifyResponse) {} } // The request to derive a key @@ -91,12 +97,14 @@ message GetTlsKeyResponse { repeated string certificate_chain = 2; } -// The request to derive a new ECDSA key with k256 EC curve +// The request to derive a new key message GetKeyArgs { // Path to the key to derive string path = 1; // Purpose of the key string purpose = 2; + // Algorithm of the key. Either `secp256k1` or `ed25519`. 
Defaults to `secp256k1` + string algorithm = 3; } // The response to a DeriveK256Key request @@ -109,9 +117,11 @@ message DeriveK256KeyResponse { // The response to a GetEthKey request message GetKeyResponse { - // Derived k256 key + // Derived key bytes key = 1; - // Derived k256 signature chain + // The signature chain consists of the following signatures: + // [0] - the k256 signature of the derived pK signed by the app root key + // [1] - the k256 signature of the app root pK signed by the KMS root key repeated bytes signature_chain = 2; } @@ -216,4 +226,38 @@ service Worker { rpc Info(google.protobuf.Empty) returns (AppInfo) {} // Get the guest agent version rpc Version(google.protobuf.Empty) returns (WorkerVersion) {} + // Get attestation + rpc GetAttestationForAppKey(GetAttestationForAppKeyRequest) returns (GetQuoteResponse) {} +} + +message SignRequest { + string algorithm = 1; + bytes data = 2; +} + +message SignResponse { + // the signature of the data + bytes signature = 1; + // The signature chain consists of the following signatures: + // [0] - the signature of the data + // [1] - the k256 signature of the message signing pubkey signed by the app root key + // [2] - the k256 signature of the app root pubkey signed by the KMS root key + repeated bytes signature_chain = 2; + // The public key signing the data + bytes public_key = 3; +} + +message VerifyRequest { + string algorithm = 1; + bytes data = 2; + bytes signature = 3; + bytes public_key = 4; +} + +message VerifyResponse { + bool valid = 1; +} + +message GetAttestationForAppKeyRequest { + string algorithm = 1; } diff --git a/guest-agent/rpc/src/generated.rs b/guest-agent/rpc/src/generated.rs index fc7678f7..28748082 100644 --- a/guest-agent/rpc/src/generated.rs +++ b/guest-agent/rpc/src/generated.rs @@ -1,3 +1,6 @@ #![allow(async_fn_in_trait)] +pub const FILE_DESCRIPTOR_SET: &[u8] = + include_bytes!(concat!(env!("OUT_DIR"), "/file_descriptor_set.bin")); + include!(concat!(env!("OUT_DIR"), "/dstack_guest.rs")); diff --git a/guest-agent/src/guest_api_service.rs b/guest-agent/src/guest_api_service.rs index cd5272c5..1376cb25 100644 --- a/guest-agent/src/guest_api_service.rs +++ b/guest-agent/src/guest_api_service.rs @@ -147,8 +147,8 @@ fn get_interfaces() -> Vec { sysinfo::Networks::new_with_refreshed_list() .into_iter() .filter_map(|(interface_name, network)| { - if !(interface_name == "wg0" || interface_name.starts_with("enp")) { - // We only get wg0 and enp interfaces. + if !(interface_name == "dstack-wg0" || interface_name.starts_with("enp")) { + // We only get dstack-wg0 and enp interfaces. // Docker bridge is not included due to privacy concerns. 
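+            // For example, `enp0s5` (a predictable-name virtio/PCI NIC) and the
+            // dstack-wg0 tunnel pass this filter, while `lo`, `docker0`, and
+            // `br-*` bridge interfaces are all dropped.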
return None; } diff --git a/guest-agent/src/rpc_service.rs b/guest-agent/src/rpc_service.rs index b3122902..16fe61a4 100644 --- a/guest-agent/src/rpc_service.rs +++ b/guest-agent/src/rpc_service.rs @@ -2,21 +2,28 @@ // // SPDX-License-Identifier: Apache-2.0 -use std::sync::Arc; +use std::sync::{Arc, RwLock}; use anyhow::{Context, Result}; +use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine as _}; use cert_client::CertRequestClient; use dstack_guest_agent_rpc::{ dstack_guest_server::{DstackGuestRpc, DstackGuestServer}, tappd_server::{TappdRpc, TappdServer}, worker_server::{WorkerRpc, WorkerServer}, - AppInfo, DeriveK256KeyResponse, DeriveKeyArgs, EmitEventArgs, GetKeyArgs, GetKeyResponse, - GetQuoteResponse, GetTlsKeyArgs, GetTlsKeyResponse, RawQuoteArgs, TdxQuoteArgs, - TdxQuoteResponse, WorkerVersion, + AppInfo, DeriveK256KeyResponse, DeriveKeyArgs, EmitEventArgs, GetAttestationForAppKeyRequest, + GetKeyArgs, GetKeyResponse, GetQuoteResponse, GetTlsKeyArgs, GetTlsKeyResponse, RawQuoteArgs, + SignRequest, SignResponse, TdxQuoteArgs, TdxQuoteResponse, VerifyRequest, VerifyResponse, + WorkerVersion, }; use dstack_types::{AppKeys, SysConfig}; +use ed25519_dalek::ed25519::signature::hazmat::{PrehashSigner, PrehashVerifier}; +use ed25519_dalek::{ + Signer as Ed25519Signer, SigningKey as Ed25519SigningKey, Verifier as Ed25519Verifier, +}; use fs_err as fs; use k256::ecdsa::SigningKey; +use or_panic::ResultOrPanic; use ra_rpc::{Attestation, CallContext, RpcCall}; use ra_tls::{ attestation::{QuoteContentType, DEFAULT_HASH_ALGORITHM}, @@ -28,6 +35,7 @@ use ring::rand::{SecureRandom, SystemRandom}; use serde_json::json; use sha3::{Digest, Keccak256}; use tdx_attest::eventlog::read_event_logs; +use tracing::error; use crate::config::Config; @@ -41,23 +49,14 @@ struct AppStateInner { keys: AppKeys, vm_config: String, cert_client: CertRequestClient, - demo_cert: String, + demo_cert: RwLock<String>, } -impl AppState { - pub async fn new(config: Config) -> Result<Self> { - let keys: AppKeys = serde_json::from_str(&fs::read_to_string(&config.keys_file)?) - .context("Failed to parse app keys")?; - let sys_config: SysConfig = - serde_json::from_str(&fs::read_to_string(&config.sys_config_file)?) - .context("Failed to parse VM config")?; - let vm_config = sys_config.vm_config; - let cert_client = - CertRequestClient::create(&keys, config.pccs_url.as_deref(), vm_config.clone()) - .await - .context("Failed to create cert signer")?; +impl AppStateInner { + async fn request_demo_cert(&self) -> Result<String> { let key = KeyPair::generate().context("Failed to generate demo key")?; - let demo_cert = cert_client + let demo_cert = self + .cert_client .request_cert( &key, CertConfig { @@ -68,20 +67,60 @@ usage_client_auth: true, ext_quote: true, }, - config.simulator.enabled, + self.config.simulator.enabled, ) .await .context("Failed to get app cert")? .join("\n"); - Ok(Self { + Ok(demo_cert) + } +} + +impl AppState { + fn maybe_request_demo_cert(&self) { + let state = self.inner.clone(); + if !state + .demo_cert + .read() + .or_panic("lock should never fail") + .is_empty() + { + return; + } + tokio::spawn(async move { + match state.request_demo_cert().await { + Ok(demo_cert) => { + *state.demo_cert.write().or_panic("lock should never fail") = demo_cert; + } + Err(e) => { + error!("Failed to request demo cert: {e}"); + } + } + }); + } + + pub async fn new(config: Config) -> Result<Self> { + let keys: AppKeys = serde_json::from_str(&fs::read_to_string(&config.keys_file)?) 
+ .context("Failed to parse app keys")?; + let sys_config: SysConfig = + serde_json::from_str(&fs::read_to_string(&config.sys_config_file)?) + .context("Failed to parse VM config")?; + let vm_config = sys_config.vm_config; + let cert_client = + CertRequestClient::create(&keys, config.pccs_url.as_deref(), vm_config.clone()) + .await + .context("Failed to create cert signer")?; + let me = Self { inner: Arc::new(AppStateInner { config, keys, cert_client, - demo_cert, + demo_cert: RwLock::new(String::new()), vm_config, }), - }) + }; + me.maybe_request_demo_cert(); + Ok(me) } pub fn config(&self) -> &Config { @@ -136,6 +175,7 @@ pub async fn get_info(state: &AppState, external: bool) -> Result { } else { state.inner.vm_config.clone() }; + state.maybe_request_demo_cert(); Ok(AppInfo { app_name: state.config().app_compose.name.clone(), app_id: app_info.app_id, @@ -145,7 +185,12 @@ pub async fn get_info(state: &AppState, external: bool) -> Result { os_image_hash: app_info.os_image_hash.clone(), key_provider_info: String::from_utf8(app_info.key_provider_info).unwrap_or_default(), compose_hash: app_info.compose_hash.clone(), - app_cert: state.inner.demo_cert.clone(), + app_cert: state + .inner + .demo_cert + .read() + .or_panic("lock should not fail") + .clone(), tcb_info, vm_config, }) @@ -182,16 +227,33 @@ impl DstackGuestRpc for InternalRpcHandler { async fn get_key(self, request: GetKeyArgs) -> Result { let k256_app_key = &self.state.inner.keys.k256_key; - let derived_k256_key = derive_ecdsa_key(k256_app_key, &[request.path.as_bytes()], 32) - .context("Failed to derive k256 key")?; - let derived_k256_key = - SigningKey::from_slice(&derived_k256_key).context("Failed to parse k256 key")?; - let derived_k256_pubkey = derived_k256_key.verifying_key(); - let msg_to_sign = format!( - "{}:{}", - request.purpose, - hex::encode(derived_k256_pubkey.to_sec1_bytes()) - ); + + let (key, pubkey_hex) = match request.algorithm.as_str() { + "ed25519" => { + let derived_key = derive_ecdsa_key(k256_app_key, &[request.path.as_bytes()], 32) + .context("Failed to derive ed25519 key")?; + let signing_key = Ed25519SigningKey::from_bytes( + &derived_key + .as_slice() + .try_into() + .or(Err(anyhow::anyhow!("Invalid key length")))?, + ); + let pubkey_hex = hex::encode(signing_key.verifying_key().as_bytes()); + (derived_key, pubkey_hex) + } + "secp256k1" | "secp256k1_prehashed" | "" => { + let derived_key = derive_ecdsa_key(k256_app_key, &[request.path.as_bytes()], 32) + .context("Failed to derive k256 key")?; + + let signing_key = + SigningKey::from_slice(&derived_key).context("Failed to parse k256 key")?; + let pubkey_hex = hex::encode(signing_key.verifying_key().to_sec1_bytes()); + (derived_key, pubkey_hex) + } + _ => return Err(anyhow::anyhow!("Unsupported algorithm")), + }; + + let msg_to_sign = format!("{}:{}", request.purpose, pubkey_hex); let app_signing_key = SigningKey::from_slice(k256_app_key).context("Failed to parse app k256 key")?; let digest = Keccak256::new_with_prefix(msg_to_sign); @@ -200,7 +262,7 @@ impl DstackGuestRpc for InternalRpcHandler { signature.push(recid.to_byte()); Ok(GetKeyResponse { - key: derived_k256_key.to_bytes().to_vec(), + key, signature_chain: vec![signature, self.state.inner.keys.k256_signature.clone()], }) } @@ -246,6 +308,92 @@ impl DstackGuestRpc for InternalRpcHandler { async fn info(self) -> Result { get_info(&self.state, false).await } + + async fn sign(self, request: SignRequest) -> Result { + let key_response = self + .get_key(GetKeyArgs { + path: "vms".to_string(), + 
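+                // Note: sign() always derives from the fixed path "vms" with
+                // purpose "signing" below, so an app signs with one stable key
+                // pair per algorithm rather than a fresh key per request.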
purpose: "signing".to_string(), + algorithm: request.algorithm.clone(), + }) + .await?; + let (signature, public_key) = match request.algorithm.as_str() { + "ed25519" => { + let key_bytes: [u8; 32] = key_response + .key + .try_into() + .ok() + .context("Key is incorrect")?; + let signing_key = Ed25519SigningKey::from_bytes(&key_bytes); + let signature = signing_key.sign(&request.data); + let public_key = signing_key.verifying_key().to_bytes().to_vec(); + (signature.to_bytes().to_vec(), public_key) + } + "secp256k1" => { + let signing_key = SigningKey::from_slice(&key_response.key) + .context("Failed to parse secp256k1 key")?; + let signature: k256::ecdsa::Signature = signing_key.sign(&request.data); + let public_key = signing_key.verifying_key().to_sec1_bytes().to_vec(); + (signature.to_bytes().to_vec(), public_key) + } + "secp256k1_prehashed" => { + if request.data.len() != 32 { + return Err(anyhow::anyhow!( + "Pre-hashed signing requires a 32-byte digest, but received {} bytes", + request.data.len() + )); + } + let signing_key = SigningKey::from_slice(&key_response.key) + .context("Failed to parse secp256k1 key")?; + let signature: k256::ecdsa::Signature = signing_key.sign_prehash(&request.data)?; + let public_key = signing_key.verifying_key().to_sec1_bytes().to_vec(); + (signature.to_bytes().to_vec(), public_key) + } + _ => return Err(anyhow::anyhow!("Unsupported algorithm")), + }; + Ok(SignResponse { + signature: signature.clone(), + signature_chain: vec![ + signature, + key_response.signature_chain[0].clone(), + key_response.signature_chain[1].clone(), + ], + public_key, + }) + } + + async fn verify(self, request: VerifyRequest) -> Result { + let valid = match request.algorithm.as_str() { + "ed25519" => { + let verifying_key = ed25519_dalek::VerifyingKey::from_bytes( + &request + .public_key + .as_slice() + .try_into() + .ok() + .context("invalid public key")?, + )?; + let signature = ed25519_dalek::Signature::from_slice(&request.signature)?; + verifying_key.verify(&request.data, &signature).is_ok() + } + "secp256k1" => { + let verifying_key = + k256::ecdsa::VerifyingKey::from_sec1_bytes(&request.public_key)?; + let signature = k256::ecdsa::Signature::from_slice(&request.signature)?; + verifying_key.verify(&request.data, &signature).is_ok() + } + "secp256k1_prehashed" => { + let verifying_key = + k256::ecdsa::VerifyingKey::from_sec1_bytes(&request.public_key)?; + let signature = k256::ecdsa::Signature::from_slice(&request.signature)?; + verifying_key + .verify_prehash(&request.data, &signature) + .is_ok() + } + _ => return Err(anyhow::anyhow!("Unsupported algorithm")), + }; + Ok(VerifyResponse { valid }) + } } fn simulate_quote( @@ -417,6 +565,94 @@ impl WorkerRpc for ExternalRpcHandler { rev: super::GIT_REV.to_string(), }) } + + async fn get_attestation_for_app_key( + self, + request: GetAttestationForAppKeyRequest, + ) -> Result { + let key_response = InternalRpcHandler { + state: self.state.clone(), + } + .get_key(GetKeyArgs { + path: "vms".to_string(), + purpose: "signing".to_string(), + algorithm: request.algorithm.clone(), + }) + .await?; + + match request.algorithm.as_str() { + "ed25519" => { + let key_bytes: [u8; 32] = key_response + .key + .try_into() + .ok() + .context("Key is incorrect")?; + let ed25519_key = Ed25519SigningKey::from_bytes(&key_bytes); + let ed25519_pubkey = ed25519_key.verifying_key().to_bytes(); + + let mut ed25519_report_data = [0u8; 64]; + let ed25519_b64 = URL_SAFE_NO_PAD.encode(ed25519_pubkey); + let ed25519_report_string = 
format!("dip1::ed25519-pk:{}", ed25519_b64); + let ed_bytes = ed25519_report_string.as_bytes(); + ed25519_report_data[..ed_bytes.len()].copy_from_slice(ed_bytes); + + if self.state.config().simulator.enabled { + Ok(simulate_quote( + self.state.config(), + ed25519_report_data, + &self.state.inner.vm_config, + )?) + } else { + let ed25519_quote = tdx_attest::get_quote(&ed25519_report_data, None) + .context("Failed to get ed25519 quote")? + .1; + let event_log = serde_json::to_string( + &read_event_logs().context("Failed to read event log")?, + )?; + Ok(GetQuoteResponse { + quote: ed25519_quote, + event_log: event_log.clone(), + report_data: ed25519_report_data.to_vec(), + vm_config: self.state.inner.vm_config.clone(), + }) + } + } + "secp256k1" | "secp256k1_prehashed" => { + let secp256k1_key = SigningKey::from_slice(&key_response.key) + .context("Failed to parse secp256k1 key")?; + let secp256k1_pubkey = secp256k1_key.verifying_key().to_sec1_bytes(); + + let mut secp256k1_report_data = [0u8; 64]; + let secp256k1_b64 = URL_SAFE_NO_PAD.encode(secp256k1_pubkey); + let secp256k1_report_string = format!("dip1::secp256k1c-pk:{}", secp256k1_b64); + let secp_bytes = secp256k1_report_string.as_bytes(); + secp256k1_report_data[..secp_bytes.len()].copy_from_slice(secp_bytes); + + if self.state.config().simulator.enabled { + Ok(simulate_quote( + self.state.config(), + secp256k1_report_data, + &self.state.inner.vm_config, + )?) + } else { + let secp256k1_quote = tdx_attest::get_quote(&secp256k1_report_data, None) + .context("Failed to get secp256k1 quote")? + .1; + let event_log = serde_json::to_string( + &read_event_logs().context("Failed to read event log")?, + )?; + + Ok(GetQuoteResponse { + quote: secp256k1_quote, + event_log, + report_data: secp256k1_report_data.to_vec(), + vm_config: self.state.inner.vm_config.clone(), + }) + } + } + _ => Err(anyhow::anyhow!("Unsupported algorithm")), + } + } } impl RpcCall for ExternalRpcHandler { @@ -428,3 +664,408 @@ impl RpcCall for ExternalRpcHandler { }) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::{AppComposeWrapper, Config, Simulator}; + use dstack_guest_agent_rpc::{GetAttestationForAppKeyRequest, SignRequest}; + use dstack_types::{AppCompose, AppKeys, KeyProvider}; + use ed25519_dalek::ed25519::signature::hazmat::PrehashVerifier; + use ed25519_dalek::{ + Signature as Ed25519Signature, Verifier, VerifyingKey as Ed25519VerifyingKey, + }; + use k256::ecdsa::{Signature as K256Signature, VerifyingKey}; + use sha2::Sha256; + use std::collections::HashSet; + use std::convert::TryFrom; + use std::io::Write; + + fn extract_pubkey_from_report_data(report_data: &[u8], prefix: &str) -> Result> { + let end = report_data + .iter() + .position(|&b| b == 0) + .unwrap_or(report_data.len()); + let report_str = std::str::from_utf8(&report_data[..end])?; + + if let Some(base64_pk) = report_str.strip_prefix(prefix) { + URL_SAFE_NO_PAD + .decode(base64_pk) + .context("Failed to decode base64") + } else { + Err(anyhow::anyhow!("Prefix not found in report data")) + } + } + + async fn setup_test_state() -> (AppState, tempfile::NamedTempFile, tempfile::NamedTempFile) { + let mut dummy_quote_file = tempfile::NamedTempFile::new().unwrap(); + let dummy_event_log_file = tempfile::NamedTempFile::new().unwrap(); + + let dummy_quote = vec![b'0'; 10020]; + dummy_quote_file.write_all(&dummy_quote).unwrap(); + dummy_quote_file.flush().unwrap(); + + let dummy_simulator = Simulator { + enabled: true, + quote_file: dummy_quote_file.path().to_str().unwrap().to_string(), 
+ event_log_file: dummy_event_log_file.path().to_str().unwrap().to_string(), + }; + + let dummy_appcompose = AppCompose { + manifest_version: 0, + name: String::new(), + features: Vec::new(), + runner: String::new(), + docker_compose_file: None, + public_logs: false, + public_sysinfo: false, + public_tcbinfo: false, + kms_enabled: false, + gateway_enabled: false, + local_key_provider_enabled: false, + key_provider: None, + key_provider_id: Vec::new(), + allowed_envs: Vec::new(), + no_instance_id: false, + secure_time: false, + storage_fs: None, + swap_size: 0, + }; + + let dummy_appcompose_wrapper = AppComposeWrapper { + app_compose: dummy_appcompose, + raw: String::new(), + }; + + let dummy_config = Config { + keys_file: String::new(), + app_compose: dummy_appcompose_wrapper, + sys_config_file: String::new().into(), + pccs_url: None, + simulator: dummy_simulator, + data_disks: HashSet::new(), + }; + + const DUMMY_PEM_KEY: &str = r#"-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCSeV81CKVqILf/ +bk+OarAkZeph4ggb1d9Qt4bzJjVNsowpc/iWbacO6dHvrjXrqNdK7WEHDuxYlQCS +xppINUCKyCoelAt2OJuUonLHtT3s41pGM0k69fcUb420fhKqNAHIaCCc38vOFDZ7 +aqLUGNDooc7bXgZxHUJHmq9QneeB74Ia+6TzA2KKXMu4ixvZWvrgRt64XKyL3+4J +sQ6QqSgopGeyTv0blxFxF6X8UTUO/nZPnqf7BN9GnkJtHglb0TLI1H7BYvFmnpjT +8yfjmdbRxvnczvRJuKCzTq9ePEvhRrwAzqQk3Ide0/KWdIiu2nrrfO/Imvia1DNp +GgJsV0L7AgMBAAECggEARUbTcV1kAwRzkgOF7CloouZzCxWhWSz4AJC06oadOmDi +qu53WgqFs2eCjBZ82TdTkFQiiniT7zeV/FWjfdh17M3MIgdKPoF6kDufBvahUcuc +FEzIa3MPB+LVBlOEl2yelT8ugZPVrGPh+tBOL/uGvyhckmNvr4szoHM4TOxKJSk/ +njFbJcoX3UmampyxSa6MMSGaxM2pdziTujoj5+sJ/a0x/wwIih/XEZSWgLzDjGZS +qaKmldjD0SRJQrZ1LTjjguKtkbOwKa2dtNOoHBkAtHyI+vWOLXNzZisXMazpmHNT +mE2X6oQFcAXI7HHuHzkLaLpEdqlHA16nwFPNF0LzAQKBgQDLaE1eZnutK+nxHpUq +cb3vMGN8dPxCrQJz/fvEb6lP93RCWBZbGen2gLGvFKyFwPcD/OR0HfBnFRjHIy25 +V4ta+iubQM3GFO2FOp9SwequCPY2H6YXah4LyXrCIw4Pv3x/I2bpbLOlltmMT5PS +qPV86dH546kxOsJS6VhMCcQXAQKBgQC4WJu9VTBPfKf8JL8f7b/K0+MBN3OBkhsN +V6nCR8JizAa1hxmxpMaeq7PqlGpJhQKinBblR314Cpqqrt7AL005gCxD0ddBM9Ib +/7HafmLrAuhEDxnYx/QAyprTOsqjLS8Vd+eaA0nGF68R1LLHLxfXfhiuAjMwScCs +afCrbdG1+wKBgAyZ3ZEnkCneOpPxbRRAD6AtwzwGk0oeJbTB20MEF90YW19wzZG/ +PTtEJb3O7hErLyJUHGMFJ8t7BxnvF/oPblaogOMRVK4cxconI4+g68T0USxxMXzp +2gqo5K36NfjLyA6oRsvXLBnqCngixembBfpDEfsFG4otNbSlOA8d28QBAoGBAKdG +YCtxPaEi8BtwDK2gQsR9eCMGeh08wqdcwIG2M8EKeZwGt13mswQPsfZOLhQASd/b +2zq5oDRpCueOPjoNsflXQNNZegWETEdzwaMNxByUSsZXHZED/3koX00EsBNZULwe +TV4HVc4Wd5mqc38iUHQNy78559ENW3QXvXcQ85Y5AoGBAIQlSbNRupo/5ATwJW0e +bggPyacIhS9GrsgP9qz9p8xxNSfcyAFRGiXnlGoiRbNchbUiZPRjoJ08lOHGxVQw +O17ivI85heZnG+i5Yz0ZolMd8fbc4h78oA9FnJQJV5AeTDqTxf528A2jyWCAmu11 +Sv2zO+vcYHN7bT2UTCEWkeAw +-----END PRIVATE KEY----- +"#; + + const DUMMY_PEM_CERT: &str = r#"-----BEGIN CERTIFICATE----- +MIIDCTCCAfGgAwIBAgIUYRX7SNHsL6EGSy0ACQzjX4cfaw0wDQYJKoZIhvcNAQEL +BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTI1MTAwOTEyNDMyN1oXDTI2MTAw +OTEyNDMyN1owFDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAknlfNQilaiC3/25PjmqwJGXqYeIIG9XfULeG8yY1TbKM +KXP4lm2nDunR764166jXSu1hBw7sWJUAksaaSDVAisgqHpQLdjiblKJyx7U97ONa +RjNJOvX3FG+NtH4SqjQByGggnN/LzhQ2e2qi1BjQ6KHO214GcR1CR5qvUJ3nge+C +Gvuk8wNiilzLuIsb2Vr64EbeuFysi9/uCbEOkKkoKKRnsk79G5cRcRel/FE1Dv52 +T56n+wTfRp5CbR4JW9EyyNR+wWLxZp6Y0/Mn45nW0cb53M70Sbigs06vXjxL4Ua8 +AM6kJNyHXtPylnSIrtp663zvyJr4mtQzaRoCbFdC+wIDAQABo1MwUTAdBgNVHQ4E +FgQUsnBjoCWFH3il0MvjO9p0o/vcACgwHwYDVR0jBBgwFoAUsnBjoCWFH3il0Mvj +O9p0o/vcACgwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAj9rI +cHDTj9LhD2Nca/Mj2dNwUa1Fq81I5EF3GWi6mosTT4hfQupUC1i/6UE6ubLHRUGr 
+J3JnHBG8hUCddx5VxLncDmYP/4LHVEue/XdCURgY+K2WxQnUPDzZV2mXJXUzp8si +6xzFyiPyf4qsQaoRQnpOmyUXvBwtdf3M28EA/pTBBDZ4pZJ1QaSTlT7fpDgK2e6L +arBh7HebdS9UBaWLtYBMsRWRK5qpOQnLiy8H6J93/W6i4X3DSxeZXeYiMSO/jsJ8 +5XxL9zqOVjsw9Bxr79zCe7JF6fp6r3miUndMHQch/WXOY07lxH00cEqYo+2/Vk5D +pNs85uhOZE8z2jr8Pg== +-----END CERTIFICATE----- +"#; + + const DUMMY_K256_KEY: [u8; 32] = [ + 0x1A, 0x2B, 0x3C, 0x4D, 0x5E, 0x6F, 0x7A, 0x8B, 0x9C, 0x0D, 0x1E, 0x2F, 0x3A, 0x4B, + 0x5C, 0x6D, 0x7E, 0x8F, 0x9A, 0x0B, 0x1C, 0x2D, 0x3E, 0x4F, 0x5A, 0x6B, 0x7C, 0x8D, + 0x9E, 0x0F, 0x1A, 0x2B, + ]; + + let dummy_keys = AppKeys { + disk_crypt_key: Vec::new(), + env_crypt_key: Vec::new(), + k256_key: DUMMY_K256_KEY.to_vec(), + k256_signature: Vec::new(), + gateway_app_id: String::new(), + ca_cert: DUMMY_PEM_CERT.to_string(), + key_provider: KeyProvider::None { + key: DUMMY_PEM_KEY.to_string(), + }, + }; + + let dummy_cert_client = CertRequestClient::create(&dummy_keys, None, String::new()) + .await + .expect("Failed to create CertRequestClient"); + + let inner = AppStateInner { + config: dummy_config, + keys: dummy_keys, + vm_config: String::new(), + cert_client: dummy_cert_client, + demo_cert: RwLock::new(String::new()), + }; + + ( + AppState { + inner: Arc::new(inner), + }, + dummy_quote_file, + dummy_event_log_file, + ) + } + + #[tokio::test] + async fn test_verify_ed25519_success() { + let (state, _quote_file, _log_file) = setup_test_state().await; + let handler = InternalRpcHandler { + state: state.clone(), + }; + let data_to_sign = b"test message for ed25519"; + let sign_request = SignRequest { + algorithm: "ed25519".to_string(), + data: data_to_sign.to_vec(), + }; + + let sign_response = handler.sign(sign_request).await.unwrap(); + + let verify_request = VerifyRequest { + algorithm: "ed25519".to_string(), + data: data_to_sign.to_vec(), + signature: sign_response.signature, + public_key: sign_response.public_key, + }; + let handler = InternalRpcHandler { + state: state.clone(), + }; + let verify_response = handler.verify(verify_request).await.unwrap(); + assert!(verify_response.valid); + } + + #[tokio::test] + async fn test_verify_secp256k1_success() { + let (state, _quote_file, _log_file) = setup_test_state().await; + let handler = InternalRpcHandler { + state: state.clone(), + }; + let data_to_sign = b"test message for secp256k1"; + let sign_request = SignRequest { + algorithm: "secp256k1".to_string(), + data: data_to_sign.to_vec(), + }; + + let sign_response = handler.sign(sign_request).await.unwrap(); + + let verify_request = VerifyRequest { + algorithm: "secp256k1".to_string(), + data: data_to_sign.to_vec(), + signature: sign_response.signature, + public_key: sign_response.public_key, + }; + let handler = InternalRpcHandler { + state: state.clone(), + }; + let verify_response = handler.verify(verify_request).await.unwrap(); + assert!(verify_response.valid); + } + + #[tokio::test] + async fn test_sign_ed25519_success() { + let (state, _quote_file, _log_file) = setup_test_state().await; + let handler = InternalRpcHandler { + state: state.clone(), + }; + let data_to_sign = b"test message for ed25519"; + let request = SignRequest { + algorithm: "ed25519".to_string(), + data: data_to_sign.to_vec(), + }; + + let response = handler.sign(request).await.unwrap(); + + let attestation_response = ExternalRpcHandler::new(state) + .get_attestation_for_app_key(GetAttestationForAppKeyRequest { + algorithm: "ed25519".to_string(), + }) + .await + .unwrap(); + + let pk_bytes = + extract_pubkey_from_report_data(&attestation_response.report_data, 
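+            // report_data is zero-padded to 64 bytes, so the helper above scans
+            // to the first NUL byte before stripping this prefix and
+            // base64-decoding the public key.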
"dip1::ed25519-pk:") + .unwrap(); + + let public_key = Ed25519VerifyingKey::try_from(pk_bytes.as_slice()).unwrap(); + let signature = Ed25519Signature::try_from(response.signature.as_slice()).unwrap(); + assert!(public_key.verify(data_to_sign, &signature).is_ok()); + } + + #[tokio::test] + async fn test_sign_secp256k1_success() { + let (state, _quote_file, _log_file) = setup_test_state().await; + let handler = InternalRpcHandler { + state: state.clone(), + }; + let data_to_sign = b"test message for secp256k1"; + let request = SignRequest { + algorithm: "secp256k1".to_string(), + data: data_to_sign.to_vec(), + }; + + let response = handler.sign(request).await.unwrap(); + + let attestation_response = ExternalRpcHandler::new(state) + .get_attestation_for_app_key(GetAttestationForAppKeyRequest { + algorithm: "secp256k1".to_string(), + }) + .await + .unwrap(); + + let pk_bytes = extract_pubkey_from_report_data( + &attestation_response.report_data, + "dip1::secp256k1c-pk:", + ) + .unwrap(); + + let public_key = VerifyingKey::from_sec1_bytes(&pk_bytes).unwrap(); + let signature = K256Signature::try_from(response.signature.as_slice()).unwrap(); + assert!(public_key.verify(data_to_sign, &signature).is_ok()); + } + + #[tokio::test] + async fn test_sign_secp256k1_prehashed_success() { + let (state, _quote_file, _log_file) = setup_test_state().await; + let handler = InternalRpcHandler { + state: state.clone(), + }; + let data_to_sign = b"test message for secp256k1 prehashed"; + + let digest = Sha256::digest(data_to_sign); + + let request = SignRequest { + algorithm: "secp256k1_prehashed".to_string(), + data: digest.to_vec(), + }; + + let response = handler.sign(request).await.unwrap(); + + let attestation_response = ExternalRpcHandler::new(state) + .get_attestation_for_app_key(GetAttestationForAppKeyRequest { + algorithm: "secp256k1".to_string(), + }) + .await + .unwrap(); + + let pk_bytes = extract_pubkey_from_report_data( + &attestation_response.report_data, + "dip1::secp256k1c-pk:", + ) + .unwrap(); + + let public_key = VerifyingKey::from_sec1_bytes(&pk_bytes).unwrap(); + let signature = K256Signature::try_from(response.signature.as_slice()).unwrap(); + assert!(public_key + .verify_prehash(digest.as_slice(), &signature) + .is_ok()); + } + + #[tokio::test] + async fn test_sign_secp256k1_prehashed_invalid_length_fails() { + let (state, _quote_file, _log_file) = setup_test_state().await; + let handler = InternalRpcHandler { + state: state.clone(), + }; + + // digest with an invalid length + let invalid_digest = vec![0; 31]; + + let request = SignRequest { + algorithm: "secp256k1_prehashed".to_string(), + data: invalid_digest, + }; + + let response = handler.sign(request).await; + assert!(response.is_err()); + assert!(response + .unwrap_err() + .to_string() + .contains("requires a 32-byte digest")); + } + + #[tokio::test] + async fn test_sign_unsupported_algorithm_fails() { + let (state, _quote_file, _log_file) = setup_test_state().await; + let handler = InternalRpcHandler { state }; + let request = SignRequest { + algorithm: "rsa".to_string(), // Unsupported algorithm + data: b"test message".to_vec(), + }; + + let result = handler.sign(request).await; + assert!(result.is_err()); + assert_eq!(result.unwrap_err().to_string(), "Unsupported algorithm"); + } + + #[tokio::test] + async fn test_get_attestation_for_app_key_ed25519_success() { + let (state, _quote_file, _log_file) = setup_test_state().await; + let handler = ExternalRpcHandler::new(state.clone()); + let request = GetAttestationForAppKeyRequest 
{ + algorithm: "ed25519".to_string(), + }; + + let response = handler.get_attestation_for_app_key(request).await.unwrap(); + + const EXPECTED_REPORT_DATA: &str = + "dip1::ed25519-pk:5Pbre1Amf1hrp2V2bbfKlIfxpQb2pJAmrgmhxgVoG9s\0\0\0\0"; + assert_eq!(EXPECTED_REPORT_DATA.as_bytes(), response.report_data); + } + + #[tokio::test] + async fn test_get_attestation_for_app_key_secp256k1_success() { + let (state, _quote_file, _log_file) = setup_test_state().await; + let handler = ExternalRpcHandler::new(state.clone()); + let request = GetAttestationForAppKeyRequest { + algorithm: "secp256k1".to_string(), + }; + + let response = handler.get_attestation_for_app_key(request).await.unwrap(); + + const EXPECTED_REPORT_DATA: &str = + "dip1::secp256k1c-pk:A6t_JdVkVdMAocH3f1f20WGT6JzdntxcXimUtEax8zc9"; + assert_eq!(EXPECTED_REPORT_DATA.as_bytes(), response.report_data); + } + + #[tokio::test] + async fn test_get_attestation_for_app_key_unsupported_algorithm_fails() { + let (state, _quote_file, _log_file) = setup_test_state().await; + let handler = ExternalRpcHandler::new(state); + let request = GetAttestationForAppKeyRequest { + algorithm: "ecdsa".to_string(), // Unsupported algorithm + }; + + let result = handler.get_attestation_for_app_key(request).await; + assert!(result.is_err()); + assert_eq!(result.unwrap_err().to_string(), "Unsupported algorithm"); + } +} diff --git a/guest-api/build.rs b/guest-api/build.rs index 2292ec24..dc3e9d96 100644 --- a/guest-api/build.rs +++ b/guest-api/build.rs @@ -2,9 +2,11 @@ // // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::expect_used)] + fn main() { prpc_build::configure() - .out_dir(std::env::var_os("OUT_DIR").unwrap()) + .out_dir(std::env::var_os("OUT_DIR").expect("OUT_DIR not set")) .mod_prefix("super::") .build_scale_ext(false) .disable_service_name_emission() diff --git a/guest-api/proto/guest_api.proto b/guest-api/proto/guest_api.proto index 8c3c5337..5d5868cb 100644 --- a/guest-api/proto/guest_api.proto +++ b/guest-api/proto/guest_api.proto @@ -8,10 +8,12 @@ import "google/protobuf/empty.proto"; package guest_api; +// Identifies a running guest worker instance. message Id { string id = 1; } +// Static attestation and registration info reported by the guest agent. message GuestInfo { // Guest software version string version = 1; @@ -27,11 +29,13 @@ message GuestInfo { bytes device_id = 6; } +// IPv4/IPv6 address with prefix length. message IpAddress { string address = 1; uint32 prefix = 2; } +// Observability metrics for a single NIC inside the guest. message Interface { string name = 1; repeated IpAddress addresses = 2; @@ -41,10 +45,12 @@ message Interface { uint64 tx_errors = 6; } +// Default gateway entry advertised to the guest. message Gateway { string address = 1; } +// Complete networking snapshot including WireGuard info. message NetworkInformation { repeated string dns_servers = 1; repeated Gateway gateways = 2; @@ -52,10 +58,12 @@ message NetworkInformation { string wg_info = 4; } +// Lists all containers currently scheduled in the guest. message ListContainersResponse { repeated Container containers = 1; } +// Docker-compatible view of an application container. message Container { // The ID of this container string id = 1; @@ -73,7 +81,7 @@ message Container { string status = 8; } -// The system info +// OS, kernel, and resource metrics for the guest worker. 
message SystemInfo { // Operating system string os_name = 1; @@ -109,6 +117,7 @@ message SystemInfo { repeated DiskInfo disks = 17; } +// Disk usage metrics scoped per device or mount point. message DiskInfo { // Device name string name = 1; @@ -120,14 +129,21 @@ message DiskInfo { uint64 free_size = 5; } +// Direct gRPC surface exposed by the in-guest agent. service GuestApi { + // Returns attestation material and identifiers for the calling guest. rpc Info(google.protobuf.Empty) returns (GuestInfo); + // Reports the guest's OS/kernel and resource statistics. rpc SysInfo(google.protobuf.Empty) returns (SystemInfo); + // Dumps NIC/Gateway configuration so operators can debug connectivity. rpc NetworkInfo(google.protobuf.Empty) returns (NetworkInformation); + // Enumerates the containers running under the guest supervisor. rpc ListContainers(google.protobuf.Empty) returns (ListContainersResponse); + // Initiates a graceful shutdown inside the guest VM. rpc Shutdown(google.protobuf.Empty) returns (google.protobuf.Empty); } +// Same API surface as GuestApi but multiplexed by VM ID through VMM. service ProxiedGuestApi { rpc Info(Id) returns (GuestInfo); rpc SysInfo(Id) returns (SystemInfo); diff --git a/guest-api/src/generated/mod.rs b/guest-api/src/generated/mod.rs index 90ec5c9f..f7aa7e5d 100644 --- a/guest-api/src/generated/mod.rs +++ b/guest-api/src/generated/mod.rs @@ -1,3 +1,6 @@ #![allow(async_fn_in_trait)] +pub const FILE_DESCRIPTOR_SET: &[u8] = + include_bytes!(concat!(env!("OUT_DIR"), "/file_descriptor_set.bin")); + include!(concat!(env!("OUT_DIR"), "/guest_api.rs")); diff --git a/host-api/build.rs b/host-api/build.rs index 2292ec24..dc3e9d96 100644 --- a/host-api/build.rs +++ b/host-api/build.rs @@ -2,9 +2,11 @@ // // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::expect_used)] + fn main() { prpc_build::configure() - .out_dir(std::env::var_os("OUT_DIR").unwrap()) + .out_dir(std::env::var_os("OUT_DIR").expect("OUT_DIR not set")) .mod_prefix("super::") .build_scale_ext(false) .disable_service_name_emission() diff --git a/host-api/src/generated/mod.rs b/host-api/src/generated/mod.rs index 390b8345..9aa65314 100644 --- a/host-api/src/generated/mod.rs +++ b/host-api/src/generated/mod.rs @@ -1,3 +1,6 @@ #![allow(async_fn_in_trait)] +pub const FILE_DESCRIPTOR_SET: &[u8] = + include_bytes!(concat!(env!("OUT_DIR"), "/file_descriptor_set.bin")); + include!(concat!(env!("OUT_DIR"), "/host_api.rs")); diff --git a/http-client/src/lib.rs b/http-client/src/lib.rs index cd121c96..ee5c7f9f 100644 --- a/http-client/src/lib.rs +++ b/http-client/src/lib.rs @@ -36,14 +36,14 @@ pub async fn http_request( body: &[u8], ) -> Result<(u16, Vec)> { debug!("Sending HTTP request to {base}, path={path}"); - let mut response = if base.starts_with("unix:") { + let mut response = if let Some(uds) = base.strip_prefix("unix:") { let path = if path.starts_with("/") { path.to_string() } else { format!("/{path}") }; let client: Client> = Client::unix(); - let unix_uri: hyper::Uri = Uri::new(base.strip_prefix("unix:").unwrap(), &path).into(); + let unix_uri: hyper::Uri = Uri::new(uds, &path).into(); let req = Request::builder() .method(method) .uri(unix_uri) diff --git a/kms/README.md b/kms/README.md index 7fa27dd8..8e7936fc 100644 --- a/kms/README.md +++ b/kms/README.md @@ -28,7 +28,7 @@ CVMs running in dstack support three boot modes: - Supports application upgrades - Requires control contract configuration - `key-provider` in RTMR: `{"type": "kms", "id": ""}` -- `app-id` is derived from the deployer's eth 
address + salt +- `app-id` is equal to the address of the deployed App Smart Contract. ## KMS Implementation @@ -150,7 +150,7 @@ The verification process follows these steps: ## The RPC Interface -The KMS RPC interface is defined in [kms.proto](rpc/proto/kms.proto). +The KMS RPC interface is defined in [kms_rpc.proto](rpc/proto/kms_rpc.proto). The core interface serving the dstack app are: - `GetAppKey`: Requests an app key using the app ID and TDX quote @@ -171,7 +171,7 @@ The `GetAppKey` RPC is used by the dstack app to request an app key. In this RPC Note: -There are multiple keys derived for different usage, see [kms.proto](rpc/proto/kms.proto) for more details. +There are multiple keys derived for different usage, see [kms_rpc.proto](rpc/proto/kms_rpc.proto) for more details. The root key is generated by a genesis KMS node in TEE and would be stored in the KMS node's encrypted local disk, replicated to other KMS nodes. The keys are derived with app id which guarantees apps can not get the keys from other apps. diff --git a/kms/auth-eth-bun/package.json b/kms/auth-eth-bun/package.json index 8b7ca5c3..3bef0eb8 100644 --- a/kms/auth-eth-bun/package.json +++ b/kms/auth-eth-bun/package.json @@ -15,7 +15,7 @@ "check": "bun run lint && bun run test:run" }, "dependencies": { - "hono": "4.8.5", + "hono": "4.10.3", "@hono/zod-validator": "0.2.2", "zod": "3.25.76", "viem": "2.31.7" diff --git a/kms/auth-eth/package-lock.json b/kms/auth-eth/package-lock.json index 18003a43..f9336526 100644 --- a/kms/auth-eth/package-lock.json +++ b/kms/auth-eth/package-lock.json @@ -3218,9 +3218,9 @@ } }, "node_modules/@openzeppelin/hardhat-upgrades/node_modules/undici": { - "version": "6.21.1", - "resolved": "https://registry.npmjs.org/undici/-/undici-6.21.1.tgz", - "integrity": "sha512-q/1rj5D0/zayJB2FraXdaWxbhWiNKDvu8naDT2dl1yTlvJp4BLtOcp2a5BvgGNQpYYJzau7tf1WgKv3b+7mqpQ==", + "version": "6.21.3", + "resolved": "https://registry.npmjs.org/undici/-/undici-6.21.3.tgz", + "integrity": "sha512-gBLkYIlEnSp8pFbT64yFgGE6UIB9tAkhukC23PmMDCe5Nd+cRqKxSjw5y54MK2AZMgZfJWMaNE4nYUHgi1XEOw==", "dev": true, "license": "MIT", "engines": { @@ -9599,9 +9599,9 @@ "license": "MIT" }, "node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", "dev": true, "license": "MIT", "dependencies": { @@ -10243,9 +10243,9 @@ } }, "node_modules/mocha/node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "license": "MIT", "dependencies": { @@ -13186,9 +13186,9 @@ } }, "node_modules/undici": { - "version": "5.28.4", - "resolved": "https://registry.npmjs.org/undici/-/undici-5.28.4.tgz", - "integrity": "sha512-72RFADWFqKmUb2hmmvNODKL3p9hcB6Gt2DOQMis1SEBaV6a4MH8soBvzg+95CYhCKPFedut2JY9bMfrDl9D23g==", + "version": "5.29.0", + "resolved": 
"https://registry.npmjs.org/undici/-/undici-5.29.0.tgz", + "integrity": "sha512-raqeBD6NQK4SkWhQzeYKd1KmIG6dllBOTt55Rmkt4HtI9mwdWtJljnrXjAFUBLTSN67HWrOIZ3EPF4kjUw80Bg==", "dev": true, "license": "MIT", "dependencies": { diff --git a/kms/auth-mock/package.json b/kms/auth-mock/package.json index 6493ab83..3c62e80e 100644 --- a/kms/auth-mock/package.json +++ b/kms/auth-mock/package.json @@ -15,7 +15,7 @@ "check": "bun run lint && bun run test:run" }, "dependencies": { - "hono": "4.8.5", + "hono": "4.10.3", "@hono/zod-validator": "0.2.2", "zod": "3.25.76" }, diff --git a/kms/dstack-app/builder/Dockerfile b/kms/dstack-app/builder/Dockerfile index 5e954755..e9c9448b 100644 --- a/kms/dstack-app/builder/Dockerfile +++ b/kms/dstack-app/builder/Dockerfile @@ -27,7 +27,7 @@ RUN cd dstack && cargo build --release -p dstack-kms --target x86_64-unknown-lin FROM debian:bookworm@sha256:0d8498a0e9e6a60011df39aab78534cfe940785e7c59d19dfae1eb53ea59babe COPY ./shared /build WORKDIR /build -ARG QEMU_REV=d98440811192c08eafc07c7af110593c6b3758ff +ARG QEMU_REV=dbcec07c0854bf873d346a09e87e4c993ccf2633 RUN ./pin-packages.sh ./qemu-pinned-packages.txt && \ apt-get update && \ apt-get install -y --no-install-recommends \ @@ -43,7 +43,7 @@ RUN ./pin-packages.sh ./qemu-pinned-packages.txt && \ flex \ bison && \ rm -rf /var/lib/apt/lists/* /var/log/* /var/cache/ldconfig/aux-cache -RUN git clone https://github.com/kvinwang/qemu-tdx.git --depth 1 --branch passthrough-dump-acpi --single-branch && \ +RUN git clone https://github.com/kvinwang/qemu-tdx.git --depth 1 --branch dstack-qemu-9.2.1 --single-branch && \ cd qemu-tdx && git fetch --depth 1 origin ${QEMU_REV} && \ git checkout ${QEMU_REV} && \ ../config-qemu.sh ./build /usr/local && \ diff --git a/kms/dstack-app/builder/README.md b/kms/dstack-app/builder/README.md index 40f376a3..b5387fa9 100644 --- a/kms/dstack-app/builder/README.md +++ b/kms/dstack-app/builder/README.md @@ -44,7 +44,7 @@ services: environment: - IMAGE_DOWNLOAD_URL=${IMAGE_DOWNLOAD_URL:-http://localhost:8001/mr_{OS_IMAGE_HASH}.tar.gz} - AUTH_TYPE=dev - - DEV_DOMAIN=kms.1022.kvin.wang + - DEV_DOMAIN=kms.1022.dstack.org - QUOTE_ENABLED=false ``` diff --git a/kms/dstack-app/compose-dev.yaml b/kms/dstack-app/compose-dev.yaml index 9193c338..aacb0e81 100644 --- a/kms/dstack-app/compose-dev.yaml +++ b/kms/dstack-app/compose-dev.yaml @@ -29,7 +29,7 @@ services: - 8001:8000 kms: - image: kvin/kms@sha256:ad6a8c5c43aed7278e665cd0960ae5be95060847f7d517633be685cabda95a3d + image: ${KMS_IMAGE} volumes: - kms-volume:/kms - /var/run/dstack.sock:/var/run/dstack.sock diff --git a/kms/dstack-app/deploy-to-vmm.sh b/kms/dstack-app/deploy-to-vmm.sh index d2d6ce5b..b8f6aeee 100755 --- a/kms/dstack-app/deploy-to-vmm.sh +++ b/kms/dstack-app/deploy-to-vmm.sh @@ -35,7 +35,7 @@ else # GUEST_AGENT_ADDR=127.0.0.1:9205 # The URL of the dstack app image download URL -# IMAGE_DOWNLOAD_URL=https://files.kvin.wang/images/mr_{OS_IMAGE_HASH}.tar.gz +# IMAGE_DOWNLOAD_URL=https://download.dstack.org/os-images/mr_{OS_IMAGE_HASH}.tar.gz # Image hash verification feature flag VERIFY_IMAGE=true @@ -50,7 +50,10 @@ GIT_REPOSITORY=https://github.com/Dstack-TEE/dstack.git GIT_REV=HEAD # The dstack OS image name to use for the KMS app -OS_IMAGE=dstack-0.5.0 +OS_IMAGE=dstack-0.5.5 + +# The dstack KMS image name to use for the KMS app +KMS_IMAGE=dstacktee/dstack-kms@sha256:11ac59f524a22462ccd2152219b0bec48a28ceb734e32500152d4abefab7a62a # The admin token for the KMS app ADMIN_TOKEN=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1) @@ 
-100,6 +103,7 @@ subvar IMAGE_DOWNLOAD_URL subvar ADMIN_TOKEN_HASH subvar VERIFY_IMAGE subvar GIT_REPOSITORY +subvar KMS_IMAGE echo "Docker compose file:" cat "$COMPOSE_TMP" diff --git a/kms/rpc/build.rs b/kms/rpc/build.rs index 77e6a9e8..fe19530a 100644 --- a/kms/rpc/build.rs +++ b/kms/rpc/build.rs @@ -2,9 +2,11 @@ // // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::expect_used)] + fn main() { prpc_build::configure() - .out_dir(std::env::var_os("OUT_DIR").unwrap()) + .out_dir(std::env::var_os("OUT_DIR").expect("OUT_DIR not set")) .mod_prefix("super::") .build_scale_ext(false) .disable_package_emission() diff --git a/kms/rpc/src/generated.rs b/kms/rpc/src/generated.rs index e7661af3..3e990136 100644 --- a/kms/rpc/src/generated.rs +++ b/kms/rpc/src/generated.rs @@ -1,3 +1,6 @@ #![allow(async_fn_in_trait)] +pub const FILE_DESCRIPTOR_SET: &[u8] = + include_bytes!(concat!(env!("OUT_DIR"), "/file_descriptor_set.bin")); + include!(concat!(env!("OUT_DIR"), "/kms.rs")); diff --git a/kms/src/main_service.rs b/kms/src/main_service.rs index f7e50b1d..66fbf87b 100644 --- a/kms/src/main_service.rs +++ b/kms/src/main_service.rs @@ -203,7 +203,9 @@ impl RpcHandler { fn cache_mrs(&self, key: &str, mrs: &Mrs) -> Result<()> { let path = self.mr_cache_dir().join(key); - fs::create_dir_all(path.parent().unwrap()).context("Failed to create cache directory")?; + if let Some(parent) = path.parent() { + fs::create_dir_all(parent).context("Failed to create cache directory")?; + } safe_write::safe_write( &path, serde_json::to_string(mrs).context("Failed to serialize cached MRs")?, @@ -268,8 +270,9 @@ impl RpcHandler { .kernel_cmdline(&kernel_cmdline) .root_verity(true) .hotplug_off(vm_config.hotplug_off) - .two_pass_add_pages(vm_config.qemu_single_pass_add_pages) - .pic(vm_config.pic) + .maybe_two_pass_add_pages(vm_config.qemu_single_pass_add_pages) + .maybe_pic(vm_config.pic) + .maybe_qemu_version(vm_config.qemu_version.clone()) .maybe_pci_hole64_size(if vm_config.pci_hole64_size > 0 { Some(vm_config.pci_hole64_size) } else { @@ -481,8 +484,8 @@ impl RpcHandler { .context("Failed to derive app disk key")?; let req = CertRequest::builder() .key(&app_key) - .org_name("dstack") - .subject("dstack App CA") + .org_name("Dstack") + .subject("Dstack App CA") .ca_level(0) .app_id(app_id) .special_usage("app:ca") diff --git a/kms/src/onboard_service.rs b/kms/src/onboard_service.rs index a92c4cee..ffe38f16 100644 --- a/kms/src/onboard_service.rs +++ b/kms/src/onboard_service.rs @@ -128,8 +128,8 @@ impl Keys { quote_enabled: bool, ) -> Result { let tmp_ca_cert = CertRequest::builder() - .org_name("dstack") - .subject("dstack Client Temp CA") + .org_name("Dstack") + .subject("Dstack Client Temp CA") .ca_level(0) .key(&tmp_ca_key) .build() @@ -137,8 +137,8 @@ impl Keys { // Create self-signed KMS cert let ca_cert = CertRequest::builder() - .org_name("dstack") - .subject("dstack KMS CA") + .org_name("Dstack") + .subject("Dstack KMS CA") .ca_level(1) .key(&ca_key) .build() diff --git a/ra-rpc/Cargo.toml b/ra-rpc/Cargo.toml index b7171eda..2ec2b636 100644 --- a/ra-rpc/Cargo.toml +++ b/ra-rpc/Cargo.toml @@ -22,8 +22,11 @@ bon.workspace = true rocket-vsock-listener = { workspace = true, optional = true } serde.workspace = true x509-parser.workspace = true +prost-types = { workspace = true, optional = true } +or-panic.workspace = true [features] default = ["rocket", "client"] rocket = ["dep:rocket", "dep:rocket-vsock-listener"] client = ["reqwest"] +openapi = ["dep:prost-types"] diff --git a/ra-rpc/prpc-openapi.md 
b/ra-rpc/prpc-openapi.md
new file mode 100644
index 00000000..5e234657
--- /dev/null
+++ b/ra-rpc/prpc-openapi.md
@@ -0,0 +1,73 @@
+# Generating OpenAPI docs for pRPC services
+
+This repository now ships a lightweight OpenAPI generator inside `ra-rpc`. It can
+derive a specification (plus a Swagger UI test page) directly from the protobuf
+descriptors that are already produced during the `prpc_build` step.
+
+## 1. Enable the feature
+
+Add the `openapi` feature when depending on `ra-rpc`:
+
+```toml
+[dependencies]
+ra-rpc = { path = "../ra-rpc", features = ["openapi", "rocket"] }
+```
+
+The `rocket` feature is optional if you only need the JSON document and plan to
+serve it through another framework.
+
+## 2. Export the descriptor from your `*-rpc` crate
+
+Every RPC crate already has access to `file_descriptor_set.bin`. Expose it so
+that application binaries can include it:
+
+```rust
+pub const FILE_DESCRIPTOR_SET: &[u8] =
+    include_bytes!(concat!(env!("OUT_DIR"), "/file_descriptor_set.bin"));
+```
+
+This repository now does that for all existing RPC crates.
+
+## 3. Build and mount the document
+
+```rust
+use ra_rpc::openapi::{
+    build_openapi_doc, DescriptorSource, DocumentInfo, OpenApiDoc, ServiceConfig, SwaggerUiConfig,
+};
+
+fn openapi_doc() -> anyhow::Result<OpenApiDoc> {
+    let descriptor = dstack_guest_agent_rpc::FILE_DESCRIPTOR_SET;
+    let sources = vec![DescriptorSource::new(
+        descriptor,
+        vec![
+            // Mounts /prpc/Worker.Version (the prefix is optional when you don't trim).
+            ServiceConfig::new("Worker", "/prpc").with_method_prefix("Worker."),
+        ],
+    )];
+
+    let info = DocumentInfo::new("Guest Worker API", env!("CARGO_PKG_VERSION"))
+        .with_description("Auto generated from protobuf descriptors")
+        .add_server("https://example.com/prpc");
+
+    let ui = SwaggerUiConfig {
+        title: "Guest Worker RPC".into(),
+        ..Default::default()
+    };
+
+    build_openapi_doc(&sources, &info, ui)
+}
+```
+
+Serving it through Rocket is one line:
+
+```rust
+let openapi = openapi_doc()?;
+let rocket = ra_rpc::rocket_helper::mount_openapi_docs(rocket, openapi, "/rpc-docs");
+```
+
+* `GET /rpc-docs/openapi.json` returns the specification.
+* `GET /rpc-docs/docs` serves a Swagger UI page backed by the same spec.
+
+You can mount as many descriptor sources as you need (for example when the same
+binary exposes both admin and user RPC stacks). Just add more `DescriptorSource`
+entries that point to the relevant `FILE_DESCRIPTOR_SET` constants.
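+
+## 4. Dump the spec without a server
+
+`generate_document` is the plain-JSON core that `build_openapi_doc` wraps, which
+is handy for CI snapshots. A minimal sketch (the descriptor constant and service
+name below are placeholders for your own crate):
+
+```rust
+use ra_rpc::openapi::{generate_document, DescriptorSource, DocumentInfo, ServiceConfig};
+
+fn dump_spec() -> anyhow::Result<()> {
+    let sources = vec![DescriptorSource::new(
+        dstack_guest_agent_rpc::FILE_DESCRIPTOR_SET,
+        vec![ServiceConfig::new("Worker", "/prpc")],
+    )];
+    let info = DocumentInfo::new("Guest Worker API", "0.1.0");
+    // Returns the pretty-printed OpenAPI JSON as a String.
+    let spec = generate_document(&sources, &info)?;
+    std::fs::write("openapi.json", spec)?;
+    Ok(())
+}
+```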
diff --git a/ra-rpc/src/lib.rs b/ra-rpc/src/lib.rs
index e6f088ae..43a5e257 100644
--- a/ra-rpc/src/lib.rs
+++ b/ra-rpc/src/lib.rs
@@ -18,6 +18,9 @@ pub mod rocket_helper;
 #[cfg(feature = "client")]
 pub mod client;
 
+#[cfg(feature = "openapi")]
+pub mod openapi;
+
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub enum RemoteEndpoint {
     Tcp(SocketAddr),
diff --git a/ra-rpc/src/openapi.rs b/ra-rpc/src/openapi.rs
new file mode 100644
index 00000000..7eb0a0e9
--- /dev/null
+++ b/ra-rpc/src/openapi.rs
@@ -0,0 +1,1142 @@
+// SPDX-FileCopyrightText: © 2025 Phala Network
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//! Utilities to derive OpenAPI documents for pRPC services from their compiled
+//! protobuf descriptors. The resulting spec can be served directly or embedded
+//! inside a Swagger UI helper.
+
+use std::{
+    borrow::Cow,
+    collections::{BTreeMap, BTreeSet, HashMap},
+    convert::TryFrom,
+    sync::Arc,
+};
+
+use anyhow::{anyhow, bail, Context, Result};
+use prost_types::{
+    field_descriptor_proto::{Label as FieldLabel, Type as FieldType},
+    DescriptorProto, EnumDescriptorProto, FieldDescriptorProto, FileDescriptorSet,
+    ServiceDescriptorProto, SourceCodeInfo,
+};
+use prpc::Message as _;
+use serde_json::{json, Map, Value};
+
+/// High level metadata used for the `info` and `servers` sections of the
+/// generated OpenAPI specification.
+#[derive(Clone, Debug)]
+pub struct DocumentInfo<'a> {
+    pub title: Cow<'a, str>,
+    pub version: Cow<'a, str>,
+    pub description: Option<Cow<'a, str>>,
+    pub servers: Vec<Cow<'a, str>>,
+}
+
+impl<'a> DocumentInfo<'a> {
+    pub fn new(title: impl Into<Cow<'a, str>>, version: impl Into<Cow<'a, str>>) -> Self {
+        Self {
+            title: title.into(),
+            version: version.into(),
+            description: None,
+            servers: Vec::new(),
+        }
+    }
+
+    pub fn with_description(mut self, description: impl Into<Cow<'a, str>>) -> Self {
+        self.description = Some(description.into());
+        self
+    }
+
+    pub fn add_server(mut self, server: impl Into<Cow<'a, str>>) -> Self {
+        self.servers.push(server.into());
+        self
+    }
+}
+
+/// Configuration describing how a pRPC service should be exposed over HTTP.
+#[derive(Clone, Debug)]
+pub struct ServiceConfig<'a> {
+    pub name: Cow<'a, str>,
+    pub mount_path: Cow<'a, str>,
+    pub method_prefix: Cow<'a, str>,
+    pub tag: Option<Cow<'a, str>>,
+    pub description: Option<Cow<'a, str>>,
+}
+
+impl<'a> ServiceConfig<'a> {
+    pub fn new(name: impl Into<Cow<'a, str>>, mount_path: impl Into<Cow<'a, str>>) -> Self {
+        Self {
+            name: name.into(),
+            mount_path: mount_path.into(),
+            method_prefix: Cow::Borrowed(""),
+            tag: None,
+            description: None,
+        }
+    }
+
+    pub fn with_method_prefix(mut self, prefix: impl Into<Cow<'a, str>>) -> Self {
+        self.method_prefix = prefix.into();
+        self
+    }
+
+    pub fn with_tag(mut self, tag: impl Into<Cow<'a, str>>) -> Self {
+        self.tag = Some(tag.into());
+        self
+    }
+
+    pub fn with_description(mut self, description: impl Into<Cow<'a, str>>) -> Self {
+        self.description = Some(description.into());
+        self
+    }
+}
+
+/// Descriptor blob plus the set of services that should be surfaced from it.
+#[derive(Clone, Debug)]
+pub struct DescriptorSource<'a> {
+    pub descriptor: &'a [u8],
+    pub services: Vec<ServiceConfig<'a>>,
+}
+
+impl<'a> DescriptorSource<'a> {
+    pub fn new(descriptor: &'a [u8], services: Vec<ServiceConfig<'a>>) -> Self {
+        Self {
+            descriptor,
+            services,
+        }
+    }
+}
+
+/// Combined OpenAPI document + UI preferences.
+#[derive(Clone)]
+pub struct OpenApiDoc {
+    spec: Arc<String>,
+    ui: SwaggerUiConfig,
+}
+
+impl OpenApiDoc {
+    pub fn new(spec_json: String, ui: SwaggerUiConfig) -> Self {
+        Self {
+            spec: Arc::new(spec_json),
+            ui,
+        }
+    }
+
+    pub fn spec_json(&self) -> &str {
+        self.spec.as_str()
+    }
+
+    pub fn clone_spec_json(&self) -> String {
+        (*self.spec).clone()
+    }
+
+    pub(crate) fn render(&self, spec_url: &str) -> RenderedDoc {
+        let html = build_swagger_ui_html(spec_url, &self.ui);
+        RenderedDoc {
+            spec: self.spec.clone(),
+            ui_html: html,
+        }
+    }
+}
+
+#[derive(Default)]
+struct SourceCodeComments {
+    entries: HashMap<Vec<i32>, String>,
+}
+
+impl SourceCodeComments {
+    fn from_source_info(info: Option<SourceCodeInfo>) -> Self {
+        let mut entries = HashMap::new();
+        if let Some(info) = info {
+            for location in info.location {
+                if let Some(comment) = comment_from_location(&location) {
+                    entries.insert(location.path, comment);
+                }
+            }
+        }
+        Self { entries }
+    }
+
+    fn comment_for(&self, path: &[i32]) -> Option<&str> {
+        self.entries.get(path).map(String::as_str)
+    }
+}
+
+fn comment_from_location(location: &prost_types::source_code_info::Location) -> Option<String> {
+    if let Some(text) = location.leading_comments.as_deref() {
+        return normalize_comment(text);
+    }
+
+    let mut detached = Vec::new();
+    for comment in &location.leading_detached_comments {
+        if let Some(normalized) = normalize_comment(comment) {
+            detached.push(normalized);
+        }
+    }
+    if !detached.is_empty() {
+        return Some(detached.join("\n\n"));
+    }
+
+    if let Some(text) = location.trailing_comments.as_deref() {
+        return normalize_comment(text);
+    }
+
+    None
+}
+
+fn normalize_comment(raw: &str) -> Option<String> {
+    let trimmed = raw.trim();
+    if trimmed.is_empty() {
+        None
+    } else {
+        Some(trimmed.replace("\r\n", "\n"))
+    }
+}
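+
+// `SourceCodeInfo` location paths are sequences of (field-number, index) pairs
+// from descriptor.proto: e.g. [4, 0] addresses the first top-level message
+// (FileDescriptorProto.message_type = 4), and [6, 1, 2, 0] addresses the first
+// method (ServiceDescriptorProto.method = 2) of the second service (field 6).
+// `extend_path` appends one such (field, index) step to a parent path.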
+fn extend_path(base: &[i32], field_number: i32, index: i32) -> Vec<i32> {
+    let mut path = base.to_vec();
+    path.push(field_number);
+    path.push(index);
+    path
+}
+
+/// Final resources consumed by the Rocket helper.
+#[derive(Clone)]
+pub(crate) struct RenderedDoc {
+    pub spec: Arc<String>,
+    pub ui_html: String,
+}
+
+/// Customisation knobs for the embedded Swagger UI page.
+#[derive(Clone, Debug)]
+pub struct SwaggerUiConfig {
+    pub title: String,
+    pub dark_mode: bool,
+    pub swagger_ui_dist: String,
+}
+
+impl Default for SwaggerUiConfig {
+    fn default() -> Self {
+        Self {
+            title: "pRPC Explorer".to_string(),
+            dark_mode: true,
+            swagger_ui_dist: "https://cdn.jsdelivr.net/npm/swagger-ui-dist@5".to_string(),
+        }
+    }
+}
+
+/// Builds an OpenAPI specification for the provided descriptor sources.
+pub fn generate_document(
+    sources: &[DescriptorSource<'_>],
+    info: &DocumentInfo<'_>,
+) -> Result<String> {
+    if sources.is_empty() {
+        bail!("at least one descriptor source is required");
+    }
+
+    let mut registry = DescriptorRegistry::default();
+    for (source_id, source) in sources.iter().enumerate() {
+        let descriptor_set = FileDescriptorSet::decode(source.descriptor)
+            .context("failed to decode descriptor set")?;
+        registry.ingest(descriptor_set, source_id);
+    }
+
+    let mut schema_builder = SchemaBuilder::new(&registry);
+    let mut paths = BTreeMap::<String, Value>::new();
+
+    for (source_id, source) in sources.iter().enumerate() {
+        for svc_cfg in &source.services {
+            let service = registry
+                .resolve_service(source_id, svc_cfg.name.as_ref())
+                .with_context(|| format!("service {} not found in descriptor", svc_cfg.name))?;
+
+            for method in &service.methods {
+                if method.client_streaming || method.server_streaming {
+                    bail!(
+                        "streaming method {}.{} is not supported by the HTTP bridge",
+                        service.full_name,
+                        method.name
+                    );
+                }
+
+                let base = normalize_mount_path(svc_cfg.mount_path.as_ref());
+                let method_segment = format!("{}{}", svc_cfg.method_prefix, method.name);
+                let path = join_path(&base, &method_segment);
+                let post_operation =
+                    build_operation(service, method, svc_cfg, &mut schema_builder)?;
+
+                let mut op_map = Map::new();
+                op_map.insert("post".to_string(), post_operation);
+                paths.insert(path, Value::Object(op_map));
+            }
+        }
+    }
+
+    if paths.is_empty() {
+        bail!("no RPC methods were registered for OpenAPI export");
+    }
+
+    let mut schemas = schema_builder.finish();
+    schemas.insert("RpcError".to_string(), rpc_error_schema());
+
+    let mut doc = Map::new();
+    doc.insert("openapi".into(), Value::String("3.1.0".into()));
+
+    let mut info_obj = Map::new();
+    info_obj.insert("title".into(), Value::String(info.title.to_string()));
+    info_obj.insert("version".into(), Value::String(info.version.to_string()));
+    if let Some(description) = &info.description {
+        info_obj.insert("description".into(), Value::String(description.to_string()));
+    }
+    doc.insert("info".into(), Value::Object(info_obj));
+
+    if !info.servers.is_empty() {
+        let mut servers = Vec::new();
+        for server in &info.servers {
+            let mut server_obj = Map::new();
+            server_obj.insert("url".into(), Value::String(server.to_string()));
+            servers.push(Value::Object(server_obj));
+        }
+        doc.insert("servers".into(), Value::Array(servers));
+    }
+
+    let mut components = Map::new();
+    components.insert("schemas".into(), Value::Object(schemas));
+
+    doc.insert("paths".into(), map_to_value(paths));
+    doc.insert("components".into(), Value::Object(components));
+
+    serde_json::to_string_pretty(&Value::Object(doc)).context("failed to serialize OpenAPI spec")
+}
+
+/// Convenience helper that returns a ready-to-serve [`OpenApiDoc`].
+pub fn build_openapi_doc(
+    sources: &[DescriptorSource<'_>],
+    info: &DocumentInfo<'_>,
+    ui: SwaggerUiConfig,
+) -> Result<OpenApiDoc> {
+    let spec = generate_document(sources, info)?;
+    Ok(OpenApiDoc::new(spec, ui))
+}
+
+fn build_operation(
+    service: &ServiceInfo,
+    method: &MethodInfo,
+    svc_cfg: &ServiceConfig<'_>,
+    schema_builder: &mut SchemaBuilder<'_>,
+) -> Result<Value> {
+    let mut operation = Map::new();
+    let tag = svc_cfg
+        .tag
+        .as_ref()
+        .map(|t| t.to_string())
+        .unwrap_or_else(|| service.full_name.clone());
+    operation.insert("tags".into(), Value::Array(vec![Value::String(tag)]));
+    operation.insert(
+        "operationId".into(),
+        Value::String(format!(
+            "{}_{}",
+            service.full_name.replace('.', "_"),
+            method.name
+        )),
+    );
+    let summary = method
+        .description
+        .as_deref()
+        .and_then(|doc| doc.lines().find(|line| !line.trim().is_empty()))
+        .map(|line| line.trim().to_string())
+        .unwrap_or_else(|| method.name.clone());
+    operation.insert("summary".into(), Value::String(summary));
+
+    let mut description_parts = Vec::new();
+    if let Some(doc) = method.description.as_deref() {
+        let trimmed = doc.trim();
+        if !trimmed.is_empty() {
+            description_parts.push(trimmed.to_string());
+        }
+    }
+    let mut base = format!(
+        "pRPC method `{}` on service `{}`.",
+        method.name, service.full_name
+    );
+    if let Some(extra) = svc_cfg
+        .description
+        .as_ref()
+        .map(|c| c.as_ref())
+        .or(service.description.as_deref())
+    {
+        base.push_str("\n\n");
+        base.push_str(extra);
+    }
+    description_parts.push(base);
+    let description = description_parts.join("\n\n");
+    operation.insert("description".into(), Value::String(description));
+
+    if !is_empty_type(&method.input_type) {
+        let schema = schema_builder.schema_ref(&method.input_type)?;
+        let request = json!({
+            "required": true,
+            "content": {
+                "application/json": {
+                    "schema": schema
+                }
+            }
+        });
+        operation.insert("requestBody".into(), request);
+    }
+
+    let success_schema = if is_empty_type(&method.output_type) {
+        json!({ "type": "object" })
+    } else {
+        schema_builder.schema_ref(&method.output_type)?
+    };
+
+    let mut responses = Map::new();
+    responses.insert(
+        "200".into(),
+        json!({
+            "description": "Successful response",
+            "content": {
+                "application/json": {
+                    "schema": success_schema
+                }
+            }
+        }),
+    );
+    responses.insert(
+        "400".into(),
+        json!({
+            "description": "RPC error",
+            "content": {
+                "application/json": {
+                    "schema": { "$ref": "#/components/schemas/RpcError" }
+                }
+            }
+        }),
+    );
+    operation.insert("responses".into(), Value::Object(responses));
+
+    Ok(Value::Object(operation))
+}
+
+fn rpc_error_schema() -> Value {
+    json!({
+        "type": "object",
+        "properties": {
+            "error": { "type": "string" }
+        },
+        "required": ["error"]
+    })
+}
+
+fn normalize_mount_path(path: &str) -> String {
+    if path.is_empty() {
+        return "/".to_string();
+    }
+    let mut normalized = path.trim().to_string();
+    if !normalized.starts_with('/') {
+        normalized.insert(0, '/');
+    }
+    if normalized.len() > 1 && normalized.ends_with('/') {
+        normalized.pop();
+    }
+    if normalized.is_empty() {
+        "/".to_string()
+    } else {
+        normalized
+    }
+}
+
+fn join_path(base: &str, segment: &str) -> String {
+    if base == "/" {
+        format!("/{}", segment.trim_start_matches('/'))
+    } else {
+        format!(
+            "{}/{}",
+            base.trim_end_matches('/'),
+            segment.trim_start_matches('/')
+        )
+    }
+}
+
+fn map_to_value(map: BTreeMap<String, Value>) -> Value {
+    let mut json_map = Map::new();
+    for (k, v) in map {
+        json_map.insert(k, v);
+    }
+    Value::Object(json_map)
+}
+
+fn is_empty_type(type_name: &str) -> bool {
+    matches!(
+        type_name,
+        ".google.protobuf.Empty" | "google.protobuf.Empty" | ""
+    )
+}
+
+#[derive(Default)]
+struct DescriptorRegistry {
+    messages: HashMap<String, MessageInfo>,
+    enums: HashMap<String, EnumInfo>,
+    services: Vec<ServiceInfo>,
+    service_by_full_name: HashMap<String, usize>,
+    service_by_simple_name: HashMap<String, Vec<usize>>,
+}
+
+impl DescriptorRegistry {
+    fn ingest(&mut self, set: FileDescriptorSet, source_id: usize) {
+        for file in set.file {
+            let package = file.package.unwrap_or_default();
+            let comments = SourceCodeComments::from_source_info(file.source_code_info.clone());
+            for (idx, message) in file.message_type.into_iter().enumerate() {
+                let path = vec![4, idx as i32];
+                self.register_message(&package, &[], message, &path, &comments);
+            }
+            for (idx, enumeration) in file.enum_type.into_iter().enumerate() {
+                let path = vec![5, idx as i32];
+                self.register_enum(&package, &[], enumeration, &path, &comments);
+            }
+            for (idx, service) in file.service.into_iter().enumerate() {
+                let path = vec![6, idx as i32];
+                self.register_service(&package, service, source_id, &path, &comments);
+            }
+        }
+    }
+
+    fn register_message(
+        &mut self,
+        package: &str,
+        parents: &[String],
+        descriptor: DescriptorProto,
+        descriptor_path: &[i32],
+        comments: &SourceCodeComments,
+    ) {
+        let name = descriptor.name.clone().unwrap_or_default();
+        let mut path = parents.to_owned();
+        path.push(name.clone());
+        let full_name = canonical_name(package, &path);
+        let is_map = descriptor
+            .options
+            .as_ref()
+            .and_then(|opt| opt.map_entry)
+            .unwrap_or(false);
+        let description = comments.comment_for(descriptor_path).map(|s| s.to_string());
+        let mut field_comments = HashMap::new();
+        for (idx, field) in descriptor.field.iter().enumerate() {
+            if let Some(field_name) = field.name.as_ref() {
+                let field_path = extend_path(descriptor_path, 2, idx as i32);
+                if let Some(comment) = comments.comment_for(&field_path) {
+                    field_comments.insert(field_name.clone(), comment.to_string());
+                }
+            }
+        }
+        let info = MessageInfo {
+            full_name: full_name.clone(),
+            descriptor: descriptor.clone(),
+            is_map_entry: is_map,
+            description,
+            field_comments,
+        };
+        self.messages.insert(full_name.clone(), info);
+
+        for (idx, nested) in descriptor.nested_type.into_iter().enumerate() {
+            let nested_path = extend_path(descriptor_path, 3, idx as i32);
+            self.register_message(package, &path, nested, &nested_path, comments);
+        }
+        for (idx, enumeration) in descriptor.enum_type.into_iter().enumerate() {
+            let enum_path = extend_path(descriptor_path, 4, idx as i32);
+            self.register_enum(package, &path, enumeration, &enum_path, comments);
+        }
+    }
+
+    fn register_enum(
+        &mut self,
+        package: &str,
+        parents: &[String],
+        descriptor: EnumDescriptorProto,
+        descriptor_path: &[i32],
+        comments: &SourceCodeComments,
+    ) {
+        let name = descriptor.name.clone().unwrap_or_default();
+        let mut path = parents.to_owned();
+        path.push(name);
+        let full_name = canonical_name(package, &path);
+        let description = comments.comment_for(descriptor_path).map(|s| s.to_string());
+        let info = EnumInfo {
+            descriptor,
+            description,
+        };
+        self.enums.insert(full_name, info);
+    }
+
+    fn register_service(
+        &mut self,
+        package: &str,
+        descriptor: ServiceDescriptorProto,
+        source_id: usize,
+        descriptor_path: &[i32],
+        comments: &SourceCodeComments,
+    ) {
+        let simple_name = descriptor.name.clone().unwrap_or_default();
+        let full_name = qualified_service_name(package, &simple_name);
+        let methods = descriptor
+            .method
+            .into_iter()
+            .enumerate()
+            .map(|(idx, method)| {
+                let description = comments
+                    .comment_for(&extend_path(descriptor_path, 2, idx as i32))
+                    .map(|s| s.to_string());
+                MethodInfo {
+                    name: method.name.unwrap_or_default(),
+                    input_type: normalize_type_name(&method.input_type.unwrap_or_default()),
+                    output_type: normalize_type_name(&method.output_type.unwrap_or_default()),
+                    client_streaming: method.client_streaming.unwrap_or(false),
+                    server_streaming: method.server_streaming.unwrap_or(false),
+                    description,
+                }
+            })
+            .collect();
+        let description = comments.comment_for(descriptor_path).map(|s| s.to_string());
+        let service = ServiceInfo {
+            full_name: full_name.clone(),
+            source_id,
+            description,
+            methods,
+        };
+        let idx = self.services.len();
+        self.services.push(service);
+        self.service_by_full_name.insert(full_name, idx);
+        self.service_by_simple_name
+            .entry(simple_name)
+            .or_default()
+            .push(idx);
+    }
+
+    fn resolve_service(&self, source_id: usize, query: &str) -> Result<&ServiceInfo> {
+        let normalized = query.trim_start_matches('.').to_string();
+        if let Some(&idx) = self.service_by_full_name.get(&normalized) {
+            let service = &self.services[idx];
+            if service.source_id == source_id {
+                return Ok(service);
+            }
+        }
+
+        let matches = self
+            .service_by_simple_name
+            .get(query)
+            .into_iter()
+            .flatten()
+            .filter_map(|idx| {
+                let service = &self.services[*idx];
+                (service.source_id == source_id).then_some(service)
+            })
+            .collect::<Vec<_>>();
+
+        match matches.as_slice() {
+            [service] => Ok(service),
+            [] => bail!("service {} not found in descriptor {}", query, source_id),
+            _ => bail!(
+                "service name {} is ambiguous, please use the fully qualified name",
+                query
+            ),
+        }
+    }
+
+    fn message(&self, name: &str) -> Option<&MessageInfo> {
+        self.messages.get(name)
+    }
+
+    fn enumeration(&self, name: &str) -> Option<&EnumInfo> {
+        self.enums.get(name)
+    }
+}
+
+#[derive(Clone)]
+struct MessageInfo {
+    full_name: String,
+    descriptor: DescriptorProto,
+    is_map_entry: bool,
+    description: Option<String>,
+    field_comments: HashMap<String, String>,
+}
+
+#[derive(Clone)]
+struct EnumInfo {
+    descriptor: EnumDescriptorProto,
+    description: Option<String>,
+}
+
+#[derive(Clone)]
+struct ServiceInfo {
+    full_name: String,
+    source_id: usize,
+    description: Option<String>,
+    methods: Vec<MethodInfo>,
+}
+
+#[derive(Clone)]
+struct MethodInfo {
+    name: String,
+    input_type: String,
+    output_type: String,
+    client_streaming: bool,
+    server_streaming: bool,
+    description: Option<String>,
+}
+
+struct SchemaBuilder<'a> {
+    registry: &'a DescriptorRegistry,
+    generated: BTreeMap<String, Value>,
+    visited: BTreeSet<String>,
+}
+
+impl<'a> SchemaBuilder<'a> {
+    fn new(registry: &'a DescriptorRegistry) -> Self {
+        Self {
+            registry,
+            generated: BTreeMap::new(),
+            visited: BTreeSet::new(),
+        }
+    }
+
+    fn schema_ref(&mut self, type_name: &str) -> Result<Value> {
+        let normalized = normalize_type_name(type_name);
+        if let Some(schema) = builtin_type_schema(&normalized) {
+            return Ok(schema);
+        }
+
+        if let Some(message) = self.registry.message(&normalized) {
+            if message.is_map_entry {
+                bail!(
+                    "map entry type {} cannot be referenced directly",
+                    normalized
+                );
+            }
+            self.ensure_message_generated(&normalized)?;
+            return Ok(json!({
+                "$ref": format!("#/components/schemas/{}", schema_key(&normalized))
+            }));
+        }
+
+        if self.registry.enumeration(&normalized).is_some() {
+            self.ensure_enum_generated(&normalized)?;
+            return Ok(json!({
+                "$ref": format!("#/components/schemas/{}", schema_key(&normalized))
+            }));
+        }
+
+        bail!("unknown type referenced in proto: {}", normalized);
+    }
+
+    fn ensure_message_generated(&mut self, name: &str) -> Result<()> {
+        if self.generated.contains_key(&schema_key(name)) {
+            return Ok(());
+        }
+        if !self.visited.insert(name.to_string()) {
+            bail!("cyclic reference detected while processing {}", name);
+        }
+        let descriptor = self
+            .registry
+            .message(name)
+            .ok_or_else(|| anyhow!("message {} not found", name))?;
+
+        let mut required = Vec::new();
+        let mut props = BTreeMap::new();
+        for field in &descriptor.descriptor.field {
+            let field_name = field.name.clone().unwrap_or_default();
+            let mut schema = self.field_schema(field)?;
+            if let Some(doc) = descriptor.field_comments.get(&field_name) {
+                apply_schema_description(&mut schema, doc);
+            }
+            if is_required_field(field) {
+                required.push(field_name.clone());
+            }
+            props.insert(field_name, schema);
+        }
+
+        let mut obj = Map::new();
+        obj.insert("type".into(), Value::String("object".into()));
+        let mut properties = Map::new();
+        for (k, v) in props {
+            properties.insert(k, v);
+        }
+        obj.insert("properties".into(), Value::Object(properties));
+        if let Some(doc) = &descriptor.description {
+            obj.insert("description".into(), Value::String(doc.clone()));
+        }
+        if !required.is_empty() {
+            obj.insert(
+                "required".into(),
+                Value::Array(required.into_iter().map(Value::String).collect()),
+            );
+        }
+
+        self.generated.insert(schema_key(name), Value::Object(obj));
+        self.visited.remove(name);
+        Ok(())
+    }
+
+    fn ensure_enum_generated(&mut self, name: &str) -> Result<()> {
+        if self.generated.contains_key(&schema_key(name)) {
+            return Ok(());
+        }
+        let descriptor = self
+            .registry
+            .enumeration(name)
+            .ok_or_else(|| anyhow!("enum {} not found", name))?;
+        let mut variants = Vec::new();
+        for value in &descriptor.descriptor.value {
+            if let Some(name) = &value.name {
+                variants.push(Value::String(name.clone()));
+            }
+        }
+        let mut schema = Map::new();
+        schema.insert("type".into(), Value::String("string".into()));
+        schema.insert("enum".into(), Value::Array(variants));
+        if let Some(doc) = &descriptor.description {
+            schema.insert("description".into(), Value::String(doc.clone()));
+        }
+        self.generated
+            .insert(schema_key(name), Value::Object(schema));
+        Ok(())
+    }
+
+    fn field_schema(&mut self, field: &FieldDescriptorProto) -> Result<Value> {
+        if matches!(field_type(field), FieldType::Message)
+            && matches!(field_label(field), FieldLabel::Repeated)
+        {
+            if let Some(type_name) = &field.type_name {
+                let normalized = normalize_type_name(type_name);
+                if let Some(message) = self.registry.message(&normalized) {
+                    if message.is_map_entry {
+                        return self.map_field_schema(message);
+                    }
+                }
+            }
+        }
+
+        let schema = match field_label(field) {
+            FieldLabel::Repeated => {
+                let inner = self.scalar_schema(field)?;
+                json!({
+                    "type": "array",
+                    "items": inner
+                })
+            }
+            _ => self.scalar_schema(field)?,
+        };
+        Ok(schema)
+    }
+
+    fn scalar_schema(&mut self, field: &FieldDescriptorProto) -> Result<Value> {
+        Ok(match field_type(field) {
+            FieldType::Double => json!({"type": "number", "format": "double"}),
+            FieldType::Float => json!({"type": "number", "format": "float"}),
+            FieldType::Int64 | FieldType::Sint64 | FieldType::Sfixed64 => {
+                json!({"type": "integer", "format": "int64"})
+            }
+            FieldType::Uint64 | FieldType::Fixed64 => {
+                json!({"type": "integer", "format": "uint64"})
+            }
+            FieldType::Int32 | FieldType::Sint32 | FieldType::Sfixed32 => {
+                json!({"type": "integer", "format": "int32"})
+            }
+            FieldType::Uint32 | FieldType::Fixed32 => {
+                json!({"type": "integer", "format": "uint32"})
+            }
+            FieldType::Bool => json!({"type": "boolean"}),
+            FieldType::String => json!({"type": "string"}),
+            FieldType::Bytes => json!({"type": "string", "format": "byte"}),
+            FieldType::Enum => {
+                let type_name = field
+                    .type_name
+                    .as_ref()
+                    .ok_or_else(|| anyhow!("enum field missing type name"))?;
+                self.schema_ref(type_name)?
+            }
+            FieldType::Message => {
+                let type_name = field
+                    .type_name
+                    .as_ref()
+                    .ok_or_else(|| anyhow!("message field missing type name"))?;
+                self.schema_ref(type_name)?
+            }
+            FieldType::Group => {
+                bail!("group fields are not supported in OpenAPI export")
+            }
+        })
+    }
+
+    fn map_field_schema(&mut self, entry: &MessageInfo) -> Result<Value> {
+        let mut value_field = None;
+        for field in &entry.descriptor.field {
+            if field.number.unwrap_or_default() == 2 {
+                value_field = Some(field.clone());
+            }
+        }
+        let value_field = value_field
+            .ok_or_else(|| anyhow!("map entry {} is missing value field", entry.full_name))?;
+        let value_schema = self.scalar_schema(&value_field)?;
+        Ok(json!({
+            "type": "object",
+            "additionalProperties": value_schema
+        }))
+    }
+
+    fn finish(self) -> Map<String, Value> {
+        let mut map = Map::new();
+        for (k, v) in self.generated {
+            map.insert(k, v);
+        }
+        map
+    }
+}
+
+fn apply_schema_description(schema: &mut Value, doc: &str) {
+    let trimmed = doc.trim();
+    if trimmed.is_empty() {
+        return;
+    }
+    if let Value::Object(obj) = schema {
+        obj.insert("description".into(), Value::String(trimmed.to_string()));
+    }
+}
+
+fn builtin_type_schema(name: &str) -> Option<Value> {
+    match name {
+        ".google.protobuf.Empty" => Some(json!({"type": "object"})),
+        ".google.protobuf.Timestamp" => Some(json!({"type": "string", "format": "date-time"})),
+        ".google.protobuf.Duration" => {
+            Some(json!({"type": "string", "description": "Duration string"}))
+        }
+        ".google.protobuf.BytesValue" => {
+            Some(wrapper_schema(json!({"type": "string", "format": "byte"})))
+        }
+        ".google.protobuf.StringValue" => Some(wrapper_schema(json!({"type": "string"}))),
+        ".google.protobuf.BoolValue" => Some(wrapper_schema(json!({"type": "boolean"}))),
+        ".google.protobuf.Int32Value" | ".google.protobuf.Sint32Value" => Some(wrapper_schema(
+            json!({"type": "integer", "format": "int32"}),
+        )),
+        ".google.protobuf.UInt32Value" => Some(wrapper_schema(
+            json!({"type": "integer", "format": "uint32"}),
+        )),
+        ".google.protobuf.Int64Value" | ".google.protobuf.Sint64Value" => Some(wrapper_schema(
+            json!({"type": "integer", "format": "int64"}),
+        )),
+        ".google.protobuf.UInt64Value" => Some(wrapper_schema(
+            json!({"type": "integer", "format": "uint64"}),
+        )),
+        ".google.protobuf.DoubleValue" => Some(wrapper_schema(
+            json!({"type": "number", "format": "double"}),
+        )),
+        ".google.protobuf.FloatValue" => {
+            Some(wrapper_schema(json!({"type": "number", "format": "float"})))
+        }
+        ".google.protobuf.Any" => Some(json!({"type": "object"})),
+        _ if name.starts_with(".google.protobuf.") => Some(json!({"type": "object"})),
+        _ => None,
+    }
+}
+
+fn wrapper_schema(inner: Value) -> Value {
+    json!({
+        "type": "object",
+        "properties": { "value": inner },
+        "required": ["value"]
+    })
+}
+
+fn schema_key(full_name: &str) -> String {
+    full_name.trim_start_matches('.').to_string()
+}
+
+fn canonical_name(package: &str, path: &[String]) -> String {
+    let mut name = String::new();
+    name.push('.');
+    if !package.is_empty() {
+        name.push_str(package);
+        if !path.is_empty() {
+            name.push('.');
+        }
+    }
+    name.push_str(&path.join("."));
+    name
+}
+
+fn qualified_service_name(package: &str, name: &str) -> String {
+    if package.is_empty() {
+        name.to_string()
+    } else {
+        format!("{package}.{name}")
+    }
+}
+
+fn normalize_type_name(name: &str) -> String {
+    if name.is_empty() {
+        return String::new();
+    }
+    if name.starts_with('.') {
+        name.to_string()
+    } else {
+        format!(".{name}")
+    }
+}
+
+fn is_required_field(field: &FieldDescriptorProto) -> bool {
+    matches!(field_label(field), FieldLabel::Required)
+}
+
+fn field_label(field: &FieldDescriptorProto) -> FieldLabel {
+    FieldLabel::try_from(field.label.unwrap_or_default()).unwrap_or(FieldLabel::Optional)
+}
+
+fn field_type(field: &FieldDescriptorProto) -> FieldType {
+    FieldType::try_from(field.r#type.unwrap_or_default()).unwrap_or(FieldType::Message)
+}
+
+fn build_swagger_ui_html(spec_url: &str, cfg: &SwaggerUiConfig) -> String {
+    let spec = spec_url.replace('\'', "\\'");
+    let css = format!(
+        "{}/swagger-ui.css",
+        cfg.swagger_ui_dist.trim_end_matches('/')
+    );
+    let bundle = format!(
+        "{}/swagger-ui-bundle.js",
+        cfg.swagger_ui_dist.trim_end_matches('/')
+    );
+    let preset = format!(
+        "{}/swagger-ui-standalone-preset.js",
+        cfg.swagger_ui_dist.trim_end_matches('/')
+    );
+    let background = if cfg.dark_mode { "#0b0d10" } else { "#fafafa" };
+    format!(
+        r#"<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="utf-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1" />
+    <title>{title}</title>
+    <link rel="stylesheet" href="{css}" />
+    <style>body {{ margin: 0; background: {background}; }}</style>
+</head>
+<body>
+    <div id="swagger-ui"></div>
+    <script src="{bundle}"></script>
+    <script src="{preset}"></script>
+    <script>
+        window.ui = SwaggerUIBundle({{
+            url: '{spec}',
+            dom_id: '#swagger-ui',
+            presets: [SwaggerUIBundle.presets.apis, SwaggerUIStandalonePreset],
+        }});
+    </script>
+</body>
+</html>"#,
+        title = cfg.title,
+        css = css,
+        bundle = bundle,
+        preset = preset,
+        spec = spec,
+        background = background
+    )
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use prost_types::{FileDescriptorProto, MethodDescriptorProto};
+
+    fn test_descriptor() -> Vec<u8> {
+        let request = DescriptorProto {
+            name: Some("PingRequest".into()),
+            field: vec![FieldDescriptorProto {
+                name: Some("message".into()),
+                number: Some(1),
+                label: Some(FieldLabel::Optional as i32),
+                r#type: Some(FieldType::String as i32),
+                ..Default::default()
+            }],
+            ..Default::default()
+        };
+
+        let response = DescriptorProto {
+            name: Some("PingResponse".into()),
+            field: vec![FieldDescriptorProto {
+                name: Some("echo".into()),
+                number: Some(1),
+                label: Some(FieldLabel::Optional as i32),
+                r#type: Some(FieldType::String as i32),
+                ..Default::default()
+            }],
+            ..Default::default()
+        };
+
+        let service = ServiceDescriptorProto {
+            name: Some("TestService".into()),
+            method: vec![MethodDescriptorProto {
+                name: Some("Ping".into()),
+                input_type: Some(".test.PingRequest".into()),
+                output_type: Some(".test.PingResponse".into()),
+                ..Default::default()
+            }],
+            ..Default::default()
+        };
+
+        let file = FileDescriptorProto {
+            name: Some("test.proto".into()),
+            package: Some("test".into()),
+            message_type: vec![request, response],
+            service: vec![service],
+            ..Default::default()
+        };
+
+        let set = FileDescriptorSet { file: vec![file] };
+        let mut buf = Vec::new();
+        set.encode(&mut buf).unwrap();
+        buf
+    }
+
+    #[test]
+    fn generates_document() {
+        let descriptor = test_descriptor();
+        let sources = vec![DescriptorSource::new(
+            &descriptor,
+            vec![ServiceConfig::new("TestService", "/prpc")],
+        )];
+        let info = DocumentInfo::new("Test API", "1.0.0")
+            .with_description("test-only spec")
+            .add_server("http://localhost:8000/prpc");
+        let json = generate_document(&sources, &info).expect("spec");
+        let doc: Value = serde_json::from_str(&json).expect("valid json");
+        assert_eq!(doc["info"]["title"], "Test API");
+        assert!(
+            doc["paths"]["/prpc/Ping"]["post"]["requestBody"]["content"]["application/json"]
+                ["schema"]
+                .is_object()
+        );
+    }
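+
+    // Sanity-check sketch for the path helpers: leading slash added, trailing
+    // slash trimmed, and "/" joined without doubling.
+    #[test]
+    fn normalizes_mount_paths() {
+        assert_eq!(normalize_mount_path(""), "/");
+        assert_eq!(normalize_mount_path("prpc"), "/prpc");
+        assert_eq!(normalize_mount_path("/prpc/"), "/prpc");
+        assert_eq!(join_path("/", "Ping"), "/Ping");
+        assert_eq!(join_path("/prpc", "Worker.Version"), "/prpc/Worker.Version");
+    }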
+}
diff --git a/ra-rpc/src/rocket_helper.rs b/ra-rpc/src/rocket_helper.rs
index e2c398ce..dcdbfa15 100644
--- a/ra-rpc/src/rocket_helper.rs
+++ b/ra-rpc/src/rocket_helper.rs
@@ -4,6 +4,13 @@
 use std::convert::Infallible;
 
+#[cfg(all(feature = "rocket", feature = "openapi"))]
+use crate::openapi::{OpenApiDoc, RenderedDoc};
+#[cfg(all(feature = "rocket", feature = "openapi"))]
+use rocket::response::content::{RawHtml, RawJson};
+#[cfg(all(feature = "rocket", feature = "openapi"))]
+use std::{borrow::Cow, sync::Arc};
+
 use anyhow::{Context, Result};
 use ra_tls::{attestation::Attestation, traits::CertExt};
 use rocket::{
@@ -301,7 +308,7 @@ pub async fn handle_prpc_impl
         (Some(quote_verifier), Some(attestation)) => {
             let pubkey = request
                 .certificate
-                .expect("certificate is missing")
+                .context("certificate is missing")?
                 .public_key()
                 .raw
                 .to_vec();
@@ -356,3 +363,76 @@ impl CertExt for RocketCertificate<'_> {
         Ok(Some(ext.value.to_vec()))
     }
 }
+
+#[cfg(all(feature = "rocket", feature = "openapi"))]
+#[derive(Clone)]
+struct OpenApiState {
+    spec_json: Arc<String>,
+    ui_html: Arc<String>,
+}
+
+#[cfg(all(feature = "rocket", feature = "openapi"))]
+impl From<RenderedDoc> for OpenApiState {
+    fn from(doc: RenderedDoc) -> Self {
+        Self {
+            spec_json: doc.spec.clone(),
+            ui_html: Arc::new(doc.ui_html),
+        }
+    }
+}
+
+#[cfg(all(feature = "rocket", feature = "openapi"))]
+#[rocket::get("/openapi.json")]
+fn openapi_spec(state: &rocket::State<OpenApiState>) -> RawJson<String> {
+    RawJson((*state.spec_json).clone())
+}
+
+#[cfg(all(feature = "rocket", feature = "openapi"))]
+#[rocket::get("/docs")]
+fn openapi_docs(state: &rocket::State<OpenApiState>) -> RawHtml<String> {
+    RawHtml((*state.ui_html).clone())
+}
+
+#[cfg(all(feature = "rocket", feature = "openapi"))]
+pub fn mount_openapi_docs(
+    rocket: rocket::Rocket<rocket::Build>,
+    doc: OpenApiDoc,
+    mount_path: impl Into<Cow<'static, str>>,
+) -> rocket::Rocket<rocket::Build> {
+    let base = normalize_openapi_mount(mount_path.into());
+    let spec_url = join_mount_path(base.as_ref(), "openapi.json");
+    let rendered = doc.render(&spec_url);
+    let state = OpenApiState::from(rendered);
+    let mount_point = base.clone().into_owned();
+    rocket
+        .manage(state)
+        .mount(mount_point, rocket::routes![openapi_spec, openapi_docs])
+}
+
+#[cfg(all(feature = "rocket", feature = "openapi"))]
+fn normalize_openapi_mount(path: Cow<'static, str>) -> Cow<'static, str> {
+    let mut owned = path.into_owned();
+    if owned.is_empty() {
+        owned.push('/');
+    }
+    if !owned.starts_with('/') {
+        owned.insert(0, '/');
+    }
+    if owned.len() > 1 && owned.ends_with('/') {
+        owned.pop();
+    }
+    Cow::Owned(if owned.is_empty() { "/".into() } else { owned })
+}
+
+#[cfg(all(feature = "rocket", feature = "openapi"))]
+fn join_mount_path(base: &str, segment: &str) -> String {
+    if base == "/" {
+        format!("/{}", segment.trim_start_matches('/'))
+    } else {
+        format!(
+            "{}/{}",
+            base.trim_end_matches('/'),
+            segment.trim_start_matches('/')
+        )
+    }
+}
diff --git a/ra-tls/Cargo.toml b/ra-tls/Cargo.toml
index 9d7be749..c6451fbb 100644
--- a/ra-tls/Cargo.toml
+++ b/ra-tls/Cargo.toml
@@ -33,3 +33,4 @@
 scale.workspace = true
 cc-eventlog.workspace = true
 serde-human-bytes.workspace = true
+or-panic.workspace = true
diff --git a/ra-tls/src/attestation.rs b/ra-tls/src/attestation.rs
index 65f591d0..103f7f62 100644
--- a/ra-tls/src/attestation.rs
+++ b/ra-tls/src/attestation.rs
@@ -18,6 +18,7 @@ use x509_parser::parse_x509_certificate;
 
 use crate::{oids, traits::CertExt};
 use cc_eventlog::TdxEventLog as EventLog;
+use or_panic::ResultOrPanic;
 use serde_human_bytes as hex_bytes;
 
 /// The content type of a quote. A CVM should only generate quotes for these types.
@@ -50,7 +51,7 @@ impl QuoteContentType<'_> {
     /// Convert the content to the report data.
     pub fn to_report_data(&self, content: &[u8]) -> [u8; 64] {
         self.to_report_data_with_hash(content, "")
-            .expect("sha512 hash should not fail")
+            .or_panic("sha512 hash should not fail")
     }
 
     /// Convert the content to the report data with a specific hash algorithm.
diff --git a/ra-tls/src/cert.rs b/ra-tls/src/cert.rs
index 30939ec0..2cfa3e0c 100644
--- a/ra-tls/src/cert.rs
+++ b/ra-tls/src/cert.rs
@@ -48,7 +48,7 @@ impl CaCert {
         let key = KeyPair::from_pem(&pem_key).context("Failed to parse key")?;
         let cert =
             CertificateParams::from_ca_cert_pem(&pem_cert).context("Failed to parse cert")?;
-        let todo = "load the cert from the file directly: blocked by https://github.com/rustls/rcgen/issues/274";
+        // TODO: load the cert from the file directly, blocked by https://github.com/rustls/rcgen/issues/274
         let cert = cert.self_signed(&key).context("Failed to self-sign cert")?;
         Ok(Self {
             pem_cert,
@@ -149,7 +149,7 @@ impl CertSigningRequest {
         // Sign the encoded CSR
         let signature = key_pair
             .sign(&rng, &encoded)
-            .expect("Failed to sign CSR")
+            .context("Failed to sign CSR")?
             .as_ref()
             .to_vec();
         Ok(signature)
@@ -258,6 +258,8 @@ impl CertRequest<'_, Key> {
         }
         if let Some(ca_level) = self.ca_level {
             params.is_ca = IsCa::Ca(BasicConstraints::Constrained(ca_level));
+            params.key_usages.push(KeyUsagePurpose::KeyCertSign);
+            params.key_usages.push(KeyUsagePurpose::CrlSign);
         }
         if let Some(not_before) = self.not_before {
             params.not_before = not_before.into();
diff --git a/sdk/curl/api.md b/sdk/curl/api.md
index 73400d38..2a18c393 100644
--- a/sdk/curl/api.md
+++ b/sdk/curl/api.md
@@ -71,6 +71,7 @@ Generates an ECDSA key using the k256 elliptic curve, derived from the applicati
 |-------|------|-------------|----------|
 | `path` | string | Path for the key | `"my/key/path"` |
 | `purpose` | string | Purpose for the key. Can be any string. This is used in the signature chain. | `"signing"` or `"encryption"` |
+| `algorithm` | string | Either `secp256k1` or `ed25519`. Defaults to `secp256k1`. | `ed25519` |
 
 **Example:**
 ```bash
@@ -79,14 +80,15 @@ curl --unix-socket /var/run/dstack.sock -X POST \
   -H 'Content-Type: application/json' \
   -d '{
     "path": "my/key/path",
-    "purpose": "signing"
+    "purpose": "signing",
+    "algorithm": "ed25519"
   }'
 ```
 
 Or
 
 ```bash
-curl --unix-socket /var/run/dstack.sock http://dstack/GetKey?path=my/key/path&purpose=signing
+curl --unix-socket /var/run/dstack.sock 'http://dstack/GetKey?path=my/key/path&purpose=signing&algorithm=ed25519'
 ```
 
 **Response:**
@@ -131,7 +133,8 @@ curl --unix-socket /var/run/dstack.sock http://dstack/GetQuote?report_data=00000
 {
   "quote": "<hex-encoded quote>",
   "event_log": "quote generation log",
-  "report_data": "<hex-encoded report data>"
+  "report_data": "<hex-encoded report data>",
+  "vm_config": "<VM configuration JSON>"
 }
 ```
 
@@ -155,9 +158,11 @@ curl --unix-socket /var/run/dstack.sock http://dstack/Info
   "tcb_info": "<JSON-encoded TCB info>",
   "app_name": "my-app",
   "device_id": "<hex-encoded device ID>",
+  "mr_aggregated": "<hex-encoded aggregated MR>",
   "os_image_hash": "<hex-encoded OS image hash>",
   "key_provider_info": "<key provider info>",
-  "compose_hash": "<hex-encoded compose hash>"
+  "compose_hash": "<hex-encoded compose hash>",
+  "vm_config": "<VM configuration JSON>"
 }
 ```
 
@@ -188,6 +193,80 @@ curl --unix-socket /var/run/dstack.sock -X POST \
 
 **Response:**
 Empty response with HTTP 200 status code on success.
 
+### 6. Sign
+
+Signs a payload.
+
+**Endpoint:** `/Sign`
+
+**Request Parameters:**
+
+| Field | Type | Description | Example |
+|-------|------|-------------|----------|
+| `algorithm` | string | `ed25519`, `secp256k1_prehashed`, or `secp256k1` | `ed25519` |
+| `data` | string | Hex-encoded payload data | `deadbeef` |
+
+**Example:**
+```bash
+curl --unix-socket /var/run/dstack.sock -X POST \
+  http://dstack/Sign \
+  -H 'Content-Type: application/json' \
+  -d '{
+    "algorithm": "ed25519",
+    "data": "deadbeef"
+  }'
+```
+
+**Response:**
+```json
+{
+  "signature": "<hex-encoded signature>",
+  "signature_chain": [
+    "<hex-encoded signature>",
+    "<hex-encoded signature>",
+    "<hex-encoded signature>"
+  ],
+  "public_key": "<hex-encoded public key>"
+}
+```
+
+### 7. Verify
+
+Verifies a signature.
+
+**Endpoint:** `/Verify`
+
+**Request Parameters:**
+
+| Field | Type | Description | Example |
+|-------|------|-------------|----------|
+| `algorithm` | string | `ed25519`, `secp256k1_prehashed`, or `secp256k1` | `ed25519` |
+| `data` | string | Hex-encoded payload data | `deadbeef` |
+| `signature` | string | Hex-encoded signature | `deadbeef` |
+| `public_key` | string | Hex-encoded public key | `deadbeef` |
+
+**Example:**
+```bash
+curl --unix-socket /var/run/dstack.sock -X POST \
+  http://dstack/Verify \
+  -H 'Content-Type: application/json' \
+  -d '{
+    "algorithm": "ed25519",
+    "data": "deadbeef",
+    "signature": "deadbeef",
+    "public_key": "deadbeef"
+  }'
+```
+
+**Response:**
+```json
+{
+  "valid": true
+}
+```
+
 ## Error Responses
 
 All endpoints may return the following HTTP status codes:
diff --git a/sdk/go/README.md b/sdk/go/README.md
index a8b6a97d..4379d2e6 100644
--- a/sdk/go/README.md
+++ b/sdk/go/README.md
@@ -91,9 +91,11 @@ NOTE: Leave endpoint empty in production. You only need to add `volumes` in your
 
 #### Methods
 
 - `Info(ctx context.Context) (*InfoResponse, error)`: Retrieves information about the CVM instance.
-- `GetKey(ctx context.Context, path string, purpose string) (*GetKeyResponse, error)`: Derives a key for the given path and purpose.
+- `GetKey(ctx context.Context, path string, purpose string, algorithm string) (*GetKeyResponse, error)`: Derives a key for the given path, purpose, and algorithm.
 - `GetQuote(ctx context.Context, reportData []byte) (*GetQuoteResponse, error)`: Generates a TDX quote using SHA512 as the hash algorithm.
 - `GetTlsKey(ctx context.Context, path string, subject string, altNames []string, usageRaTls bool, usageServerAuth bool, usageClientAuth bool, randomSeed bool) (*GetTlsKeyResponse, error)`: Generates a TLS key and certificate with the given options.
+- `Sign(ctx context.Context, algorithm string, data []byte) (*SignResponse, error)`: Signs a payload.
+- `Verify(ctx context.Context, algorithm string, data []byte, signature []byte, publicKey []byte) (*VerifyResponse, error)`: Verifies a payload signature.
 
 ## Development
diff --git a/sdk/go/dstack/client.go b/sdk/go/dstack/client.go
index ce60b45c..ebff5724 100644
--- a/sdk/go/dstack/client.go
+++ b/sdk/go/dstack/client.go
@@ -38,6 +38,7 @@ type GetQuoteResponse struct {
 	Quote      []byte `json:"quote"`
 	EventLog   string `json:"event_log"`
 	ReportData []byte `json:"report_data"`
+	VmConfig   string `json:"vm_config"`
 }
 
 // Represents an event log entry in the TCB info
@@ -72,10 +73,12 @@ type InfoResponse struct {
 	TcbInfo         string `json:"tcb_info"`
 	AppName         string `json:"app_name"`
 	DeviceID        string `json:"device_id"`
+	MrAggregated    string `json:"mr_aggregated,omitempty"`
 	KeyProviderInfo string `json:"key_provider_info"`
 	// Optional: empty if OS image is not measured by KMS
 	OsImageHash string `json:"os_image_hash,omitempty"`
 	ComposeHash string `json:"compose_hash"`
+	VmConfig    string `json:"vm_config,omitempty"`
 }
 
 // DecodeTcbInfo decodes the TcbInfo string into a TcbInfo struct
@@ -348,10 +351,11 @@ func (c *DstackClient) GetTlsKey(
 }
 
 // Gets a key from the dstack service.
-func (c *DstackClient) GetKey(ctx context.Context, path string, purpose string) (*GetKeyResponse, error) { +func (c *DstackClient) GetKey(ctx context.Context, path string, purpose string, algorithm string) (*GetKeyResponse, error) { payload := map[string]interface{}{ - "path": path, - "purpose": purpose, + "path": path, + "purpose": purpose, + "algorithm": algorithm, } data, err := c.sendRPCRequest(ctx, "/GetKey", payload) @@ -422,6 +426,83 @@ func (c *DstackClient) Info(ctx context.Context) (*InfoResponse, error) { return &response, nil } +type SignResponse struct { + Signature []byte + SignatureChain [][]byte + PublicKey []byte +} + +// Signs a payload. +func (c *DstackClient) Sign(ctx context.Context, algorithm string, data []byte) (*SignResponse, error) { + payload := map[string]interface{}{ + "algorithm": algorithm, + "data": hex.EncodeToString(data), + } + + respData, err := c.sendRPCRequest(ctx, "/Sign", payload) + if err != nil { + return nil, err + } + + var response struct { + Signature string `json:"signature"` + SignatureChain []string `json:"signature_chain"` + PublicKey string `json:"public_key"` + } + if err := json.Unmarshal(respData, &response); err != nil { + return nil, fmt.Errorf("failed to unmarshal sign response: %w", err) + } + + sig, err := hex.DecodeString(response.Signature) + if err != nil { + return nil, fmt.Errorf("failed to decode signature: %w", err) + } + pubKey, err := hex.DecodeString(response.PublicKey) + if err != nil { + return nil, fmt.Errorf("failed to decode public key: %w", err) + } + + sigChain := make([][]byte, len(response.SignatureChain)) + for i, s := range response.SignatureChain { + sigChain[i], err = hex.DecodeString(s) + if err != nil { + return nil, fmt.Errorf("failed to decode signature chain element %d: %w", i, err) + } + } + + return &SignResponse{ + Signature: sig, + SignatureChain: sigChain, + PublicKey: pubKey, + }, nil +} + +type VerifyResponse struct { + Valid bool `json:"valid"` +} + +// Verifies a payload. +func (c *DstackClient) Verify(ctx context.Context, algorithm string, data []byte, signature []byte, publicKey []byte) (*VerifyResponse, error) { + payload := map[string]interface{}{ + "algorithm": algorithm, + "data": hex.EncodeToString(data), + "signature": hex.EncodeToString(signature), + "public_key": hex.EncodeToString(publicKey), + } + + respData, err := c.sendRPCRequest(ctx, "/Verify", payload) + if err != nil { + return nil, err + } + + var response VerifyResponse + if err := json.Unmarshal(respData, &response); err != nil { + return nil, fmt.Errorf("failed to unmarshal verify response: %w", err) + } + + return &response, nil +} + // EmitEvent sends an event to be extended to RTMR3 on TDX platform. // The event will be extended to RTMR3 with the provided name and payload. 
// diff --git a/sdk/go/dstack/client_test.go b/sdk/go/dstack/client_test.go index 73c5c360..ee8df0ff 100644 --- a/sdk/go/dstack/client_test.go +++ b/sdk/go/dstack/client_test.go @@ -7,6 +7,7 @@ package dstack_test import ( "bytes" "context" + "crypto/sha256" "encoding/hex" "encoding/json" "fmt" @@ -25,7 +26,7 @@ import ( func TestGetKey(t *testing.T) { client := dstack.NewDstackClient() - resp, err := client.GetKey(context.Background(), "/", "test") + resp, err := client.GetKey(context.Background(), "/", "test", "ed25519") if err != nil { t.Fatal(err) } @@ -434,7 +435,7 @@ func TestGetKeySignatureVerification(t *testing.T) { client := dstack.NewDstackClient() path := "/test/path" purpose := "test-purpose" - resp, err := client.GetKey(context.Background(), path, purpose) + resp, err := client.GetKey(context.Background(), path, purpose, "secp256k1") if err != nil { t.Fatal(err) } @@ -608,3 +609,111 @@ func compressPublicKey(uncompressedKey []byte) ([]byte, error) { } return crypto.CompressPubkey(pubKey), nil } + +func TestSignAndVerifyEd25519(t *testing.T) { + client := dstack.NewDstackClient() + dataToSign := []byte("test message for ed25519") + algorithm := "ed25519" + + signResp, err := client.Sign(context.Background(), algorithm, dataToSign) + if err != nil { + t.Fatalf("Sign() error = %v", err) + } + + if len(signResp.Signature) == 0 { + t.Error("expected signature to not be empty") + } + if len(signResp.PublicKey) == 0 { + t.Error("expected public key to not be empty") + } + if len(signResp.SignatureChain) != 3 { + t.Errorf("expected signature chain to have 3 elements, got %d", len(signResp.SignatureChain)) + } + if !bytes.Equal(signResp.Signature, signResp.SignatureChain[0]) { + t.Error("expected Signature to be the same as SignatureChain[0]") + } + + verifyResp, err := client.Verify(context.Background(), algorithm, dataToSign, signResp.Signature, signResp.PublicKey) + if err != nil { + t.Fatalf("Verify() error = %v", err) + } + + if !verifyResp.Valid { + t.Error("expected verification to be valid") + } + + badData := []byte("wrong message") + verifyResp, err = client.Verify(context.Background(), algorithm, badData, signResp.Signature, signResp.PublicKey) + if err != nil { + t.Fatalf("Verify() with bad data error = %v", err) + } + + if verifyResp.Valid { + t.Error("expected verification with bad data to be invalid") + } +} + +func TestSignAndVerifySecp256k1(t *testing.T) { + client := dstack.NewDstackClient() + dataToSign := []byte("test message for secp256k1") + algorithm := "secp256k1" + + signResp, err := client.Sign(context.Background(), algorithm, dataToSign) + if err != nil { + t.Fatalf("Sign() error = %v", err) + } + + if len(signResp.Signature) == 0 { + t.Error("expected signature to not be empty") + } + if len(signResp.PublicKey) == 0 { + t.Error("expected public key to not be empty") + } + if len(signResp.SignatureChain) != 3 { + t.Errorf("expected signature chain to have 3 elements, got %d", len(signResp.SignatureChain)) + } + + verifyResp, err := client.Verify(context.Background(), algorithm, dataToSign, signResp.Signature, signResp.PublicKey) + if err != nil { + t.Fatalf("Verify() error = %v", err) + } + + if !verifyResp.Valid { + t.Error("expected verification to be valid") + } +} + +func TestSignAndVerifySecp256k1Prehashed(t *testing.T) { + client := dstack.NewDstackClient() + dataToSign := []byte("test message for secp256k1 prehashed") + digest := sha256.Sum256(dataToSign) + algorithm := "secp256k1_prehashed" + + signResp, err := client.Sign(context.Background(), 
algorithm, digest[:]) + if err != nil { + t.Fatalf("Sign() error = %v", err) + } + + if len(signResp.Signature) == 0 { + t.Error("expected signature to not be empty") + } + + verifyResp, err := client.Verify(context.Background(), algorithm, digest[:], signResp.Signature, signResp.PublicKey) + if err != nil { + t.Fatalf("Verify() error = %v", err) + } + + if !verifyResp.Valid { + t.Error("expected verification to be valid") + } + + // Test invalid digest length for signing + invalidDigest := []byte{1, 2, 3} + _, err = client.Sign(context.Background(), algorithm, invalidDigest) + if err == nil { + t.Fatal("expected error for invalid digest length, got nil") + } + if !strings.Contains(err.Error(), "32-byte digest") { + t.Errorf("expected error to mention '32-byte digest', got: %v", err) + } +} diff --git a/sdk/go/go.mod b/sdk/go/go.mod index 95e643ca..ada79bf7 100644 --- a/sdk/go/go.mod +++ b/sdk/go/go.mod @@ -5,14 +5,13 @@ module github.com/Dstack-TEE/dstack/sdk/go -go 1.23.0 +go 1.24.0 -toolchain go1.23.8 +require github.com/ethereum/go-ethereum v1.15.7 require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect - github.com/ethereum/go-ethereum v1.15.7 // indirect github.com/holiman/uint256 v1.3.2 // indirect - golang.org/x/crypto v0.35.0 // indirect - golang.org/x/sys v0.30.0 // indirect + golang.org/x/crypto v0.45.0 // indirect + golang.org/x/sys v0.38.0 // indirect ) diff --git a/sdk/go/go.sum b/sdk/go/go.sum index 28868299..9fd544d6 100644 --- a/sdk/go/go.sum +++ b/sdk/go/go.sum @@ -1,3 +1,4 @@ +github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= @@ -5,7 +6,7 @@ github.com/ethereum/go-ethereum v1.15.7 h1:vm1XXruZVnqtODBgqFaTclzP0xAvCvQIDKyFN github.com/ethereum/go-ethereum v1.15.7/go.mod h1:+S9k+jFzlyVTNcYGvqFhzN/SFhI6vA+aOY4T5tLSPL0= github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= -golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= -golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= diff --git a/sdk/js/.gitignore b/sdk/js/.gitignore index 3c25e1e4..86bd3112 100644 --- a/sdk/js/.gitignore +++ b/sdk/js/.gitignore @@ -1,3 +1,4 @@ node_modules/ dist/ *.log +package-lock.json diff --git a/sdk/js/bun.lockb b/sdk/js/bun.lockb index dd186931..63682ed7 100755 Binary files a/sdk/js/bun.lockb and b/sdk/js/bun.lockb differ diff --git a/sdk/js/package.json b/sdk/js/package.json index 7a25c9d8..9cb6d3d8 100644 --- a/sdk/js/package.json +++ b/sdk/js/package.json @@ -1,6 +1,6 @@ { "name": "@phala/dstack-sdk", - "version": "0.5.4", + "version": "0.5.7", "description": "dstack SDK", "main": "dist/node/index.js", 
"types": "dist/node/index.d.ts", @@ -96,7 +96,7 @@ "devDependencies": { "@types/node": "latest", "typescript": "latest", - "vitest": "^2.1.3" + "vitest": "^3.2.4" }, "optionalDependencies": { "viem": "^2.21.0 <3.0.0", diff --git a/sdk/js/src/__tests__/index.test.ts b/sdk/js/src/__tests__/index.test.ts index 2236c138..dea1a1eb 100644 --- a/sdk/js/src/__tests__/index.test.ts +++ b/sdk/js/src/__tests__/index.test.ts @@ -3,6 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 import { expect, describe, it, vi } from 'vitest' +import crypto from 'crypto' // Added for prehashed test import { DstackClient, TappdClient } from '../index' describe('DstackClient', () => { @@ -25,6 +26,18 @@ describe('DstackClient', () => { expect(result).toHaveProperty('signature_chain') }) + it('should able to get key with different algorithms', async () => { + const client = new DstackClient() + const resultSecp = await client.getKey('/secp', 'test', 'secp256k1') + expect(resultSecp.key).toBeInstanceOf(Uint8Array) + expect(resultSecp.key.length).toBe(32) // secp256k1 private key size + + const resultEd = await client.getKey('/ed', 'test', 'ed25519') + expect(resultEd.key).toBeInstanceOf(Uint8Array) + expect(resultEd.key.length).toBe(32) // ed25519 private key size (seed) + }) + + it('should able to request tdx quote', async () => { const client = new DstackClient() // You can put computation result as report data to tdxQuote. NOTE: it should serializable by JSON.stringify @@ -155,6 +168,84 @@ describe('DstackClient', () => { expect(typeof isReachable).toBe('boolean') }) + describe('Sign and Verify Methods', () => { + const client = new DstackClient() + const testData = 'Test message for signing' + const badData = 'This is not the original message' + + it('should sign and verify with ed25519', async () => { + const algorithm = 'ed25519' + const signResp = await client.sign(algorithm, testData) + + expect(signResp).toHaveProperty('signature') + expect(signResp).toHaveProperty('signature_chain') + expect(signResp).toHaveProperty('public_key') + expect(signResp.signature).toBeInstanceOf(Uint8Array) + expect(signResp.public_key).toBeInstanceOf(Uint8Array) + expect(signResp.signature_chain.length).toBeGreaterThan(0) // Should have at least the signature itself + expect(signResp.signature_chain[0]).toBeInstanceOf(Uint8Array) + + // Verify success + const verifyResp = await client.verify(algorithm, testData, signResp.signature, signResp.public_key) + expect(verifyResp).toHaveProperty('valid', true) + + // Verify failure (bad data) + const verifyRespBadData = await client.verify(algorithm, badData, signResp.signature, signResp.public_key) + expect(verifyRespBadData).toHaveProperty('valid', false) + }) + + it('should sign and verify with secp256k1', async () => { + const algorithm = 'secp256k1' + const signResp = await client.sign(algorithm, testData) + + expect(signResp.signature).toBeInstanceOf(Uint8Array) + expect(signResp.public_key).toBeInstanceOf(Uint8Array) + expect(signResp.signature_chain.length).toBeGreaterThan(0) + + // Verify success + const verifyResp = await client.verify(algorithm, testData, signResp.signature, signResp.public_key) + expect(verifyResp).toHaveProperty('valid', true) + + // Verify failure (bad data) + const verifyRespBadData = await client.verify(algorithm, badData, signResp.signature, signResp.public_key) + expect(verifyRespBadData).toHaveProperty('valid', false) + }) + + it('should sign and verify with secp256k1_prehashed', async () => { + const algorithm = 'secp256k1_prehashed' + const digest = 
+      expect(digest.length).toBe(32) // Ensure it's 32 bytes
+
+      const signResp = await client.sign(algorithm, digest)
+
+      expect(signResp.signature).toBeInstanceOf(Uint8Array)
+      expect(signResp.public_key).toBeInstanceOf(Uint8Array)
+
+      // Verify success
+      const verifyResp = await client.verify(algorithm, digest, signResp.signature, signResp.public_key)
+      expect(verifyResp).toHaveProperty('valid', true)
+
+      // Verify failure (bad digest)
+      const badDigest = crypto.createHash('sha256').update(badData).digest()
+      const verifyRespBadData = await client.verify(algorithm, badDigest, signResp.signature, signResp.public_key)
+      expect(verifyRespBadData).toHaveProperty('valid', false)
+    })
+
+    it('should throw error when signing secp256k1_prehashed with incorrect data length', async () => {
+      const algorithm = 'secp256k1_prehashed'
+      const invalidData = 'This is not 32 bytes'
+      await expect(() => client.sign(algorithm, invalidData)).rejects.toThrow('Pre-hashed signing requires a 32-byte digest')
+
+      const invalidBuffer = Buffer.alloc(31) // Not 32 bytes
+      await expect(() => client.sign(algorithm, invalidBuffer)).rejects.toThrow('Pre-hashed signing requires a 32-byte digest')
+    })
+
+    it('should throw error for unsupported sign algorithm', async () => {
+      const algorithm = 'rsa'
+      await expect(() => client.sign(algorithm, testData)).rejects.toThrow() // Specific error depends on server impl.
+    })
+  })
+
   describe('deprecated methods with TappdClient', () => {
     it('should support deprecated deriveKey method with warning', async () => {
       const client = new TappdClient()
diff --git a/sdk/js/src/index.ts b/sdk/js/src/index.ts
index 4448c066..6e8e3206 100644
--- a/sdk/js/src/index.ts
+++ b/sdk/js/src/index.ts
@@ -24,6 +24,21 @@ export interface GetKeyResponse {
   signature_chain: Uint8Array[]
 }
 
+export interface SignResponse {
+  __name__: Readonly<'SignResponse'>
+
+  signature: Uint8Array
+  signature_chain: Uint8Array[]
+  public_key: Uint8Array
+}
+
+export interface VerifyResponse {
+  __name__: Readonly<'VerifyResponse'>
+
+  valid: boolean
+}
+
 export type Hex = `${string}`
 
 export type TdxQuoteHashAlgorithms =
@@ -44,24 +59,40 @@ export interface TcbInfo {
   rtmr1: string
   rtmr2: string
   rtmr3: string
+  app_compose: string
   event_log: EventLog[]
 }
 
-export interface InfoResponse {
+export type TcbInfoV03x = TcbInfo & {
+  rootfs_hash?: string
+}
+
+export type TcbInfoV05x = TcbInfo & {
+  mr_aggregated: string
+  os_image_hash: string
+  compose_hash: string
+  device_id: string
+}
+
+export interface InfoResponse<VersionTcbInfo extends TcbInfo = TcbInfo> {
   app_id: string
   instance_id: string
   app_cert: string
-  tcb_info: TcbInfo
+  tcb_info: VersionTcbInfo
   app_name: string
   device_id: string
+  mr_aggregated?: string
   os_image_hash?: string // Optional: empty if OS image is not measured by KMS
   key_provider_info: string
   compose_hash: string
+  vm_config?: string
 }
 
 export interface GetQuoteResponse {
   quote: Hex
   event_log: string
+  report_data?: Hex
+  vm_config?: string
   replayRtmrs: () => string[]
 }
@@ -132,7 +163,7 @@ export interface TlsKeyOptions {
   usageClientAuth?: boolean;
 }
 
-export class DstackClient {
+export class DstackClient<T extends TcbInfo = TcbInfoV05x> {
   protected endpoint: string
 
   constructor(endpoint: string | undefined = undefined) {
@@ -150,10 +181,11 @@ export class DstackClient {
     this.endpoint = endpoint
   }
 
-  async getKey(path: string, purpose: string = ''): Promise<GetKeyResponse> {
+  async getKey(path: string, purpose: string = '', algorithm: string = 'secp256k1'): Promise<GetKeyResponse> {
     const payload = JSON.stringify({
       path: path,
-      purpose: purpose
+      purpose: purpose,
+      algorithm: algorithm
     })
     const result = await send_rpc_request<{ key: string, signature_chain: string[] }>(this.endpoint, '/GetKey', payload)
     return Object.freeze({
@@ -210,11 +242,11 @@ export class DstackClient {
     return Object.freeze(result)
   }
 
-  async info(): Promise<InfoResponse> {
-    const result = await send_rpc_request<Omit<InfoResponse, 'tcb_info'> & { tcb_info: string }>(this.endpoint, '/Info', '{}')
+  async info(): Promise<InfoResponse<T>> {
+    const result = await send_rpc_request<Omit<InfoResponse<T>, 'tcb_info'> & { tcb_info: string }>(this.endpoint, '/Info', '{}')
     return Object.freeze({
       ...result,
-      tcb_info: JSON.parse(result.tcb_info) as TcbInfo,
+      tcb_info: JSON.parse(result.tcb_info) as T,
     })
   }
@@ -252,6 +284,62 @@ export class DstackClient {
     )
   }
 
+  /**
+   * Signs a payload using a derived key.
+   * @param algorithm The algorithm to use (e.g., "ed25519", "secp256k1", "secp256k1_prehashed")
+   * @param data The data to sign. If algorithm is "secp256k1_prehashed", this must be a 32-byte hash.
+   * @returns A SignResponse containing the signature, signature chain, and public key.
+   */
+  async sign(algorithm: string, data: string | Buffer | Uint8Array): Promise<SignResponse> {
+    const hexData = to_hex(data);
+    if (algorithm === 'secp256k1_prehashed' && hexData.length !== 64) {
+      throw new Error(`Pre-hashed signing requires a 32-byte digest, but received ${hexData.length / 2} bytes`);
+    }
+
+    const payload = JSON.stringify({
+      algorithm: algorithm,
+      data: hexData
+    });
+
+    const result = await send_rpc_request<{ signature: string, signature_chain: string[], public_key: string }>(this.endpoint, '/Sign', payload);
+
+    return Object.freeze({
+      signature: new Uint8Array(Buffer.from(result.signature, 'hex')),
+      signature_chain: result.signature_chain.map(sig => new Uint8Array(Buffer.from(sig, 'hex'))),
+      public_key: new Uint8Array(Buffer.from(result.public_key, 'hex')),
+      __name__: 'SignResponse',
+    });
+  }
+
+  /**
+   * Verifies a payload signature.
+   * @param algorithm The algorithm to use (e.g., "ed25519", "secp256k1", "secp256k1_prehashed")
+   * @param data The data that was signed.
+   * @param signature The signature to verify.
+   * @param publicKey The public key to use for verification.
+   * @returns A VerifyResponse indicating if the signature is valid.
+   */
+  async verify(
+    algorithm: string,
+    data: string | Buffer | Uint8Array,
+    signature: string | Buffer | Uint8Array,
+    publicKey: string | Buffer | Uint8Array
+  ): Promise<VerifyResponse> {
+    const payload = JSON.stringify({
+      algorithm: algorithm,
+      data: to_hex(data),
+      signature: to_hex(signature),
+      public_key: to_hex(publicKey)
+    });
+
+    const result = await send_rpc_request<{ valid: boolean }>(this.endpoint, '/Verify', payload);
+
+    return Object.freeze({
+      ...result,
+      __name__: 'VerifyResponse',
+    });
+  }
+
   //
   // Legacy methods for backward compatibility with a warning to notify users about migrating to new methods.
   // These methods are not fully compatible with their old behavior, but we keep them here until the next major version.
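The 32-byte guard on `secp256k1_prehashed` above mirrors the other SDKs in this changeset: callers hash first and send only the digest. As a cross-check, the same round trip through the Rust client added later in this diff might look like the sketch below (assumes a reachable dstack socket or simulator):

```rust
use dstack_sdk::dstack_client::DstackClient;
use sha2::{Digest, Sha256};

// Sketch: sign a SHA-256 digest with secp256k1_prehashed, then verify it.
async fn prehashed_roundtrip() -> anyhow::Result<bool> {
    let client = DstackClient::new(None);
    let digest = Sha256::digest(b"payload").to_vec(); // exactly 32 bytes
    let signed = client.sign("secp256k1_prehashed", digest.clone()).await?;
    let verified = client
        .verify(
            "secp256k1_prehashed",
            digest,
            signed.decode_signature()?,
            signed.decode_public_key()?,
        )
        .await?;
    Ok(verified.valid)
}
```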
@@ -283,7 +371,7 @@ export class DstackClient {
   }
 }
 
-export class TappdClient extends DstackClient {
+export class TappdClient extends DstackClient<TcbInfoV03x> {
   constructor(endpoint: string | undefined = undefined) {
     if (endpoint === undefined) {
       if (process.env.TAPPD_SIMULATOR_ENDPOINT) {
@@ -350,4 +438,4 @@ export class TappdClient extends DstackClient {
     })
     return Object.freeze(result)
   }
-}
\ No newline at end of file
+}
diff --git a/sdk/js/src/send-rpc-request.ts b/sdk/js/src/send-rpc-request.ts
index 6c17c344..fa6837a1 100644
--- a/sdk/js/src/send-rpc-request.ts
+++ b/sdk/js/src/send-rpc-request.ts
@@ -6,7 +6,7 @@ import http from 'http'
 import https from 'https'
 import net from 'net'
 
-export const __version__ = "0.5.0"
+export const __version__ = "0.5.6"
 
 export function send_rpc_request<T>(endpoint: string, path: string, payload: string, timeoutMs?: number): Promise<T> {
diff --git a/sdk/python/README.md b/sdk/python/README.md
index 8b81ef76..8b6d521a 100644
--- a/sdk/python/README.md
+++ b/sdk/python/README.md
@@ -556,13 +556,14 @@ Retrieves comprehensive information about the TEE instance.
 - `app_cert`: Application certificate in PEM format
 - `key_provider_info`: Key management configuration
 
-##### `get_key(path: str | None = None, purpose: str | None = None) -> GetKeyResponse`
+##### `get_key(path: str | None = None, purpose: str | None = None, algorithm: str = "secp256k1") -> GetKeyResponse`
 
 Derives a deterministic secp256k1/K256 private key for blockchain and Web3 applications. This is the primary method for obtaining cryptographic keys for wallets, signing, and other deterministic key scenarios.
 
 **Parameters:**
 - `path`: Unique identifier for key derivation (e.g., `"wallet/ethereum"`, `"signing/solana"`)
 - `purpose` (optional): Additional context for key usage (default: `""`)
+- `algorithm` (optional): Key algorithm (e.g., `"secp256k1"`, `"ed25519"`). Defaults to `"secp256k1"`.
 
 **Returns:** `GetKeyResponse`
 - `key`: 32-byte secp256k1 private key as hex string (suitable for Ethereum, Bitcoin, Solana, etc.)
@@ -636,6 +637,32 @@ Generates a fresh, random TLS key pair with X.509 certificate for TLS/SSL connec
 - **RA-TLS Support**: Optional remote attestation extension in certificates
 - **TEE-Signed**: Certificates signed by TEE-resident Certificate Authority
 
+##### `sign(algorithm: str, data: str | bytes) -> SignResponse`
+
+Signs data using a derived key.
+
+**Parameters**:
+- `algorithm`: The algorithm to use (e.g., `"ed25519"`, `"secp256k1"`, `"secp256k1_prehashed"`).
+- `data`: The data to sign. If using `"secp256k1_prehashed"`, this must be a 32-byte hash (bytes).
+
+**Returns**: `SignResponse`
+- `signature`: The resulting signature, as a hex string.
+- `signature_chain`: List of hex strings proving key authenticity.
+- `public_key`: The public key corresponding to the derived signing key, as a hex string.
+
+##### `verify(algorithm: str, data: str | bytes, signature: str | bytes, public_key: str | bytes) -> VerifyResponse`
+
+Verifies a payload signature.
+
+**Parameters**:
+- `algorithm`: The algorithm used for signing.
+- `data`: The original data that was signed.
+- `signature`: The signature to verify (bytes, or a str that is UTF-8 encoded).
+- `public_key`: The public key to use for verification (bytes, or a str that is UTF-8 encoded).
+
+**Returns**: `VerifyResponse`
+- `valid`: A bool indicating whether the signature is valid.
+
 ##### `emit_event(event: str, payload: str | bytes) -> None`
 
 Extends RTMR3 with a custom event for audit logging.
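Since every SDK in this changeset now documents the same `sign`/`verify` pair, one worked round trip may help; staying with Rust for consistency with the other sketches here (the three-element, leaf-first chain layout is what the Go and Rust tests below assert, not a stable contract):

```rust
// Sketch: ed25519 round trip with signature-chain inspection.
async fn ed25519_roundtrip(
    client: &dstack_sdk::dstack_client::DstackClient,
) -> anyhow::Result<()> {
    let resp = client.sign("ed25519", b"audit record".to_vec()).await?;
    // chain[0] is the leaf signature; the remaining links prove key authenticity.
    for (i, link) in resp.decode_signature_chain()?.iter().enumerate() {
        println!("chain[{i}]: {} bytes", link.len());
    }
    let ok = client
        .verify(
            "ed25519",
            b"audit record".to_vec(),
            resp.decode_signature()?,
            resp.decode_public_key()?,
        )
        .await?;
    anyhow::ensure!(ok.valid, "signature must verify");
    Ok(())
}
```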
@@ -811,4 +838,4 @@ DSTACK_SIMULATOR_ENDPOINT=/path/to/dstack/sdk/simulator/dstack.sock pdm run pyte ## License -Apache License 2.0 \ No newline at end of file +Apache License 2.0 diff --git a/sdk/python/pyproject.toml b/sdk/python/pyproject.toml index 1e1c9df5..e0ba5d33 100644 --- a/sdk/python/pyproject.toml +++ b/sdk/python/pyproject.toml @@ -4,7 +4,7 @@ [project] name = "dstack-sdk" -version = "0.5.0" +version = "0.5.3" description = "dstack SDK for Python" authors = [ {name = "Leechael Yim", email = "yanleech@gmail.com"}, diff --git a/sdk/python/src/dstack_sdk/__init__.py b/sdk/python/src/dstack_sdk/__init__.py index 8be9664c..2831a17d 100644 --- a/sdk/python/src/dstack_sdk/__init__.py +++ b/sdk/python/src/dstack_sdk/__init__.py @@ -10,8 +10,10 @@ from .dstack_client import GetQuoteResponse from .dstack_client import GetTlsKeyResponse from .dstack_client import InfoResponse +from .dstack_client import SignResponse from .dstack_client import TappdClient from .dstack_client import TcbInfo +from .dstack_client import VerifyResponse from .encrypt_env_vars import EnvVar from .encrypt_env_vars import encrypt_env_vars from .encrypt_env_vars import encrypt_env_vars_sync diff --git a/sdk/python/src/dstack_sdk/dstack_client.py b/sdk/python/src/dstack_sdk/dstack_client.py index 78f34844..372c3f55 100644 --- a/sdk/python/src/dstack_sdk/dstack_client.py +++ b/sdk/python/src/dstack_sdk/dstack_client.py @@ -11,8 +11,10 @@ import os from typing import Any from typing import Dict +from typing import Generic from typing import List from typing import Optional +from typing import TypeVar from typing import cast import warnings @@ -21,7 +23,7 @@ logger = logging.getLogger("dstack_sdk") -__version__ = "0.2.0" +__version__ = "0.5.2" INIT_MR = "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" @@ -130,6 +132,8 @@ def decode_signature_chain(self) -> List[bytes]: class GetQuoteResponse(BaseModel): quote: str event_log: str + report_data: str = "" + vm_config: str = "" def decode_quote(self) -> bytes: return bytes.fromhex(self.quote) @@ -148,6 +152,25 @@ def replay_rtmrs(self) -> Dict[int, str]: return rtmrs +class SignResponse(BaseModel): + signature: str + signature_chain: List[str] + public_key: str + + def decode_signature(self) -> bytes: + return bytes.fromhex(self.signature) + + def decode_signature_chain(self) -> List[bytes]: + return [bytes.fromhex(chain) for chain in self.signature_chain] + + def decode_public_key(self) -> bytes: + return bytes.fromhex(self.public_key) + + +class VerifyResponse(BaseModel): + valid: bool + + class EventLog(BaseModel): imr: int event_type: int @@ -157,38 +180,65 @@ class EventLog(BaseModel): class TcbInfo(BaseModel): + """Base TCB (Trusted Computing Base) information structure.""" + mrtd: str rtmr0: str rtmr1: str rtmr2: str rtmr3: str - os_image_hash: str = "" - compose_hash: str - device_id: str app_compose: str event_log: List[EventLog] -class InfoResponse(BaseModel): +class TcbInfoV03x(TcbInfo): + """TCB information for dstack OS version 0.3.x.""" + + rootfs_hash: Optional[str] = None + + +class TcbInfoV05x(TcbInfo): + """TCB information for dstack OS version 0.5.x.""" + + mr_aggregated: str + os_image_hash: str + compose_hash: str + device_id: str + + +# Type variable for TCB info versions +T = TypeVar("T", bound=TcbInfo) + + +class InfoResponse(BaseModel, Generic[T]): app_id: str instance_id: str app_cert: str - tcb_info: TcbInfo + tcb_info: T app_name: str device_id: str + mr_aggregated: str = "" os_image_hash: str = 
"" key_provider_info: str compose_hash: str + vm_config: str = "" @classmethod - def parse_response(cls, obj: Any) -> "InfoResponse": + def parse_response(cls, obj: Any, tcb_info_type: type[T]) -> "InfoResponse[T]": + """Parse response from service, automatically deserializing tcb_info. + + Args: + obj: Raw response object from service + tcb_info_type: The specific TcbInfo subclass to use for parsing + + """ if ( isinstance(obj, dict) and "tcb_info" in obj and isinstance(obj["tcb_info"], str) ): obj = dict(obj) - obj["tcb_info"] = TcbInfo(**json.loads(obj["tcb_info"])) + obj["tcb_info"] = tcb_info_type(**json.loads(obj["tcb_info"])) return cls(**obj) @@ -199,12 +249,19 @@ class BaseClient: class AsyncDstackClient(BaseClient): PATH_PREFIX = "/" - def __init__(self, endpoint: str | None = None, use_sync_http: bool = False): + def __init__( + self, + endpoint: str | None = None, + *, + use_sync_http: bool = False, + timeout: float = 3, + ): """Initialize async client with HTTP or Unix-socket transport. Args: endpoint: HTTP/HTTPS URL or Unix socket path use_sync_http: If True, use sync HTTP client internally + timeout: Timeout in seconds """ endpoint = get_endpoint(endpoint) @@ -212,6 +269,7 @@ def __init__(self, endpoint: str | None = None, use_sync_http: bool = False): self._client: Optional[httpx.AsyncClient] = None self._sync_client: Optional[httpx.Client] = None self._client_ref_count = 0 + self._timeout = timeout if endpoint.startswith("http://") or endpoint.startswith("https://"): self.async_transport = httpx.AsyncHTTPTransport() @@ -228,14 +286,18 @@ def __init__(self, endpoint: str | None = None, use_sync_http: bool = False): def _get_client(self) -> httpx.AsyncClient: if self._client is None: self._client = httpx.AsyncClient( - transport=self.async_transport, base_url=self.base_url, timeout=0.5 + transport=self.async_transport, + base_url=self.base_url, + timeout=self._timeout, ) return self._client def _get_sync_client(self) -> httpx.Client: if self._sync_client is None: self._sync_client = httpx.Client( - transport=self.sync_transport, base_url=self.base_url, timeout=0.5 + transport=self.sync_transport, + base_url=self.base_url, + timeout=self._timeout, ) return self._sync_client @@ -289,9 +351,14 @@ async def get_key( self, path: str | None = None, purpose: str | None = None, + algorithm: str = "secp256k1", ) -> GetKeyResponse: - """Derive a key from the given path and purpose.""" - data: Dict[str, Any] = {"path": path or "", "purpose": purpose or ""} + """Derive a key from the given path, purpose, and algorithm.""" + data: Dict[str, Any] = { + "path": path or "", + "purpose": purpose or "", + "algorithm": algorithm, + } result = await self._send_rpc_request("GetKey", data) return GetKeyResponse(**result) @@ -311,10 +378,10 @@ async def get_quote( result = await self._send_rpc_request("GetQuote", {"report_data": hex}) return GetQuoteResponse(**result) - async def info(self) -> InfoResponse: + async def info(self) -> InfoResponse[TcbInfo]: """Fetch service information including parsed TCB info.""" result = await self._send_rpc_request("Info", {}) - return InfoResponse.parse_response(result) + return InfoResponse.parse_response(result, TcbInfoV05x) async def emit_event( self, @@ -353,6 +420,40 @@ async def get_tls_key( result = await self._send_rpc_request("GetTlsKey", data) return GetTlsKeyResponse(**result) + async def sign(self, algorithm: str, data: str | bytes) -> SignResponse: + """Signs data using a derived key.""" + data_bytes = data.encode() if isinstance(data, str) else 
data + if algorithm == "secp256k1_prehashed" and len(data_bytes) != 32: + raise ValueError( + f"Pre-hashed signing requires a 32-byte digest, but received {len(data_bytes)} bytes" + ) + + hex_data = binascii.hexlify(data_bytes).decode() + payload = {"algorithm": algorithm, "data": hex_data} + result = await self._send_rpc_request("Sign", payload) + return SignResponse(**result) + + async def verify( + self, + algorithm: str, + data: str | bytes, + signature: str | bytes, + public_key: str | bytes, + ) -> VerifyResponse: + """Verify a signature.""" + data_bytes = data.encode() if isinstance(data, str) else data + sig_bytes = signature.encode() if isinstance(signature, str) else signature + pk_bytes = public_key.encode() if isinstance(public_key, str) else public_key + + payload = { + "algorithm": algorithm, + "data": binascii.hexlify(data_bytes).decode(), + "signature": binascii.hexlify(sig_bytes).decode(), + "public_key": binascii.hexlify(pk_bytes).decode(), + } + result = await self._send_rpc_request("Verify", payload) + return VerifyResponse(**result) + async def is_reachable(self) -> bool: """Return True if the service responds to a quick health call.""" try: @@ -365,21 +466,24 @@ async def is_reachable(self) -> bool: class DstackClient(BaseClient): PATH_PREFIX = "/" - def __init__(self, endpoint: str | None = None): + def __init__(self, endpoint: str | None = None, *, timeout: float = 3): """Initialize client with HTTP or Unix-socket transport. If a non-HTTP(S) endpoint is provided, it is treated as a Unix socket path and validated for existence. """ - self.async_client = AsyncDstackClient(endpoint, use_sync_http=True) + self.async_client = AsyncDstackClient( + endpoint, use_sync_http=True, timeout=timeout + ) @call_async def get_key( self, path: str | None = None, purpose: str | None = None, + algorithm: str = "secp256k1", ) -> GetKeyResponse: - """Derive a key from the given path and purpose.""" + """Derive a key from the given path, purpose, and algorithm.""" raise NotImplementedError @call_async @@ -391,7 +495,7 @@ def get_quote( raise NotImplementedError @call_async - def info(self) -> InfoResponse: + def info(self) -> InfoResponse[TcbInfo]: """Fetch service information including parsed TCB info.""" raise NotImplementedError @@ -416,6 +520,22 @@ def get_tls_key( """Request a TLS key from the service with optional parameters.""" raise NotImplementedError + @call_async + def sign(self, algorithm: str, data: str | bytes) -> SignResponse: + """Signs data using a derived key.""" + raise NotImplementedError + + @call_async + def verify( + self, + algorithm: str, + data: str | bytes, + signature: str | bytes, + public_key: str | bytes, + ) -> VerifyResponse: + """Verify a signature.""" + raise NotImplementedError + @call_async def is_reachable(self) -> bool: """Return True if the service responds to a quick health call.""" @@ -436,7 +556,13 @@ class AsyncTappdClient(AsyncDstackClient): DEPRECATED: Use ``AsyncDstackClient`` instead. 
""" - def __init__(self, endpoint: str | None = None, use_sync_http: bool = False): + def __init__( + self, + endpoint: str | None = None, + *, + use_sync_http: bool = False, + timeout: float = 3, + ): """Initialize deprecated async tappd client wrapper.""" if not use_sync_http: # Already warned in TappdClient.__init__ @@ -445,7 +571,7 @@ def __init__(self, endpoint: str | None = None, use_sync_http: bool = False): ) endpoint = get_tappd_endpoint(endpoint) - super().__init__(endpoint, use_sync_http=use_sync_http) + super().__init__(endpoint, use_sync_http=use_sync_http, timeout=timeout) # Set the correct path prefix for tappd self.PATH_PREFIX = "/prpc/Tappd." @@ -503,6 +629,11 @@ async def tdx_quote( return GetQuoteResponse(**result) + async def info(self) -> InfoResponse[TcbInfo]: + """Fetch service information including parsed TCB info.""" + result = await self._send_rpc_request("Info", {}) + return InfoResponse.parse_response(result, TcbInfoV03x) + class TappdClient(DstackClient): """Deprecated client kept for backward compatibility. @@ -510,13 +641,15 @@ class TappdClient(DstackClient): DEPRECATED: Use ``DstackClient`` instead. """ - def __init__(self, endpoint: str | None = None): + def __init__(self, endpoint: str | None = None, timeout: float = 3): """Initialize deprecated tappd client wrapper.""" emit_deprecation_warning( "TappdClient is deprecated, please use DstackClient instead" ) endpoint = get_tappd_endpoint(endpoint) - self.async_client = AsyncTappdClient(endpoint, use_sync_http=True) + self.async_client = AsyncTappdClient( + endpoint, use_sync_http=True, timeout=timeout + ) @call_async def derive_key( @@ -537,6 +670,11 @@ def tdx_quote( """Use ``get_quote`` instead (deprecated).""" raise NotImplementedError + @call_async + def info(self) -> InfoResponse[TcbInfo]: + """Fetch service information including parsed TCB info.""" + raise NotImplementedError + @call_async def __enter__(self): raise NotImplementedError diff --git a/sdk/python/tests/test_client.py b/sdk/python/tests/test_client.py index 9f948df7..437acf95 100644 --- a/sdk/python/tests/test_client.py +++ b/sdk/python/tests/test_client.py @@ -2,6 +2,7 @@ # # SPDX-License-Identifier: Apache-2.0 +import hashlib import warnings from evidence_api.tdx.quote import TdxQuote @@ -13,18 +14,28 @@ from dstack_sdk import GetKeyResponse from dstack_sdk import GetQuoteResponse from dstack_sdk import GetTlsKeyResponse +from dstack_sdk import SignResponse from dstack_sdk import TappdClient +from dstack_sdk import VerifyResponse from dstack_sdk.dstack_client import InfoResponse from dstack_sdk.dstack_client import TcbInfo def test_sync_client_get_key(): client = DstackClient() - result = client.get_key() + result = client.get_key() # Test default algorithm (secp256k1) assert isinstance(result, GetKeyResponse) assert isinstance(result.decode_key(), bytes) assert len(result.decode_key()) == 32 + # Test specifying algorithm + result_ed = client.get_key(algorithm="ed25519") + assert isinstance(result_ed, GetKeyResponse) + assert len(result_ed.decode_key()) == 32 + + with pytest.raises(Exception): # Assuming unsupported algo raises error + client.get_key(algorithm="rsa") + def test_sync_client_get_quote(): client = DstackClient() @@ -66,8 +77,18 @@ def check_info_response(result: InfoResponse): @pytest.mark.asyncio async def test_async_client_get_key(): client = AsyncDstackClient() - result = await client.get_key() + result = await client.get_key() # Test default algorithm (secp256k1) assert isinstance(result, GetKeyResponse) + assert 
isinstance(result.decode_key(), bytes) + assert len(result.decode_key()) == 32 + + # Test specifying algorithm + result_ed = await client.get_key(algorithm="ed25519") + assert isinstance(result_ed, GetKeyResponse) + assert len(result_ed.decode_key()) == 32 + + with pytest.raises(Exception): # Assuming unsupported algo raises error + await client.get_key(algorithm="rsa") @pytest.mark.asyncio @@ -257,6 +278,157 @@ def test_emit_event_validation(): assert "event name cannot be empty" in str(exc_info.value) +SIGN_TEST_DATA = b"Test message for signing" +SIGN_BAD_DATA = b"This is not the original message" + + +def test_sync_sign_verify_ed25519(): + client = DstackClient() + algo = "ed25519" + sign_resp = client.sign(algo, SIGN_TEST_DATA) + assert isinstance(sign_resp, SignResponse) + assert len(sign_resp.decode_signature()) > 0 + assert len(sign_resp.decode_public_key()) > 0 + assert len(sign_resp.signature_chain) > 0 + + verify_resp = client.verify( + algo, + SIGN_TEST_DATA, + sign_resp.decode_signature(), + sign_resp.decode_public_key(), + ) + assert isinstance(verify_resp, VerifyResponse) + assert verify_resp.valid is True + + verify_bad = client.verify( + algo, SIGN_BAD_DATA, sign_resp.decode_signature(), sign_resp.decode_public_key() + ) + assert verify_bad.valid is False + + +def test_sync_sign_verify_secp256k1(): + client = DstackClient() + algo = "secp256k1" + sign_resp = client.sign(algo, SIGN_TEST_DATA) + assert isinstance(sign_resp, SignResponse) + + verify_resp = client.verify( + algo, + SIGN_TEST_DATA, + sign_resp.decode_signature(), + sign_resp.decode_public_key(), + ) + assert verify_resp.valid is True + + verify_bad = client.verify( + algo, SIGN_BAD_DATA, sign_resp.decode_signature(), sign_resp.decode_public_key() + ) + assert verify_bad.valid is False + + +def test_sync_sign_verify_secp256k1_prehashed(): + client = DstackClient() + algo = "secp256k1_prehashed" + digest = hashlib.sha256(SIGN_TEST_DATA).digest() + assert len(digest) == 32 + + sign_resp = client.sign(algo, digest) + assert isinstance(sign_resp, SignResponse) + + verify_resp = client.verify( + algo, digest, sign_resp.decode_signature(), sign_resp.decode_public_key() + ) + assert verify_resp.valid is True + + bad_digest = hashlib.sha256(SIGN_BAD_DATA).digest() + verify_bad = client.verify( + algo, bad_digest, sign_resp.decode_signature(), sign_resp.decode_public_key() + ) + assert verify_bad.valid is False + + +def test_sync_sign_prehashed_length_error(): + client = DstackClient() + algo = "secp256k1_prehashed" + with pytest.raises(ValueError) as excinfo: + client.sign(algo, b"too short") + assert "32-byte digest" in str(excinfo.value) + + +@pytest.mark.asyncio +async def test_async_sign_verify_ed25519(): + client = AsyncDstackClient() + algo = "ed25519" + sign_resp = await client.sign(algo, SIGN_TEST_DATA) + assert isinstance(sign_resp, SignResponse) + assert len(sign_resp.decode_signature()) > 0 + assert len(sign_resp.decode_public_key()) > 0 + + verify_resp = await client.verify( + algo, + SIGN_TEST_DATA, + sign_resp.decode_signature(), + sign_resp.decode_public_key(), + ) + assert verify_resp.valid is True + + verify_bad = await client.verify( + algo, SIGN_BAD_DATA, sign_resp.decode_signature(), sign_resp.decode_public_key() + ) + assert verify_bad.valid is False + + +@pytest.mark.asyncio +async def test_async_sign_verify_secp256k1(): + client = AsyncDstackClient() + algo = "secp256k1" + sign_resp = await client.sign(algo, SIGN_TEST_DATA) + assert isinstance(sign_resp, SignResponse) + + verify_resp = await 
client.verify(
+        algo,
+        SIGN_TEST_DATA,
+        sign_resp.decode_signature(),
+        sign_resp.decode_public_key(),
+    )
+    assert verify_resp.valid is True
+
+    verify_bad = await client.verify(
+        algo, SIGN_BAD_DATA, sign_resp.decode_signature(), sign_resp.decode_public_key()
+    )
+    assert verify_bad.valid is False
+
+
+@pytest.mark.asyncio
+async def test_async_sign_verify_secp256k1_prehashed():
+    client = AsyncDstackClient()
+    algo = "secp256k1_prehashed"
+    digest = hashlib.sha256(SIGN_TEST_DATA).digest()
+
+    sign_resp = await client.sign(algo, digest)
+    assert isinstance(sign_resp, SignResponse)
+
+    verify_resp = await client.verify(
+        algo, digest, sign_resp.decode_signature(), sign_resp.decode_public_key()
+    )
+    assert verify_resp.valid is True
+
+    bad_digest = hashlib.sha256(SIGN_BAD_DATA).digest()
+    verify_bad = await client.verify(
+        algo, bad_digest, sign_resp.decode_signature(), sign_resp.decode_public_key()
+    )
+    assert verify_bad.valid is False
+
+
+@pytest.mark.asyncio
+async def test_async_sign_prehashed_length_error():
+    client = AsyncDstackClient()
+    algo = "secp256k1_prehashed"
+    with pytest.raises(ValueError) as excinfo:
+        await client.sign(algo, b"too short")
+    assert "32-byte digest" in str(excinfo.value)
+
+
 # Test deprecated TappdClient
 def test_tappd_client_deprecated():
     """Test that TappdClient shows deprecation warning."""
diff --git a/sdk/rust/Cargo.toml b/sdk/rust/Cargo.toml
index 961af1e6..3d7a97a0 100644
--- a/sdk/rust/Cargo.toml
+++ b/sdk/rust/Cargo.toml
@@ -7,7 +7,7 @@
 [package]
 name = "dstack-sdk"
-version = "0.1.0"
+version = "0.1.2"
 edition = "2021"
 license = "MIT"
 description = "This crate provides a rust client for communicating with dstack"
diff --git a/sdk/rust/README.md b/sdk/rust/README.md
index 339363d1..2ae47ad1 100644
--- a/sdk/rust/README.md
+++ b/sdk/rust/README.md
@@ -120,6 +120,12 @@ Sends an event log with associated binary payload to the runtime.
 #### `get_tls_key(...) -> GetTlsKeyResponse`
 Requests a key and X.509 certificate chain for RA-TLS or server/client authentication.
 
+#### `sign(algorithm: &str, data: Vec<u8>) -> SignResponse`
+Signs a payload using a derived key.
+
+#### `verify(algorithm: &str, data: Vec<u8>, signature: Vec<u8>, public_key: Vec<u8>) -> VerifyResponse`
+Verifies a payload signature.
+
 ### TappdClient Methods (Legacy API)
 
 #### `info(): TappdInfoResponse`
@@ -147,6 +153,10 @@ Generates a TDX quote with exactly 64 bytes of raw report data.
 - `InfoResponse`: CVM instance metadata, including image and runtime measurements
 
+- `SignResponse`: Holds a signature, signature chain, and public key
+
+- `VerifyResponse`: Holds a boolean `valid` result
+
 ## API Reference
 
 ### Running the Simulator
diff --git a/sdk/rust/examples/dstack_client_usage.rs b/sdk/rust/examples/dstack_client_usage.rs
index ffe7e71a..722dbf2d 100644
--- a/sdk/rust/examples/dstack_client_usage.rs
+++ b/sdk/rust/examples/dstack_client_usage.rs
@@ -115,5 +115,24 @@ async fn main() -> anyhow::Result<()> {
         );
     }
 
+    let data_to_sign = b"my secret message".to_vec();
+    let algorithm = "secp256k1";
+    println!("Signing data with algorithm '{}'...", algorithm);
+    let sign_resp = client.sign(algorithm, data_to_sign.clone()).await?;
+    println!("  Signature: {}", sign_resp.signature);
+    println!("  Public Key: {}", sign_resp.public_key);
+
+    let sig_bytes = sign_resp.decode_signature()?;
+    let pub_key_bytes = sign_resp.decode_public_key()?;
+
+    let verify_resp = client
+        .verify(
+            algorithm,
+            data_to_sign.clone(),
+            sig_bytes.clone(),
+            pub_key_bytes.clone(),
+        )
+        .await?;
+    println!("  Verification successful: {}", verify_resp.valid);
+
     Ok(())
 }
diff --git a/sdk/rust/src/dstack_client.rs b/sdk/rust/src/dstack_client.rs
index e2eaeba4..40a5242a 100644
--- a/sdk/rust/src/dstack_client.rs
+++ b/sdk/rust/src/dstack_client.rs
@@ -14,6 +14,21 @@ use std::env;
 
 pub use dstack_sdk_types::dstack::*;
 
+// Internal request structs for hex encoding
+#[derive(Debug, Serialize)]
+struct SignRequest<'a> {
+    algorithm: &'a str,
+    data: String,
+}
+
+#[derive(Debug, Serialize)]
+struct VerifyRequest<'a> {
+    algorithm: &'a str,
+    data: String,
+    signature: String,
+    public_key: String,
+}
+
 fn get_endpoint(endpoint: Option<&str>) -> String {
     if let Some(e) = endpoint {
         return e.to_string();
@@ -106,6 +121,7 @@ impl DstackClient {
         let data = json!({
             "path": path.unwrap_or_default(),
             "purpose": purpose.unwrap_or_default(),
+            "algorithm": "secp256k1", // Default or specify as needed
         });
         let response = self.send_rpc_request("/GetKey", &data).await?;
         let response = serde_json::from_value::<GetKeyResponse>(response)?;
@@ -146,4 +162,34 @@ impl DstackClient {
 
         Ok(response)
     }
+
+    /// Signs a payload using a derived key.
+    pub async fn sign(&self, algorithm: &str, data: Vec<u8>) -> Result<SignResponse> {
+        let payload = SignRequest {
+            algorithm,
+            data: hex_encode(data),
+        };
+        let response = self.send_rpc_request("/Sign", &payload).await?;
+        let response = serde_json::from_value::<SignResponse>(response)?;
+        Ok(response)
+    }
+
+    /// Verifies a payload signature.
+    pub async fn verify(
+        &self,
+        algorithm: &str,
+        data: Vec<u8>,
+        signature: Vec<u8>,
+        public_key: Vec<u8>,
+    ) -> Result<VerifyResponse> {
+        let payload = VerifyRequest {
+            algorithm,
+            data: hex_encode(data),
+            signature: hex_encode(signature),
+            public_key: hex_encode(public_key),
+        };
+        let response = self.send_rpc_request("/Verify", &payload).await?;
+        let response = serde_json::from_value::<VerifyResponse>(response)?;
+        Ok(response)
+    }
 }
diff --git a/sdk/rust/tests/test_client.rs b/sdk/rust/tests/test_client.rs
index 84b1407d..e7be67e0 100644
--- a/sdk/rust/tests/test_client.rs
+++ b/sdk/rust/tests/test_client.rs
@@ -7,6 +7,7 @@
 use dcap_qvl::quote::Quote;
 use dstack_sdk::dstack_client::DstackClient as AsyncDstackClient;
+use sha2::{Digest, Sha256};
 
 #[tokio::test]
 async fn test_async_client_get_key() {
@@ -95,3 +96,71 @@ async fn test_info() {
     assert!(!info.key_provider_info.is_empty());
     assert!(!info.compose_hash.is_empty());
 }
+
+#[tokio::test]
+async fn test_async_client_sign_and_verify_ed25519() {
+    let client = AsyncDstackClient::new(None);
+    let data_to_sign = b"test message for ed25519".to_vec();
+    let algorithm = "ed25519";
+
+    let sign_resp = client.sign(algorithm, data_to_sign.clone()).await.unwrap();
+    assert!(!sign_resp.signature.is_empty());
+    assert!(!sign_resp.public_key.is_empty());
+    assert_eq!(sign_resp.signature_chain.len(), 3);
+
+    let sig = sign_resp.decode_signature().unwrap();
+    let pub_key = sign_resp.decode_public_key().unwrap();
+
+    let verify_resp = client
+        .verify(
+            algorithm,
+            data_to_sign.clone(),
+            sig.clone(),
+            pub_key.clone(),
+        )
+        .await
+        .unwrap();
+    assert!(verify_resp.valid);
+
+    let bad_data = b"wrong message".to_vec();
+    let verify_resp_bad = client
+        .verify(algorithm, bad_data, sig, pub_key)
+        .await
+        .unwrap();
+    assert!(!verify_resp_bad.valid);
+}
+
+#[tokio::test]
+async fn test_async_client_sign_and_verify_secp256k1() {
+    let client = AsyncDstackClient::new(None);
+    let data_to_sign = b"test message for secp256k1".to_vec();
+    let algorithm = "secp256k1";
+
+    let sign_resp = client.sign(algorithm, data_to_sign.clone()).await.unwrap();
+    let sig = sign_resp.decode_signature().unwrap();
+    let pub_key = sign_resp.decode_public_key().unwrap();
+
+    let verify_resp = client
+        .verify(algorithm, data_to_sign, sig, pub_key)
+        .await
+        .unwrap();
+    assert!(verify_resp.valid);
+}
+
+#[tokio::test]
+async fn test_async_client_sign_and_verify_secp256k1_prehashed() {
+    let client = AsyncDstackClient::new(None);
+    let data_to_sign = b"test message for secp256k1 prehashed";
+    let digest = Sha256::digest(data_to_sign).to_vec();
+    let algorithm = "secp256k1_prehashed";
+
+    let sign_resp = client.sign(algorithm, digest.clone()).await.unwrap();
+    let sig = sign_resp.decode_signature().unwrap();
+    let pub_key = sign_resp.decode_public_key().unwrap();
+
+    let verify_resp = client
+        .verify(algorithm, digest.clone(), sig, pub_key)
+        .await
+        .unwrap();
+    assert!(verify_resp.valid);
+}
diff --git a/sdk/rust/types/Cargo.toml b/sdk/rust/types/Cargo.toml
index 1fd355ed..c9290601 100644
--- a/sdk/rust/types/Cargo.toml
+++ b/sdk/rust/types/Cargo.toml
@@ -5,7 +5,7 @@
 [package]
 name = "dstack-sdk-types"
-version = "0.1.0"
+version = "0.1.2"
 edition = "2021"
 license = "MIT"
 description = "This crate provides rust types for communication with dstack"
diff --git a/sdk/rust/types/src/dstack.rs b/sdk/rust/types/src/dstack.rs
index 28878fd3..ba151fbc 100644
--- a/sdk/rust/types/src/dstack.rs
+++ b/sdk/rust/types/src/dstack.rs
@@ -105,6 +105,12 @@ pub struct GetQuoteResponse {
     pub quote: String,
     /// The event log associated with the quote
     pub event_log: String,
+    /// The report data
+    #[serde(default)]
+    pub report_data: String,
+    /// VM configuration
+    #[serde(default)]
+    pub vm_config: String,
 }
 
 impl GetQuoteResponse {
@@ -154,6 +160,9 @@ pub struct InfoResponse {
     pub app_name: String,
     /// The device identifier
     pub device_id: String,
+    /// The aggregated measurement register
+    #[serde(default)]
+    pub mr_aggregated: String,
     /// The hash of the OS image
     /// Optional: empty if OS image is not measured by KMS
     #[serde(default)]
@@ -162,6 +171,9 @@
     pub key_provider_info: String,
     /// The hash of the compose configuration
     pub compose_hash: String,
+    /// VM configuration
+    #[serde(default)]
+    pub vm_config: String,
 }
 
 impl InfoResponse {
@@ -212,3 +224,42 @@ pub struct GetTlsKeyResponse {
     /// The chain of certificates
     pub certificate_chain: Vec<String>,
 }
+
+/// Response from a Sign request
+#[derive(Debug, Serialize, Deserialize)]
+#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
+#[cfg_attr(feature = "borsh_schema", derive(BorshSchema))]
+pub struct SignResponse {
+    /// The signature in hexadecimal format
+    pub signature: String,
+    /// The chain of signatures in hexadecimal format
+    pub signature_chain: Vec<String>,
+    /// The public key in hexadecimal format
+    pub public_key: String,
+}
+
+impl SignResponse {
+    /// Decodes the signature from hex to bytes
+    pub fn decode_signature(&self) -> Result<Vec<u8>, FromHexError> {
+        hex::decode(&self.signature)
+    }
+
+    /// Decodes the public key from hex to bytes
+    pub fn decode_public_key(&self) -> Result<Vec<u8>, FromHexError> {
+        hex::decode(&self.public_key)
+    }
+
+    /// Decodes the signature chain from hex to bytes
+    pub fn decode_signature_chain(&self) -> Result<Vec<Vec<u8>>, FromHexError> {
+        self.signature_chain.iter().map(hex::decode).collect()
+    }
+}
+
+/// Response from a Verify request
+#[derive(Debug, Serialize, Deserialize)]
+#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
+#[cfg_attr(feature = "borsh_schema", derive(BorshSchema))]
+pub struct VerifyResponse {
+    /// Whether the signature is valid
+    pub valid: bool,
+}
diff --git a/sdk/simulator/sys-config.json b/sdk/simulator/sys-config.json
index 02911d19..1b2d5b48 100644
--- a/sdk/simulator/sys-config.json
+++ b/sdk/simulator/sys-config.json
@@ -1,9 +1,9 @@
 {
     "kms_urls": [
-        "https://kms.1022.kvin.wang:12001"
+        "https://kms.1022.dstack.org:12001"
     ],
     "gateway_urls": [
-        "https://tproxy.1022.kvin.wang:12002"
+        "https://tproxy.1022.dstack.org:12002"
     ],
     "pccs_url": "",
    "docker_registry": "",
diff --git a/size-parser/Cargo.toml b/size-parser/Cargo.toml
new file mode 100644
index 00000000..1107dec7
--- /dev/null
+++ b/size-parser/Cargo.toml
@@ -0,0 +1,25 @@
+# SPDX-FileCopyrightText: © 2025 Phala Network
+#
+# SPDX-License-Identifier: Apache-2.0
+
+[package]
+name = "size-parser"
+version.workspace = true
+authors.workspace = true
+edition.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
+description = "A utility crate for parsing and handling memory sizes with serde support"
+
+[dependencies]
+anyhow.workspace = true
+serde = { workspace = true, features = ["derive"], optional = true }
+thiserror.workspace = true
+
+[features]
+default = []
+serde = ["dep:serde"]
+
+[dev-dependencies]
+serde_json = { workspace = true, features = ["std"] }
diff --git a/size-parser/README.md b/size-parser/README.md
new file mode 100644
index 00000000..5f852826
--- /dev/null
+++ b/size-parser/README.md
@@ -0,0 +1,148 @@
+# size-parser
+ +A utility crate for parsing and handling memory sizes with serde support. + +## Features + +- Parse memory size strings with various suffixes (K, M, G, T) +- Support for hexadecimal values (0x prefix) +- Optional serde serialization/deserialization support +- Human-readable formatting +- Type-safe memory size handling + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +size-parser = { path = "../size-parser" } + +# For serde support +size-parser = { path = "../size-parser", features = ["serde"] } +``` + +## Examples + +### Basic Usage + +```rust +use size_parser::MemorySize; + +// Parse from string +let size = MemorySize::parse("2G").unwrap(); +assert_eq!(size.bytes(), 2 * 1024 * 1024 * 1024); + +// Parse hexadecimal +let size = MemorySize::parse("0x1000").unwrap(); +assert_eq!(size.bytes(), 4096); + +// Create from bytes +let size = MemorySize::from_bytes(1024); +assert_eq!(size.bytes(), 1024); + +// Using FromStr trait +let size: MemorySize = "2G".parse().unwrap(); +assert_eq!(size.bytes(), 2 * 1024 * 1024 * 1024); +``` + +### Supported Formats + +- Plain numbers: `"1024"`, `"2048"` +- Hexadecimal: `"0x1000"`, `"0X2000"` +- With suffixes: `"2K"`, `"4M"`, `"1G"`, `"2T"` (case-insensitive) + +Suffixes use binary (1024-based) multipliers: +- K/k: 1024 bytes +- M/m: 1024² bytes +- G/g: 1024³ bytes +- T/t: 1024⁴ bytes + +### Human-readable Formatting + +```rust +let size = MemorySize::from_bytes(1536); +println!("{}", size); // Prints: "1.5K" + +let size = MemorySize::from_bytes(2 * 1024 * 1024 * 1024); +println!("{}", size); // Prints: "2G" +``` + +### Serde Support (with "serde" feature) + +#### Using MemorySize Type + +```rust +use size_parser::MemorySize; +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize)] +struct Config { + memory: MemorySize, +} + +let config = Config { + memory: MemorySize::parse("2G").unwrap(), +}; + +// Serializes as: {"memory": "2G"} +let json = serde_json::to_string(&config).unwrap(); + +// Can deserialize from various formats +let config: Config = serde_json::from_str(r#"{"memory": "1024M"}"#).unwrap(); +``` + +#### Using Field Attributes with Numeric Types + +You can also use serde field attributes to serialize/deserialize memory sizes directly into any numeric type that can be converted to/from u64: + +```rust +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize)] +struct MyConfig { + #[serde(with = "size_parser::human_size")] + memory_size: u64, + #[serde(with = "size_parser::human_size")] + buffer_size: usize, + #[serde(with = "size_parser::human_size")] + cache_size: u32, +} + +let config = MyConfig { + memory_size: 2 * 1024 * 1024 * 1024, // 2GB + buffer_size: 512 * 1024, // 512KB + cache_size: 64 * 1024, // 64KB +}; + +// Serializes as: {"memory_size": "2G", "buffer_size": "512K", "cache_size": "64K"} +let json = serde_json::to_string(&config).unwrap(); + +// Can deserialize from human-readable formats +let config: MyConfig = serde_json::from_str(r#"{"memory_size": "1G", "buffer_size": "256K", "cache_size": "32K"}"#).unwrap(); +assert_eq!(config.memory_size, 1024 * 1024 * 1024); +assert_eq!(config.buffer_size, 256 * 1024); +assert_eq!(config.cache_size, 32 * 1024); +``` + +**Supported numeric types:** +- `u64`, `u32`, `u16`, `u8` - unsigned integers +- `usize` - platform-dependent unsigned integer +- Any type that implements `TryFrom` and `Into` + +The generic implementation automatically handles overflow checking and provides clear error messages when values are too large for the target 
type. + +### Compatibility Function + +For compatibility with existing code, a standalone function is also provided: + +```rust +use size_parser::parse_memory_size; + +let bytes = parse_memory_size("2G").unwrap(); +assert_eq!(bytes, 2 * 1024 * 1024 * 1024); +``` + +## License + +Apache-2.0 diff --git a/size-parser/src/lib.rs b/size-parser/src/lib.rs new file mode 100644 index 00000000..b7cf3989 --- /dev/null +++ b/size-parser/src/lib.rs @@ -0,0 +1,627 @@ +// SPDX-FileCopyrightText: © 2025 Phala Network +// +// SPDX-License-Identifier: Apache-2.0 + +//! A utility crate for parsing and handling memory sizes with serde support. +//! +//! This crate provides functionality to parse memory size strings with various +//! suffixes (K, M, G, T) and hexadecimal values, with optional serde serialization +//! and deserialization support. +//! +//! # Examples +//! +//! ``` +//! use size_parser::MemorySize; +//! +//! // Parse from string +//! let size = MemorySize::parse("2G").unwrap(); +//! assert_eq!(size.bytes(), 2 * 1024 * 1024 * 1024); +//! +//! // Parse hexadecimal +//! let size = MemorySize::parse("0x1000").unwrap(); +//! assert_eq!(size.bytes(), 4096); +//! +//! // Create from bytes +//! let size = MemorySize::from_bytes(1024); +//! assert_eq!(size.bytes(), 1024); +//! +//! // Using FromStr trait +//! let size: MemorySize = "2G".parse().unwrap(); +//! assert_eq!(size.bytes(), 2 * 1024 * 1024 * 1024); +//! ``` + +use std::fmt; +use std::str::FromStr; +use thiserror::Error; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Deserializer, Serialize, Serializer}; + +/// Errors that can occur when parsing memory sizes +#[derive(Error, Debug, Clone, PartialEq)] +pub enum MemorySizeError { + #[error("Empty memory size")] + Empty, + #[error("Invalid hexadecimal value: {0}")] + InvalidHex(String), + #[error("Invalid numeric value: {0}")] + InvalidNumber(String), + #[error("Unknown memory size suffix: {0}")] + UnknownSuffix(char), + #[error("Overflow in memory size calculation")] + Overflow, +} + +/// A memory size value that can be parsed from strings with various formats +/// and optionally serialized/deserialized with serde. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct MemorySize { + bytes: u64, +} + +impl MemorySize { + /// Create a new MemorySize from a number of bytes + pub fn from_bytes(bytes: u64) -> Self { + Self { bytes } + } + + /// Get the memory size in bytes + pub fn bytes(self) -> u64 { + self.bytes + } + + /// Get the memory size in kilobytes (1024 bytes) + pub fn kilobytes(self) -> f64 { + self.bytes as f64 / 1024.0 + } + + /// Get the memory size in megabytes (1024^2 bytes) + pub fn megabytes(self) -> f64 { + self.bytes as f64 / (1024.0 * 1024.0) + } + + /// Get the memory size in gigabytes (1024^3 bytes) + pub fn gigabytes(self) -> f64 { + self.bytes as f64 / (1024.0 * 1024.0 * 1024.0) + } + + /// Get the memory size in terabytes (1024^4 bytes) + pub fn terabytes(self) -> f64 { + self.bytes as f64 / (1024.0 * 1024.0 * 1024.0 * 1024.0) + } + + /// Parse a memory size value that can be decimal or hexadecimal (with 0x prefix) + /// + /// Supports the following formats: + /// - Plain numbers: "1024", "2048" + /// - Hexadecimal: "0x1000", "0X2000" + /// - With suffixes: "2K", "4M", "1G", "2T" (case-insensitive) + /// + /// Suffixes use binary (1024-based) multipliers: + /// - K/k: 1024 bytes + /// - M/m: 1024^2 bytes + /// - G/g: 1024^3 bytes + /// - T/t: 1024^4 bytes + pub fn parse(s: &str) -> Result<Self, MemorySizeError> { + let s = s.trim(); + + if s.is_empty() { + return Err(MemorySizeError::Empty); + } + + // Handle hexadecimal values + if s.starts_with("0x") || s.starts_with("0X") { + let hex_str = &s[2..]; + let bytes = u64::from_str_radix(hex_str, 16) + .map_err(|_| MemorySizeError::InvalidHex(hex_str.to_string()))?; + return Ok(Self::from_bytes(bytes)); + } + + // Handle plain numbers (all digits) + if s.chars().all(|c| c.is_ascii_digit()) { + let bytes = s + .parse::<u64>() + .map_err(|_| MemorySizeError::InvalidNumber(s.to_string()))?; + return Ok(Self::from_bytes(bytes)); + } + + // Handle numbers with suffixes + let Some(last_char) = s.chars().last() else { + return Err(MemorySizeError::Empty); + }; + + let multiplier = match last_char.to_ascii_lowercase() { + 'k' => 1024u64, + 'm' => 1024u64.saturating_mul(1024), + 'g' => 1024u64.saturating_mul(1024).saturating_mul(1024), + 't' => 1024u64 + .saturating_mul(1024) + .saturating_mul(1024) + .saturating_mul(1024), + _ => return Err(MemorySizeError::UnknownSuffix(last_char)), + }; + let num_part = s.trim_end_matches(last_char); + let num = num_part + .parse::<u64>() + .map_err(|_| MemorySizeError::InvalidNumber(num_part.to_string()))?; + + let bytes = num + .checked_mul(multiplier) + .ok_or(MemorySizeError::Overflow)?; + + Ok(Self::from_bytes(bytes)) + } + + /// Format the memory size in a human-readable way + pub fn format_human(&self) -> String { + const UNITS: &[(&str, u64)] = &[ + ("T", 1024u64.pow(4)), + ("G", 1024u64.pow(3)), + ("M", 1024u64.pow(2)), + ("K", 1024), + ]; + + for &(unit, size) in UNITS { + if self.bytes >= size { + let value = self.bytes / size; + let remainder = self.bytes % size; + if remainder == 0 { + return format!("{}{}", value, unit); + } else { + let fractional = remainder as f64 / size as f64; + return format!("{:.1}{}", value as f64 + fractional, unit); + } + } + } + + format!("{}", self.bytes) + } +} + +impl FromStr for MemorySize { + type Err = MemorySizeError; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + Self::parse(s) + } +} + +impl fmt::Display for MemorySize { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.format_human()) + } +} + +impl From<u64> for MemorySize { + fn
from(bytes: u64) -> Self { + Self::from_bytes(bytes) + } +} + +impl From<MemorySize> for u64 { + fn from(size: MemorySize) -> Self { + size.bytes + } +} + +#[cfg(feature = "serde")] +impl Serialize for MemorySize { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + if serializer.is_human_readable() { + // Serialize as human-readable string for JSON, YAML, etc. + serializer.serialize_str(&self.format_human()) + } else { + // Serialize as raw bytes for binary formats + serializer.serialize_u64(self.bytes) + } + } +} + +#[cfg(feature = "serde")] +impl<'de> Deserialize<'de> for MemorySize { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + use serde::de::{Error, Visitor}; + use std::fmt; + + struct MemorySizeVisitor; + + impl Visitor<'_> for MemorySizeVisitor { + type Value = MemorySize; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a memory size string or u64") + } + + fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> + where + E: Error, + { + MemorySize::parse(value).map_err(E::custom) + } + + fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E> + where + E: Error, + { + Ok(MemorySize::from_bytes(value)) + } + } + + if deserializer.is_human_readable() { + // For human-readable formats like JSON, support both strings and numbers + deserializer.deserialize_any(MemorySizeVisitor) + } else { + // For binary formats, expect u64 + deserializer.deserialize_u64(MemorySizeVisitor) + } + } +} + +/// Parse a memory size string into bytes (for compatibility with existing code) +pub fn parse_memory_size(s: &str) -> Result<u64, MemorySizeError> { + MemorySize::parse(s).map(|size| size.bytes()) +} + +/// Generic serde support for using memory size parsing with field attributes +/// +/// This module provides functions that can be used with `#[serde(with = "size_parser::human_size")]` +/// to serialize and deserialize memory sizes as human-readable strings directly into any numeric type +/// that can be converted to/from u64.
+/// +/// # Example +/// +/// ```rust +/// use serde::{Deserialize, Serialize}; +/// +/// #[derive(Serialize, Deserialize)] +/// struct Config { +/// #[serde(with = "size_parser::human_size")] +/// memory_size: u64, +/// #[serde(with = "size_parser::human_size")] +/// buffer_size: usize, +/// #[serde(with = "size_parser::human_size")] +/// cache_size: u32, +/// } +/// +/// let config = Config { +/// memory_size: 2 * 1024 * 1024 * 1024, // 2GB +/// buffer_size: 512 * 1024, // 512KB +/// cache_size: 64 * 1024, // 64KB +/// }; +/// +/// // Serializes as: {"memory_size": "2G", "buffer_size": "512K", "cache_size": "64K"} +/// let json = serde_json::to_string(&config).unwrap(); +/// +/// // Can deserialize from human-readable formats +/// let config: Config = serde_json::from_str(r#"{"memory_size": "1G", "buffer_size": "256K", "cache_size": "32K"}"#).unwrap(); +/// assert_eq!(config.memory_size, 1024 * 1024 * 1024); +/// assert_eq!(config.buffer_size, 256 * 1024); +/// assert_eq!(config.cache_size, 32 * 1024); +/// ``` +#[cfg(feature = "serde")] +pub mod human_size { + use super::MemorySize; + use serde::{de::Error, Deserializer, Serializer}; + use std::convert::{TryFrom, TryInto}; + + /// Serialize a numeric memory size as a human-readable string + pub fn serialize<T, S>(value: &T, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + T: Copy + TryInto<u64>, + T::Error: std::fmt::Display, + { + let bytes: u64 = (*value).try_into().map_err(|e| { + serde::ser::Error::custom(format!("memory size conversion error: {}", e)) + })?; + let memory_size = MemorySize::from_bytes(bytes); + + if serializer.is_human_readable() { + serializer.serialize_str(&memory_size.format_human()) + } else { + serializer.serialize_u64(bytes) + } + } + + /// Deserialize a memory size from a human-readable string into any numeric type + pub fn deserialize<'de, D, T>(deserializer: D) -> Result<T, D::Error> + where + D: Deserializer<'de>, + T: TryFrom<u64>, + T::Error: std::fmt::Display, + { + use serde::de::Visitor; + use std::fmt; + + struct MemorySizeVisitor<T>(std::marker::PhantomData<T>); + + impl<T> Visitor<'_> for MemorySizeVisitor<T> + where + T: TryFrom<u64>, + T::Error: std::fmt::Display, + { + type Value = T; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a memory size string or number") + } + + fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> + where + E: Error, + { + let bytes = MemorySize::parse(value) + .map(|size| size.bytes()) + .map_err(E::custom)?; + + T::try_from(bytes) + .map_err(|e| E::custom(format!("memory size conversion error: {}", e))) + } + + fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E> + where + E: Error, + { + T::try_from(value) + .map_err(|e| E::custom(format!("memory size conversion error: {}", e))) + } + + fn visit_u32<E>(self, value: u32) -> Result<Self::Value, E> + where + E: Error, + { + T::try_from(value as u64) + .map_err(|e| E::custom(format!("memory size conversion error: {}", e))) + } + + fn visit_i32<E>(self, value: i32) -> Result<Self::Value, E> + where + E: Error, + { + if value < 0 { + return Err(E::custom("memory size cannot be negative")); + } + T::try_from(value as u64) + .map_err(|e| E::custom(format!("memory size conversion error: {}", e))) + } + + fn visit_i64<E>(self, value: i64) -> Result<Self::Value, E> + where + E: Error, + { + if value < 0 { + return Err(E::custom("memory size cannot be negative")); + } + T::try_from(value as u64) + .map_err(|e| E::custom(format!("memory size conversion error: {}", e))) + } + } + + if deserializer.is_human_readable() { + // For human-readable formats like JSON, support both strings and numbers +
deserializer.deserialize_any(MemorySizeVisitor(std::marker::PhantomData)) + } else { + // For binary formats, expect u64 + deserializer.deserialize_u64(MemorySizeVisitor(std::marker::PhantomData)) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_plain_numbers() { + assert_eq!(MemorySize::parse("1024").unwrap().bytes(), 1024); + assert_eq!(MemorySize::parse("2048").unwrap().bytes(), 2048); + assert_eq!(MemorySize::parse("0").unwrap().bytes(), 0); + } + + #[test] + fn test_parse_hexadecimal() { + assert_eq!(MemorySize::parse("0x1000").unwrap().bytes(), 4096); + assert_eq!(MemorySize::parse("0X2000").unwrap().bytes(), 8192); + assert_eq!(MemorySize::parse("0xff").unwrap().bytes(), 255); + } + + #[test] + fn test_parse_with_suffixes() { + assert_eq!(MemorySize::parse("1K").unwrap().bytes(), 1024); + assert_eq!(MemorySize::parse("2k").unwrap().bytes(), 2048); + assert_eq!(MemorySize::parse("1M").unwrap().bytes(), 1024 * 1024); + assert_eq!(MemorySize::parse("2m").unwrap().bytes(), 2 * 1024 * 1024); + assert_eq!(MemorySize::parse("1G").unwrap().bytes(), 1024 * 1024 * 1024); + assert_eq!( + MemorySize::parse("2g").unwrap().bytes(), + 2 * 1024 * 1024 * 1024 + ); + assert_eq!(MemorySize::parse("1T").unwrap().bytes(), 1024u64.pow(4)); + assert_eq!(MemorySize::parse("2t").unwrap().bytes(), 2 * 1024u64.pow(4)); + } + + #[test] + fn test_parse_errors() { + assert!(matches!(MemorySize::parse(""), Err(MemorySizeError::Empty))); + assert!(matches!( + MemorySize::parse(" "), + Err(MemorySizeError::Empty) + )); + assert!(matches!( + MemorySize::parse("abc"), + Err(MemorySizeError::UnknownSuffix('c')) + )); + assert!(matches!( + MemorySize::parse("0xgg"), + Err(MemorySizeError::InvalidHex(_)) + )); + assert!(matches!( + MemorySize::parse("abcK"), + Err(MemorySizeError::InvalidNumber(_)) + )); + } + + #[test] + fn test_format_human() { + assert_eq!(MemorySize::from_bytes(1024).format_human(), "1K"); + assert_eq!(MemorySize::from_bytes(1024 * 1024).format_human(), "1M"); + assert_eq!( + MemorySize::from_bytes(1024 * 1024 * 1024).format_human(), + "1G" + ); + assert_eq!(MemorySize::from_bytes(1024u64.pow(4)).format_human(), "1T"); + assert_eq!(MemorySize::from_bytes(1536).format_human(), "1.5K"); + assert_eq!(MemorySize::from_bytes(512).format_human(), "512"); + } + + #[test] + fn test_conversions() { + let size = MemorySize::from_bytes(2 * 1024 * 1024 * 1024); + assert_eq!(size.kilobytes(), 2.0 * 1024.0 * 1024.0); + assert_eq!(size.megabytes(), 2.0 * 1024.0); + assert_eq!(size.gigabytes(), 2.0); + assert_eq!(size.terabytes(), 2.0 / 1024.0); + } + + #[test] + fn test_from_str() { + let size: MemorySize = "2G".parse().unwrap(); + assert_eq!(size.bytes(), 2 * 1024 * 1024 * 1024); + } + + #[test] + fn test_display() { + let size = MemorySize::from_bytes(1024); + assert_eq!(format!("{}", size), "1K"); + } + + #[test] + fn test_compatibility_function() { + assert_eq!(parse_memory_size("2G").unwrap(), 2 * 1024 * 1024 * 1024); + assert_eq!(parse_memory_size("0x1000").unwrap(), 4096); + } + + #[cfg(feature = "serde")] + #[test] + fn test_serde_json() { + let size = MemorySize::from_bytes(2 * 1024 * 1024 * 1024); + + // Test serialization + let json = serde_json::to_string(&size).unwrap(); + assert_eq!(json, "\"2G\""); + + // Test deserialization from string + let deserialized: MemorySize = serde_json::from_str("\"1G\"").unwrap(); + assert_eq!(deserialized.bytes(), 1024 * 1024 * 1024); + + // Test deserialization from JSON number + let from_number: MemorySize = 
serde_json::from_str("2147483648").unwrap(); + assert_eq!(from_number.bytes(), 2147483648); + + // Test deserialization from various string formats + let from_k: MemorySize = serde_json::from_str("\"512K\"").unwrap(); + assert_eq!(from_k.bytes(), 512 * 1024); + + let from_hex: MemorySize = serde_json::from_str("\"0x1000\"").unwrap(); + assert_eq!(from_hex.bytes(), 4096); + } + + #[cfg(feature = "serde")] + #[test] + fn test_generic_human_size_field_attribute() { + use serde::{Deserialize, Serialize}; + + #[derive(Serialize, Deserialize, PartialEq, Debug)] + struct Config { + #[serde(with = "crate::human_size")] + memory_size: u64, + #[serde(with = "crate::human_size")] + buffer_size: usize, + #[serde(with = "crate::human_size")] + cache_size: u32, + // Regular field without custom serialization + threads: u32, + } + + let config = Config { + memory_size: 2 * 1024 * 1024 * 1024, // 2GB + buffer_size: 512 * 1024, // 512KB + cache_size: 64 * 1024, // 64KB + threads: 4, + }; + + // Test serialization + let json = serde_json::to_string(&config).unwrap(); + assert_eq!( + json, + r#"{"memory_size":"2G","buffer_size":"512K","cache_size":"64K","threads":4}"# + ); + + // Test deserialization from human-readable format + let json_input = + r#"{"memory_size":"1G","buffer_size":"256K","cache_size":"32K","threads":8}"#; + let deserialized: Config = serde_json::from_str(json_input).unwrap(); + + assert_eq!(deserialized.memory_size, 1024 * 1024 * 1024); + assert_eq!(deserialized.buffer_size, 256 * 1024); + assert_eq!(deserialized.cache_size, 32 * 1024); + assert_eq!(deserialized.threads, 8); + + // Test deserialization from various formats + let from_hex: Config = serde_json::from_str(r#"{"memory_size":"0x40000000","buffer_size":"0x1000","cache_size":"0x800","threads":2}"#).unwrap(); + assert_eq!(from_hex.memory_size, 0x40000000); + assert_eq!(from_hex.buffer_size, 0x1000); + assert_eq!(from_hex.cache_size, 0x800); + } + + #[cfg(feature = "serde")] + #[test] + fn test_human_size_overflow_handling() { + use serde::{Deserialize, Serialize}; + + #[derive(Serialize, Deserialize, PartialEq, Debug)] + struct Config { + #[serde(with = "crate::human_size")] + small_size: u32, + } + + // Test that values too large for u32 are handled gracefully + let json_input = r#"{"small_size":"8G"}"#; // 8GB > u32::MAX + let result: Result = serde_json::from_str(json_input); + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains("conversion error")); + } + + #[cfg(feature = "serde")] + #[test] + fn test_human_size_json_number_support() { + use serde::{Deserialize, Serialize}; + + #[derive(Serialize, Deserialize, PartialEq, Debug)] + struct Config { + #[serde(with = "crate::human_size")] + memory_size: u64, + } + + // Test deserialization from JSON string + let from_string: Config = serde_json::from_str(r#"{"memory_size":"2G"}"#).unwrap(); + assert_eq!(from_string.memory_size, 2 * 1024 * 1024 * 1024); + + // Test deserialization from JSON number + let from_number: Config = serde_json::from_str(r#"{"memory_size":2147483648}"#).unwrap(); + assert_eq!(from_number.memory_size, 2147483648); + + // Test that both produce the same result when the number matches the parsed string + let gb_2 = 2u64 * 1024 * 1024 * 1024; + let from_string_2g: Config = serde_json::from_str(r#"{"memory_size":"2G"}"#).unwrap(); + let from_number_2g: Config = + serde_json::from_str(&format!(r#"{{"memory_size":{}}}"#, gb_2)).unwrap(); + assert_eq!(from_string_2g.memory_size, from_number_2g.memory_size); + } +} diff --git 
a/sodiumbox/Cargo.toml b/sodiumbox/Cargo.toml index ffd55962..9fcd6702 100644 --- a/sodiumbox/Cargo.toml +++ b/sodiumbox/Cargo.toml @@ -17,3 +17,4 @@ xsalsa20poly1305.workspace = true salsa20.workspace = true rand_core.workspace = true blake2.workspace = true +or-panic.workspace = true diff --git a/sodiumbox/src/lib.rs b/sodiumbox/src/lib.rs index f78ab346..8375776c 100644 --- a/sodiumbox/src/lib.rs +++ b/sodiumbox/src/lib.rs @@ -18,6 +18,7 @@ use blake2::{ digest::{Update, VariableOutput}, Blake2bVar, }; +use or_panic::ResultOrPanic; use rand_core::OsRng; use xsalsa20poly1305::{aead::Aead, consts::U10, KeyInit, XSalsa20Poly1305}; @@ -94,14 +95,16 @@ pub fn seal(message: &[u8], recipient_pk: &PublicKey) -> Vec<u8> { // Compute nonce: blake2b(ephemeral_pk || recipient_pk, outlen=24) let nonce = derive_nonce(ephemeral_pk.as_bytes(), recipient_pk.as_bytes()) - .expect("Failed to derive nonce"); + .or_panic("Failed to derive nonce"); // Create the XSalsa20Poly1305 cipher with the derived key let cipher = XSalsa20Poly1305::new_from_slice(&key_bytes) - .expect("Failed to create XSalsa20Poly1305 cipher"); + .or_panic("Failed to create XSalsa20Poly1305 cipher"); // Encrypt the message - let ciphertext = cipher.encrypt(&nonce, message).expect("Encryption failed"); + let ciphertext = cipher + .encrypt(&nonce, message) + .or_panic("Encryption failed"); // Combine the ephemeral public key and ciphertext to form the sealed box let mut sealed_box = Vec::with_capacity(PUBLICKEYBYTES + ciphertext.len()); diff --git a/supervisor/Cargo.toml b/supervisor/Cargo.toml index 647a0b1e..e9e91b55 100644 --- a/supervisor/Cargo.toml +++ b/supervisor/Cargo.toml @@ -21,6 +21,7 @@ libc.workspace = true load_config.workspace = true nix = { workspace = true, features = ["resource"] } notify.workspace = true +or-panic.workspace = true rocket = { workspace = true, features = ["json"] } serde = { workspace = true, features = ["derive"] } serde_json.workspace = true diff --git a/supervisor/src/process.rs b/supervisor/src/process.rs index 769d586e..887aac14 100644 --- a/supervisor/src/process.rs +++ b/supervisor/src/process.rs @@ -6,6 +6,7 @@ use anyhow::{bail, Result}; use bon::Builder; use fs_err as fs; use notify::{RecursiveMode, Watcher}; +use or_panic::ResultOrPanic; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::io::Write; @@ -97,6 +98,7 @@ impl ProcessStateRT { } mod systime { + use or_panic::ResultOrPanic; use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; use std::time::{Duration, SystemTime, UNIX_EPOCH}; @@ -104,8 +106,12 @@ pub(crate) fn serialize<S: Serializer>( time: &Option<SystemTime>, serializer: S, ) -> Result<S::Ok, S::Error> { - time.map(|t| t.duration_since(UNIX_EPOCH).unwrap().as_secs()) - .serialize(serializer) + time.map(|t| { + t.duration_since(UNIX_EPOCH) + .or_panic("since zero should never fail") + .as_secs() + }) + .serialize(serializer) } pub(crate) fn deserialize<'de, D: Deserializer<'de>>( @@ -162,7 +168,7 @@ impl Process { } pub(crate) fn lock(&self) -> MutexGuard<'_, ProcessStateRT> { - self.state.lock().unwrap() + self.state.lock().or_panic("lock should never fail") } pub fn start(&self) -> Result<()> { @@ -199,7 +205,7 @@ // Update process state { - let mut state = self.state.lock().unwrap(); + let mut state = self.lock(); state.started_at = Some(SystemTime::now()); state.status = ProcessStatus::Running; state.pid = pid; @@ -263,7 +269,7 @@ } }; if let Some(state) = state { - let mut state = state.lock().unwrap(); + let mut state = state.lock().or_panic("lock should never fail");
state.status = next_status; state.stopped_at = Some(SystemTime::now()); } @@ -276,7 +282,7 @@ impl Process { } pub fn stop(&self) -> Result<()> { - let mut state = self.state.lock().unwrap(); + let mut state = self.lock(); state.started = false; let is_running = state.status.is_running(); let Some(stop_tx) = state.kill_tx.take() else { @@ -295,7 +301,7 @@ impl Process { } pub fn info(&self) -> ProcessInfo { - let state = self.state.lock().unwrap(); + let state = self.lock(); ProcessInfo { config: (*self.config).clone(), state: state.display(), diff --git a/supervisor/src/web_api.rs b/supervisor/src/web_api.rs index f52ebe0b..2521ee20 100644 --- a/supervisor/src/web_api.rs +++ b/supervisor/src/web_api.rs @@ -3,6 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::{anyhow, Result}; +use or_panic::ResultOrPanic; use rocket::figment::Figment; use rocket::serde::json::Json; use rocket::{delete, get, post, routes, Build, Rocket, State}; @@ -108,13 +109,13 @@ async fn handle_shutdown_signals(supervisor: Supervisor) { let ctrl_c = async { signal::ctrl_c() .await - .expect("failed to install Ctrl+C handler"); + .or_panic("failed to install Ctrl+C handler"); }; #[cfg(unix)] let terminate = async { signal::unix::signal(signal::unix::SignalKind::terminate()) - .expect("failed to install signal handler") + .or_panic("failed to install signal handler") .recv() .await; }; @@ -133,5 +134,5 @@ async fn handle_shutdown_signals(supervisor: Supervisor) { perform_shutdown(&supervisor, true) .await - .expect("Force shutdown should never return"); + .or_panic("Force shutdown should never return"); } diff --git a/tdx-attest-sys/build.rs b/tdx-attest-sys/build.rs index b943878e..9e30e05f 100644 --- a/tdx-attest-sys/build.rs +++ b/tdx-attest-sys/build.rs @@ -2,13 +2,15 @@ // // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::expect_used)] + use std::env; use std::path::PathBuf; fn main() { println!("cargo:rerun-if-changed=csrc/tdx_attest.c"); println!("cargo:rerun-if-changed=csrc/qgs_msg_lib.cpp"); - let output_path = PathBuf::from(env::var("OUT_DIR").unwrap()); + let output_path = PathBuf::from(env::var("OUT_DIR").expect("OUT_DIR not set")); bindgen::Builder::default() .header("bindings.h") .default_enum_style(bindgen::EnumVariation::ModuleConsts) diff --git a/verifier/Cargo.toml b/verifier/Cargo.toml new file mode 100644 index 00000000..78706da9 --- /dev/null +++ b/verifier/Cargo.toml @@ -0,0 +1,37 @@ +# SPDX-FileCopyrightText: © 2024-2025 Phala Network +# +# SPDX-License-Identifier: Apache-2.0 + +[package] +name = "dstack-verifier" +version.workspace = true +authors.workspace = true +edition.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +anyhow.workspace = true +clap = { workspace = true, features = ["derive"] } +figment.workspace = true +fs-err.workspace = true +hex.workspace = true +rocket = { workspace = true, features = ["json"] } +serde = { workspace = true, features = ["derive"] } +serde_json.workspace = true +tokio = { workspace = true, features = ["full"] } +tracing.workspace = true +tracing-subscriber.workspace = true +reqwest.workspace = true +tempfile.workspace = true + +# Internal dependencies +ra-tls.workspace = true +dstack-types.workspace = true +dstack-mr.workspace = true + +# Crypto/verification dependencies +dcap-qvl.workspace = true +cc-eventlog.workspace = true +sha2.workspace = true diff --git a/verifier/README.md b/verifier/README.md new file mode 100644 index 00000000..4d3d8618 --- /dev/null +++ 
b/verifier/README.md @@ -0,0 +1,168 @@ +# dstack-verifier + +An HTTP server that provides dstack quote verification services using the same verification process as the dstack KMS. + +## API Endpoints + +### POST /verify + +Verifies a dstack quote together with its event log and VM configuration. The request body can be obtained via [getQuote](https://github.com/Dstack-TEE/dstack/blob/master/sdk/curl/api.md#3-get-quote). + +**Request Body:** +```json +{ + "quote": "hex-encoded-quote", + "event_log": "hex-encoded-event-log", + "vm_config": "json-vm-config-string" +} +``` + +**Response:** +```json +{ + "is_valid": true, + "details": { + "quote_verified": true, + "event_log_verified": true, + "os_image_hash_verified": true, + "report_data": "hex-encoded-64-byte-report-data", + "tcb_status": "OK", + "advisory_ids": [], + "app_info": { + "app_id": "hex-string", + "compose_hash": "hex-string", + "instance_id": "hex-string", + "device_id": "hex-string", + "mrtd": "hex-string", + "rtmr0": "hex-string", + "rtmr1": "hex-string", + "rtmr2": "hex-string", + "rtmr3": "hex-string", + "mr_system": "hex-string", + "mr_aggregated": "hex-string", + "os_image_hash": "hex-string", + "key_provider_info": "hex-string" + } + }, + "reason": null +} +``` + +### GET /health + +Health check endpoint that returns service status. + +**Response:** +```json +{ + "status": "ok", + "service": "dstack-verifier" +} +``` + +## Configuration +You usually don't need to edit the config file; the defaults are fine unless you need to deploy your own customized OS images. + +### Configuration Options + +- `host`: Server bind address (default: "0.0.0.0") +- `port`: Server port (default: 8080) +- `image_cache_dir`: Directory for cached OS images (default: "/tmp/dstack-verifier/cache") +- `image_download_url`: URL template for downloading OS images (default: dstack official releases URL) +- `image_download_timeout_secs`: Download timeout in seconds (default: 300) +- `pccs_url`: Optional PCCS URL for quote verification + +### Example Configuration File + +```toml +host = "0.0.0.0" +port = 8080 +image_cache_dir = "/tmp/dstack-verifier/cache" +image_download_url = "https://download.dstack.org/os-images/mr_{OS_IMAGE_HASH}.tar.gz" +image_download_timeout_secs = 300 +pccs_url = "https://pccs.phala.network" +``` + +## Usage + +### Running with Cargo + +```bash +# Run with default config +cargo run --bin dstack-verifier + +# Run with custom config file +cargo run --bin dstack-verifier -- --config /path/to/config.toml + +# Set via environment variables +DSTACK_VERIFIER_PORT=8080 cargo run --bin dstack-verifier +``` + +### Running with Docker Compose + +```yaml +services: + dstack-verifier: + image: dstacktee/dstack-verifier:latest + ports: + - "8080:8080" + restart: unless-stopped +``` + +Save the docker compose file as `docker-compose.yml` and run `docker compose up -d`. + +### Request verification + +Grab a quote from your app. How to obtain a quote depends on the app. + +```bash +# Grab a quote from the demo app +curl https://712eab2f507b963e11144ae67218177e93ac2a24-3000.test0.dstack.org:12004/GetQuote?report_data=0x1234 -o quote.json +``` + +Send the quote to the verifier.
+ +```bash +$ curl -s -d @quote.json localhost:8080/verify | jq +{ + "is_valid": true, + "details": { + "quote_verified": true, + "event_log_verified": true, + "os_image_hash_verified": true, + "report_data": "12340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "tcb_status": "UpToDate", + "advisory_ids": [], + "app_info": { + "app_id": "e631a04a5d068c0e5ffd8ca60d6574ac99a18bda", + "compose_hash": "e631a04a5d068c0e5ffd8ca60d6574ac99a18bdaf0417d129d0c4ac52244d40f", + "instance_id": "712eab2f507b963e11144ae67218177e93ac2a24", + "device_id": "ee218f44a5f0a9c3233f9cc09f0cd41518f376478127feb989d5cf1292c56a01", + "mrtd": "f06dfda6dce1cf904d4e2bab1dc370634cf95cefa2ceb2de2eee127c9382698090d7a4a13e14c536ec6c9c3c8fa87077", + "rtmr0": "68102e7b524af310f7b7d426ce75481e36c40f5d513a9009c046e9d37e31551f0134d954b496a3357fd61d03f07ffe96", + "rtmr1": "a7b523278d4f914ee8df0ec80cd1c3d498cbf1152b0c5eaf65bad9425072874a3fcf891e8b01713d3d9937e3e0d26c15", + "rtmr2": "dbf4924c07f5066f3dc6859844184344306aa3263817153dcaee85af97d23e0c0b96efe0731d8865a8747e51b9e351ac", + "rtmr3": "5e7d8d84317343d28d73031d0be3c75f25facb1b20c9835a44582b8b0115de1acfe2d19350437dbd63846bcc5d7bf328", + "mr_system": "145010fa227e6c2537ad957c64e4a8486fcbfd8265ddfb359168b59afcff1d05", + "mr_aggregated": "52f6d7ccbee1bfa870709e8ff489e016e2e5c25a157b7e22ef1ea68fce763694", + "os_image_hash": "b6420818b356b198bdd70f076079aa0299a20279b87ab33ada7b2770ef432a5a", + "key_provider_info": "7b226e616d65223a226b6d73222c226964223a223330353933303133303630373261383634386365336430323031303630383261383634386365336430333031303730333432303030343139623234353764643962386161363434366439383066313336666666373831326563643663373737343065656230653238623130643536633063303030323861356236653539646365613330376435383362643166373037363965396331313664663262636662313735386139356438363133653764653163383438326330227d" + } + }, + "reason": null +} +``` + +## Verification Process + +The verifier performs three main verification steps: + +1. **Quote Verification**: Validates the TDX quote using dcap-qvl, checking the quote signature and TCB status +2. **Event Log Verification**: Replays event logs to ensure RTMR values match and extracts app information +3. **OS Image Hash Verification**: + - Automatically downloads OS images if not cached locally + - Uses dstack-mr to compute expected measurements + - Compares against the verified measurements from the quote + +All three steps must pass for the verification to be considered valid. 
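The same POST /verify call can be made programmatically. Below is a minimal client sketch in Rust, not part of the verifier itself; it assumes the `reqwest` crate (with its `json` feature), `tokio`, and `serde_json` as dependencies, placeholder quote material, and a verifier listening on localhost:8080 as configured above:

```rust
use serde_json::{json, Value};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Quote material as returned by the app's GetQuote endpoint.
    // These strings are placeholders; use the fields of a real quote.json.
    let body = json!({
        "quote": "hex-encoded-quote",
        "event_log": "hex-encoded-event-log",
        "vm_config": "json-vm-config-string"
    });

    // POST the payload to the verifier and decode the JSON response.
    let resp: Value = reqwest::Client::new()
        .post("http://localhost:8080/verify")
        .json(&body)
        .send()
        .await?
        .json()
        .await?;

    // `is_valid` is true only if all verification steps passed;
    // `reason` carries a human-readable explanation on failure.
    println!("is_valid: {}", resp["is_valid"]);
    println!("reason: {}", resp["reason"]);
    Ok(())
}
```

Treat the `details` object as diagnostics: gate trust decisions on `is_valid` plus your own checks of `report_data` and the `app_info` measurements.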
diff --git a/verifier/builder/Dockerfile b/verifier/builder/Dockerfile new file mode 100644 index 00000000..cef0d128 --- /dev/null +++ b/verifier/builder/Dockerfile @@ -0,0 +1,86 @@ +# SPDX-FileCopyrightText: © 2025 Phala Network +# +# SPDX-License-Identifier: Apache-2.0 + +FROM rust:1.86.0@sha256:300ec56abce8cc9448ddea2172747d048ed902a3090e6b57babb2bf19f754081 AS verifier-builder +COPY builder/shared /build/shared +ARG DSTACK_REV +ARG DSTACK_SRC_URL=https://github.com/Dstack-TEE/dstack.git +WORKDIR /build +RUN ./shared/pin-packages.sh ./shared/builder-pinned-packages.txt +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + git \ + build-essential \ + musl-tools \ + libssl-dev \ + protobuf-compiler \ + libprotobuf-dev \ + clang \ + libclang-dev \ + pkg-config \ + ca-certificates \ + curl && \ + rm -rf /var/lib/apt/lists/* /var/log/* /var/cache/ldconfig/aux-cache +RUN git clone ${DSTACK_SRC_URL} && \ + cd dstack && \ + git checkout ${DSTACK_REV} +RUN rustup target add x86_64-unknown-linux-musl +RUN cd dstack && cargo build --release -p dstack-verifier --target x86_64-unknown-linux-musl +RUN echo "${DSTACK_REV}" > /build/.GIT_REV + +FROM debian:bookworm@sha256:0d8498a0e9e6a60011df39aab78534cfe940785e7c59d19dfae1eb53ea59babe AS acpi-builder +COPY builder/shared /build +WORKDIR /build +ARG QEMU_REV=dbcec07c0854bf873d346a09e87e4c993ccf2633 +RUN ./pin-packages.sh ./qemu-pinned-packages.txt && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + git \ + libslirp-dev \ + python3-pip \ + ninja-build \ + pkg-config \ + libglib2.0-dev \ + python3-sphinx \ + python3-sphinx-rtd-theme \ + build-essential \ + flex \ + bison && \ + rm -rf /var/lib/apt/lists/* /var/log/* /var/cache/ldconfig/aux-cache +RUN git clone https://github.com/kvinwang/qemu-tdx.git --depth 1 --branch dstack-qemu-9.2.1 --single-branch && \ + cd qemu-tdx && git fetch --depth 1 origin ${QEMU_REV} && \ + git checkout ${QEMU_REV} && \ + ../config-qemu.sh ./build /usr/local && \ + cd build && \ + ninja && \ + strip qemu-system-x86_64 && \ + install -m 755 qemu-system-x86_64 /usr/local/bin/dstack-acpi-tables && \ + cd ../ && \ + install -d /usr/local/share/qemu && \ + install -m 644 pc-bios/efi-virtio.rom /usr/local/share/qemu/ && \ + install -m 644 pc-bios/kvmvapic.bin /usr/local/share/qemu/ && \ + install -m 644 pc-bios/linuxboot_dma.bin /usr/local/share/qemu/ && \ + cd .. 
&& rm -rf qemu-tdx + +FROM debian:bookworm@sha256:0d8498a0e9e6a60011df39aab78534cfe940785e7c59d19dfae1eb53ea59babe +COPY builder/shared /build +WORKDIR /build +RUN ./pin-packages.sh ./pinned-packages.txt && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + ca-certificates \ + curl \ + libglib2.0-0 \ + libslirp0 \ + && rm -rf /var/lib/apt/lists/* /var/log/* /var/cache/ldconfig/aux-cache +COPY --from=verifier-builder /build/dstack/target/x86_64-unknown-linux-musl/release/dstack-verifier /usr/local/bin/dstack-verifier +COPY --from=verifier-builder /build/.GIT_REV /etc/ +COPY --from=acpi-builder /usr/local/bin/dstack-acpi-tables /usr/local/bin/dstack-acpi-tables +COPY --from=acpi-builder /usr/local/share/qemu /usr/local/share/qemu +RUN mkdir -p /etc/dstack +COPY dstack-verifier.toml /etc/dstack/dstack-verifier.toml +WORKDIR /var/lib/dstack-verifier +EXPOSE 8080 +ENTRYPOINT ["/usr/local/bin/dstack-verifier"] +CMD ["--config", "/etc/dstack/dstack-verifier.toml"] diff --git a/verifier/builder/build-image.sh b/verifier/builder/build-image.sh new file mode 100755 index 00000000..75bcca79 --- /dev/null +++ b/verifier/builder/build-image.sh @@ -0,0 +1,87 @@ +#!/bin/bash + +# SPDX-FileCopyrightText: © 2025 Phala Network +# +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +CONTEXT_DIR=$(dirname "$SCRIPT_DIR") +REPO_ROOT=$(git -C "$SCRIPT_DIR" rev-parse --show-toplevel) +SHARED_DIR="$SCRIPT_DIR/shared" +SHARED_GIT_PATH=$(realpath --relative-to="$REPO_ROOT" "$SHARED_DIR") +DOCKERFILE="$SCRIPT_DIR/Dockerfile" + +NO_CACHE=${NO_CACHE:-} +NAME=${1:-} +if [ -z "$NAME" ]; then + echo "Usage: $0 <image-name>[:<tag>]" >&2 + exit 1 +fi + +extract_packages() { + local image_name=$1 + local pkg_list_file=$2 + if [ -z "$pkg_list_file" ]; then + return + fi + docker run --rm --entrypoint bash "$image_name" \ + -c "dpkg -l | grep '^ii' | awk '{print \$2\"=\"\$3}' | sort" \ + >"$pkg_list_file" +} + +docker_build() { + local image_name=$1 + local target=$2 + local pkg_list_file=$3 + + local commit_timestamp + commit_timestamp=$(git -C "$REPO_ROOT" show -s --format=%ct "$GIT_REV") + + local args=( + --builder buildkit_20 + --progress=plain + --output type=docker,name="$image_name",rewrite-timestamp=true + --build-arg SOURCE_DATE_EPOCH="$commit_timestamp" + --build-arg DSTACK_REV="$GIT_REV" + --build-arg DSTACK_SRC_URL="$DSTACK_SRC_URL" + ) + + if [ -n "$NO_CACHE" ]; then + args+=(--no-cache) + fi + + if [ -n "$target" ]; then + args+=(--target "$target") + fi + + docker buildx build "${args[@]}" \ + --file "$DOCKERFILE" \ + "$CONTEXT_DIR" + + extract_packages "$image_name" "$pkg_list_file" +} + +if !
docker buildx inspect buildkit_20 &>/dev/null; then + docker buildx create --use --driver-opt image=moby/buildkit:v0.20.2 --name buildkit_20 +fi + +mkdir -p "$SHARED_DIR" +touch "$SHARED_DIR/builder-pinned-packages.txt" +touch "$SHARED_DIR/qemu-pinned-packages.txt" +touch "$SHARED_DIR/pinned-packages.txt" + +GIT_REV=${GIT_REV:-HEAD} +GIT_REV=$(git -C "$REPO_ROOT" rev-parse "$GIT_REV") +DSTACK_SRC_URL=${DSTACK_SRC_URL:-https://github.com/Dstack-TEE/dstack.git} + +docker_build "$NAME" "" "$SHARED_DIR/pinned-packages.txt" +docker_build "verifier-builder-temp" "verifier-builder" "$SHARED_DIR/builder-pinned-packages.txt" +docker_build "verifier-acpi-builder-temp" "acpi-builder" "$SHARED_DIR/qemu-pinned-packages.txt" + +git_status=$(git -C "$REPO_ROOT" status --porcelain -- "$SHARED_GIT_PATH") +if [ -n "$git_status" ]; then + echo "The working tree has updates in $SHARED_GIT_PATH. Commit or stash before re-running." >&2 + exit 1 +fi diff --git a/verifier/builder/shared/builder-pinned-packages.txt b/verifier/builder/shared/builder-pinned-packages.txt new file mode 100644 index 00000000..69c95e45 --- /dev/null +++ b/verifier/builder/shared/builder-pinned-packages.txt @@ -0,0 +1,435 @@ +adduser=3.134 +apt=2.6.1 +autoconf=2.71-3 +automake=1:1.16.5-1.3 +autotools-dev=20220109.1 +base-files=12.4+deb12u10 +base-passwd=3.6.1 +bash=5.2.15-2+b7 +binutils-common:amd64=2.40-2 +binutils-x86-64-linux-gnu=2.40-2 +binutils=2.40-2 +bsdutils=1:2.38.1-5+deb12u3 +build-essential=12.9 +bzip2=1.0.8-5+b1 +ca-certificates=20230311 +clang-14=1:14.0.6-12 +clang=1:14.0-55.7~deb12u1 +comerr-dev:amd64=2.1-1.47.0-2 +coreutils=9.1-1 +cpp-12=12.2.0-14+deb12u1 +cpp=4:12.2.0-3 +curl=7.88.1-10+deb12u12 +dash=0.5.12-2 +debconf=1.5.82 +debian-archive-keyring=2023.3+deb12u1 +debianutils=5.7-0.5~deb12u1 +default-libmysqlclient-dev:amd64=1.1.0 +diffutils=1:3.8-4 +dirmngr=2.2.40-1.1 +dpkg-dev=1.21.22 +dpkg=1.21.22 +e2fsprogs=1.47.0-2 +file=1:5.44-3 +findutils=4.9.0-4 +fontconfig-config=2.14.1-4 +fontconfig=2.14.1-4 +fonts-dejavu-core=2.37-6 +g++-12=12.2.0-14+deb12u1 +g++=4:12.2.0-3 +gcc-12-base:amd64=12.2.0-14+deb12u1 +gcc-12=12.2.0-14+deb12u1 +gcc=4:12.2.0-3 +gir1.2-freedesktop:amd64=1.74.0-3 +gir1.2-gdkpixbuf-2.0:amd64=2.42.10+dfsg-1+deb12u1 +gir1.2-glib-2.0:amd64=1.74.0-3 +gir1.2-rsvg-2.0:amd64=2.54.7+dfsg-1~deb12u1 +git-man=1:2.39.5-0+deb12u2 +git=1:2.39.5-0+deb12u2 +gnupg-l10n=2.2.40-1.1 +gnupg-utils=2.2.40-1.1 +gnupg=2.2.40-1.1 +gpg-agent=2.2.40-1.1 +gpg-wks-client=2.2.40-1.1 +gpg-wks-server=2.2.40-1.1 +gpg=2.2.40-1.1 +gpgconf=2.2.40-1.1 +gpgsm=2.2.40-1.1 +gpgv=2.2.40-1.1 +grep=3.8-5 +gzip=1.12-1 +hicolor-icon-theme=0.17-2 +hostname=3.23+nmu1 +icu-devtools=72.1-3 +imagemagick-6-common=8:6.9.11.60+dfsg-1.6+deb12u2 +imagemagick-6.q16=8:6.9.11.60+dfsg-1.6+deb12u2 +imagemagick=8:6.9.11.60+dfsg-1.6+deb12u2 +init-system-helpers=1.65.2 +krb5-multidev:amd64=1.20.1-2+deb12u2 +libacl1:amd64=2.3.1-3 +libaom3:amd64=3.6.0-1+deb12u1 +libapr1:amd64=1.7.2-3+deb12u1 +libaprutil1:amd64=1.6.3-1 +libapt-pkg6.0:amd64=2.6.1 +libasan8:amd64=12.2.0-14+deb12u1 +libassuan0:amd64=2.5.5-5 +libatomic1:amd64=12.2.0-14+deb12u1 +libattr1:amd64=1:2.5.1-4 +libaudit-common=1:3.0.9-1 +libaudit1:amd64=1:3.0.9-1 +libbinutils:amd64=2.40-2 +libblkid-dev:amd64=2.38.1-5+deb12u3 +libblkid1:amd64=2.38.1-5+deb12u3 +libbrotli-dev:amd64=1.0.9-2+b6 +libbrotli1:amd64=1.0.9-2+b6 +libbsd0:amd64=0.11.7-2 +libbz2-1.0:amd64=1.0.8-5+b1 +libbz2-dev:amd64=1.0.8-5+b1 +libc-bin=2.36-9+deb12u10 +libc-dev-bin=2.36-9+deb12u10 +libc6-dev:amd64=2.36-9+deb12u10 +libc6:amd64=2.36-9+deb12u10 
+libcairo-gobject2:amd64=1.16.0-7 +libcairo-script-interpreter2:amd64=1.16.0-7 +libcairo2-dev:amd64=1.16.0-7 +libcairo2:amd64=1.16.0-7 +libcap-ng0:amd64=0.8.3-1+b3 +libcap2:amd64=1:2.66-4 +libcbor0.8:amd64=0.8.0-2+b1 +libcc1-0:amd64=12.2.0-14+deb12u1 +libclang-14-dev=1:14.0.6-12 +libclang-common-14-dev=1:14.0.6-12 +libclang-cpp14=1:14.0.6-12 +libclang-dev=1:14.0-55.7~deb12u1 +libclang1-14=1:14.0.6-12 +libcom-err2:amd64=1.47.0-2 +libcrypt-dev:amd64=1:4.4.33-2 +libcrypt1:amd64=1:4.4.33-2 +libctf-nobfd0:amd64=2.40-2 +libctf0:amd64=2.40-2 +libcurl3-gnutls:amd64=7.88.1-10+deb12u12 +libcurl4-openssl-dev:amd64=7.88.1-10+deb12u12 +libcurl4:amd64=7.88.1-10+deb12u12 +libdatrie1:amd64=0.2.13-2+b1 +libdav1d6:amd64=1.0.0-2+deb12u1 +libdb-dev:amd64=5.3.2 +libdb5.3-dev=5.3.28+dfsg2-1 +libdb5.3:amd64=5.3.28+dfsg2-1 +libde265-0:amd64=1.0.11-1+deb12u2 +libdebconfclient0:amd64=0.270 +libdeflate-dev:amd64=1.14-1 +libdeflate0:amd64=1.14-1 +libdjvulibre-dev:amd64=3.5.28-2+b1 +libdjvulibre-text=3.5.28-2 +libdjvulibre21:amd64=3.5.28-2+b1 +libdpkg-perl=1.21.22 +libedit2:amd64=3.1-20221030-2 +libelf1:amd64=0.188-2.1 +liberror-perl=0.17029-2 +libevent-2.1-7:amd64=2.1.12-stable-8 +libevent-core-2.1-7:amd64=2.1.12-stable-8 +libevent-dev=2.1.12-stable-8 +libevent-extra-2.1-7:amd64=2.1.12-stable-8 +libevent-openssl-2.1-7:amd64=2.1.12-stable-8 +libevent-pthreads-2.1-7:amd64=2.1.12-stable-8 +libexif-dev:amd64=0.6.24-1+b1 +libexif12:amd64=0.6.24-1+b1 +libexpat1-dev:amd64=2.5.0-1+deb12u1 +libexpat1:amd64=2.5.0-1+deb12u1 +libext2fs2:amd64=1.47.0-2 +libffi-dev:amd64=3.4.4-1 +libffi8:amd64=3.4.4-1 +libfftw3-double3:amd64=3.3.10-1 +libfido2-1:amd64=1.12.0-2+b1 +libfontconfig-dev:amd64=2.14.1-4 +libfontconfig1:amd64=2.14.1-4 +libfreetype-dev:amd64=2.12.1+dfsg-5+deb12u4 +libfreetype6-dev:amd64=2.12.1+dfsg-5+deb12u4 +libfreetype6:amd64=2.12.1+dfsg-5+deb12u4 +libfribidi0:amd64=1.0.8-2.1 +libgc1:amd64=1:8.2.2-3 +libgcc-12-dev:amd64=12.2.0-14+deb12u1 +libgcc-s1:amd64=12.2.0-14+deb12u1 +libgcrypt20:amd64=1.10.1-3 +libgdbm-compat4:amd64=1.23-3 +libgdbm-dev:amd64=1.23-3 +libgdbm6:amd64=1.23-3 +libgdk-pixbuf-2.0-0:amd64=2.42.10+dfsg-1+deb12u1 +libgdk-pixbuf-2.0-dev:amd64=2.42.10+dfsg-1+deb12u1 +libgdk-pixbuf2.0-bin=2.42.10+dfsg-1+deb12u1 +libgdk-pixbuf2.0-common=2.42.10+dfsg-1+deb12u1 +libgirepository-1.0-1:amd64=1.74.0-3 +libglib2.0-0:amd64=2.74.6-2+deb12u5 +libglib2.0-bin=2.74.6-2+deb12u5 +libglib2.0-data=2.74.6-2+deb12u5 +libglib2.0-dev-bin=2.74.6-2+deb12u5 +libglib2.0-dev:amd64=2.74.6-2+deb12u5 +libgmp-dev:amd64=2:6.2.1+dfsg1-1.1 +libgmp10:amd64=2:6.2.1+dfsg1-1.1 +libgmpxx4ldbl:amd64=2:6.2.1+dfsg1-1.1 +libgnutls30:amd64=3.7.9-2+deb12u4 +libgomp1:amd64=12.2.0-14+deb12u1 +libgpg-error0:amd64=1.46-1 +libgprofng0:amd64=2.40-2 +libgraphite2-3:amd64=1.3.14-1 +libgssapi-krb5-2:amd64=1.20.1-2+deb12u2 +libgssrpc4:amd64=1.20.1-2+deb12u2 +libharfbuzz0b:amd64=6.0.0+dfsg-3 +libheif1:amd64=1.15.1-1+deb12u1 +libhogweed6:amd64=3.8.1-2 +libice-dev:amd64=2:1.0.10-1 +libice6:amd64=2:1.0.10-1 +libicu-dev:amd64=72.1-3 +libicu72:amd64=72.1-3 +libidn2-0:amd64=2.3.3-1+b1 +libimath-3-1-29:amd64=3.1.6-1 +libimath-dev:amd64=3.1.6-1 +libisl23:amd64=0.25-1.1 +libitm1:amd64=12.2.0-14+deb12u1 +libjansson4:amd64=2.14-2 +libjbig-dev:amd64=2.1-6.1 +libjbig0:amd64=2.1-6.1 +libjpeg-dev:amd64=1:2.1.5-2 +libjpeg62-turbo-dev:amd64=1:2.1.5-2 +libjpeg62-turbo:amd64=1:2.1.5-2 +libk5crypto3:amd64=1.20.1-2+deb12u2 +libkadm5clnt-mit12:amd64=1.20.1-2+deb12u2 +libkadm5srv-mit12:amd64=1.20.1-2+deb12u2 +libkdb5-10:amd64=1.20.1-2+deb12u2 +libkeyutils1:amd64=1.6.3-2 
+libkrb5-3:amd64=1.20.1-2+deb12u2 +libkrb5-dev:amd64=1.20.1-2+deb12u2 +libkrb5support0:amd64=1.20.1-2+deb12u2 +libksba8:amd64=1.6.3-2 +liblcms2-2:amd64=2.14-2 +liblcms2-dev:amd64=2.14-2 +libldap-2.5-0:amd64=2.5.13+dfsg-5 +liblerc-dev:amd64=4.0.0+ds-2 +liblerc4:amd64=4.0.0+ds-2 +libllvm14:amd64=1:14.0.6-12 +liblqr-1-0-dev:amd64=0.4.2-2.1 +liblqr-1-0:amd64=0.4.2-2.1 +liblsan0:amd64=12.2.0-14+deb12u1 +libltdl-dev:amd64=2.4.7-7~deb12u1 +libltdl7:amd64=2.4.7-7~deb12u1 +liblz4-1:amd64=1.9.4-1 +liblzma-dev:amd64=5.4.1-1 +liblzma5:amd64=5.4.1-1 +liblzo2-2:amd64=2.10-2 +libmagic-mgc=1:5.44-3 +libmagic1:amd64=1:5.44-3 +libmagickcore-6-arch-config:amd64=8:6.9.11.60+dfsg-1.6+deb12u2 +libmagickcore-6-headers=8:6.9.11.60+dfsg-1.6+deb12u2 +libmagickcore-6.q16-6-extra:amd64=8:6.9.11.60+dfsg-1.6+deb12u2 +libmagickcore-6.q16-6:amd64=8:6.9.11.60+dfsg-1.6+deb12u2 +libmagickcore-6.q16-dev:amd64=8:6.9.11.60+dfsg-1.6+deb12u2 +libmagickcore-dev=8:6.9.11.60+dfsg-1.6+deb12u2 +libmagickwand-6-headers=8:6.9.11.60+dfsg-1.6+deb12u2 +libmagickwand-6.q16-6:amd64=8:6.9.11.60+dfsg-1.6+deb12u2 +libmagickwand-6.q16-dev:amd64=8:6.9.11.60+dfsg-1.6+deb12u2 +libmagickwand-dev=8:6.9.11.60+dfsg-1.6+deb12u2 +libmariadb-dev-compat=1:10.11.11-0+deb12u1 +libmariadb-dev=1:10.11.11-0+deb12u1 +libmariadb3:amd64=1:10.11.11-0+deb12u1 +libmaxminddb-dev:amd64=1.7.1-1 +libmaxminddb0:amd64=1.7.1-1 +libmd0:amd64=1.0.4-2 +libmount-dev:amd64=2.38.1-5+deb12u3 +libmount1:amd64=2.38.1-5+deb12u3 +libmpc3:amd64=1.3.1-1 +libmpfr6:amd64=4.2.0-1 +libncurses-dev:amd64=6.4-4 +libncurses5-dev:amd64=6.4-4 +libncurses6:amd64=6.4-4 +libncursesw5-dev:amd64=6.4-4 +libncursesw6:amd64=6.4-4 +libnettle8:amd64=3.8.1-2 +libnghttp2-14:amd64=1.52.0-1+deb12u2 +libnpth0:amd64=1.6-3 +libnsl-dev:amd64=1.3.0-2 +libnsl2:amd64=1.3.0-2 +libnuma1:amd64=2.0.16-1 +libobjc-12-dev:amd64=12.2.0-14+deb12u1 +libobjc4:amd64=12.2.0-14+deb12u1 +libopenexr-3-1-30:amd64=3.1.5-5 +libopenexr-dev=3.1.5-5 +libopenjp2-7-dev:amd64=2.5.0-2+deb12u1 +libopenjp2-7:amd64=2.5.0-2+deb12u1 +libp11-kit0:amd64=0.24.1-2 +libpam-modules-bin=1.5.2-6+deb12u1 +libpam-modules:amd64=1.5.2-6+deb12u1 +libpam-runtime=1.5.2-6+deb12u1 +libpam0g:amd64=1.5.2-6+deb12u1 +libpango-1.0-0:amd64=1.50.12+ds-1 +libpangocairo-1.0-0:amd64=1.50.12+ds-1 +libpangoft2-1.0-0:amd64=1.50.12+ds-1 +libpcre2-16-0:amd64=10.42-1 +libpcre2-32-0:amd64=10.42-1 +libpcre2-8-0:amd64=10.42-1 +libpcre2-dev:amd64=10.42-1 +libpcre2-posix3:amd64=10.42-1 +libperl5.36:amd64=5.36.0-7+deb12u2 +libpixman-1-0:amd64=0.42.2-1 +libpixman-1-dev:amd64=0.42.2-1 +libpkgconf3:amd64=1.8.1-1 +libpng-dev:amd64=1.6.39-2 +libpng16-16:amd64=1.6.39-2 +libpq-dev=15.12-0+deb12u2 +libpq5:amd64=15.12-0+deb12u2 +libproc2-0:amd64=2:4.0.2-3 +libprotobuf-dev:amd64=3.21.12-3 +libprotobuf-lite32:amd64=3.21.12-3 +libprotobuf32:amd64=3.21.12-3 +libprotoc32:amd64=3.21.12-3 +libpsl5:amd64=0.21.2-1 +libpthread-stubs0-dev:amd64=0.4-1 +libpython3-stdlib:amd64=3.11.2-1+b1 +libpython3.11-minimal:amd64=3.11.2-6+deb12u5 +libpython3.11-stdlib:amd64=3.11.2-6+deb12u5 +libquadmath0:amd64=12.2.0-14+deb12u1 +libreadline-dev:amd64=8.2-1.3 +libreadline8:amd64=8.2-1.3 +librsvg2-2:amd64=2.54.7+dfsg-1~deb12u1 +librsvg2-common:amd64=2.54.7+dfsg-1~deb12u1 +librsvg2-dev:amd64=2.54.7+dfsg-1~deb12u1 +librtmp1:amd64=2.4+20151223.gitfa8646d.1-2+b2 +libsasl2-2:amd64=2.1.28+dfsg-10 +libsasl2-modules-db:amd64=2.1.28+dfsg-10 +libseccomp2:amd64=2.5.4-1+deb12u1 +libselinux1-dev:amd64=3.4-1+b6 +libselinux1:amd64=3.4-1+b6 +libsemanage-common=3.4-1 +libsemanage2:amd64=3.4-1+b5 +libsepol-dev:amd64=3.4-2.1 
+libsepol2:amd64=3.4-2.1 +libserf-1-1:amd64=1.3.9-11 +libsm-dev:amd64=2:1.2.3-1 +libsm6:amd64=2:1.2.3-1 +libsmartcols1:amd64=2.38.1-5+deb12u3 +libsqlite3-0:amd64=3.40.1-2+deb12u1 +libsqlite3-dev:amd64=3.40.1-2+deb12u1 +libss2:amd64=1.47.0-2 +libssh2-1:amd64=1.10.0-3+b1 +libssl-dev:amd64=3.0.16-1~deb12u1 +libssl3:amd64=3.0.16-1~deb12u1 +libstdc++-12-dev:amd64=12.2.0-14+deb12u1 +libstdc++6:amd64=12.2.0-14+deb12u1 +libsvn1:amd64=1.14.2-4+deb12u1 +libsystemd0:amd64=252.36-1~deb12u1 +libtasn1-6:amd64=4.19.0-2+deb12u1 +libthai-data=0.1.29-1 +libthai0:amd64=0.1.29-1 +libtiff-dev:amd64=4.5.0-6+deb12u2 +libtiff6:amd64=4.5.0-6+deb12u2 +libtiffxx6:amd64=4.5.0-6+deb12u2 +libtinfo6:amd64=6.4-4 +libtirpc-common=1.3.3+ds-1 +libtirpc-dev:amd64=1.3.3+ds-1 +libtirpc3:amd64=1.3.3+ds-1 +libtool=2.4.7-7~deb12u1 +libtsan2:amd64=12.2.0-14+deb12u1 +libubsan1:amd64=12.2.0-14+deb12u1 +libudev1:amd64=252.36-1~deb12u1 +libunistring2:amd64=1.0-2 +libutf8proc2:amd64=2.8.0-1 +libuuid1:amd64=2.38.1-5+deb12u3 +libwebp-dev:amd64=1.2.4-0.2+deb12u1 +libwebp7:amd64=1.2.4-0.2+deb12u1 +libwebpdemux2:amd64=1.2.4-0.2+deb12u1 +libwebpmux3:amd64=1.2.4-0.2+deb12u1 +libwmf-0.2-7:amd64=0.2.12-5.1 +libwmf-dev=0.2.12-5.1 +libwmflite-0.2-7:amd64=0.2.12-5.1 +libx11-6:amd64=2:1.8.4-2+deb12u2 +libx11-data=2:1.8.4-2+deb12u2 +libx11-dev:amd64=2:1.8.4-2+deb12u2 +libx265-199:amd64=3.5-2+b1 +libxau-dev:amd64=1:1.0.9-1 +libxau6:amd64=1:1.0.9-1 +libxcb-render0-dev:amd64=1.15-1 +libxcb-render0:amd64=1.15-1 +libxcb-shm0-dev:amd64=1.15-1 +libxcb-shm0:amd64=1.15-1 +libxcb1-dev:amd64=1.15-1 +libxcb1:amd64=1.15-1 +libxdmcp-dev:amd64=1:1.1.2-3 +libxdmcp6:amd64=1:1.1.2-3 +libxext-dev:amd64=2:1.3.4-1+b1 +libxext6:amd64=2:1.3.4-1+b1 +libxml2-dev:amd64=2.9.14+dfsg-1.3~deb12u1 +libxml2:amd64=2.9.14+dfsg-1.3~deb12u1 +libxrender-dev:amd64=1:0.9.10-1.1 +libxrender1:amd64=1:0.9.10-1.1 +libxslt1-dev:amd64=1.1.35-1+deb12u1 +libxslt1.1:amd64=1.1.35-1+deb12u1 +libxt-dev:amd64=1:1.2.1-1.1 +libxt6:amd64=1:1.2.1-1.1 +libxxhash0:amd64=0.8.1-1 +libyaml-0-2:amd64=0.2.5-1 +libyaml-dev:amd64=0.2.5-1 +libz3-4:amd64=4.8.12-3.1 +libzstd-dev:amd64=1.5.4+dfsg2-5 +libzstd1:amd64=1.5.4+dfsg2-5 +linux-libc-dev:amd64=6.1.135-1 +llvm-14-linker-tools=1:14.0.6-12 +login=1:4.13+dfsg1-1+b1 +logsave=1.47.0-2 +m4=1.4.19-3 +make=4.3-4.1 +mariadb-common=1:10.11.11-0+deb12u1 +mawk=1.3.4.20200120-3.1 +media-types=10.0.0 +mercurial-common=6.3.2-1+deb12u1 +mercurial=6.3.2-1+deb12u1 +mount=2.38.1-5+deb12u3 +musl-dev:amd64=1.2.3-1 +musl-tools=1.2.3-1 +musl:amd64=1.2.3-1 +mysql-common=5.8+1.1.0 +ncurses-base=6.4-4 +ncurses-bin=6.4-4 +netbase=6.4 +openssh-client=1:9.2p1-2+deb12u5 +openssl=3.0.15-1~deb12u1 +passwd=1:4.13+dfsg1-1+b1 +patch=2.7.6-7 +perl-base=5.36.0-7+deb12u2 +perl-modules-5.36=5.36.0-7+deb12u2 +perl=5.36.0-7+deb12u2 +pinentry-curses=1.2.1-1 +pkg-config:amd64=1.8.1-1 +pkgconf-bin=1.8.1-1 +pkgconf:amd64=1.8.1-1 +procps=2:4.0.2-3 +protobuf-compiler=3.21.12-3 +python3-distutils=3.11.2-3 +python3-lib2to3=3.11.2-3 +python3-minimal=3.11.2-1+b1 +python3.11-minimal=3.11.2-6+deb12u5 +python3.11=3.11.2-6+deb12u5 +python3=3.11.2-1+b1 +readline-common=8.2-1.3 +rpcsvc-proto=1.4.3-1 +sed=4.9-1 +sensible-utils=0.0.17+nmu1 +shared-mime-info=2.2-1 +sq=0.27.0-2+b1 +subversion=1.14.2-4+deb12u1 +sysvinit-utils=3.06-4 +tar=1.34+dfsg-1.2+deb12u1 +tzdata=2025b-0+deb12u1 +ucf=3.0043+nmu1+deb12u1 +unzip=6.0-28 +usr-is-merged=37~deb12u1 +util-linux-extra=2.38.1-5+deb12u3 +util-linux=2.38.1-5+deb12u3 +uuid-dev:amd64=2.38.1-5+deb12u3 +wget=1.21.3-1+deb12u1 +x11-common=1:7.7+23 +x11proto-core-dev=2022.1-1 
+x11proto-dev=2022.1-1 +xorg-sgml-doctools=1:1.11-1.1 +xtrans-dev=1.4.0-1 +xz-utils=5.4.1-1 +zlib1g-dev:amd64=1:1.2.13.dfsg-1 +zlib1g:amd64=1:1.2.13.dfsg-1 diff --git a/verifier/builder/shared/config-qemu.sh b/verifier/builder/shared/config-qemu.sh new file mode 100755 index 00000000..94174a58 --- /dev/null +++ b/verifier/builder/shared/config-qemu.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +# SPDX-FileCopyrightText: © 2025 Phala Network +# +# SPDX-License-Identifier: Apache-2.0 + +BUILD_DIR="$1" +PREFIX="$2" +if [ -z "$BUILD_DIR" ]; then + echo "Usage: $0 <build-dir> [<prefix>]" + exit 1 +fi + +mkdir -p "$BUILD_DIR" +cd "$BUILD_DIR" + +export SOURCE_DATE_EPOCH=$(git -C .. log -1 --pretty=%ct) +export CFLAGS="-DDUMP_ACPI_TABLES -Wno-builtin-macro-redefined -D__DATE__=\"\" -D__TIME__=\"\" -D__TIMESTAMP__=\"\"" +export LDFLAGS="-Wl,--build-id=none" + +../configure \ + --prefix="$PREFIX" \ + --target-list=x86_64-softmmu \ + --disable-werror + +echo "" +echo "Build configured for reproducibility in $BUILD_DIR" +echo "To build, run: cd $BUILD_DIR && make" diff --git a/verifier/builder/shared/pin-packages.sh b/verifier/builder/shared/pin-packages.sh new file mode 100755 index 00000000..5aa8ba4a --- /dev/null +++ b/verifier/builder/shared/pin-packages.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# SPDX-FileCopyrightText: © 2025 Phala Network +# +# SPDX-License-Identifier: Apache-2.0 + +set -e +PKG_LIST=$1 + +echo 'deb [check-valid-until=no] https://snapshot.debian.org/archive/debian/20250626T204007Z bookworm main' > /etc/apt/sources.list +echo 'deb [check-valid-until=no] https://snapshot.debian.org/archive/debian-security/20250626T204007Z bookworm-security main' >> /etc/apt/sources.list +echo 'Acquire::Check-Valid-Until "false";' > /etc/apt/apt.conf.d/10no-check-valid-until + +mkdir -p /etc/apt/preferences.d +while IFS= read -r line; do + pkg=$(echo "$line" | cut -d= -f1) + ver=$(echo "$line" | cut -d= -f2) + if [ -n "$pkg" ] && [ -n "$ver" ]; then + printf 'Package: %s\nPin: version %s\nPin-Priority: 1001\n\n' "$pkg" "$ver" >> /etc/apt/preferences.d/pinned-packages + fi +done < "$PKG_LIST" diff --git a/verifier/builder/shared/pinned-packages.txt b/verifier/builder/shared/pinned-packages.txt new file mode 100644 index 00000000..409c097c --- /dev/null +++ b/verifier/builder/shared/pinned-packages.txt @@ -0,0 +1,108 @@ +adduser=3.134 +apt=2.6.1 +base-files=12.4+deb12u11 +base-passwd=3.6.1 +bash=5.2.15-2+b8 +bsdutils=1:2.38.1-5+deb12u3 +ca-certificates=20230311+deb12u1 +coreutils=9.1-1 +curl=7.88.1-10+deb12u14 +dash=0.5.12-2 +debconf=1.5.82 +debian-archive-keyring=2023.3+deb12u2 +debianutils=5.7-0.5~deb12u1 +diffutils=1:3.8-4 +dpkg=1.21.22 +e2fsprogs=1.47.0-2 +findutils=4.9.0-4 +gcc-12-base:amd64=12.2.0-14+deb12u1 +gpgv=2.2.40-1.1 +grep=3.8-5 +gzip=1.12-1 +hostname=3.23+nmu1 +init-system-helpers=1.65.2 +libacl1:amd64=2.3.1-3 +libapt-pkg6.0:amd64=2.6.1 +libattr1:amd64=1:2.5.1-4 +libaudit-common=1:3.0.9-1 +libaudit1:amd64=1:3.0.9-1 +libblkid1:amd64=2.38.1-5+deb12u3 +libbrotli1:amd64=1.0.9-2+b6 +libbz2-1.0:amd64=1.0.8-5+b1 +libc-bin=2.36-9+deb12u10 +libc6:amd64=2.36-9+deb12u10 +libcap-ng0:amd64=0.8.3-1+b3 +libcap2:amd64=1:2.66-4+deb12u1 +libcom-err2:amd64=1.47.0-2 +libcrypt1:amd64=1:4.4.33-2 +libcurl4:amd64=7.88.1-10+deb12u14 +libdb5.3:amd64=5.3.28+dfsg2-1 +libdebconfclient0:amd64=0.270 +libext2fs2:amd64=1.47.0-2 +libffi8:amd64=3.4.4-1 +libgcc-s1:amd64=12.2.0-14+deb12u1 +libgcrypt20:amd64=1.10.1-3 +libglib2.0-0:amd64=2.74.6-2+deb12u7 +libgmp10:amd64=2:6.2.1+dfsg1-1.1 +libgnutls30:amd64=3.7.9-2+deb12u4 +libgpg-error0:amd64=1.46-1
+libgssapi-krb5-2:amd64=1.20.1-2+deb12u4 +libhogweed6:amd64=3.8.1-2 +libidn2-0:amd64=2.3.3-1+b1 +libk5crypto3:amd64=1.20.1-2+deb12u4 +libkeyutils1:amd64=1.6.3-2 +libkrb5-3:amd64=1.20.1-2+deb12u4 +libkrb5support0:amd64=1.20.1-2+deb12u4 +libldap-2.5-0:amd64=2.5.13+dfsg-5 +liblz4-1:amd64=1.9.4-1 +liblzma5:amd64=5.4.1-1 +libmd0:amd64=1.0.4-2 +libmount1:amd64=2.38.1-5+deb12u3 +libnettle8:amd64=3.8.1-2 +libnghttp2-14:amd64=1.52.0-1+deb12u2 +libp11-kit0:amd64=0.24.1-2 +libpam-modules-bin=1.5.2-6+deb12u1 +libpam-modules:amd64=1.5.2-6+deb12u1 +libpam-runtime=1.5.2-6+deb12u1 +libpam0g:amd64=1.5.2-6+deb12u1 +libpcre2-8-0:amd64=10.42-1 +libpsl5:amd64=0.21.2-1 +librtmp1:amd64=2.4+20151223.gitfa8646d.1-2+b2 +libsasl2-2:amd64=2.1.28+dfsg-10 +libsasl2-modules-db:amd64=2.1.28+dfsg-10 +libseccomp2:amd64=2.5.4-1+deb12u1 +libselinux1:amd64=3.4-1+b6 +libsemanage-common=3.4-1 +libsemanage2:amd64=3.4-1+b5 +libsepol2:amd64=3.4-2.1 +libslirp0:amd64=4.7.0-1 +libsmartcols1:amd64=2.38.1-5+deb12u3 +libss2:amd64=1.47.0-2 +libssh2-1:amd64=1.10.0-3+b1 +libssl3:amd64=3.0.17-1~deb12u2 +libstdc++6:amd64=12.2.0-14+deb12u1 +libsystemd0:amd64=252.38-1~deb12u1 +libtasn1-6:amd64=4.19.0-2+deb12u1 +libtinfo6:amd64=6.4-4 +libudev1:amd64=252.38-1~deb12u1 +libunistring2:amd64=1.0-2 +libuuid1:amd64=2.38.1-5+deb12u3 +libxxhash0:amd64=0.8.1-1 +libzstd1:amd64=1.5.4+dfsg2-5 +login=1:4.13+dfsg1-1+deb12u1 +logsave=1.47.0-2 +mawk=1.3.4.20200120-3.1 +mount=2.38.1-5+deb12u3 +ncurses-base=6.4-4 +ncurses-bin=6.4-4 +openssl=3.0.17-1~deb12u2 +passwd=1:4.13+dfsg1-1+deb12u1 +perl-base=5.36.0-7+deb12u2 +sed=4.9-1 +sysvinit-utils=3.06-4 +tar=1.34+dfsg-1.2+deb12u1 +tzdata=2025b-0+deb12u1 +usr-is-merged=37~deb12u1 +util-linux-extra=2.38.1-5+deb12u3 +util-linux=2.38.1-5+deb12u3 +zlib1g:amd64=1:1.2.13.dfsg-1 diff --git a/verifier/builder/shared/qemu-pinned-packages.txt b/verifier/builder/shared/qemu-pinned-packages.txt new file mode 100644 index 00000000..1ae0d6b9 --- /dev/null +++ b/verifier/builder/shared/qemu-pinned-packages.txt @@ -0,0 +1,236 @@ +adduser=3.134 +apt=2.6.1 +base-files=12.4+deb12u11 +base-passwd=3.6.1 +bash=5.2.15-2+b8 +binutils-common:amd64=2.40-2 +binutils-x86-64-linux-gnu=2.40-2 +binutils=2.40-2 +bison=2:3.8.2+dfsg-1+b1 +bsdutils=1:2.38.1-5+deb12u3 +build-essential=12.9 +bzip2=1.0.8-5+b1 +ca-certificates=20230311+deb12u1 +coreutils=9.1-1 +cpp-12=12.2.0-14+deb12u1 +cpp=4:12.2.0-3 +dash=0.5.12-2 +debconf=1.5.82 +debian-archive-keyring=2023.3+deb12u2 +debianutils=5.7-0.5~deb12u1 +diffutils=1:3.8-4 +docutils-common=0.19+dfsg-6 +dpkg-dev=1.21.22 +dpkg=1.21.22 +e2fsprogs=1.47.0-2 +findutils=4.9.0-4 +flex=2.6.4-8.2 +fonts-font-awesome=5.0.10+really4.7.0~dfsg-4.1 +fonts-lato=2.0-2.1 +g++-12=12.2.0-14+deb12u1 +g++=4:12.2.0-3 +gcc-12-base:amd64=12.2.0-14+deb12u1 +gcc-12=12.2.0-14+deb12u1 +gcc=4:12.2.0-3 +git-man=1:2.39.5-0+deb12u2 +git=1:2.39.5-0+deb12u2 +gpgv=2.2.40-1.1 +grep=3.8-5 +gzip=1.12-1 +hostname=3.23+nmu1 +init-system-helpers=1.65.2 +libacl1:amd64=2.3.1-3 +libapt-pkg6.0:amd64=2.6.1 +libasan8:amd64=12.2.0-14+deb12u1 +libatomic1:amd64=12.2.0-14+deb12u1 +libattr1:amd64=1:2.5.1-4 +libaudit-common=1:3.0.9-1 +libaudit1:amd64=1:3.0.9-1 +libbinutils:amd64=2.40-2 +libblkid-dev:amd64=2.38.1-5+deb12u3 +libblkid1:amd64=2.38.1-5+deb12u3 +libbrotli1:amd64=1.0.9-2+b6 +libbz2-1.0:amd64=1.0.8-5+b1 +libc-bin=2.36-9+deb12u10 +libc-dev-bin=2.36-9+deb12u13 +libc6-dev:amd64=2.36-9+deb12u13 +libc6:amd64=2.36-9+deb12u13 +libcap-ng0:amd64=0.8.3-1+b3 +libcap2:amd64=1:2.66-4+deb12u1 +libcc1-0:amd64=12.2.0-14+deb12u1 +libcom-err2:amd64=1.47.0-2 
+libcrypt-dev:amd64=1:4.4.33-2 +libcrypt1:amd64=1:4.4.33-2 +libctf-nobfd0:amd64=2.40-2 +libctf0:amd64=2.40-2 +libcurl3-gnutls:amd64=7.88.1-10+deb12u14 +libdb5.3:amd64=5.3.28+dfsg2-1 +libdebconfclient0:amd64=0.270 +libdpkg-perl=1.21.22 +libelf1:amd64=0.188-2.1 +liberror-perl=0.17029-2 +libexpat1:amd64=2.5.0-1+deb12u1 +libext2fs2:amd64=1.47.0-2 +libffi-dev:amd64=3.4.4-1 +libffi8:amd64=3.4.4-1 +libgcc-12-dev:amd64=12.2.0-14+deb12u1 +libgcc-s1:amd64=12.2.0-14+deb12u1 +libgcrypt20:amd64=1.10.1-3 +libgdbm-compat4:amd64=1.23-3 +libgdbm6:amd64=1.23-3 +libglib2.0-0:amd64=2.74.6-2+deb12u7 +libglib2.0-bin=2.74.6-2+deb12u7 +libglib2.0-data=2.74.6-2+deb12u7 +libglib2.0-dev-bin=2.74.6-2+deb12u7 +libglib2.0-dev:amd64=2.74.6-2+deb12u7 +libgmp10:amd64=2:6.2.1+dfsg1-1.1 +libgnutls30:amd64=3.7.9-2+deb12u4 +libgomp1:amd64=12.2.0-14+deb12u1 +libgpg-error0:amd64=1.46-1 +libgprofng0:amd64=2.40-2 +libgssapi-krb5-2:amd64=1.20.1-2+deb12u4 +libhogweed6:amd64=3.8.1-2 +libidn2-0:amd64=2.3.3-1+b1 +libisl23:amd64=0.25-1.1 +libitm1:amd64=12.2.0-14+deb12u1 +libjansson4:amd64=2.14-2 +libjs-jquery=3.6.1+dfsg+~3.5.14-1 +libjs-sphinxdoc=5.3.0-4 +libjs-underscore=1.13.4~dfsg+~1.11.4-3 +libjson-perl=4.10000-1 +libk5crypto3:amd64=1.20.1-2+deb12u4 +libkeyutils1:amd64=1.6.3-2 +libkrb5-3:amd64=1.20.1-2+deb12u4 +libkrb5support0:amd64=1.20.1-2+deb12u4 +libldap-2.5-0:amd64=2.5.13+dfsg-5 +liblsan0:amd64=12.2.0-14+deb12u1 +liblz4-1:amd64=1.9.4-1 +liblzma5:amd64=5.4.1-1 +libmd0:amd64=1.0.4-2 +libmount-dev:amd64=2.38.1-5+deb12u3 +libmount1:amd64=2.38.1-5+deb12u3 +libmpc3:amd64=1.3.1-1 +libmpfr6:amd64=4.2.0-1 +libncursesw6:amd64=6.4-4 +libnettle8:amd64=3.8.1-2 +libnghttp2-14:amd64=1.52.0-1+deb12u2 +libnsl-dev:amd64=1.3.0-2 +libnsl2:amd64=1.3.0-2 +libp11-kit0:amd64=0.24.1-2 +libpam-modules-bin=1.5.2-6+deb12u1 +libpam-modules:amd64=1.5.2-6+deb12u1 +libpam-runtime=1.5.2-6+deb12u1 +libpam0g:amd64=1.5.2-6+deb12u1 +libpcre2-16-0:amd64=10.42-1 +libpcre2-32-0:amd64=10.42-1 +libpcre2-8-0:amd64=10.42-1 +libpcre2-dev:amd64=10.42-1 +libpcre2-posix3:amd64=10.42-1 +libperl5.36:amd64=5.36.0-7+deb12u2 +libpkgconf3:amd64=1.8.1-1 +libpsl5:amd64=0.21.2-1 +libpython3-stdlib:amd64=3.11.2-1+b1 +libpython3.11-minimal:amd64=3.11.2-6+deb12u6 +libpython3.11-stdlib:amd64=3.11.2-6+deb12u6 +libquadmath0:amd64=12.2.0-14+deb12u1 +libreadline8:amd64=8.2-1.3 +librtmp1:amd64=2.4+20151223.gitfa8646d.1-2+b2 +libsasl2-2:amd64=2.1.28+dfsg-10 +libsasl2-modules-db:amd64=2.1.28+dfsg-10 +libseccomp2:amd64=2.5.4-1+deb12u1 +libselinux1-dev:amd64=3.4-1+b6 +libselinux1:amd64=3.4-1+b6 +libsemanage-common=3.4-1 +libsemanage2:amd64=3.4-1+b5 +libsepol-dev:amd64=3.4-2.1 +libsepol2:amd64=3.4-2.1 +libslirp-dev:amd64=4.7.0-1 +libslirp0:amd64=4.7.0-1 +libsmartcols1:amd64=2.38.1-5+deb12u3 +libsqlite3-0:amd64=3.40.1-2+deb12u2 +libss2:amd64=1.47.0-2 +libssh2-1:amd64=1.10.0-3+b1 +libssl3:amd64=3.0.17-1~deb12u2 +libstdc++-12-dev:amd64=12.2.0-14+deb12u1 +libstdc++6:amd64=12.2.0-14+deb12u1 +libsystemd0:amd64=252.38-1~deb12u1 +libtasn1-6:amd64=4.19.0-2+deb12u1 +libtinfo6:amd64=6.4-4 +libtirpc-common=1.3.3+ds-1 +libtirpc-dev:amd64=1.3.3+ds-1 +libtirpc3:amd64=1.3.3+ds-1 +libtsan2:amd64=12.2.0-14+deb12u1 +libubsan1:amd64=12.2.0-14+deb12u1 +libudev1:amd64=252.38-1~deb12u1 +libunistring2:amd64=1.0-2 +libuuid1:amd64=2.38.1-5+deb12u3 +libxxhash0:amd64=0.8.1-1 +libzstd1:amd64=1.5.4+dfsg2-5 +linux-libc-dev:amd64=6.1.148-1 +login=1:4.13+dfsg1-1+deb12u1 +logsave=1.47.0-2 +m4=1.4.19-3 +make=4.3-4.1 +mawk=1.3.4.20200120-3.1 +media-types=10.0.0 +mount=2.38.1-5+deb12u3 +ncurses-base=6.4-4 +ncurses-bin=6.4-4 
+ninja-build=1.11.1-2~deb12u1 +openssl=3.0.17-1~deb12u2 +passwd=1:4.13+dfsg1-1+deb12u1 +patch=2.7.6-7 +perl-base=5.36.0-7+deb12u2 +perl-modules-5.36=5.36.0-7+deb12u2 +perl=5.36.0-7+deb12u2 +pkg-config:amd64=1.8.1-1 +pkgconf-bin=1.8.1-1 +pkgconf:amd64=1.8.1-1 +python-babel-localedata=2.10.3-1 +python3-alabaster=0.7.12-1 +python3-babel=2.10.3-1 +python3-certifi=2022.9.24-1 +python3-chardet=5.1.0+dfsg-2 +python3-charset-normalizer=3.0.1-2 +python3-distutils=3.11.2-3 +python3-docutils=0.19+dfsg-6 +python3-idna=3.3-1+deb12u1 +python3-imagesize=1.4.1-1 +python3-jinja2=3.1.2-1+deb12u3 +python3-lib2to3=3.11.2-3 +python3-markupsafe=2.1.2-1+b1 +python3-minimal=3.11.2-1+b1 +python3-packaging=23.0-1 +python3-pip=23.0.1+dfsg-1 +python3-pkg-resources=66.1.1-1+deb12u2 +python3-pygments=2.14.0+dfsg-1 +python3-requests=2.28.1+dfsg-1 +python3-roman=3.3-3 +python3-setuptools=66.1.1-1+deb12u2 +python3-six=1.16.0-4 +python3-snowballstemmer=2.2.0-2 +python3-sphinx-rtd-theme=1.2.0+dfsg-1 +python3-sphinx=5.3.0-4 +python3-tz=2022.7.1-4 +python3-urllib3=1.26.12-1+deb12u1 +python3-wheel=0.38.4-2 +python3.11-minimal=3.11.2-6+deb12u6 +python3.11=3.11.2-6+deb12u6 +python3=3.11.2-1+b1 +readline-common=8.2-1.3 +rpcsvc-proto=1.4.3-1 +sed=4.9-1 +sgml-base=1.31 +sphinx-common=5.3.0-4 +sphinx-rtd-theme-common=1.2.0+dfsg-1 +sysvinit-utils=3.06-4 +tar=1.34+dfsg-1.2+deb12u1 +tzdata=2025b-0+deb12u1 +usr-is-merged=37~deb12u1 +util-linux-extra=2.38.1-5+deb12u3 +util-linux=2.38.1-5+deb12u3 +uuid-dev:amd64=2.38.1-5+deb12u3 +xml-core=0.18+nmu1 +xz-utils=5.4.1-1 +zlib1g-dev:amd64=1:1.2.13.dfsg-1 +zlib1g:amd64=1:1.2.13.dfsg-1 diff --git a/verifier/dstack-verifier.toml b/verifier/dstack-verifier.toml new file mode 100644 index 00000000..8c8a9b89 --- /dev/null +++ b/verifier/dstack-verifier.toml @@ -0,0 +1,19 @@ +# SPDX-FileCopyrightText: © 2024-2025 Phala Network +# +# SPDX-License-Identifier: Apache-2.0 + +# Server configuration +address = "0.0.0.0" +port = 8080 + +# Image cache directory for OS image verification +image_cache_dir = "/tmp/dstack-verifier/cache" + +# Image download URL template (replace {OS_IMAGE_HASH} with actual hash) +image_download_url = "https://download.dstack.org/os-images/mr_{OS_IMAGE_HASH}.tar.gz" + +# Image download timeout in seconds +image_download_timeout_secs = 300 + +# Optional PCCS URL for quote verification +# pccs_url = "https://pccs.phala.network" \ No newline at end of file diff --git a/verifier/fixtures/quote-report.json b/verifier/fixtures/quote-report.json new file mode 100644 index 00000000..93624477 --- /dev/null +++ b/verifier/fixtures/quote-report.json @@ -0,0 +1 @@ 
+{"quote":"040002008100000000000000939a7233f79c4ca9940a0db3957f06071eadadc7f30fb7f911d24aa522afc590000000000b0104000000000000000000000000007bf063280e94fb051f5dd7b1fc59ce9aac42bb961df8d44b709c9b0ff87a7b4df648657ba6d1189589feab1d5a3c9a9d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000e702060000000000b24d3b24e9e3c16012376b52362ca09856c4adecb709d5fac33addf1c47e193da075b125b6c364115771390a5461e2170000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002e3843265f8ecdd4e2282694747f6f2f111605c33f2a8882f5734ee6f3a6ce63d8f34aeef06093dcda76fa5f9d33d8d6a1b79d76021970f57c45c4a7c395f780bab37011a4df27fe44e8559bd1abb4d6e52f12f866d1d08405448eb797a5970f1e31b59d605df7ee8160cf7966be9bafa6d0e1905de7e09695a24cd9748e71a603a51fae1297619fa0c30517addbcd070f787c3877f3e95095d5a4d13dd0fe0233803b30120d8469866719dc28f519ce021fe1e53459121e7a5a4443147185a812340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000cc100000aa59a2ab97a78a0401ffc862efb76aa25eb7921915c96f1e284737ac287c30e9a2eeddd04176329c840ab282c0347659cf19681d8c205cec2185e9fa40b673002982655d89dbd3867e7370e8b1b27bbae5eb5f24dfaceea8a2ff9ad71161930c379cef3c7360ef97468031741483798585c1befb2f1d9827d2eb7a22299a01270600461000000404191b04ff0006000000000000000000000000000000000000000000000000000000000000000000000000000000001500000000000000e700000000000000e5a3a7b5d830c2953b98534c6c59a3a34fdc34e933f7f5898f0a85cf08846bca0000000000000000000000000000000000000000000000000000000000000000dc9e2a7c6f948f17474e34a7fc43ed030f7c1563f1babddf6340c82e0e54a8c5000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005be61ea67e69e2411dd59d258969727c5cd13f082b59b3dcc4721e2f7c3b7a4e0000000000000000000000000000000000000000000000000000000000000000a7d05873f18690a9dfa580c695e1c0bd4dde53423bbbed02ed798b8d8b0a727dc92cf12582189873197ef784fd97cd44feb2fee19d388bc5be0abf4231586ba72000000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f05005e0e00002d2d2d2d2d424547494e2043455254494649434154452d2d2d2d2d0a4d49494538544343424a65674177494241674956414c7142567a73712f787369354e387578426f59356c3641515a31444d416f4743437147534d343942414d430a4d484178496a416742674e5642414d4d47556c756447567349464e4857434251513073675547786864475a76636d306751304578476a415942674e5642416f4d0a45556c756447567349454e76636e4276636d4630615739754d5251774567594456515148444174545957353059534244624746795954454c4d416b47413155450a4341774351304578437a414a42674e5642415954416c56544d423458445449314d446b784e6a41794d6a67784e566f5844544d794d446b784e6a41794d6a67780a4e566f77634445694d434147413155454177775a535735305a5777675530645949464244537942445a584a3061575a70593246305a5445614d426747413155450a43677752535735305a577767513239796347397959585270623234784644415342674e564241634d43314e68626e526849454e7359584a684d517377435159440a5651514944414a445154454c4d416b474131554542684d4356564d775754415442676371686b6a4f5051494242676771686b6a4f50514d4242774e43414151380a39455a4b755278457952677a5a5873542b3079304346342b31683453582f6a
54554c644f6771637275466b5033354750346562383634517361797779345877440a42755a65434b664569484e57356f3431353959646f3449444444434341776777487759445652306a42426777466f41556c5739647a62306234656c4153636e550a3944504f4156634c336c5177617759445652306642475177596a42676f46366758495a616148523063484d364c79396863476b7564484a316333526c5a484e6c0a636e5a705932567a4c6d6c75644756734c6d4e766253397a5a3367765932567964476c6d61574e6864476c76626939324e4339775932746a636d772f593245390a6347786864475a76636d306d5a57356a62325270626d63395a4756794d42304741315564446751574242525a63752f70597452454655736837726857544952750a2b446974786a414f42674e56485138424166384542414d434273417744415944565230544151482f4241497741444343416a6b4743537147534962345451454e0a4151534341696f776767496d4d42344743697147534962345451454e4151454545496c395072647571496533672f6d7450516478413073776767466a42676f710a686b69472b453042445145434d494942557a415142677371686b69472b45304244514543415149424244415142677371686b69472b45304244514543416749420a4244415142677371686b69472b4530424451454341774942416a415142677371686b69472b4530424451454342414942416a415142677371686b69472b4530420a44514543425149424244415142677371686b69472b45304244514543426749424154415142677371686b69472b453042445145434277494241444151426773710a686b69472b45304244514543434149424254415142677371686b69472b45304244514543435149424144415142677371686b69472b45304244514543436749420a4144415142677371686b69472b45304244514543437749424144415142677371686b69472b45304244514543444149424144415142677371686b69472b4530420a44514543445149424144415142677371686b69472b45304244514543446749424144415142677371686b69472b453042445145434477494241444151426773710a686b69472b45304244514543454149424144415142677371686b69472b45304244514543455149424454416642677371686b69472b45304244514543456751510a42415143416751424141554141414141414141414144415142676f71686b69472b45304244514544424149414144415542676f71686b69472b453042445145450a4241615177473841414141774477594b4b6f5a496876684e4151304242516f424154416542676f71686b69472b45304244514547424243764f3535314c75626a0a2b49363352564e713558734e4d45514743697147534962345451454e415163774e6a415142677371686b69472b45304244514548415145422f7a4151426773710a686b69472b45304244514548416745422f7a415142677371686b69472b45304244514548417745422f7a414b42676771686b6a4f5051514441674e49414442460a41694541366748457a39306f4c30362b4c6f414442307261326f587943333453504c4d6e35434e473569783862303043494131304d37796f79537a6a755178440a75617a6f505048722f745862432f64762b6b384362314175656b4a650a2d2d2d2d2d454e442043455254494649434154452d2d2d2d2d0a2d2d2d2d2d424547494e2043455254494649434154452d2d2d2d2d0a4d4949436c6a4343416a32674177494241674956414a567658633239472b487051456e4a3150517a7a674658433935554d416f4743437147534d343942414d430a4d476778476a415942674e5642414d4d45556c756447567349464e48574342536232393049454e424d526f77474159445651514b4442464a626e526c624342440a62334a7762334a6864476c76626a45554d424947413155454277774c553246756447456751327868636d4578437a414a42674e564241674d416b4e424d5173770a435159445651514745774a56557a4165467730784f4441314d6a45784d4455774d5442614677307a4d7a41314d6a45784d4455774d5442614d484178496a41670a42674e5642414d4d47556c756447567349464e4857434251513073675547786864475a76636d306751304578476a415942674e5642416f4d45556c75644756730a49454e76636e4276636d4630615739754d5251774567594456515148444174545957353059534244624746795954454c4d416b474131554543417743513045780a437a414a42674e5642415954416c56544d466b77457759484b6f5a497a6a3043415159494b6f5a497a6a304441516344516741454e53422f377432316c58534f0a3243757a7078773734654a423732457944476757357258437478327456544c7136684b6
b367a2b5569525a436e71523770734f766771466553786c6d546c4a6c0a65546d693257597a33714f42757a43427544416642674e5648534d4547444157674251695a517a575770303069664f44744a5653763141624f536347724442530a42674e5648523845537a424a4d45656752614244686b466f64485277637a6f764c324e6c636e52705a6d6c6a5958526c63793530636e567a6447566b633256790a646d6c6a5a584d75615735305a577775593239744c306c756447567355306459556d397664454e424c6d526c636a416442674e5648513445466751556c5739640a7a62306234656c4153636e553944504f4156634c336c517744675944565230504151482f42415144416745474d42494741315564457745422f7751494d4159420a4166384341514177436759494b6f5a497a6a30454177494452774177524149675873566b6930772b6936565947573355462f32327561586530594a446a3155650a6e412b546a44316169356343494359623153416d4435786b66545670766f34556f79695359787244574c6d5552344349394e4b7966504e2b0a2d2d2d2d2d454e442043455254494649434154452d2d2d2d2d0a2d2d2d2d2d424547494e2043455254494649434154452d2d2d2d2d0a4d4949436a7a4343416a53674177494241674955496d554d316c71644e496e7a6737535655723951477a6b6e42717777436759494b6f5a497a6a3045417749770a614445614d4267474131554541777752535735305a5777675530645949464a766233516751304578476a415942674e5642416f4d45556c756447567349454e760a636e4276636d4630615739754d5251774567594456515148444174545957353059534244624746795954454c4d416b47413155454341774351304578437a414a0a42674e5642415954416c56544d423458445445344d4455794d5445774e4455784d466f58445451354d54497a4d54497a4e546b314f566f77614445614d4267470a4131554541777752535735305a5777675530645949464a766233516751304578476a415942674e5642416f4d45556c756447567349454e76636e4276636d46300a615739754d5251774567594456515148444174545957353059534244624746795954454c4d416b47413155454341774351304578437a414a42674e56424159540a416c56544d466b77457759484b6f5a497a6a3043415159494b6f5a497a6a3044415163445167414543366e45774d4449595a4f6a2f69505773437a61454b69370a314f694f534c52466857476a626e42564a66566e6b59347533496a6b4459594c304d784f346d717379596a6c42616c54565978465032734a424b357a6c4b4f420a757a43427544416642674e5648534d4547444157674251695a517a575770303069664f44744a5653763141624f5363477244425342674e5648523845537a424a0a4d45656752614244686b466f64485277637a6f764c324e6c636e52705a6d6c6a5958526c63793530636e567a6447566b63325679646d6c6a5a584d75615735300a5a577775593239744c306c756447567355306459556d397664454e424c6d526c636a416442674e564851344546675155496d554d316c71644e496e7a673753560a55723951477a6b6e4271777744675944565230504151482f42415144416745474d42494741315564457745422f7751494d4159424166384341514577436759490a4b6f5a497a6a3045417749445351417752674968414f572f35516b522b533943695344634e6f6f774c7550524c735747662f59693747535839344267775477670a41694541344a306c72486f4d732b586f356f2f7358364f39515778485241765a55474f6452513763767152586171493d0a2d2d2d2d2d454e442043455254494649434154452d2d2d2d2d0a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","event_log":"[{\"imr\":0,\"event_type\":2147483659,\"digest\":\"8ae1e425351df7992c444586eff99d35af3b779aa2b0e981cb4b73bc5b279f2ade19b6a62a203fc3c3bbdaae80af596d\",\"event\":\"\",\"event_payload\":\"095464785461626c65000100000000000000af96bb93f2b9b84e9462e0ba745642360090800000000000\"},{\"imr\":0,\"event_type\":2147483658,\"digest\":\"344bc51c980ba621aaa00da3ed7436f7d6e549197dfe699515dfa2c6583d95e6412af21c097d473155875ffd561d6790\",\"event\":\"\",\"event_payload\":\"2946762858585858585858582d585858582d585858582d585858582d58585858585858585858585829000000c0ff000000000040080000000000\"},{\"imr\":0,\"event_type\":2147483649,\"digest\":\"
9dc3a1f80bcec915391dcda5ffbb15e7419f77eab462bbf72b42166fb70d50325e37b36f93537a863769bcf9bedae6fb\",\"event\":\"\",\"event_payload\":\"61dfe48bca93d211aa0d00e098032b8c0a00000000000000000000000000000053006500630075007200650042006f006f007400\"},{\"imr\":0,\"event_type\":2147483649,\"digest\":\"6f2e3cbc14f9def86980f5f66fd85e99d63e69a73014ed8a5633ce56eca5b64b692108c56110e22acadcef58c3250f1b\",\"event\":\"\",\"event_payload\":\"61dfe48bca93d211aa0d00e098032b8c0200000000000000000000000000000050004b00\"},{\"imr\":0,\"event_type\":2147483649,\"digest\":\"d607c0efb41c0d757d69bca0615c3a9ac0b1db06c557d992e906c6b7dee40e0e031640c7bfd7bcd35844ef9edeadc6f9\",\"event\":\"\",\"event_payload\":\"61dfe48bca93d211aa0d00e098032b8c030000000000000000000000000000004b0045004b00\"},{\"imr\":0,\"event_type\":2147483649,\"digest\":\"08a74f8963b337acb6c93682f934496373679dd26af1089cb4eaf0c30cf260a12e814856385ab8843e56a9acea19e127\",\"event\":\"\",\"event_payload\":\"cbb219d73a3d9645a3bcdad00e67656f0200000000000000000000000000000064006200\"},{\"imr\":0,\"event_type\":2147483649,\"digest\":\"18cc6e01f0c6ea99aa23f8a280423e94ad81d96d0aeb5180504fc0f7a40cb3619dd39bd6a95ec1680a86ed6ab0f9828d\",\"event\":\"\",\"event_payload\":\"cbb219d73a3d9645a3bcdad00e67656f03000000000000000000000000000000640062007800\"},{\"imr\":0,\"event_type\":4,\"digest\":\"394341b7182cd227c5c6b07ef8000cdfd86136c4292b8e576573ad7ed9ae41019f5818b4b971c9effc60e1ad9f1289f0\",\"event\":\"\",\"event_payload\":\"00000000\"},{\"imr\":0,\"event_type\":10,\"digest\":\"2065dd48d647e4377db277ba203526901a17845e93e0df4c2dfc3ce136e0910324ead1e1c86b8d90c2acdf9c85ffac53\",\"event\":\"\",\"event_payload\":\"414350492044415441\"},{\"imr\":0,\"event_type\":10,\"digest\":\"772b0169c66b52e4453fff9e3c6257635ea950ebcc8edd7ef2e2f8241cf6a155f39df01a7c7a194b6bc0abe5de11861d\",\"event\":\"\",\"event_payload\":\"414350492044415441\"},{\"imr\":0,\"event_type\":10,\"digest\":\"abfb2256644b5786eefdcb92303d2008c36cb9500d98997e215ef5080745d4bf2e5b3629090918e193e7f05b173d48c5\",\"event\":\"\",\"event_payload\":\"414350492044415441\"},{\"imr\":1,\"event_type\":2147483651,\"digest\":\"0761fbfa317a42d8edbe9e404178d102adc059cface98c5e07d1d535371c145c3497fd2a19b8398568b8c8a6f95e0a86\",\"event\":\"\",\"event_payload\":\"18400d7b0000000000d47d000000000000000000000000002a000000000000000403140072f728144ab61e44b8c39ebdd7f893c7040412006b00650072006e0065006c0000007fff0400\"},{\"imr\":0,\"event_type\":2147483650,\"digest\":\"1dd6f7b457ad880d840d41c961283bab688e94e4b59359ea45686581e90feccea3c624b1226113f824f315eb60ae0a7c\",\"event\":\"\",\"event_payload\":\"61dfe48bca93d211aa0d00e098032b8c0900000000000000020000000000000042006f006f0074004f0072006400650072000000\"},{\"imr\":0,\"event_type\":2147483650,\"digest\":\"23ada07f5261f12f34a0bd8e46760962d6b4d576a416f1fea1c64bc656b1d28eacf7047ae6e967c58fd2a98bfa74c298\",\"event\":\"\",\"event_payload\":\"61dfe48bca93d211aa0d00e098032b8c08000000000000003e0000000000000042006f006f0074003000300030003000090100002c0055006900410070007000000004071400c9bdb87cebf8344faaea3ee4af6516a10406140021aa2c4614760345836e8ab6f46623317fff0400\"},{\"imr\":1,\"event_type\":2147483655,\"digest\":\"77a0dab2312b4e1e57a84d865a21e5b2ee8d677a21012ada819d0a98988078d3d740f6346bfe0abaa938ca20439a8d71\",\"event\":\"\",\"event_payload\":\"43616c6c696e6720454649204170706c69636174696f6e2066726f6d20426f6f74204f7074696f6e\"},{\"imr\":1,\"event_type\":4,\"digest\":\"394341b7182cd227c5c6b07ef8000cdfd86136c4292b8e576573ad7ed9ae41019f5818b4b971c9effc60e1ad9f1289f0\",\"event\":\"\",\"event_payload\":\"00000
000\"},{\"imr\":2,\"event_type\":6,\"digest\":\"4027cb4ec64dbc24b6d98d9470daeefc749bbb6a9b011762d215f6ed3eb833d58fd72d9ad850958f72878182e6f61924\",\"event\":\"\",\"event_payload\":\"ed223b8f1a0000004c4f414445445f494d4147453a3a4c6f61644f7074696f6e7300\"},{\"imr\":2,\"event_type\":6,\"digest\":\"63e06e29cf98f2fce71abd3a9629dff48457b47c010b64e11f7a2b42dd99bfa14ee35660b3f5d3fc376261d6ba9a6d6b\",\"event\":\"\",\"event_payload\":\"ec223b8f0d0000004c696e757820696e6974726400\"},{\"imr\":1,\"event_type\":2147483655,\"digest\":\"214b0bef1379756011344877743fdc2a5382bac6e70362d624ccf3f654407c1b4badf7d8f9295dd3dabdef65b27677e0\",\"event\":\"\",\"event_payload\":\"4578697420426f6f7420536572766963657320496e766f636174696f6e\"},{\"imr\":1,\"event_type\":2147483655,\"digest\":\"0a2e01c85deae718a530ad8c6d20a84009babe6c8989269e950d8cf440c6e997695e64d455c4174a652cd080f6230b74\",\"event\":\"\",\"event_payload\":\"4578697420426f6f742053657276696365732052657475726e656420776974682053756363657373\"},{\"imr\":3,\"event_type\":134217729,\"digest\":\"f9974020ef507068183313d0ca808e0d1ca9b2d1ad0c61f5784e7157c362c06536f5ddacdad4451693f48fcc72fff624\",\"event\":\"system-preparing\",\"event_payload\":\"\"},{\"imr\":3,\"event_type\":134217729,\"digest\":\"837c2dd72f8a4c740159e5e042ed79b7eaaa5ab3a151a45e27bc366bb8b27e6c3faec87aab1e95197d3e6d23308d448c\",\"event\":\"app-id\",\"event_payload\":\"3763bc34552cf3a27ff71ad5f7a90471562a1a2d\"},{\"imr\":3,\"event_type\":134217729,\"digest\":\"b883bee0b216618b1ce0e7a1bb4a9379b486cef8aadf0c682cb6e80c083f7982dbf104183c24a74693d860f4ffc8b72f\",\"event\":\"compose-hash\",\"event_payload\":\"3763bc34552cf3a27ff71ad5f7a90471562a1a2df552dfc1998cba2d60da27e7\"},{\"imr\":3,\"event_type\":134217729,\"digest\":\"9af8567194629f6798aafa76d95427bb7e84864145ee79fdf4ca29f5c743c159379c1c805934decfa513821edaa77fb7\",\"event\":\"instance-id\",\"event_payload\":\"c3714eb66990eace777b4e664c16e09375dec4c9\"},{\"imr\":3,\"event_type\":134217729,\"digest\":\"98bd7e6bd3952720b65027fd494834045d06b4a714bf737a06b874638b3ea00ff402f7f583e3e3b05e921c8570433ac6\",\"event\":\"boot-mr-done\",\"event_payload\":\"\"},{\"imr\":3,\"event_type\":134217729,\"digest\":\"74ca939b8c3c74aab3c30966a788f7743951d54a936a711dd01422f003ff9df6666f3cc54975d2e4f35c829865583f0f\",\"event\":\"key-provider\",\"event_payload\":\"7b226e616d65223a226c6f63616c2d736778222c226964223a2231623761343933373834303332343962363938366139303738343463616230393231656361333264643437653635376633633130333131636361656363663862227d\"},{\"imr\":3,\"event_type\":134217729,\"digest\":\"1a76b2a80a0be71eae59f80945d876351a7a3fb8e9fd1ff1cede5734aa84ea11fd72b4edfbb6f04e5a85edd114c751bd\",\"event\":\"system-ready\",\"event_payload\":\"\"},{\"imr\":3,\"event_type\":134217729,\"digest\":\"64c2c025c0e916a1802e8beee830954fe5693f3fb0f2ffb077d7d3f149c5525e2c1bfb0a15046b84f4038ba6f152588f\",\"event\":\"LIUM_MINER_HOTKEY\",\"event_payload\":\"35443333507467666b475951734d4c434d724b426a56454d54455371525944466666543672396a4264614833654c7434\"}]","report_data":"12340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","vm_config":"{\"spec_version\": 1, \"os_image_hash\": \"14ad42d0270b444eaeb53918a5a94d9b17eec7a817cd336173b17c5327541c67\", \"cpu_count\": 16, \"memory_size\": 68719476736, \"qemu_single_pass_add_pages\": false, \"pic\": false, \"pci_hole64_size\": 17592186044416, \"num_gpus\": 1, \"num_nvswitches\": 0, \"hugepages\": false, \"hotplug_off\": true, \"qemu_version\": \"9.2.1\"}"} diff --git 
a/verifier/src/main.rs b/verifier/src/main.rs new file mode 100644 index 00000000..f145d71d --- /dev/null +++ b/verifier/src/main.rs @@ -0,0 +1,215 @@
+// SPDX-FileCopyrightText: © 2024-2025 Phala Network
+//
+// SPDX-License-Identifier: Apache-2.0
+
+use std::sync::Arc;
+
+use anyhow::{Context, Result};
+use clap::Parser;
+use figment::{
+    providers::{Env, Format, Toml},
+    Figment,
+};
+use rocket::{fairing::AdHoc, get, post, serde::json::Json, State};
+use serde::{Deserialize, Serialize};
+use tracing::{error, info};
+
+mod types;
+mod verification;
+
+use types::{VerificationRequest, VerificationResponse};
+use verification::CvmVerifier;
+
+#[derive(Parser)]
+#[command(name = "dstack-verifier")]
+#[command(about = "HTTP server providing CVM verification services")]
+struct Cli {
+    #[arg(short, long, default_value = "dstack-verifier.toml")]
+    config: String,
+
+    /// Oneshot mode: verify a single report JSON file and exit
+    #[arg(long, value_name = "FILE")]
+    verify: Option<String>,
+}
+
+#[derive(Debug, Clone, Deserialize, Serialize)]
+pub struct Config {
+    pub address: String,
+    pub port: u16,
+    pub image_cache_dir: String,
+    pub pccs_url: Option<String>,
+    pub image_download_url: String,
+    pub image_download_timeout_secs: u64,
+}
+
+#[post("/verify", data = "<request>")]
+async fn verify_cvm(
+    verifier: &State<Arc<CvmVerifier>>,
+    request: Json<VerificationRequest>,
+) -> Json<VerificationResponse> {
+    match verifier.verify(&request.into_inner()).await {
+        Ok(response) => Json(response),
+        Err(e) => {
+            error!("Verification failed: {:?}", e);
+            Json(VerificationResponse {
+                is_valid: false,
+                details: types::VerificationDetails {
+                    quote_verified: false,
+                    event_log_verified: false,
+                    os_image_hash_verified: false,
+                    report_data: None,
+                    tcb_status: None,
+                    advisory_ids: vec![],
+                    app_info: None,
+                    acpi_tables: None,
+                    rtmr_debug: None,
+                },
+                reason: Some(format!("Internal error: {}", e)),
+            })
+        }
+    }
+}
+
+#[get("/health")]
+fn health() -> Json<serde_json::Value> {
+    Json(serde_json::json!({
+        "status": "ok",
+        "service": "dstack-verifier"
+    }))
+}
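For quick manual testing of the two routes above, here is a minimal client sketch (illustrative only, not part of this patch). It assumes `reqwest` with its `json` feature plus `tokio`, that the server is listening on the default port 8080 from `dstack-verifier.toml`, and that the placeholder field values are replaced with real payloads such as those in `verifier/fixtures/quote-report.json`:

```rust
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholders: supply a real hex-encoded quote, the JSON event-log
    // string, and the JSON vm_config string from an actual report.
    let request = json!({
        "quote": "0400...",
        "event_log": "[]",
        "vm_config": "{}",
        "debug": true
    });
    let response: serde_json::Value = reqwest::Client::new()
        .post("http://localhost:8080/verify")
        .json(&request)
        .send()
        .await?
        .json()
        .await?;
    println!("is_valid = {}", response["is_valid"]);
    Ok(())
}
```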
+
+async fn run_oneshot(file_path: &str, config: &Config) -> anyhow::Result<()> {
+    use std::fs;
+
+    info!("Running in oneshot mode for file: {}", file_path);
+
+    // Read the JSON file
+    let content = fs::read_to_string(file_path)
+        .map_err(|e| anyhow::anyhow!("Failed to read file {}: {}", file_path, e))?;
+
+    // Parse as VerificationRequest
+    let mut request: VerificationRequest = serde_json::from_str(&content)
+        .map_err(|e| anyhow::anyhow!("Failed to parse JSON: {}", e))?;
+
+    // Ensure PCCS URL is populated from config when the report omits it
+    request.pccs_url = request.pccs_url.or_else(|| config.pccs_url.clone());
+
+    // Create verifier
+    let verifier = CvmVerifier::new(
+        config.image_cache_dir.clone(),
+        config.image_download_url.clone(),
+        std::time::Duration::from_secs(config.image_download_timeout_secs),
+    );
+
+    // Run verification
+    info!("Starting verification...");
+    let response = verifier.verify(&request).await?;
+
+    // Persist response next to the input file for convenience
+    let output_path = format!("{file_path}.verification.json");
+    let serialized = serde_json::to_string_pretty(&response)
+        .map_err(|e| anyhow::anyhow!("Failed to encode verification result: {}", e))?;
+    fs::write(&output_path, serialized).map_err(|e| {
+        anyhow::anyhow!(
+            "Failed to write verification result to {}: {}",
+            output_path,
+            e
+        )
+    })?;
+    info!("Stored verification result at {}", output_path);
+
+    // Output results
+    println!("\n=== Verification Results ===");
+    println!("Valid: {}", response.is_valid);
+    println!("Quote verified: {}", response.details.quote_verified);
+    println!(
+        "Event log verified: {}",
+        response.details.event_log_verified
+    );
+    println!(
+        "OS image hash verified: {}",
+        response.details.os_image_hash_verified
+    );
+
+    if let Some(tcb_status) = &response.details.tcb_status {
+        println!("TCB status: {}", tcb_status);
+    }
+
+    if !response.details.advisory_ids.is_empty() {
+        println!("Advisory IDs: {:?}", response.details.advisory_ids);
+    }
+
+    if let Some(reason) = &response.reason {
+        println!("Reason: {}", reason);
+    }
+
+    if let Some(report_data) = &response.details.report_data {
+        println!("Report data: {}", report_data);
+    }
+
+    if let Some(app_info) = &response.details.app_info {
+        println!("\n=== App Info ===");
+        println!("App ID: {}", hex::encode(&app_info.app_id));
+        println!("Instance ID: {}", hex::encode(&app_info.instance_id));
+        println!("Compose hash: {}", hex::encode(&app_info.compose_hash));
+        println!("MRTD: {}", hex::encode(app_info.mrtd));
+        println!("RTMR0: {}", hex::encode(app_info.rtmr0));
+        println!("RTMR1: {}", hex::encode(app_info.rtmr1));
+        println!("RTMR2: {}", hex::encode(app_info.rtmr2));
+    }
+
+    // Exit with appropriate code
+    if !response.is_valid {
+        std::process::exit(1);
+    }
+
+    Ok(())
+}
+
+#[rocket::main]
+async fn main() -> Result<()> {
+    tracing_subscriber::fmt::try_init().ok();
+
+    let cli = Cli::parse();
+
+    let default_config_str = include_str!("../dstack-verifier.toml");
+
+    let figment = Figment::from(rocket::Config::default())
+        .merge(Toml::string(default_config_str))
+        .merge(Toml::file(&cli.config))
+        .merge(Env::prefixed("DSTACK_VERIFIER_"));
+
+    let config: Config = figment.extract().context("Failed to load configuration")?;
+
+    // Check for oneshot mode
+    if let Some(file_path) = cli.verify {
+        // Run oneshot verification and exit. main is already async under
+        // #[rocket::main], so run it directly rather than spinning up a
+        // nested runtime (block_on would panic inside an async context).
+        if let Err(e) = run_oneshot(&file_path, &config).await {
+            error!("Oneshot verification failed: {:#}", e);
+            std::process::exit(1);
+        }
+        std::process::exit(0);
+    }
+
+    let verifier = Arc::new(CvmVerifier::new(
+        config.image_cache_dir.clone(),
+        config.image_download_url.clone(),
+        std::time::Duration::from_secs(config.image_download_timeout_secs),
+    ));
+
+    rocket::custom(figment)
+        .mount("/", rocket::routes![verify_cvm, health])
+        .manage(verifier)
+        .attach(AdHoc::on_liftoff("Startup", |_| {
+            Box::pin(async {
+                info!("dstack-verifier started successfully");
+            })
+        }))
+        .launch()
+        .await
+        .map_err(|err| anyhow::anyhow!("launch rocket failed: {err:?}"))?;
+    Ok(())
+}
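Configuration resolution in `main` above is layered: the TOML bundled at compile time supplies defaults, the file named by `--config` overrides it, and `DSTACK_VERIFIER_*` environment variables override both. A self-contained sketch of the same Figment pattern, trimmed to two fields (illustrative only, not part of the patch):

```rust
use figment::{
    providers::{Env, Format, Toml},
    Figment,
};
use serde::Deserialize;

#[derive(Deserialize)]
struct Partial {
    address: String,
    port: u16,
}

fn main() -> Result<(), figment::Error> {
    // Later merges win: e.g. running with DSTACK_VERIFIER_PORT=9090
    // overrides the TOML default of 8080.
    let cfg: Partial = Figment::new()
        .merge(Toml::string("address = \"0.0.0.0\"\nport = 8080"))
        .merge(Env::prefixed("DSTACK_VERIFIER_"))
        .extract()?;
    println!("listening on {}:{}", cfg.address, cfg.port);
    Ok(())
}
```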
diff --git a/verifier/src/types.rs b/verifier/src/types.rs new file mode 100644 index 00000000..e4e5d2c5 --- /dev/null +++ b/verifier/src/types.rs @@ -0,0 +1,81 @@
+// SPDX-FileCopyrightText: © 2024-2025 Phala Network
+//
+// SPDX-License-Identifier: Apache-2.0
+
+use ra_tls::attestation::AppInfo;
+use serde::{Deserialize, Serialize};
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct VerificationRequest {
+    pub quote: String,
+    pub event_log: String,
+    pub vm_config: String,
+    pub pccs_url: Option<String>,
+    pub debug: Option<bool>,
+}
+
+#[derive(Debug, Clone, Serialize)]
+pub struct VerificationResponse {
+    pub is_valid: bool,
+    pub details: VerificationDetails,
+    pub reason: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize)]
+pub struct VerificationDetails {
+    pub quote_verified: bool,
+    pub event_log_verified: bool,
+    pub os_image_hash_verified: bool,
+    pub report_data: Option<String>,
+    pub tcb_status: Option<String>,
+    pub advisory_ids: Vec<String>,
+    pub app_info: Option<AppInfo>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub acpi_tables: Option<AcpiTables>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub rtmr_debug: Option<Vec<RtmrMismatch>>,
+}
+
+#[derive(Debug, Clone, Serialize)]
+pub struct AcpiTables {
+    pub tables: String,
+    pub rsdp: String,
+    pub loader: String,
+}
+
+#[derive(Debug, Clone, Serialize)]
+pub struct RtmrMismatch {
+    pub rtmr: String,
+    pub expected: String,
+    pub actual: String,
+    pub events: Vec<RtmrEventEntry>,
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub missing_expected_digests: Vec<String>,
+}
+
+#[derive(Debug, Clone, Serialize)]
+pub struct RtmrEventEntry {
+    pub index: usize,
+    pub event_type: u32,
+    pub event_name: String,
+    pub actual_digest: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub expected_digest: Option<String>,
+    pub payload_len: usize,
+    pub status: RtmrEventStatus,
+}
+
+#[derive(Debug, Clone, Serialize)]
+#[serde(rename_all = "snake_case")]
+pub enum RtmrEventStatus {
+    Match,
+    Mismatch,
+    Extra,
+    Missing,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ErrorResponse {
+    pub error: String,
+    pub details: Option<String>,
+}
diff --git a/verifier/src/verification.rs b/verifier/src/verification.rs new file mode 100644 index 00000000..a53da571 --- /dev/null +++ b/verifier/src/verification.rs @@ -0,0 +1,786 @@
+// SPDX-FileCopyrightText: © 2024-2025 Phala Network
+//
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{
+    ffi::OsStr,
+    path::{Path, PathBuf},
+    time::Duration,
+};
+
+use anyhow::{anyhow, bail, Context, Result};
+use cc_eventlog::TdxEventLog as EventLog;
+use dstack_mr::{RtmrLog, TdxMeasurementDetails, TdxMeasurements};
+use dstack_types::VmConfig;
+use ra_tls::attestation::{Attestation, VerifiedAttestation};
+use serde::{Deserialize, Serialize};
+use sha2::{Digest as _, Sha256, Sha384};
+use tokio::{io::AsyncWriteExt, process::Command};
+use tracing::{debug, info, warn};
+
+use crate::types::{
+    AcpiTables, RtmrEventEntry, RtmrEventStatus, RtmrMismatch, VerificationDetails,
+    VerificationRequest, VerificationResponse,
+};
+
+#[derive(Debug, Clone)]
+struct RtmrComputationResult {
+    event_indices: [Vec<usize>; 4],
+    rtmrs: [[u8; 48]; 4],
+}
+
+fn replay_event_logs(eventlog: &[EventLog]) -> Result<RtmrComputationResult> {
+    let mut event_indices: [Vec<usize>; 4] = Default::default();
+    let mut rtmrs: [[u8; 48]; 4] = [[0u8; 48]; 4];
+
+    for idx in 0..4 {
+        for (event_idx, event) in eventlog.iter().enumerate() {
+            event
+                .validate()
+                .context("Failed to validate event digest")?;
+
+            if event.imr == idx {
+                event_indices[idx as usize].push(event_idx);
+                let mut hasher = Sha384::new();
+                hasher.update(rtmrs[idx as usize]);
+                hasher.update(event.digest);
+                rtmrs[idx as usize] = hasher.finalize().into();
+            }
+        }
+    }
+
+    Ok(RtmrComputationResult {
+        event_indices,
+        rtmrs,
+    })
+}
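The replay rule used by `replay_event_logs` is the standard RTMR extension: each register starts as 48 zero bytes and each event is folded in as `rtmr = SHA384(rtmr || event_digest)`. A toy sketch of a single extend step (illustrative only; the digest here is synthetic, not a real event digest):

```rust
use sha2::{Digest, Sha384};

// One RTMR extend step: hash the old register value followed by the
// 48-byte event digest, yielding the new register value.
fn extend(rtmr: [u8; 48], event_digest: &[u8; 48]) -> [u8; 48] {
    let mut hasher = Sha384::new();
    hasher.update(rtmr);
    hasher.update(event_digest);
    hasher.finalize().into()
}

fn main() {
    let mut rtmr = [0u8; 48]; // initial RTMR value
    let synthetic_digest = [0x11u8; 48]; // stand-in for an event digest
    rtmr = extend(rtmr, &synthetic_digest);
    println!("{}", hex::encode(rtmr));
}
```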
+
+fn collect_rtmr_mismatch(
+    rtmr_label: &str,
+    expected: &[u8],
+    actual: &[u8],
+    expected_sequence: &RtmrLog,
+    actual_indices: &[usize],
+    event_log: &[EventLog],
+) -> RtmrMismatch {
+    let expected_hex = hex::encode(expected);
+    let actual_hex = hex::encode(actual);
+
+    let mut events = Vec::new();
+
+    for (&idx, expected_digest) in actual_indices.iter().zip(expected_sequence.iter()) {
+        match event_log.get(idx) {
+            Some(event) => {
+                let event_name = if event.event.is_empty() {
+                    "(unnamed)".to_string()
+                } else {
+                    event.event.clone()
+                };
+                let status = if event.digest == expected_digest.as_slice() {
+                    RtmrEventStatus::Match
+                } else {
+                    RtmrEventStatus::Mismatch
+                };
+                events.push(RtmrEventEntry {
+                    index: idx,
+                    event_type: event.event_type,
+                    event_name,
+                    actual_digest: hex::encode(event.digest),
+                    expected_digest: Some(hex::encode(expected_digest)),
+                    payload_len: event.event_payload.len(),
+                    status,
+                });
+            }
+            None => {
+                events.push(RtmrEventEntry {
+                    index: idx,
+                    event_type: 0,
+                    event_name: "(missing)".to_string(),
+                    actual_digest: String::new(),
+                    expected_digest: Some(hex::encode(expected_digest)),
+                    payload_len: 0,
+                    status: RtmrEventStatus::Missing,
+                });
+            }
+        }
+    }
+
+    for &idx in actual_indices.iter().skip(expected_sequence.len()) {
+        let (event_type, event_name, actual_digest, payload_len) = match event_log.get(idx) {
+            Some(event) => (
+                event.event_type,
+                if event.event.is_empty() {
+                    "(unnamed)".to_string()
+                } else {
+                    event.event.clone()
+                },
+                hex::encode(event.digest),
+                event.event_payload.len(),
+            ),
+            None => (0, "(missing)".to_string(), String::new(), 0),
+        };
+        events.push(RtmrEventEntry {
+            index: idx,
+            event_type,
+            event_name,
+            actual_digest,
+            expected_digest: None,
+            payload_len,
+            status: RtmrEventStatus::Extra,
+        });
+    }
+
+    let missing_expected_digests = if expected_sequence.len() > actual_indices.len() {
+        expected_sequence[actual_indices.len()..]
+            .iter()
+            .map(hex::encode)
+            .collect()
+    } else {
+        Vec::new()
+    };
+
+    RtmrMismatch {
+        rtmr: rtmr_label.to_string(),
+        expected: expected_hex,
+        actual: actual_hex,
+        events,
+        missing_expected_digests,
+    }
+}
+
+const MEASUREMENT_CACHE_VERSION: u32 = 1;
+
+#[derive(Clone, Serialize, Deserialize)]
+struct CachedMeasurement {
+    version: u32,
+    measurements: TdxMeasurements,
+}
+
+pub struct CvmVerifier {
+    pub image_cache_dir: String,
+    pub download_url: String,
+    pub download_timeout: Duration,
+}
+
+impl CvmVerifier {
+    pub fn new(image_cache_dir: String, download_url: String, download_timeout: Duration) -> Self {
+        Self {
+            image_cache_dir,
+            download_url,
+            download_timeout,
+        }
+    }
+
+    fn measurement_cache_dir(&self) -> PathBuf {
+        Path::new(&self.image_cache_dir).join("measurements")
+    }
+
+    fn measurement_cache_path(&self, cache_key: &str) -> PathBuf {
+        self.measurement_cache_dir()
+            .join(format!("{cache_key}.json"))
+    }
+
+    fn vm_config_cache_key(vm_config: &VmConfig) -> Result<String> {
+        let serialized = serde_json::to_vec(vm_config)
+            .context("Failed to serialize VM config for cache key computation")?;
+        Ok(hex::encode(Sha256::digest(&serialized)))
+    }
+
+    fn load_measurements_from_cache(&self, cache_key: &str) -> Result<Option<TdxMeasurements>> {
+        let path = self.measurement_cache_path(cache_key);
+        if !path.exists() {
+            return Ok(None);
+        }
+
+        let path_display = path.display().to_string();
+        let contents = match fs_err::read(&path) {
+            Ok(data) => data,
+            Err(e) => {
+                warn!("Failed to read measurement cache {}: {e:?}", path_display);
+                return Ok(None);
+            }
+        };
+
+        let cached: CachedMeasurement = match serde_json::from_slice(&contents) {
+            Ok(entry) => entry,
+            Err(e) => {
+                warn!("Failed to parse measurement cache {}: {e:?}", path_display);
+                return Ok(None);
+            }
+        };
+
+        if cached.version != MEASUREMENT_CACHE_VERSION {
+            debug!(
+                "Ignoring measurement cache {} due to version mismatch (found {}, expected {})",
+                path_display, cached.version, MEASUREMENT_CACHE_VERSION
+            );
+            return Ok(None);
+        }
+
+        debug!("Loaded measurement cache entry {}", cache_key);
+        Ok(Some(cached.measurements))
+    }
cache directory")?; + + let path = self.measurement_cache_path(cache_key); + let mut tmp = tempfile::NamedTempFile::new_in(&cache_dir) + .context("Failed to create temporary cache file")?; + + let entry = CachedMeasurement { + version: MEASUREMENT_CACHE_VERSION, + measurements: measurements.clone(), + }; + serde_json::to_writer(tmp.as_file_mut(), &entry) + .context("Failed to serialize measurement cache entry")?; + tmp.as_file_mut() + .sync_all() + .context("Failed to flush measurement cache entry to disk")?; + + tmp.persist(&path).map_err(|e| { + anyhow!( + "Failed to persist measurement cache to {}: {e}", + path.display() + ) + })?; + debug!("Stored measurement cache entry {}", cache_key); + Ok(()) + } + + fn compute_measurement_details( + &self, + vm_config: &VmConfig, + fw_path: &Path, + kernel_path: &Path, + initrd_path: &Path, + kernel_cmdline: &str, + ) -> Result { + let firmware = fw_path.display().to_string(); + let kernel = kernel_path.display().to_string(); + let initrd = initrd_path.display().to_string(); + + let details = dstack_mr::Machine::builder() + .cpu_count(vm_config.cpu_count) + .memory_size(vm_config.memory_size) + .firmware(&firmware) + .kernel(&kernel) + .initrd(&initrd) + .kernel_cmdline(kernel_cmdline) + .root_verity(true) + .hotplug_off(vm_config.hotplug_off) + .maybe_two_pass_add_pages(vm_config.qemu_single_pass_add_pages) + .maybe_pic(vm_config.pic) + .maybe_qemu_version(vm_config.qemu_version.clone()) + .maybe_pci_hole64_size(if vm_config.pci_hole64_size > 0 { + Some(vm_config.pci_hole64_size) + } else { + None + }) + .hugepages(vm_config.hugepages) + .num_gpus(vm_config.num_gpus) + .num_nvswitches(vm_config.num_nvswitches) + .build() + .measure_with_logs() + .context("Failed to compute expected MRs")?; + + Ok(details) + } + + fn compute_measurements( + &self, + vm_config: &VmConfig, + fw_path: &Path, + kernel_path: &Path, + initrd_path: &Path, + kernel_cmdline: &str, + ) -> Result { + self.compute_measurement_details( + vm_config, + fw_path, + kernel_path, + initrd_path, + kernel_cmdline, + ) + .map(|details| details.measurements) + } + + fn load_or_compute_measurements( + &self, + vm_config: &VmConfig, + fw_path: &Path, + kernel_path: &Path, + initrd_path: &Path, + kernel_cmdline: &str, + ) -> Result { + let cache_key = Self::vm_config_cache_key(vm_config)?; + + if let Some(measurements) = self.load_measurements_from_cache(&cache_key)? 
+
+    pub async fn verify(&self, request: &VerificationRequest) -> Result<VerificationResponse> {
+        let quote = hex::decode(&request.quote).context("Failed to decode quote hex")?;
+
+        // Event log is always a JSON string
+        let event_log = request.event_log.as_bytes().to_vec();
+
+        let attestation = Attestation::new(quote, event_log)
+            .context("Failed to create attestation from quote and event log")?;
+
+        let debug = request.debug.unwrap_or(false);
+
+        let mut details = VerificationDetails {
+            quote_verified: false,
+            event_log_verified: false,
+            os_image_hash_verified: false,
+            report_data: None,
+            tcb_status: None,
+            advisory_ids: vec![],
+            app_info: None,
+            acpi_tables: None,
+            rtmr_debug: None,
+        };
+
+        let vm_config: VmConfig =
+            serde_json::from_str(&request.vm_config).context("Failed to decode VM config JSON")?;
+
+        // Step 1: Verify the TDX quote using dcap-qvl
+        let verified_attestation = match self.verify_quote(attestation, &request.pccs_url).await {
+            Ok(att) => {
+                details.quote_verified = true;
+                details.tcb_status = Some(att.report.status.clone());
+                details.advisory_ids = att.report.advisory_ids.clone();
+                // Extract and store report_data
+                if let Ok(report_data) = att.decode_report_data() {
+                    details.report_data = Some(hex::encode(report_data));
+                }
+                att
+            }
+            Err(e) => {
+                return Ok(VerificationResponse {
+                    is_valid: false,
+                    details,
+                    reason: Some(format!("Quote verification failed: {}", e)),
+                });
+            }
+        };
+
+        // Step 2: Verify that the os-image-hash matches using dstack-mr
+        if let Err(e) = self
+            .verify_os_image_hash(&vm_config, &verified_attestation, debug, &mut details)
+            .await
+        {
+            return Ok(VerificationResponse {
+                is_valid: false,
+                details,
+                reason: Some(format!("OS image hash verification failed: {e:#}")),
+            });
+        }
+        details.os_image_hash_verified = true;
+
+        // Step 3: Verify the event log and decode the app info
+        match verified_attestation.decode_app_info(false) {
+            Ok(mut info) => {
+                info.os_image_hash = vm_config.os_image_hash;
+                details.event_log_verified = true;
+                details.app_info = Some(info);
+            }
+            Err(e) => {
+                return Ok(VerificationResponse {
+                    is_valid: false,
+                    details,
+                    reason: Some(format!("Event log verification failed: {}", e)),
+                });
+            }
+        };
+
+        Ok(VerificationResponse {
+            is_valid: true,
+            details,
+            reason: None,
+        })
+    }
+
+    async fn verify_quote(
+        &self,
+        attestation: Attestation,
+        pccs_url: &Option<String>,
+    ) -> Result<VerifiedAttestation> {
+        // Extract report data from quote
+        let report_data = attestation.decode_report_data()?;
+
+        attestation
+            .verify(&report_data, pccs_url.as_deref())
+            .await
+            .context("Quote verification failed")
+    }
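The method below leans on an image-identity convention enforced in `download_image` further down: `os_image_hash` is the SHA-256 of the archive's `sha256sum.txt`, which in turn pins every file of the OS image. Recomputing it by hand looks roughly like this (illustrative sketch; the path is hypothetical):

```rust
use sha2::{Digest, Sha256};

fn main() -> std::io::Result<()> {
    // Hypothetical path to an extracted image archive.
    let files_doc = std::fs::read_to_string("extracted/sha256sum.txt")?;
    // The image is identified by hashing the checksum manifest itself.
    let os_image_hash = hex::encode(Sha256::digest(files_doc.as_bytes()));
    println!("os_image_hash = {os_image_hash}");
    Ok(())
}
```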
+
+    async fn verify_os_image_hash(
+        &self,
+        vm_config: &VmConfig,
+        attestation: &VerifiedAttestation,
+        debug: bool,
+        details: &mut VerificationDetails,
+    ) -> Result<()> {
+        let hex_os_image_hash = hex::encode(&vm_config.os_image_hash);
+
+        // Get boot info from attestation
+        let report = attestation
+            .report
+            .report
+            .as_td10()
+            .context("Failed to decode TD report")?;
+
+        // Extract the verified MRs from the report
+        let verified_mrs = Mrs {
+            mrtd: report.mr_td.to_vec(),
+            rtmr0: report.rt_mr0.to_vec(),
+            rtmr1: report.rt_mr1.to_vec(),
+            rtmr2: report.rt_mr2.to_vec(),
+        };
+
+        // Get image directory
+        let image_dir = Path::new(&self.image_cache_dir)
+            .join("images")
+            .join(&hex_os_image_hash);
+
+        let metadata_path = image_dir.join("metadata.json");
+        if !metadata_path.exists() {
+            info!("Image {} not found, downloading", hex_os_image_hash);
+            tokio::time::timeout(
+                self.download_timeout,
+                self.download_image(&hex_os_image_hash, &image_dir),
+            )
+            .await
+            .context("Download image timeout")?
+            .with_context(|| format!("Failed to download image {hex_os_image_hash}"))?;
+        }
+
+        let image_info =
+            fs_err::read_to_string(metadata_path).context("Failed to read image metadata")?;
+        let image_info: dstack_types::ImageInfo =
+            serde_json::from_str(&image_info).context("Failed to parse image metadata")?;
+
+        let fw_path = image_dir.join(&image_info.bios);
+        let kernel_path = image_dir.join(&image_info.kernel);
+        let initrd_path = image_dir.join(&image_info.initrd);
+        let kernel_cmdline = image_info.cmdline + " initrd=initrd";
+
+        // Use dstack-mr to compute expected MRs
+        let (mrs, expected_logs) = if debug {
+            let TdxMeasurementDetails {
+                measurements,
+                rtmr_logs,
+                acpi_tables,
+            } = self
+                .compute_measurement_details(
+                    vm_config,
+                    &fw_path,
+                    &kernel_path,
+                    &initrd_path,
+                    &kernel_cmdline,
+                )
+                .context("Failed to compute expected measurements")?;
+
+            details.acpi_tables = Some(AcpiTables {
+                tables: hex::encode(&acpi_tables.tables),
+                rsdp: hex::encode(&acpi_tables.rsdp),
+                loader: hex::encode(&acpi_tables.loader),
+            });
+
+            (measurements, Some(rtmr_logs))
+        } else {
+            (
+                self.load_or_compute_measurements(
+                    vm_config,
+                    &fw_path,
+                    &kernel_path,
+                    &initrd_path,
+                    &kernel_cmdline,
+                )
+                .context("Failed to obtain expected measurements")?,
+                None,
+            )
+        };
+
+        let expected_mrs = Mrs {
+            mrtd: mrs.mrtd.clone(),
+            rtmr0: mrs.rtmr0.clone(),
+            rtmr1: mrs.rtmr1.clone(),
+            rtmr2: mrs.rtmr2.clone(),
+        };
+
+        let event_log: Vec<EventLog> = serde_json::from_slice(&attestation.raw_event_log)
+            .context("Failed to parse event log for mismatch analysis")?;
+
+        let computation_result = replay_event_logs(&event_log)
+            .context("Failed to replay event logs for mismatch analysis")?;
+
+        if computation_result.rtmrs[3] != report.rt_mr3 {
+            bail!("RTMR3 mismatch");
+        }
+
+        match expected_mrs.assert_eq(&verified_mrs) {
+            Ok(()) => Ok(()),
+            Err(e) => {
+                let result = Err(e).context("MRs do not match");
+                if !debug {
+                    return result;
+                }
+                let Some(expected_logs) = expected_logs.as_ref() else {
+                    return result;
+                };
+                let mut rtmr_debug = Vec::new();
+
+                if expected_mrs.rtmr0 != verified_mrs.rtmr0 {
+                    rtmr_debug.push(collect_rtmr_mismatch(
+                        "RTMR0",
+                        &expected_mrs.rtmr0,
+                        &verified_mrs.rtmr0,
+                        &expected_logs[0],
+                        &computation_result.event_indices[0],
+                        &event_log,
+                    ));
+                }
+
+                if expected_mrs.rtmr1 != verified_mrs.rtmr1 {
+                    rtmr_debug.push(collect_rtmr_mismatch(
+                        "RTMR1",
+                        &expected_mrs.rtmr1,
+                        &verified_mrs.rtmr1,
+                        &expected_logs[1],
+                        &computation_result.event_indices[1],
+                        &event_log,
+                    ));
+                }
+
+                if expected_mrs.rtmr2 != verified_mrs.rtmr2 {
+                    rtmr_debug.push(collect_rtmr_mismatch(
+                        "RTMR2",
+                        &expected_mrs.rtmr2,
+                        &verified_mrs.rtmr2,
+                        &expected_logs[2],
+                        &computation_result.event_indices[2],
+                        &event_log,
+                    ));
+                }
+
+                if !rtmr_debug.is_empty() {
+                    details.rtmr_debug = Some(rtmr_debug);
+                }
+
+                result
+            }
+        }
+    }
+
+    async fn download_image(&self, hex_os_image_hash: &str, dst_dir: &Path) -> Result<()> {
+        let url = self
+            .download_url
+            .replace("{OS_IMAGE_HASH}", hex_os_image_hash);
+
+        // Create a temporary directory for extraction within the cache directory
+        let cache_dir = Path::new(&self.image_cache_dir).join("images").join("tmp");
+        fs_err::create_dir_all(&cache_dir).context("Failed to create cache directory")?;
+        let auto_delete_temp_dir = tempfile::Builder::new()
+            .prefix("tmp-download-")
+            .tempdir_in(&cache_dir)
+            .context("Failed to create temporary directory")?;
+        let tmp_dir = auto_delete_temp_dir.path();
+
+        info!("Downloading image from {}", url);
+        let client = reqwest::Client::new();
+        let response = client
+            .get(&url)
+            .send()
+            .await
+            .context("Failed to download image")?;
+
+        if !response.status().is_success() {
+            bail!(
+                "Failed to download image: HTTP status {}, url: {url}",
+                response.status(),
+            );
+        }
+
+        // Save the tarball to a temporary file using streaming
+        let tarball_path = tmp_dir.join("image.tar.gz");
+        let mut file = tokio::fs::File::create(&tarball_path)
+            .await
+            .context("Failed to create tarball file")?;
+        let mut response = response;
+        while let Some(chunk) = response.chunk().await? {
+            file.write_all(&chunk)
+                .await
+                .context("Failed to write chunk to file")?;
+        }
+
+        let extracted_dir = tmp_dir.join("extracted");
+        fs_err::create_dir_all(&extracted_dir).context("Failed to create extraction directory")?;
+
+        // Extract the tarball
+        let output = Command::new("tar")
+            .arg("xzf")
+            .arg(&tarball_path)
+            .current_dir(&extracted_dir)
+            .output()
+            .await
+            .context("Failed to extract tarball")?;
+
+        if !output.status.success() {
+            bail!(
+                "Failed to extract tarball: {}",
+                String::from_utf8_lossy(&output.stderr)
+            );
+        }
+
+        // Verify checksum
+        let output = Command::new("sha256sum")
+            .arg("-c")
+            .arg("sha256sum.txt")
+            .current_dir(&extracted_dir)
+            .output()
+            .await
+            .context("Failed to verify checksum")?;
+
+        if !output.status.success() {
+            bail!(
+                "Checksum verification failed: {}",
+                String::from_utf8_lossy(&output.stderr)
+            );
+        }
+
+        // Remove the files that are not listed in sha256sum.txt
+        let sha256sum_path = extracted_dir.join("sha256sum.txt");
+        let files_doc =
+            fs_err::read_to_string(&sha256sum_path).context("Failed to read sha256sum.txt")?;
+        let listed_files: Vec<&OsStr> = files_doc
+            .lines()
+            .flat_map(|line| line.split_whitespace().nth(1))
+            .map(|s| s.as_ref())
+            .collect();
+        let files = fs_err::read_dir(&extracted_dir).context("Failed to read directory")?;
+        for file in files {
+            let file = file.context("Failed to read directory entry")?;
+            let filename = file.file_name();
+            if !listed_files.contains(&filename.as_os_str()) {
+                if file.path().is_dir() {
+                    fs_err::remove_dir_all(file.path()).context("Failed to remove directory")?;
+                } else {
+                    fs_err::remove_file(file.path()).context("Failed to remove file")?;
+                }
+            }
+        }
+
+        // os_image_hash must equal the SHA-256 of sha256sum.txt
+        let os_image_hash = Sha256::new_with_prefix(files_doc.as_bytes()).finalize();
+        if hex::encode(os_image_hash) != hex_os_image_hash {
+            bail!("os_image_hash does not match sha256sum of the sha256sum.txt");
+        }
+
+        // Sanity check: metadata.json must exist in the extracted archive
+        let metadata_path = extracted_dir.join("metadata.json");
+        if !metadata_path.exists() {
+            bail!("metadata.json not found in the extracted archive");
+        }
+
+        if dst_dir.exists() {
+            fs_err::remove_dir_all(dst_dir).context("Failed to remove destination directory")?;
+        }
+        let dst_dir_parent = dst_dir.parent().context("Failed to get parent directory")?;
+        fs_err::create_dir_all(dst_dir_parent).context("Failed to create parent directory")?;
+        // Move the extracted files to the destination directory
+        fs_err::rename(extracted_dir, dst_dir)
+            .context("Failed to move extracted files to destination directory")?;
+        Ok(())
+    }
+}
.context("Failed to move extracted files to destination directory")?; + Ok(()) + } +} + +#[derive(Debug, Clone)] +struct Mrs { + mrtd: Vec, + rtmr0: Vec, + rtmr1: Vec, + rtmr2: Vec, +} + +impl Mrs { + fn assert_eq(&self, other: &Self) -> Result<()> { + if self.mrtd != other.mrtd { + bail!( + "MRTD mismatch: expected={}, actual={}", + hex::encode(&self.mrtd), + hex::encode(&other.mrtd) + ); + } + if self.rtmr0 != other.rtmr0 { + bail!( + "RTMR0 mismatch: expected={}, actual={}", + hex::encode(&self.rtmr0), + hex::encode(&other.rtmr0) + ); + } + if self.rtmr1 != other.rtmr1 { + bail!( + "RTMR1 mismatch: expected={}, actual={}", + hex::encode(&self.rtmr1), + hex::encode(&other.rtmr1) + ); + } + if self.rtmr2 != other.rtmr2 { + bail!( + "RTMR2 mismatch: expected={}, actual={}", + hex::encode(&self.rtmr2), + hex::encode(&other.rtmr2) + ); + } + Ok(()) + } +} + +mod upgrade_authority { + use serde::{Deserialize, Serialize}; + + #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)] + pub struct BootInfo { + pub mrtd: Vec, + pub rtmr0: Vec, + pub rtmr1: Vec, + pub rtmr2: Vec, + pub rtmr3: Vec, + pub mr_aggregated: Vec, + pub os_image_hash: Vec, + pub mr_system: Vec, + pub app_id: Vec, + pub compose_hash: Vec, + pub instance_id: Vec, + pub device_id: Vec, + pub key_provider_info: Vec, + pub event_log: String, + pub tcb_status: String, + pub advisory_ids: Vec, + } +} diff --git a/verifier/test.sh b/verifier/test.sh new file mode 100755 index 00000000..4f9554cf --- /dev/null +++ b/verifier/test.sh @@ -0,0 +1,128 @@ +#!/bin/bash + +# SPDX-FileCopyrightText: © 2024-2025 Phala Network +# +# SPDX-License-Identifier: Apache-2.0 + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +# Configuration +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" +BINARY="$PROJECT_ROOT/target/debug/dstack-verifier" +LOG_FILE="/tmp/verifier-test.log" +FIXTURE_FILE="$SCRIPT_DIR/fixtures/quote-report.json" + +echo -e "${YELLOW}dstack-verifier Test Script${NC}" +echo "==================================" + +# Function to cleanup on exit +cleanup() { + echo -e "\n${YELLOW}Cleaning up...${NC}" + pkill -f dstack-verifier 2>/dev/null || true + sleep 1 +} +trap cleanup EXIT + +# Build the project +echo -e "${YELLOW}Building dstack-verifier...${NC}" +cd "$PROJECT_ROOT" +cargo build --bin dstack-verifier --quiet + +if [ ! -f "$BINARY" ]; then + echo -e "${RED}Error: Binary not found at $BINARY${NC}" + exit 1 +fi + +# Start the server +echo -e "${YELLOW}Starting dstack-verifier server...${NC}" +"$BINARY" >"$LOG_FILE" 2>&1 & +SERVER_PID=$! + +# Wait for server to start +echo -e "${YELLOW}Waiting for server to start...${NC}" +for i in {1..10}; do + if curl -s http://localhost:8080/health >/dev/null 2>&1; then + echo -e "${GREEN}Server started successfully${NC}" + break + fi + if [ $i -eq 10 ]; then + echo -e "${RED}Server failed to start${NC}" + echo "Server logs:" + cat "$LOG_FILE" + exit 1 + fi + sleep 1 +done + +# Check if fixture file exists +if [ ! 
-f "$FIXTURE_FILE" ]; then + echo -e "${RED}Error: Fixture file not found at $FIXTURE_FILE${NC}" + exit 1 +fi + +# Run the verification test +echo -e "${YELLOW}Running verification test...${NC}" +echo "Using fixture: $FIXTURE_FILE" + +RESPONSE=$(curl -s -X POST http://localhost:8080/verify \ + -H "Content-Type: application/json" \ + -d @"$FIXTURE_FILE") + +# Parse and display results +echo -e "\n${YELLOW}Test Results:${NC}" +echo "=============" + +IS_VALID=$(echo "$RESPONSE" | jq -r '.is_valid') +QUOTE_VERIFIED=$(echo "$RESPONSE" | jq -r '.details.quote_verified') +EVENT_LOG_VERIFIED=$(echo "$RESPONSE" | jq -r '.details.event_log_verified') +OS_IMAGE_VERIFIED=$(echo "$RESPONSE" | jq -r '.details.os_image_hash_verified') +TCB_STATUS=$(echo "$RESPONSE" | jq -r '.details.tcb_status') +REASON=$(echo "$RESPONSE" | jq -r '.reason // "null"') + +echo -e "Overall Valid: $([ "$IS_VALID" = "true" ] && echo -e "${GREEN}✓${NC}" || echo -e "${RED}✗${NC}") $IS_VALID" +echo -e "Quote Verified: $([ "$QUOTE_VERIFIED" = "true" ] && echo -e "${GREEN}✓${NC}" || echo -e "${RED}✗${NC}") $QUOTE_VERIFIED" +echo -e "Event Log Verified: $([ "$EVENT_LOG_VERIFIED" = "true" ] && echo -e "${GREEN}✓${NC}" || echo -e "${RED}✗${NC}") $EVENT_LOG_VERIFIED" +echo -e "OS Image Verified: $([ "$OS_IMAGE_VERIFIED" = "true" ] && echo -e "${GREEN}✓${NC}" || echo -e "${RED}✗${NC}") $OS_IMAGE_VERIFIED" +echo -e "TCB Status: ${GREEN}$TCB_STATUS${NC}" + +if [ "$REASON" != "null" ]; then + echo -e "${RED}Failure Reason:${NC}" + echo "$REASON" +fi + +# Show app info if available +APP_ID=$(echo "$RESPONSE" | jq -r '.details.app_info.app_id // "null"') +OS_IMAGE_HASH=$(echo "$RESPONSE" | jq -r '.details.app_info.os_image_hash // "null"') +if [ "$APP_ID" != "null" ]; then + echo -e "\n${YELLOW}App Information:${NC}" + echo "App ID: $APP_ID" + echo "Compose Hash: $(echo "$RESPONSE" | jq -r '.details.app_info.compose_hash')" + echo "OS Image Hash: $OS_IMAGE_HASH" +fi + +# Show report data +REPORT_DATA=$(echo "$RESPONSE" | jq -r '.details.report_data // "null"') +if [ "$REPORT_DATA" != "null" ]; then + echo -e "\n${YELLOW}Report Data:${NC}" + echo "$REPORT_DATA" +fi + +echo -e "\n${YELLOW}Server Logs:${NC}" +echo "============" +tail -10 "$LOG_FILE" + +echo -e "\n${YELLOW}Test completed!${NC}" +if [ "$IS_VALID" = "true" ]; then + echo -e "${GREEN}✓ Verification PASSED${NC}" + exit 0 +else + echo -e "${RED}✗ Verification FAILED${NC}" + exit 1 +fi diff --git a/vmm/Cargo.toml b/vmm/Cargo.toml index ccdeff85..84cb5ff6 100644 --- a/vmm/Cargo.toml +++ b/vmm/Cargo.toml @@ -32,9 +32,10 @@ tailf.workspace = true tokio = { workspace = true, features = ["full"] } git-version.workspace = true rocket-apitoken.workspace = true +serde_ini.workspace = true supervisor-client.workspace = true -ra-rpc = { workspace = true, features = ["client", "rocket"] } +ra-rpc = { workspace = true, features = ["client", "rocket", "openapi"] } dstack-vmm-rpc.workspace = true dstack-kms-rpc.workspace = true path-absolutize.workspace = true @@ -48,6 +49,11 @@ hex_fmt.workspace = true lspci.workspace = true base64.workspace = true serde-human-bytes.workspace = true +size-parser = { workspace = true, features = ["serde"] } +or-panic.workspace = true [dev-dependencies] insta.workspace = true + +[build-dependencies] +or-panic.workspace = true diff --git a/vmm/rpc/build.rs b/vmm/rpc/build.rs index 77e6a9e8..fe19530a 100644 --- a/vmm/rpc/build.rs +++ b/vmm/rpc/build.rs @@ -2,9 +2,11 @@ // // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::expect_used)] + fn main() { 
prpc_build::configure() - .out_dir(std::env::var_os("OUT_DIR").unwrap()) + .out_dir(std::env::var_os("OUT_DIR").expect("OUT_DIR not set")) .mod_prefix("super::") .build_scale_ext(false) .disable_package_emission() diff --git a/vmm/rpc/proto/prpc.proto b/vmm/rpc/proto/prpc.proto new file mode 100644 index 00000000..cd5a5d47 --- /dev/null +++ b/vmm/rpc/proto/prpc.proto @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: © 2025 Phala Network +// +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package prpc; + +// RPC error payload returned by prpc endpoints. +message PrpcError { + string message = 1; +} + diff --git a/vmm/rpc/proto/vmm_rpc.proto b/vmm/rpc/proto/vmm_rpc.proto index 64de42e8..51c3c3dd 100644 --- a/vmm/rpc/proto/vmm_rpc.proto +++ b/vmm/rpc/proto/vmm_rpc.proto @@ -7,7 +7,7 @@ syntax = "proto3"; import "google/protobuf/empty.proto"; package vmm; -// Information about a Virtual Machine (VM) +// Aggregates runtime and provisioning state for a single CVM. message VmInfo { // Unique identifier for the VM string id = 1; @@ -35,18 +35,32 @@ string shutdown_progress = 12; // Image version string image_version = 13; + // Recent lifecycle events reported for this VM + repeated GuestEvent events = 14; } +// Structured log or lifecycle event emitted by the guest or runtime. +message GuestEvent { + // Event type, e.g. "boot_progress" or "shutdown". + string event = 1; + // Human readable payload describing the event. + string body = 2; + // Timestamp in milliseconds since Unix epoch + uint64 timestamp = 3; +} + +// Wrapper around a VM identifier to keep RPC signatures consistent. message Id { // Unique identifier for the VM string id = 1; } +// Stable hash of a compose spec used to detect drift between client and VMM. message ComposeHash { string hash = 1; } -// Message for creating a VM request +// Full desired state for provisioning or re-provisioning a CVM instance. message VmConfiguration { // Name of the VM string name = 1; @@ -83,8 +97,11 @@ repeated string gateway_urls = 15; // The VM is stopped bool stopped = 16; + // Disable confidential computing (fallback to non-TEE VM). + bool no_tee = 17; } +// Requested GPU layout for a CVM. message GpuConfig { // GPUs repeated GpuSpec gpus = 1; @@ -92,11 +109,12 @@ string attach_mode = 2; } +// Identifies a physical GPU on the host. message GpuSpec { string slot = 1; } -// Message for port mapping +// Describes how host ports are forwarded into the CVM network namespace. message PortMapping { // Protocol string protocol = 1; @@ -108,8 +126,8 @@ string host_address = 4; } -// Message for upgrading an app request -message UpgradeAppRequest { +// Partial configuration used when mutating an existing VM. +message UpdateVmRequest { // ID of the VM string id = 1; // Compose file to be used for the VM @@ -122,11 +140,24 @@ bool update_ports = 5; // Port mapping repeated PortMapping ports = 7; + // Whether to update the KMS URLs. + bool update_kms_urls = 8; + repeated string kms_urls = 9; + // Whether to update the gateway URLs. + bool update_gateway_urls = 10; + repeated string gateway_urls = 11; // gpus GpuConfig gpus = 13; + // Optional compute resource updates. Leave unset to keep current configuration. + optional uint32 vcpu = 14; + optional uint32 memory = 15; + optional uint32 disk_size = 16; + optional string image = 17; + // Disable or re-enable TEE for an existing VM. 
+ optional bool no_tee = 18; } -// Message for Status request +// Parameters for listing CVMs with pagination and keyword filtering. message StatusRequest { // List of VM IDs repeated string ids = 1; @@ -140,6 +171,7 @@ uint32 page_size = 5; } +// Summary of the current fleet status. message StatusResponse { // List of VMs repeated VmInfo vms = 1; @@ -149,10 +181,12 @@ uint32 total = 3; } +// Available base images for the VMM to launch. message ImageListResponse { repeated ImageInfo images = 1; } +// Metadata describing an OCI image published to the cluster. message ImageInfo { string name = 1; string description = 2; @@ -160,20 +194,24 @@ bool is_dev = 4; } +// App identifier used for encrypting env vars and routing telemetry. message AppId { bytes app_id = 1; } +// Response with the KMS-backed public key used for env encryption plus audit signature. message PublicKeyResponse { bytes public_key = 1; bytes signature = 2; } +// Optional VM info payload returned by GetInfo. message GetInfoResponse { bool found = 1; optional VmInfo info = 2; } +// Input used when resizing compute or storage for a VM. message ResizeVmRequest { // Unique identifier for the VM string id = 1; @@ -187,11 +225,13 @@ optional string image = 5; } +// Tunables for the KMS integration. message KmsSettings { string url = 1; repeated string urls = 2; } +// Details about the HTTP gateway that fronts the CVMs. message GatewaySettings { string url = 1; string base_domain = 2; @@ -200,28 +240,40 @@ repeated string urls = 5; } +// Capacity caps enforced by the scheduler. message ResourcesSettings { uint32 max_cvm_number = 1; // equals the CID pool size. uint32 max_allocable_vcpu = 2; uint32 max_allocable_memory_in_mb = 3; // in MB. } +// Aggregated metadata exposed through GetMeta. message GetMetaResponse { KmsSettings kms = 1; GatewaySettings gateway = 2; ResourcesSettings resources = 3; } +// Build information for the running VMM binary. message VersionResponse { string version = 1; string rev = 2; } +// Available GPUs and policy flags. message ListGpusResponse { repeated GpuInfo gpus = 1; bool allow_attach_all = 2; } +// Counts applied when reloading persistent VM state from disk. +message ReloadVmsResponse { + uint32 loaded = 1; // Number of VMs that were loaded + uint32 updated = 2; // Number of VMs that were updated + uint32 removed = 3; // Number of VMs that were removed +} + +// Metadata for a single GPU discovered on the host. message GpuInfo { string slot = 1; string product_id = 2; @@ -229,7 +281,7 @@ bool is_free = 4; } -// Service definition for dstack-vmm +// Exposes lifecycle and metadata management RPCs for dstack-vmm. service Vmm { // RPC to create a VM rpc CreateVm(VmConfiguration) returns (Id); @@ -239,11 +291,13 @@ rpc StopVm(Id) returns (google.protobuf.Empty); // RPC to remove a VM rpc RemoveVm(Id) returns (google.protobuf.Empty); - // RPC to upgrade an app - rpc UpgradeApp(UpgradeAppRequest) returns (Id); + // RPC to upgrade an app. Deprecated, use UpdateVm instead. + rpc UpgradeApp(UpdateVmRequest) returns (Id); + // RPC to update a VM + rpc UpdateVm(UpdateVmRequest) returns (Id); // Shutdown a VM rpc ShutdownVm(Id) returns (google.protobuf.Empty); - // RPC to resize a VM + // RPC to resize a VM. Deprecated, use UpdateVm instead. 
rpc ResizeVm(ResizeVmRequest) returns (google.protobuf.Empty); // RPC to compute the compose hash; helpful for debugging and SDK development. rpc GetComposeHash(VmConfiguration) returns (ComposeHash); @@ -267,4 +321,7 @@ // List GPUs rpc ListGpus(google.protobuf.Empty) returns (ListGpusResponse); + + // Reload VMs directory and sync with memory state + rpc ReloadVms(google.protobuf.Empty) returns (ReloadVmsResponse); } diff --git a/vmm/rpc/src/generated.rs b/vmm/rpc/src/generated.rs index a6e9a646..8cf98c39 100644 --- a/vmm/rpc/src/generated.rs +++ b/vmm/rpc/src/generated.rs @@ -1,3 +1,6 @@ #![allow(async_fn_in_trait)] +pub const FILE_DESCRIPTOR_SET: &[u8] = + include_bytes!(concat!(env!("OUT_DIR"), "/file_descriptor_set.bin")); + include!(concat!(env!("OUT_DIR"), "/vmm.rs")); diff --git a/vmm/rpc/src/lib.rs b/vmm/rpc/src/lib.rs index 089da7c6..4780ac98 100644 --- a/vmm/rpc/src/lib.rs +++ b/vmm/rpc/src/lib.rs @@ -7,3 +7,12 @@ extern crate alloc; pub use generated::*; mod generated; + +impl GpuConfig { + pub fn is_empty(&self) -> bool { + if self.attach_mode == "all" { + return false; + } + self.gpus.is_empty() + } +} diff --git a/vmm/src/app.rs b/vmm/src/app.rs index 133f823a..289ddf7a 100644 --- a/vmm/src/app.rs +++ b/vmm/src/app.rs @@ -8,21 +8,25 @@ use anyhow::{bail, Context, Result}; use bon::Builder; use dstack_kms_rpc::kms_client::KmsClient; use dstack_types::shared_filenames::{ - compat_v3, APP_COMPOSE, ENCRYPTED_ENV, INSTANCE_INFO, SYS_CONFIG, USER_CONFIG, + APP_COMPOSE, ENCRYPTED_ENV, INSTANCE_INFO, SYS_CONFIG, USER_CONFIG, +}; +use dstack_vmm_rpc::{ + self as pb, GpuInfo, ReloadVmsResponse, StatusRequest, StatusResponse, VmConfiguration, }; -use dstack_vmm_rpc::{self as pb, GpuInfo, StatusRequest, StatusResponse, VmConfiguration}; use fs_err as fs; use guest_api::client::DefaultClient as GuestClient; use id_pool::IdPool; +use or_panic::ResultOrPanic; use ra_rpc::client::RaClient; use serde::{Deserialize, Serialize}; use serde_json::json; -use std::collections::{BTreeSet, HashMap}; +use std::collections::{BTreeSet, HashMap, HashSet, VecDeque}; use std::net::IpAddr; use std::path::{Path, PathBuf}; use std::sync::{Arc, Mutex, MutexGuard}; +use std::time::SystemTime; use supervisor_client::SupervisorClient; -use tracing::{error, info}; +use tracing::{error, info, warn}; pub use image::{Image, ImageInfo}; pub use qemu::{VmConfig, VmWorkDir}; @@ -60,6 +64,8 @@ pub struct Manifest { pub kms_urls: Vec<String>, #[serde(default)] pub gateway_urls: Vec<String>, + #[serde(default)] + pub no_tee: bool, } #[derive(Debug, Clone, Serialize, Deserialize, Default)] @@ -118,7 +124,7 @@ pub struct App { impl App { fn lock(&self) -> MutexGuard<AppState> { - self.state.lock().unwrap() + self.state.lock().or_panic("mutex poisoned") } pub(crate) fn vm_dir(&self) -> PathBuf { @@ -325,6 +331,204 @@ impl App { Ok(()) } + /// Reload VMs directory and sync with memory state while preserving statistics + pub async fn reload_vms_sync(&self) -> Result<ReloadVmsResponse> { + let vm_path = self.vm_dir(); + let mut loaded = 0u32; + let mut updated = 0u32; + let mut removed = 0u32; + + // Get running VMs to preserve CIDs and process info + let running_vms = self.supervisor.list().await.context("Failed to list VMs")?; + let running_vms_map: HashMap<String, _> = running_vms + .into_iter() + .map(|p| (p.config.id.clone(), p)) + .collect(); + let occupied_cids = running_vms_map + .iter() + .filter(|(_, p)| { + serde_json::from_str::(&p.config.note) + .unwrap_or_default() + .is_cvm() + }) + .flat_map(|(id, p)| p.config.cid.map(|cid| (id.clone(), cid))) + 
.collect::<HashMap<_, _>>(); + + // Update CID pool with running VMs + { + let mut state = self.lock(); + // First clear the pool and re-occupy running VM CIDs + state.cid_pool.clear(); + for cid in occupied_cids.values() { + state.cid_pool.occupy(*cid)?; + } + } + + // Get VM IDs from filesystem + let mut fs_vm_ids = HashSet::new(); + if vm_path.exists() { + for entry in fs::read_dir(&vm_path).context("Failed to read VM directory")? { + let entry = entry.context("Failed to read directory entry")?; + let vm_dir_path = entry.path(); + if vm_dir_path.is_dir() { + // Try to get VM ID from directory name or manifest + if let Some(vm_id) = vm_dir_path.file_name().and_then(|n| n.to_str()) { + fs_vm_ids.insert(vm_id.to_string()); + } + } + } + } + + // Get VM IDs currently in memory and their CIDs + let (memory_vm_ids, existing_cids): (HashSet<String>, HashSet<u32>) = { + let state = self.lock(); + ( + state.vms.keys().cloned().collect(), + state.vms.values().map(|vm| vm.config.cid).collect(), + ) + }; + + // Remove VMs that no longer exist in filesystem + let to_remove: Vec<String> = memory_vm_ids.difference(&fs_vm_ids).cloned().collect(); + if !to_remove.is_empty() { + for vm_id in &to_remove { + // Stop the VM process first if it's running + if running_vms_map.contains_key(vm_id) { + if let Err(err) = self.supervisor.stop(vm_id).await { + warn!("Failed to stop VM process {vm_id}: {err:?}"); + } + } + + // Remove from memory and free CID + let mut state = self.lock(); + if let Some(vm) = state.vms.remove(vm_id) { + state.cid_pool.free(vm.config.cid); + removed += 1; + info!("Removed VM {vm_id} from memory (directory no longer exists)"); + } + } + } + + // Load or update VMs from filesystem + if vm_path.exists() { + for entry in fs::read_dir(vm_path).context("Failed to read VM directory")? 
{ + let entry = entry.context("Failed to read directory entry")?; + let vm_path = entry.path(); + if vm_path.is_dir() { + match self.load_or_update_vm(&vm_path, &occupied_cids, true).await { + Ok(is_new) => { + if is_new { + loaded += 1; + } else { + updated += 1; + } + } + Err(err) => { + error!("Failed to load or update VM: {err:?}"); + } + } + } + } + } + + // Clean up any orphaned CIDs that aren't being used + { + let mut state = self.lock(); + let used_cids: HashSet<u32> = state.vms.values().map(|vm| vm.config.cid).collect(); + let orphaned_cids: Vec<u32> = existing_cids.difference(&used_cids).cloned().collect(); + for cid in orphaned_cids { + state.cid_pool.free(cid); + info!("Released orphaned CID {cid}"); + } + } + + Ok(ReloadVmsResponse { + loaded, + updated, + removed, + }) + } + + /// Load or update a VM, preserving existing statistics + async fn load_or_update_vm( + &self, + work_dir: impl AsRef<Path>, + cids_assigned: &HashMap<String, u32>, + auto_start: bool, + ) -> Result<bool> { + let vm_work_dir = VmWorkDir::new(work_dir.as_ref()); + let manifest = vm_work_dir.manifest().context("Failed to read manifest")?; + if manifest.image.len() > 64 + || manifest.image.contains("..") + || !manifest + .image + .chars() + .all(|c| c.is_alphanumeric() || c == '_' || c == '-' || c == '.') + { + bail!("Invalid image name"); + } + let image_path = self.config.image_path.join(&manifest.image); + let image = Image::load(&image_path).context("Failed to load image")?; + let vm_id = manifest.id.clone(); + let already_running = cids_assigned.contains_key(&vm_id); + let app_compose = vm_work_dir + .app_compose() + .context("Failed to read compose file")?; + + let mut is_new = false; + { + let mut states = self.lock(); + + // For existing VMs, keep their current CID + // For new VMs, try to use assigned CID or allocate a new one + let cid = if let Some(existing_vm) = states.get(&vm_id) { + // Keep existing CID + existing_vm.config.cid + } else if let Some(assigned_cid) = cids_assigned.get(&vm_id) { + // Use assigned CID from running processes + *assigned_cid + } else { + // Allocate new CID only for truly new VMs + states.cid_pool.allocate().context("CID pool exhausted")? 
+ }; + + let vm_config = VmConfig { + manifest, + image, + cid, + workdir: vm_work_dir.path().to_path_buf(), + gateway_enabled: app_compose.gateway_enabled(), + }; + + match states.get_mut(&vm_id) { + Some(vm) => { + // Update existing VM but preserve statistics and CID + let old_state = vm.state.clone(); + vm.config = vm_config.into(); + vm.state = old_state; // Preserve the existing state with statistics + } + None => { + // This is a new VM, need to occupy its CID if it wasn't allocated + if !cids_assigned.contains_key(&vm_id) { + states.cid_pool.occupy(cid)?; + } + states.add(VmState::new(vm_config)); + is_new = true; + } + } + }; + + if auto_start && vm_work_dir.started().unwrap_or_default() { + if already_running { + info!("Skipping, {vm_id} is already running"); + } else { + self.start_vm(&vm_id).await?; + } + } + + Ok(is_new) + } + pub async fn list_vms(&self, request: StatusRequest) -> Result<StatusResponse> { let vms = self .supervisor @@ -411,6 +615,17 @@ impl App { let Some(vm) = state.vms.values_mut().find(|vm| vm.config.cid == cid) else { bail!("VM not found"); }; + vm.state.events.push_back(pb::GuestEvent { + event: event.into(), + body: body.clone(), + timestamp: SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap_or_default() + .as_millis() as u64, + }); + while vm.state.events.len() > self.config.event_buffer_size { + vm.state.events.pop_front(); + } match event { "boot.progress" => { vm.state.boot_progress = body; @@ -489,117 +704,9 @@ impl App { let shared_dir = self.shared_dir(id); let manifest = work_dir.manifest().context("Failed to read manifest")?; let cfg = &self.config; - let image_path = cfg.image_path.join(&manifest.image); - let image = Image::load(image_path).context("Failed to load image info")?; - let img_ver = image.info.version_tuple().unwrap_or((0, 0, 0)); - let kms_urls = if manifest.kms_urls.is_empty() { - cfg.cvm.kms_urls.clone() - } else { - manifest.kms_urls.clone() - }; - let gateway_urls = if manifest.gateway_urls.is_empty() { - cfg.cvm.gateway_urls.clone() - } else { - manifest.gateway_urls.clone() - }; - let sys_config = if img_ver >= (0, 5, 0) { - let os_image_hash = hex::decode(image.digest.unwrap_or_default()) - .context("Failed to decode image digest")?; - let gpus = manifest.gpus.unwrap_or_default(); - let vm_config = serde_json::to_string(&dstack_types::VmConfig { - spec_version: 1, - os_image_hash, - cpu_count: manifest.vcpu, - memory_size: manifest.memory as u64 * 1024 * 1024, - qemu_single_pass_add_pages: cfg.cvm.qemu_single_pass_add_pages, - pic: cfg.cvm.qemu_pic, - pci_hole64_size: cfg.cvm.qemu_pci_hole64_size, - hugepages: manifest.hugepages, - num_gpus: gpus.gpus.len() as u32, - num_nvswitches: gpus.bridges.len() as u32, - hotplug_off: cfg.cvm.qemu_hotplug_off, - })?; - json!({ - "kms_urls": kms_urls, - "gateway_urls": gateway_urls, - "pccs_url": cfg.cvm.pccs_url, - "docker_registry": cfg.cvm.docker_registry, - "host_api_url": format!("vsock://2:{}/api", cfg.host_api.port), - "vm_config": vm_config, - }) - } else if img_ver >= (0, 4, 2) { - json!({ - "kms_urls": kms_urls, - "gateway_urls": gateway_urls, - "pccs_url": cfg.cvm.pccs_url, - "docker_registry": cfg.cvm.docker_registry, - "host_api_url": format!("vsock://2:{}/api", cfg.host_api.port), - }) - } else if img_ver >= (0, 4, 0) { - let rootfs_hash = image - .info - .rootfs_hash - .as_ref() - .context("Rootfs hash not found in image info")?; - json!({ - "rootfs_hash": rootfs_hash, - "kms_urls": kms_urls, - "tproxy_urls": gateway_urls, - "pccs_url": cfg.cvm.pccs_url, - 
"docker_registry": cfg.cvm.docker_registry, - "host_api_url": format!("vsock://2:{}/api", cfg.host_api.port), - }) - } else { - let rootfs_hash = image - .info - .rootfs_hash - .as_ref() - .context("Rootfs hash not found in image info")?; - json!({ - "rootfs_hash": rootfs_hash, - "kms_url": kms_urls.first(), - "tproxy_url": gateway_urls.first(), - "pccs_url": cfg.cvm.pccs_url, - "docker_registry": cfg.cvm.docker_registry, - "host_api_url": format!("vsock://2:{}/api", cfg.host_api.port), - }) - }; - let sys_config_str = - serde_json::to_string(&sys_config).context("Failed to serialize vm config")?; - let config_file = if img_ver >= (0, 4, 0) { - SYS_CONFIG - } else { - compat_v3::SYS_CONFIG - }; - fs::write(shared_dir.join(config_file), sys_config_str) + let sys_config_str = make_sys_config(cfg, &manifest)?; + fs::write(shared_dir.join(SYS_CONFIG), sys_config_str) .context("Failed to write vm config")?; - if img_ver < (0, 4, 0) { - // Sync .encrypted-env to encrypted-env - let compat_encrypted_env_path = shared_dir.join(compat_v3::ENCRYPTED_ENV); - let encrypted_env_path = shared_dir.join(ENCRYPTED_ENV); - if compat_encrypted_env_path.exists() { - fs::remove_file(&compat_encrypted_env_path)?; - } - if encrypted_env_path.exists() { - fs::copy(&encrypted_env_path, &compat_encrypted_env_path)?; - } - - // Sync certs - let certs_dir = shared_dir.join("certs"); - fs::create_dir_all(&certs_dir).context("Failed to create certs directory")?; - if cfg.cvm.ca_cert.is_empty() - || cfg.cvm.tmp_ca_cert.is_empty() - || cfg.cvm.tmp_ca_key.is_empty() - { - bail!("Certificates are required for older images"); - } - fs::copy(&cfg.cvm.ca_cert, certs_dir.join("ca.cert")) - .context("Failed to copy ca cert")?; - fs::copy(&cfg.cvm.tmp_ca_cert, certs_dir.join("tmp-ca.cert")) - .context("Failed to copy tmp ca cert")?; - fs::copy(&cfg.cvm.tmp_ca_key, certs_dir.join("tmp-ca.key")) - .context("Failed to copy tmp ca key")?; - } Ok(()) } @@ -674,6 +781,61 @@ impl App { } } +pub(crate) fn make_sys_config(cfg: &Config, manifest: &Manifest) -> Result { + let image_path = cfg.image_path.join(&manifest.image); + let image = Image::load(image_path).context("Failed to load image info")?; + let img_ver = image.info.version_tuple().unwrap_or((0, 0, 0)); + let kms_urls = if manifest.kms_urls.is_empty() { + cfg.cvm.kms_urls.clone() + } else { + manifest.kms_urls.clone() + }; + let gateway_urls = if manifest.gateway_urls.is_empty() { + cfg.cvm.gateway_urls.clone() + } else { + manifest.gateway_urls.clone() + }; + if img_ver < (0, 5, 0) { + bail!("Unsupported image version: {img_ver:?}"); + } + + let sys_config = json!({ + "kms_urls": kms_urls, + "gateway_urls": gateway_urls, + "pccs_url": cfg.cvm.pccs_url, + "docker_registry": cfg.cvm.docker_registry, + "host_api_url": format!("vsock://2:{}/api", cfg.host_api.port), + "vm_config": serde_json::to_string(&make_vm_config(cfg, manifest, &image))?, + }); + let sys_config_str = + serde_json::to_string(&sys_config).context("Failed to serialize vm config")?; + Ok(sys_config_str) +} + +fn make_vm_config(cfg: &Config, manifest: &Manifest, image: &Image) -> dstack_types::VmConfig { + let os_image_hash = image + .digest + .as_ref() + .and_then(|d| hex::decode(d).ok()) + .unwrap_or_default(); + let gpus = manifest.gpus.clone().unwrap_or_default(); + dstack_types::VmConfig { + spec_version: 1, + os_image_hash, + cpu_count: manifest.vcpu, + memory_size: manifest.memory as u64 * 1024 * 1024, + qemu_single_pass_add_pages: cfg.cvm.qemu_single_pass_add_pages, + pic: cfg.cvm.qemu_pic, + qemu_version: 
cfg.cvm.qemu_version.clone(), + pci_hole64_size: cfg.cvm.qemu_pci_hole64_size, + hugepages: manifest.hugepages, + num_gpus: gpus.gpus.len() as u32, + num_nvswitches: gpus.bridges.len() as u32, + hotplug_off: cfg.cvm.qemu_hotplug_off, + image: Some(manifest.image.clone()), + } +} + fn paginate<T>(items: Vec<T>, page: u32, page_size: u32) -> impl Iterator<Item = T> { let skip; let take; @@ -701,6 +863,7 @@ struct VmStateMut { boot_error: String, shutdown_progress: String, devices: GpuConfig, + events: VecDeque<pb::GuestEvent>, } impl VmStateMut { diff --git a/vmm/src/app/id_pool.rs b/vmm/src/app/id_pool.rs index a111a16f..5bbb205e 100644 --- a/vmm/src/app/id_pool.rs +++ b/vmm/src/app/id_pool.rs @@ -62,4 +62,8 @@ impl IdPool { pub fn free(&mut self, id: T) { self.allocated.remove(&id); } + + pub fn clear(&mut self) { + self.allocated.clear(); + } } diff --git a/vmm/src/app/qemu.rs b/vmm/src/app/qemu.rs index b67bd8d0..f2ab2f5f 100644 --- a/vmm/src/app/qemu.rs +++ b/vmm/src/app/qemu.rs @@ -51,6 +51,7 @@ pub struct VmInfo { pub shutdown_progress: String, pub image_version: String, pub gateway_enabled: bool, + pub events: Vec<pb::GuestEvent>, } #[derive(Debug, Builder)] @@ -116,6 +117,10 @@ impl VmInfo { .as_ref() .map(|c| c.gateway_urls.clone()) .unwrap_or_default(); + let no_tee = vm_config + .as_ref() + .map(|c| c.no_tee) + .unwrap_or(self.manifest.no_tee); + let stopped = !workdir.started().unwrap_or(false); Some(pb::VmConfiguration { @@ -158,6 +163,7 @@ impl VmInfo { kms_urls, gateway_urls, stopped, + no_tee, }) }, app_url: self .gateway_enabled .then_some(self.instance_id.as_ref()) .flatten() .map(|id| { - format!( - "https://{id}-{}.{}:{}", - gw.agent_port, gw.base_domain, gw.port - ) + if gw.port == 443 { + format!("https://{id}-{}.{}", gw.agent_port, gw.base_domain) + } else { + format!( + "https://{id}-{}.{}:{}", + gw.agent_port, gw.base_domain, gw.port + ) + } }), app_id: self.manifest.app_id.clone(), instance_id: self.instance_id.as_deref().map(Into::into), exited_at: self.exited_at.clone(), + events: self.events.clone(), } } } @@ -218,6 +229,7 @@ impl VmState { shutdown_progress: self.state.shutdown_progress.clone(), image_version: self.config.image.info.version.clone(), gateway_enabled: self.config.gateway_enabled, + events: self.state.events.clone().into(), } } } @@ -452,46 +464,7 @@ impl VmConfig { command.arg("-netdev").arg(netdev); command.arg("-device").arg("virtio-net-pci,netdev=net0"); - command - .arg("-machine") - .arg("q35,kernel-irqchip=split,confidential-guest-support=tdx,hpet=off"); - - let img_ver = self.image.info.version_tuple().unwrap_or_default(); - let support_mr_config_id = img_ver >= (0, 5, 2); - let tdx_object = if cfg.use_mrconfigid && support_mr_config_id { - let app_compose = workdir.app_compose().context("Failed to get app compose")?; - let compose_hash = workdir - .app_compose_hash() - .context("Failed to get compose hash")?; - let mr_config = if app_compose.key_provider_id.is_empty() { - MrConfig::V1 { - compose_hash: &compose_hash, - } - } else { - let instance_info = workdir - .instance_info() - .context("Failed to get instance info")?; - let app_id = if instance_info.app_id.is_empty() { - &compose_hash[..20] - } else { - &instance_info.app_id - }; - - let key_provider = app_compose.key_provider(); - let key_provider_id = &app_compose.key_provider_id; - MrConfig::V2 { - compose_hash: &compose_hash, - app_id: &app_id.try_into().context("Invalid app ID")?, - key_provider, - key_provider_id, - } - }; - let mrconfigid = BASE64_STANDARD.encode(mr_config.to_mr_config_id()); - 
format!("tdx-guest,id=tdx,mrconfigid={mrconfigid}") - } else { - "tdx-guest,id=tdx".to_string() - }; - command.arg("-object").arg(tdx_object); + self.configure_machine(&mut command, &workdir, cfg)?; command .arg("-device") @@ -679,6 +652,62 @@ impl VmConfig { Ok(processes) } + + fn configure_machine( + &self, + command: &mut Command, + workdir: &VmWorkDir, + cfg: &CvmConfig, + ) -> Result<()> { + if self.manifest.no_tee { + command + .arg("-machine") + .arg("q35,kernel-irqchip=split,hpet=off"); + return Ok(()); + } + + command + .arg("-machine") + .arg("q35,kernel-irqchip=split,confidential-guest-support=tdx,hpet=off"); + + let img_ver = self.image.info.version_tuple().unwrap_or_default(); + let support_mr_config_id = img_ver >= (0, 5, 2); + let tdx_object = if cfg.use_mrconfigid && support_mr_config_id { + let app_compose = workdir.app_compose().context("Failed to get app compose")?; + let compose_hash = workdir + .app_compose_hash() + .context("Failed to get compose hash")?; + let mr_config = if app_compose.key_provider_id.is_empty() { + MrConfig::V1 { + compose_hash: &compose_hash, + } + } else { + let instance_info = workdir + .instance_info() + .context("Failed to get instance info")?; + let app_id = if instance_info.app_id.is_empty() { + &compose_hash[..20] + } else { + &instance_info.app_id + }; + + let key_provider = app_compose.key_provider(); + let key_provider_id = &app_compose.key_provider_id; + MrConfig::V2 { + compose_hash: &compose_hash, + app_id: &app_id.try_into().context("Invalid app ID")?, + key_provider, + key_provider_id, + } + }; + let mrconfigid = BASE64_STANDARD.encode(mr_config.to_mr_config_id()); + format!("tdx-guest,id=tdx,mrconfigid={mrconfigid}") + } else { + "tdx-guest,id=tdx".to_string() + }; + command.arg("-object").arg(tdx_object); + Ok(()) + } } /// Round up a value to the nearest multiple of another value. 
diff --git a/vmm/src/config.rs b/vmm/src/config.rs index 40b06d81..c4d15395 100644 --- a/vmm/src/config.rs +++ b/vmm/src/config.rs @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: Apache-2.0 -use std::{net::IpAddr, path::PathBuf, str::FromStr}; +use std::{net::IpAddr, path::PathBuf, process::Command, str::FromStr}; use anyhow::{bail, Context, Result}; use load_config::load_config; @@ -11,9 +11,69 @@ use rocket::figment::Figment; use serde::{Deserialize, Serialize}; use lspci::{lspci_filtered, Device}; -use tracing::info; +use tracing::{info, warn}; pub const DEFAULT_CONFIG: &str = include_str!("../vmm.toml"); + +fn detect_qemu_version(qemu_path: &PathBuf) -> Result<String> { + let output = Command::new(qemu_path) + .arg("--version") + .output() + .context("Failed to execute qemu --version")?; + + if !output.status.success() { + bail!("QEMU version command failed with status: {}", output.status); + } + + let version_output = + String::from_utf8(output.stdout).context("QEMU version output is not valid UTF-8")?; + + parse_qemu_version_from_output(&version_output) + .context("Could not parse QEMU version from output") +} + +fn parse_qemu_version_from_output(output: &str) -> Result<String> { + // Parse version from output like: + // "QEMU emulator version 8.2.2 (Debian 2:8.2.2+ds-0ubuntu1.4+tdx1.0)" + // "QEMU emulator version 9.1.0" + let version = output + .lines() + .next() + .and_then(|line| { + let words: Vec<&str> = line.split_whitespace().collect(); + + // First try: Look for "version" keyword and get the next word (only if it looks like a version) + if let Some(version_idx) = words.iter().position(|&word| word == "version") { + if let Some(next_word) = words.get(version_idx + 1) { + // Only use the word after "version" if it looks like a version number + if next_word.chars().next().is_some_and(|c| c.is_ascii_digit()) + && (next_word.contains('.') + || next_word.chars().all(|c| c.is_ascii_digit() || c == '-')) + { + return Some(*next_word); + } + } + } + + // Fallback: find first word that looks like a version number + words + .iter() + .find(|word| { + // Check if word starts with digit and contains dots (version-like) + word.chars().next().is_some_and(|c| c.is_ascii_digit()) + && (word.contains('.') + || word.chars().all(|c| c.is_ascii_digit() || c == '-')) + }) + .copied() + }) + .context("Could not parse QEMU version from output")?; + + // Extract just the version number (e.g., "8.2.2" from "8.2.2+ds-0ubuntu1.4+tdx1.0") + let clean_version = version.split('+').next().unwrap_or(version).to_string(); + + Ok(clean_version) +} + pub fn load_config_figment(config_file: Option<&str>) -> Figment { load_config("vmm", DEFAULT_CONFIG, config_file, false) } @@ -110,16 +170,6 @@ pub struct CvmConfig { /// Use sudo to run the VM pub user: String, - /// The CA certificate - #[serde(default)] - pub ca_cert: String, - /// The tmp CA certificate - #[serde(default)] - pub tmp_ca_cert: String, - /// The tmp CA key - #[serde(default)] - pub tmp_ca_key: String, - /// Auto restart configuration pub auto_restart: AutoRestartConfig, @@ -127,10 +177,13 @@ pub use_mrconfigid: bool, /// QEMU single pass add page - pub qemu_single_pass_add_pages: bool, + pub qemu_single_pass_add_pages: Option<bool>, /// QEMU pic - pub qemu_pic: bool, + pub qemu_pic: Option<bool>, + /// QEMU version + pub qemu_version: Option<String>, /// QEMU pci_hole64_size + #[serde(with = "size_parser::human_size")] pub qemu_pci_hole64_size: u64, /// QEMU hotplug_off pub qemu_hotplug_off: bool, @@ -212,6 +265,13 @@ pub struct Config { /// The URL of 
the KMS server pub kms_url: String, + /// Node name (optional, used as prefix in UI title) + #[serde(default)] + pub node_name: String, + + /// Per-VM buffer size for guest events kept by the VMM process + pub event_buffer_size: usize, + /// CVM configuration pub cvm: CvmConfig, /// Gateway configuration @@ -310,6 +370,32 @@ pub struct KeyProviderConfig { pub port: u16, } +const CLIENT_CONF_PATH: &str = "/etc/dstack/client.conf"; +fn read_qemu_path_from_client_conf() -> Option<PathBuf> { + #[derive(Debug, Deserialize)] + struct ClientQemuSection { + path: Option<String>, + } + #[derive(Debug, Deserialize)] + struct ClientIniConfig { + qemu: Option<ClientQemuSection>, + } + + let raw = fs_err::read_to_string(CLIENT_CONF_PATH).ok()?; + let parsed: ClientIniConfig = serde_ini::from_str(&raw).ok()?; + let path = parsed.qemu?.path?; + let path = path.trim().trim_matches('"').trim_matches('\''); + if path.is_empty() { + return None; + } + let path = PathBuf::from(path); + if path.exists() { + Some(path) + } else { + None + } +} + impl Config { pub fn extract_or_default(figment: &Figment) -> Result<Self> { + let mut me: Self = figment.extract()?; @@ -323,12 +409,81 @@ impl Config { me.run_path = app_home.join("vm"); } if me.cvm.qemu_path == PathBuf::default() { - let cpu_arch = std::env::consts::ARCH; - let qemu_path = which::which(format!("qemu-system-{}", cpu_arch)) - .context("Failed to find qemu executable")?; - me.cvm.qemu_path = qemu_path; + // Prefer the path from dstack client config if present + if let Some(qemu_path) = read_qemu_path_from_client_conf() { + info!("Found QEMU path from client config: {CLIENT_CONF_PATH:?}"); + me.cvm.qemu_path = qemu_path; + } else { + let cpu_arch = std::env::consts::ARCH; + let qemu_path = which::which(format!("qemu-system-{}", cpu_arch)) + .context("Failed to find qemu executable")?; + me.cvm.qemu_path = qemu_path; + } + } + info!("QEMU path: {}", me.cvm.qemu_path.display()); + + // Detect QEMU version if not already set + match &me.cvm.qemu_version { + None => match detect_qemu_version(&me.cvm.qemu_path) { + Ok(version) => { + info!("Detected QEMU version: {version}"); + me.cvm.qemu_version = Some(version); + } + Err(e) => { + warn!("Failed to detect QEMU version: {e}"); + // Continue without version - the system will use defaults + } + }, + Some(version) => info!("Configured QEMU version: {version}"), } } Ok(me) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_qemu_version_debian_format() { + let output = "QEMU emulator version 8.2.2 (Debian 2:8.2.2+ds-0ubuntu1.4+tdx1.0)\nCopyright (c) 2003-2023 Fabrice Bellard and the QEMU Project developers"; + let version = parse_qemu_version_from_output(output).unwrap(); + assert_eq!(version, "8.2.2"); + } + + #[test] + fn test_parse_qemu_version_simple_format() { + let output = "QEMU emulator version 9.1.0\nCopyright (c) 2003-2024 Fabrice Bellard and the QEMU Project developers"; + let version = parse_qemu_version_from_output(output).unwrap(); + assert_eq!(version, "9.1.0"); + } + + #[test] + fn test_parse_qemu_version_old_debian_format() { + let output = "QEMU emulator version 8.2.2 (Debian 1:8.2.2+ds-0ubuntu1.2)\nCopyright (c) 2003-2023 Fabrice Bellard and the QEMU Project developers"; + let version = parse_qemu_version_from_output(output).unwrap(); + assert_eq!(version, "8.2.2"); + } + + #[test] + fn test_parse_qemu_version_with_rc() { + let output = "QEMU emulator version 9.0.0-rc1\nCopyright (c) 2003-2024 Fabrice Bellard and the QEMU Project developers"; + let version = parse_qemu_version_from_output(output).unwrap(); + 
assert_eq!(version, "9.0.0-rc1"); + } + + #[test] + fn test_parse_qemu_version_fallback() { + let output = "Some unusual format 8.1.5 with version info"; + let version = parse_qemu_version_from_output(output).unwrap(); + assert_eq!(version, "8.1.5"); + } + + #[test] + fn test_parse_qemu_version_invalid() { + let output = "No version information here"; + let result = parse_qemu_version_from_output(output); + assert!(result.is_err()); + } +} diff --git a/vmm/src/console.html b/vmm/src/console_v0.html similarity index 92% rename from vmm/src/console.html rename to vmm/src/console_v0.html index 3ab894fa..e6b6a007 100644 --- a/vmm/src/console.html +++ b/vmm/src/console_v0.html @@ -10,7 +10,7 @@ - <title>dstack VM Management Console</title> + <title>{{TITLE}}</title> 
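// A quick sketch of how the {{TITLE}} placeholder above gets filled in; this
// mirrors replace_title in main_routes.rs below (node names are hypothetical):
fn example_title(node_name: &str) -> String {
    if node_name.is_empty() {
        "dstack-vmm".to_string() // default title when no node_name is configured
    } else {
        format!("{node_name} | dstack-vmm") // e.g. "node-1 | dstack-vmm"
    }
}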
diff --git a/vmm/src/main.rs b/vmm/src/main.rs index 92cb9f46..bb1f7873 100644 --- a/vmm/src/main.rs +++ b/vmm/src/main.rs @@ -29,6 +29,7 @@ mod host_api_service; mod main_routes; mod main_service; mod one_shot; +mod openapi; const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION"); const GIT_REV: &str = git_version::git_version!( @@ -74,6 +75,9 @@ struct RunArgs { } async fn run_external_api(app: App, figment: Figment, api_auth: ApiToken) -> Result<()> { + let version = app_version(); + let openapi_doc = openapi::build_openapi_doc(&version)?; + let external_api = rocket::custom(figment) .mount("/", main_routes::routes()) .mount("/guest", ra_rpc::prpc_routes!(App, GuestApiHandler)) @@ -94,6 +98,8 @@ res.set_raw_header("X-Accel-Buffering", "no"); }) })); + let external_api = + ra_rpc::rocket_helper::mount_openapi_docs(external_api, openapi_doc, "/api-docs"); let _ = external_api .launch() diff --git a/vmm/src/main_routes.rs b/vmm/src/main_routes.rs index 2e246fa0..f4ef9a45 100644 --- a/vmm/src/main_routes.rs +++ b/vmm/src/main_routes.rs @@ -27,9 +27,38 @@ macro_rules! file_or_include_str { }; } +fn replace_title(app: &App, html: &str) -> String { + let title = if app.config.node_name.is_empty() { + "dstack-vmm".to_string() + } else { + format!("{} | dstack-vmm", app.config.node_name) + }; + html.replace("{{TITLE}}", &title) +} + +fn render_console(html: String, app: &State<App>) -> (ContentType, String) { + let html = replace_title(app, &html); + (ContentType::HTML, html) +} + #[get("/")] -async fn index() -> (ContentType, String) { - (ContentType::HTML, file_or_include_str!("console.html")) +async fn index(app: &State<App>) -> (ContentType, String) { + render_console(file_or_include_str!("console_v1.html"), app) +} + +#[get("/v1")] +async fn v1(app: &State<App>) -> (ContentType, String) { + index(app).await +} + +#[get("/beta")] +async fn beta(app: &State<App>) -> (ContentType, String) { + index(app).await +} + +#[get("/v0")] +async fn v0(app: &State<App>) -> (ContentType, String) { + render_console(file_or_include_str!("console_v0.html"), app) } #[get("/res/<path..>")] @@ -153,5 +182,5 @@ fn vm_logs( } pub fn routes() -> Vec<Route> { - routes![index, res, vm_logs] + routes![index, v1, beta, v0, res, vm_logs] } diff --git a/vmm/src/main_service.rs b/vmm/src/main_service.rs index 9003c526..67acf604 100644 --- a/vmm/src/main_service.rs +++ b/vmm/src/main_service.rs @@ -5,15 +5,15 @@ use std::ops::Deref; use std::time::{SystemTime, UNIX_EPOCH}; -use anyhow::{anyhow, bail, Context, Result}; +use anyhow::{bail, Context, Result}; use dstack_types::AppCompose; use dstack_vmm_rpc as rpc; use dstack_vmm_rpc::vmm_server::{VmmRpc, VmmServer}; use dstack_vmm_rpc::{ AppId, ComposeHash as RpcComposeHash, GatewaySettings, GetInfoResponse, GetMetaResponse, Id, ImageInfo as RpcImageInfo, ImageListResponse, KmsSettings, ListGpusResponse, PublicKeyResponse, - ResizeVmRequest, ResourcesSettings, StatusRequest, StatusResponse, UpgradeAppRequest, - VersionResponse, VmConfiguration, + ReloadVmsResponse, ResizeVmRequest, ResourcesSettings, StatusRequest, StatusResponse, + UpdateVmRequest, VersionResponse, VmConfiguration, }; use fs_err as fs; use ra_rpc::{CallContext, RpcCall}; @@ -51,13 +51,17 @@ fn app_id_of(compose_file: &str) -> String { truncate40(&hex_sha256(compose_file)).to_string() } -/// Validate the label of the VM. Valid chars are alphanumeric, dash and underscore. 
+/// Validate the VM label, restricting it to a safe character set to prevent injection vectors. fn validate_label(label: &str) -> Result<()> { - if label - .chars() - .any(|c| !c.is_alphanumeric() && c != '-' && c != '_') - { - bail!("Invalid name: {}", label); + fn is_valid_label_char(c: char) -> bool { + c.is_ascii_alphanumeric() + || matches!( + c, + '-' | '_' | '.' | ' ' | '@' | '~' | '!' | '$' | '^' | '(' | ')' + ) + } + if !label.chars().all(is_valid_label_char) { + bail!("Invalid name: {label}"); } Ok(()) } @@ -66,7 +70,7 @@ pub fn resolve_gpus_with_config( gpu_cfg: &rpc::GpuConfig, cvm_config: &crate::config::CvmConfig, ) -> Result<GpuConfig> { - if !cvm_config.gpu.enabled { + if !cvm_config.gpu.enabled && !gpu_cfg.is_empty() { bail!("GPU is not enabled"); } let gpus = resolve_gpus(gpu_cfg)?; @@ -173,28 +177,84 @@ pub fn create_manifest_from_vm_config( None => GpuConfig::default(), }; - Ok(Manifest::builder() - .id(id) - .name(request.name.clone()) - .app_id(app_id) - .image(request.image.clone()) - .vcpu(request.vcpu) - .memory(request.memory) - .disk_size(request.disk_size) - .port_map(port_map) - .created_at_ms(now) - .hugepages(request.hugepages) - .pin_numa(request.pin_numa) - .gpus(gpus) - .kms_urls(request.kms_urls.clone()) - .gateway_urls(request.gateway_urls.clone()) - .build()) + Ok(Manifest { + id, + name: request.name.clone(), + app_id, + vcpu: request.vcpu, + memory: request.memory, + disk_size: request.disk_size, + image: request.image.clone(), + port_map, + created_at_ms: now, + hugepages: request.hugepages, + pin_numa: request.pin_numa, + gpus: Some(gpus), + kms_urls: request.kms_urls.clone(), + gateway_urls: request.gateway_urls.clone(), + no_tee: request.no_tee, + }) } impl RpcHandler { fn resolve_gpus(&self, gpu_cfg: &rpc::GpuConfig) -> Result<GpuConfig> { resolve_gpus_with_config(gpu_cfg, &self.app.config.cvm) } + + #[allow(clippy::too_many_arguments)] + async fn apply_resource_updates( + &self, + vm_id: &str, + manifest: &mut Manifest, + vm_work_dir: &VmWorkDir, + vcpu: Option<u32>, + memory: Option<u32>, + disk_size: Option<u32>, + image: Option<&str>, + ) -> Result<bool> { + let has_updates = + vcpu.is_some() || memory.is_some() || disk_size.is_some() || image.is_some(); + if !has_updates { + return Ok(false); + } + + let vm = self.app.vm_info(vm_id).await?.context("vm not found")?; + if !["stopped", "exited"].contains(&vm.status.as_str()) { + bail!("vm should be stopped before resize: {}", vm_id); + } + + if let Some(vcpu) = vcpu { + manifest.vcpu = vcpu; + } + if let Some(memory) = memory { + manifest.memory = memory; + } + if let Some(image) = image { + manifest.image = image.to_string(); + } + if let Some(disk_size) = disk_size { + if disk_size < manifest.disk_size { + bail!("Cannot shrink disk size"); + } + manifest.disk_size = disk_size; + + info!("Resizing disk to {}GB", disk_size); + let hda_path = vm_work_dir.hda_path(); + let new_size_str = format!("{}G", disk_size); + let output = std::process::Command::new("qemu-img") + .args(["resize", &hda_path.display().to_string(), &new_size_str]) + .output() + .context("Failed to resize disk")?; + if !output.status.success() { + bail!( + "Failed to resize disk: {}", + String::from_utf8_lossy(&output.stderr) + ); + } + } + + Ok(true) + } } impl VmmRpc for RpcHandler { @@ -280,7 +340,11 @@ impl VmmRpc for RpcHandler { }) } - async fn upgrade_app(self, request: UpgradeAppRequest) -> Result<Id> { + async fn upgrade_app(self, request: UpdateVmRequest) -> Result<Id> { + self.update_vm(request).await + } + + async fn update_vm(self, request: UpdateVmRequest) -> 
Result<Id> { let new_id = if !request.compose_file.is_empty() { // check the compose file is valid let _app_compose: AppCompose = @@ -308,9 +372,22 @@ impl VmmRpc for RpcHandler { } let vm_work_dir = self.app.work_dir(&request.id); let mut manifest = vm_work_dir.manifest().context("Failed to read manifest")?; + self.apply_resource_updates( + &request.id, + &mut manifest, + &vm_work_dir, + request.vcpu, + request.memory, + request.disk_size, + request.image.as_deref(), + ) + .await?; if let Some(gpus) = request.gpus { manifest.gpus = Some(self.resolve_gpus(&gpus)?); } + if let Some(no_tee) = request.no_tee { + manifest.no_tee = no_tee; + } if request.update_ports { manifest.port_map = request .ports @@ -325,6 +402,12 @@ }) .collect::<Result<Vec<_>>>()?; } + if request.update_kms_urls { + manifest.kms_urls = request.kms_urls.clone(); + } + if request.update_gateway_urls { + manifest.gateway_urls = request.gateway_urls.clone(); + } vm_work_dir .put_manifest(&manifest) .context("Failed to put manifest")?; @@ -366,55 +449,23 @@ #[tracing::instrument(skip(self, request), fields(id = request.id))] async fn resize_vm(self, request: ResizeVmRequest) -> Result<()> { info!("Resizing VM: {:?}", request); - let vm = self - .app - .vm_info(&request.id) - .await? - .context("vm not found")?; - if !["stopped", "exited"].contains(&vm.status.as_str()) { - return Err(anyhow!( - "vm should be stopped before resize: {}", - request.id - )); - } - let work_dir = self.app.config.run_path.join(&request.id); - let vm_work_dir = VmWorkDir::new(&work_dir); + let vm_work_dir = self.app.work_dir(&request.id); let mut manifest = vm_work_dir.manifest().context("failed to read manifest")?; - if let Some(vcpu) = request.vcpu { - manifest.vcpu = vcpu; - } - if let Some(memory) = request.memory { - manifest.memory = memory; - } - if let Some(image) = request.image { - manifest.image = image; - } - if let Some(disk_size) = request.disk_size { - if disk_size < manifest.disk_size { - bail!("Cannot shrink disk size"); - } - manifest.disk_size = disk_size; - - // Run qemu-img resize to resize the disk - info!("Resizing disk to {}GB", disk_size); - let hda_path = vm_work_dir.hda_path(); - let new_size_str = format!("{}G", disk_size); - let output = std::process::Command::new("qemu-img") - .args(["resize", &hda_path.display().to_string(), &new_size_str]) - .output() - .context("Failed to resize disk")?; - if !output.status.success() { - bail!( - "Failed to resize disk: {}", - String::from_utf8_lossy(&output.stderr) - ); - } - } + self.apply_resource_updates( + &request.id, + &mut manifest, + &vm_work_dir, + request.vcpu, + request.memory, + request.disk_size, + request.image.as_deref(), + ) + .await?; vm_work_dir .put_manifest(&manifest) .context("failed to update manifest")?; self.app - .load_vm(work_dir, &Default::default(), false) + .load_vm(vm_work_dir.path(), &Default::default(), false) .await .context("Failed to load VM")?; Ok(()) @@ -484,6 +535,11 @@ let hash = hex_sha256(&request.compose_file); Ok(RpcComposeHash { hash }) } + + async fn reload_vms(self) -> Result<ReloadVmsResponse> { + info!("Reloading VMs directory and syncing with memory state"); + self.app.reload_vms_sync().await + } } impl RpcCall for RpcHandler { diff --git a/vmm/src/one_shot.rs b/vmm/src/one_shot.rs index b51378e6..cbc33f77 100644 --- a/vmm/src/one_shot.rs +++ b/vmm/src/one_shot.rs @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: Apache-2.0 -use crate::app::{Image, VmConfig, VmWorkDir}; +use 
crate::app::{make_sys_config, Image, VmConfig, VmWorkDir}; use crate::config::Config; use crate::main_service; use anyhow::{Context, Result}; @@ -235,42 +235,9 @@ Compose file content (first 200 chars): // 2. Create .sys-config.json (critical for 0.5.x VMs) // Use manifest URLs if available, fallback to config URLs (matching VMM's sync_dynamic_config logic) - let kms_urls = if manifest.kms_urls.is_empty() { - config.cvm.kms_urls.clone() - } else { - manifest.kms_urls.clone() - }; - let gateway_urls = if manifest.gateway_urls.is_empty() { - config.cvm.gateway_urls.clone() - } else { - manifest.gateway_urls.clone() - }; - - let sys_config = serde_json::json!({ - "kms_urls": kms_urls, - "gateway_urls": gateway_urls, - "pccs_url": config.cvm.pccs_url, - "docker_registry": config.cvm.docker_registry, - "host_api_url": format!("vsock://2:{}/api", config.host_api.port), - "vm_config": serde_json::to_string(&dstack_types::VmConfig { - spec_version: 1, - os_image_hash: image.digest.as_ref() - .and_then(|d| hex::decode(d).ok()) - .unwrap_or_default(), - cpu_count: manifest.vcpu, - memory_size: manifest.memory as u64 * 1024 * 1024, - qemu_single_pass_add_pages: config.cvm.qemu_single_pass_add_pages, - pic: config.cvm.qemu_pic, - pci_hole64_size: config.cvm.qemu_pci_hole64_size, - hugepages: manifest.hugepages, - num_gpus: manifest.gpus.as_ref().map_or(0, |g| g.gpus.len() as u32), - num_nvswitches: manifest.gpus.as_ref().map_or(0, |g| g.bridges.len() as u32), - hotplug_off: config.cvm.qemu_hotplug_off, - })? - }); + let sys_config_str = make_sys_config(&config, &manifest)?; let sys_config_path = vm_work_dir.shared_dir().join(".sys-config.json"); - fs_err::write(&sys_config_path, serde_json::to_string(&sys_config)?) - .context("Failed to write sys config")?; + fs_err::write(&sys_config_path, sys_config_str).context("Failed to write sys config")?; // Create vm-state.json with initial state vm_work_dir diff --git a/vmm/src/openapi.rs b/vmm/src/openapi.rs new file mode 100644 index 00000000..d9b01ba9 --- /dev/null +++ b/vmm/src/openapi.rs @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: © 2025 Phala Network +// +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::{Context, Result}; +use ra_rpc::openapi::{ + build_openapi_doc as build_doc, DescriptorSource, DocumentInfo, OpenApiDoc, ServiceConfig, + SwaggerUiConfig, +}; + +pub fn build_openapi_doc(app_version: &str) -> Result<OpenApiDoc> { + let info = DocumentInfo::new("dstack-vmm RPC", app_version.to_string()) + .with_description( + "Auto-generated OpenAPI spec for the pRPC surfaces exposed by dstack-vmm.", + ) + .add_server("/"); + + let sources = vec![ + DescriptorSource::new( + dstack_vmm_rpc::FILE_DESCRIPTOR_SET, + vec![ServiceConfig::new("Vmm", "/prpc")], + ), + DescriptorSource::new( + guest_api::FILE_DESCRIPTOR_SET, + vec![ServiceConfig::new("ProxiedGuestApi", "/guest")], + ), + ]; + + let ui = SwaggerUiConfig { + title: "dstack-vmm RPC Explorer".to_string(), + ..Default::default() + }; + + build_doc(&sources, &info, ui).context("failed to build OpenAPI document") +} diff --git a/vmm/src/vmm-cli.py b/vmm/src/vmm-cli.py index e3cf66e7..7bf4b3fa 100755 --- a/vmm/src/vmm-cli.py +++ b/vmm/src/vmm-cli.py @@ -123,7 +123,6 @@ def read_utf8(filepath: str) -> str: with open(filepath, 'rb') as f: return f.read().decode('utf-8') - class UnixSocketHTTPConnection(http.client.HTTPConnection): """HTTPConnection that connects to a Unix domain socket.""" @@ -332,6 +331,33 @@ def remove_vm(self, vm_id: str) -> None: self.rpc_call('RemoveVm', {'id': vm_id}) print(f"Removed VM 
{vm_id}") + def resize_vm( + self, + vm_id: str, + vcpu: Optional[int] = None, + memory: Optional[int] = None, + disk_size: Optional[int] = None, + image: Optional[str] = None, + ) -> None: + """Resize a VM""" + params = {"id": vm_id} + if vcpu is not None: + params["vcpu"] = vcpu + if memory is not None: + params["memory"] = memory + if disk_size is not None: + params["disk_size"] = disk_size + if image is not None: + params["image"] = image + + if len(params) == 1: + raise Exception( + "at least one parameter must be specified for resize: --vcpu, --memory, --disk, or --image" + ) + + self.rpc_call("ResizeVm", params) + print(f"Resized VM {vm_id}") + def show_logs(self, vm_id: str, lines: int = 20, follow: bool = False) -> None: """Show VM logs""" path = f"/logs?id={vm_id}&follow={str(follow).lower()}&ansi=false&lines={lines}" @@ -498,6 +524,12 @@ def create_app_compose(self, args) -> None: if args.prelaunch_script: app_compose["pre_launch_script"] = open( args.prelaunch_script, 'rb').read().decode('utf-8') + if args.swap is not None: + swap_bytes = max(0, int(round(args.swap)) * 1024 * 1024) + if swap_bytes > 0: + app_compose["swap_size"] = swap_bytes + else: + app_compose.pop("swap_size", None) compose_file = json.dumps( app_compose, indent=4, ensure_ascii=False).encode('utf-8') @@ -536,7 +568,12 @@ def create_vm(self, args) -> None: "hugepages": args.hugepages, "pin_numa": args.pin_numa, "stopped": args.stopped, + "no_tee": args.no_tee, } + if args.swap is not None: + swap_bytes = max(0, int(round(args.swap)) * 1024 * 1024) + if swap_bytes > 0: + params["swap_size"] = swap_bytes if args.ppcie: params["gpus"] = { @@ -567,6 +604,7 @@ def create_vm(self, args) -> None: def update_vm_env(self, vm_id: str, envs: Dict[str, str], kms_urls: Optional[List[str]] = None) -> None: """Update environment variables for a VM""" + envs = envs or {} # First get the VM info to retrieve the app_id vm_info_response = self.rpc_call('GetInfo', {'id': vm_id}) @@ -575,6 +613,8 @@ def update_vm_env(self, vm_id: str, envs: Dict[str, str], kms_urls: Optional[Lis app_id = vm_info_response['info']['app_id'] print(f"Retrieved app ID: {app_id}") + vm_configuration = vm_info_response['info'].get('configuration') or {} + compose_file = vm_configuration.get('compose_file') # Now get the encryption key for the app encrypt_pubkey = self.get_app_env_encrypt_pub_key( @@ -584,8 +624,31 @@ def update_vm_env(self, vm_id: str, envs: Dict[str, str], kms_urls: Optional[Lis encrypted_env = encrypt_env(envs_list, encrypt_pubkey) # Use UpdateApp with the VM ID - self.rpc_call('UpgradeApp', {'id': vm_id, - 'encrypted_env': encrypted_env}) + payload = {'id': vm_id, 'encrypted_env': encrypted_env} + + if compose_file: + try: + app_compose = json.loads(compose_file) + except json.JSONDecodeError: + app_compose = {} + compose_changed = False + allowed_envs = list(envs.keys()) + if app_compose.get('allowed_envs') != allowed_envs: + app_compose['allowed_envs'] = allowed_envs + compose_changed = True + launch_token_value = envs.get('APP_LAUNCH_TOKEN') + if launch_token_value is not None: + launch_token_hash = hashlib.sha256( + launch_token_value.encode('utf-8') + ).hexdigest() + if app_compose.get('launch_token_hash') != launch_token_hash: + app_compose['launch_token_hash'] = launch_token_hash + compose_changed = True + if compose_changed: + payload['compose_file'] = json.dumps( + app_compose, indent=4, ensure_ascii=False) + + self.rpc_call('UpgradeApp', payload) print(f"Environment variables updated for VM {vm_id}") def 
update_vm_user_config(self, vm_id: str, user_config: str) -> None: @@ -599,6 +662,195 @@ def update_vm_app_compose(self, vm_id: str, app_compose: str) -> None: self.rpc_call('UpgradeApp', {'id': vm_id, 'compose_file': app_compose}) print(f"App compose updated for VM {vm_id}") + + def update_vm_ports(self, vm_id: str, ports: List[str]) -> None: + """Update port mapping for a VM""" + port_mappings = [parse_port_mapping(port) for port in ports] + self.rpc_call( + "UpgradeApp", {"id": vm_id, + "update_ports": True, "ports": port_mappings} + ) + print(f"Port mapping updated for VM {vm_id}") + + def update_vm( + self, + vm_id: str, + vcpu: Optional[int] = None, + memory: Optional[int] = None, + disk_size: Optional[int] = None, + image: Optional[str] = None, + docker_compose_content: Optional[str] = None, + prelaunch_script: Optional[str] = None, + swap_size: Optional[int] = None, + env_file: Optional[str] = None, + user_config: Optional[str] = None, + ports: Optional[List[str]] = None, + no_ports: bool = False, + gpu_slots: Optional[List[str]] = None, + attach_all: bool = False, + no_gpus: bool = False, + kms_urls: Optional[List[str]] = None, + no_tee: Optional[bool] = None, + ) -> None: + """Update multiple aspects of a VM in one command""" + updates = [] + + # handle resize operations (vcpu, memory, disk, image) + resize_params = {} + if vcpu is not None: + resize_params["vcpu"] = vcpu + updates.append(f"vCPU: {vcpu}") + if memory is not None: + resize_params["memory"] = memory + updates.append(f"memory: {memory}MB") + if disk_size is not None: + resize_params["disk_size"] = disk_size + updates.append(f"disk: {disk_size}GB") + if image is not None: + resize_params["image"] = image + updates.append(f"image: {image}") + + if resize_params: + resize_params["id"] = vm_id + self.rpc_call("ResizeVm", resize_params) + + # handle upgrade operations (compose, env, user_config, ports, gpu) + upgrade_params = {"id": vm_id} + + # handle compose file updates (docker-compose, prelaunch script, swap) + needs_compose_update = docker_compose_content or prelaunch_script is not None or swap_size is not None + vm_info_response = None + + if needs_compose_update or env_file: + vm_info_response = self.rpc_call('GetInfo', {'id': vm_id}) + if not vm_info_response.get('found', False) or 'info' not in vm_info_response: + raise Exception(f"VM with ID {vm_id} not found") + + if needs_compose_update: + vm_configuration = vm_info_response['info'].get('configuration') or {} + compose_file_content = vm_configuration.get('compose_file') + + try: + app_compose = json.loads(compose_file_content) if compose_file_content else {} + except json.JSONDecodeError: + app_compose = {} + + if docker_compose_content: + app_compose['docker_compose_file'] = docker_compose_content + updates.append("docker compose") + + if prelaunch_script is not None: + script_stripped = prelaunch_script.strip() + if script_stripped: + app_compose['pre_launch_script'] = script_stripped + updates.append("prelaunch script") + elif 'pre_launch_script' in app_compose: + del app_compose['pre_launch_script'] + updates.append("prelaunch script (removed)") + + if swap_size is not None: + swap_bytes = max(0, int(round(swap_size)) * 1024 * 1024) + if swap_bytes > 0: + app_compose['swap_size'] = swap_bytes + updates.append(f"swap: {swap_size}MB") + elif 'swap_size' in app_compose: + del app_compose['swap_size'] + updates.append("swap (disabled)") + + upgrade_params['compose_file'] = json.dumps(app_compose, indent=4, ensure_ascii=False) + + if env_file: + envs = 
parse_env_file(env_file) + if envs: + app_id = vm_info_response['info']['app_id'] + vm_configuration = vm_info_response['info'].get('configuration') or {} + compose_file_content = vm_configuration.get('compose_file') + + encrypt_pubkey = self.get_app_env_encrypt_pub_key( + app_id, kms_urls[0] if kms_urls else None) + envs_list = [{"key": k, "value": v} for k, v in envs.items()] + upgrade_params["encrypted_env"] = encrypt_env(envs_list, encrypt_pubkey) + updates.append("environment variables") + + # update allowed_envs in compose file if needed + if compose_file_content: + try: + app_compose = json.loads(compose_file_content) + except json.JSONDecodeError: + app_compose = {} + compose_changed = False + allowed_envs = list(envs.keys()) + if app_compose.get('allowed_envs') != allowed_envs: + app_compose['allowed_envs'] = allowed_envs + compose_changed = True + launch_token_value = envs.get('APP_LAUNCH_TOKEN') + if launch_token_value is not None: + launch_token_hash = hashlib.sha256( + launch_token_value.encode('utf-8') + ).hexdigest() + if app_compose.get('launch_token_hash') != launch_token_hash: + app_compose['launch_token_hash'] = launch_token_hash + compose_changed = True + if compose_changed: + upgrade_params['compose_file'] = json.dumps( + app_compose, indent=4, ensure_ascii=False) + + if user_config: + upgrade_params["user_config"] = user_config + updates.append("user config") + + # handle port updates - only update if --port or --no-ports is specified + if no_ports or ports is not None: + if no_ports: + port_mappings = [] + updates.append("port mappings (removed)") + elif ports: + port_mappings = [parse_port_mapping(port) for port in ports] + updates.append("port mappings") + else: + # ports is an empty list - shouldn't happen with mutually exclusive group + port_mappings = [] + updates.append("port mappings (none)") + upgrade_params["update_ports"] = True + upgrade_params["ports"] = port_mappings + + # handle GPU updates - only update if one of the GPU flags is set + if attach_all or no_gpus or gpu_slots is not None: + if attach_all: + gpu_config = {"attach_mode": "all"} + updates.append("GPUs (all)") + elif no_gpus: + gpu_config = { + "attach_mode": "listed", + "gpus": [] + } + updates.append("GPUs (detached)") + elif gpu_slots: + gpu_config = { + "attach_mode": "listed", + "gpus": [{"slot": gpu} for gpu in gpu_slots] + } + updates.append(f"GPUs ({len(gpu_slots)} devices)") + else: + # gpu_slots is an empty list ([] not None) - shouldn't happen with mutually exclusive group + gpu_config = { + "attach_mode": "listed", + "gpus": [] + } + updates.append("GPUs (none)") + upgrade_params["gpus"] = gpu_config + + if no_tee is not None: + upgrade_params["no_tee"] = no_tee + updates.append("TEE disabled" if no_tee else "TEE enabled") + + if len(upgrade_params) > 1: # more than just the id + self.rpc_call("UpgradeApp", upgrade_params) + + if updates: + print(f"Updated VM {vm_id}: {', '.join(updates)}") + else: + print(f"No updates specified for VM {vm_id}") def list_gpus(self, json_output: bool = False) -> None: """List all available GPUs""" @@ -874,6 +1126,18 @@ def main(): remove_parser = subparsers.add_parser('remove', help='Remove a VM') remove_parser.add_argument('vm_id', help='VM ID to remove') + # Resize command + resize_parser = subparsers.add_parser("resize", help="Resize a VM") + resize_parser.add_argument("vm_id", help="VM ID to resize") + resize_parser.add_argument("--vcpu", type=int, help="Number of vCPUs") + resize_parser.add_argument( + "--memory", type=parse_memory_size, 
help="Memory size (e.g. 1G, 100M)" + ) + resize_parser.add_argument( + "--disk", type=parse_disk_size, help="Disk size (e.g. 20G, 1T)" + ) + resize_parser.add_argument("--image", type=str, help="Image name") + # Logs command logs_parser = subparsers.add_parser('logs', help='Show VM logs') logs_parser.add_argument('vm_id', help='VM ID to show logs for') @@ -908,6 +1172,9 @@ def main(): '--no-instance-id', action='store_true', help='Disable instance ID') compose_parser.add_argument( '--secure-time', action='store_true', help='Enable secure time') + compose_parser.add_argument( + '--swap', type=parse_memory_size, default=None, + help='Swap size (e.g. 4G). Set to 0 to disable') compose_parser.add_argument( '--output', required=True, help='Path to output app-compose.json file') @@ -923,6 +1190,9 @@ def main(): '--memory', type=parse_memory_size, default=1024, help='Memory size (e.g. 1G, 100M)') deploy_parser.add_argument( '--disk', type=parse_disk_size, default=20, help='Disk size (e.g. 1G, 100M)') + deploy_parser.add_argument( + '--swap', type=parse_memory_size, default=None, + help='Swap size (e.g. 4G). Set to 0 to disable') deploy_parser.add_argument( '--env-file', help='File with environment variables to encrypt', default=None) deploy_parser.add_argument( @@ -944,6 +1214,11 @@ def main(): help='Gateway URL') deploy_parser.add_argument('--stopped', action='store_true', help='Create VM in stopped state (requires dstack-vmm >= 0.5.4)') + deploy_parser.add_argument('--no-tee', dest='no_tee', action='store_true', + help='Disable Intel TDX / run without TEE') + deploy_parser.add_argument('--tee', dest='no_tee', action='store_false', + help='Force-enable Intel TDX (default)') + deploy_parser.set_defaults(no_tee=False) # Images command lsimage_parser = subparsers.add_parser( @@ -1000,6 +1275,109 @@ def main(): update_user_config_parser.add_argument( 'user_config', help='Path to user config file') + # Update port mapping + update_ports_parser = subparsers.add_parser( + "update-ports", help="Update port mapping for a VM" + ) + update_ports_parser.add_argument("vm_id", help="VM ID to update") + update_ports_parser.add_argument( + "--port", + action="append", + type=str, + required=True, + help="Port mapping in format: protocol[:address]:from:to (can be used multiple times)", + ) + + # Update (all-in-one) command + update_parser = subparsers.add_parser( + "update", help="Update multiple aspects of a VM in one command" + ) + update_parser.add_argument("vm_id", help="VM ID to update") + + # Resource options (requires VM to be stopped) + update_parser.add_argument( + "--vcpu", type=int, help="Number of vCPUs" + ) + update_parser.add_argument( + "--memory", type=parse_memory_size, help="Memory size (e.g. 1G, 100M)" + ) + update_parser.add_argument( + "--disk", type=parse_disk_size, help="Disk size (e.g. 
20G, 1T)" + ) + update_parser.add_argument( + "--image", type=str, help="Image name" + ) + + # Application options + update_parser.add_argument( + "--compose", help="Path to app-compose.json file" + ) + update_parser.add_argument( + "--prelaunch-script", help="Path to pre-launch script file" + ) + update_parser.add_argument( + "--env-file", help="File with environment variables to encrypt" + ) + update_parser.add_argument( + "--user-config", help="Path to user config file" + ) + # Port mapping options (mutually exclusive with --no-ports) + port_group = update_parser.add_mutually_exclusive_group() + port_group.add_argument( + "--port", + action="append", + type=str, + help="Port mapping in format: protocol[:address]:from:to (can be used multiple times)", + ) + port_group.add_argument( + "--no-ports", + action="store_true", + help="Remove all port mappings from the VM", + ) + update_parser.add_argument( + "--swap", type=parse_memory_size, help="Swap size (e.g. 4G). Set to 0 to disable" + ) + + # GPU options (mutually exclusive) + gpu_group = update_parser.add_mutually_exclusive_group() + gpu_group.add_argument( + "--gpu", + action="append", + type=str, + help="GPU slot to attach (can be used multiple times)", + ) + gpu_group.add_argument( + "--ppcie", + action="store_true", + help="Enable PPCIE (Protected PCIe) mode - attach all available GPUs", + ) + gpu_group.add_argument( + "--no-gpus", + action="store_true", + help="Detach all GPUs from the VM", + ) + + # TDX toggle + tee_group = update_parser.add_mutually_exclusive_group() + tee_group.add_argument( + "--no-tee", + dest="no_tee", + action="store_true", + help="Disable Intel TDX / run without TEE", + ) + tee_group.add_argument( + "--tee", + dest="no_tee", + action="store_false", + help="Enable Intel TDX for the VM", + ) + update_parser.set_defaults(no_tee=None) + + # KMS URL for environment encryption + update_parser.add_argument( + "--kms-url", action="append", type=str, help="KMS URL" + ) + args = parser.parse_args() cli = VmmCLI(args.url, args.auth_user, args.auth_password) @@ -1012,6 +1390,14 @@ def main(): cli.stop_vm(args.vm_id, args.force) elif args.command == 'remove': cli.remove_vm(args.vm_id) + elif args.command == 'resize': + cli.resize_vm( + args.vm_id, + vcpu=args.vcpu, + memory=args.memory, + disk_size=args.disk, + image=args.image, + ) elif args.command == 'logs': cli.show_logs(args.vm_id, args.lines, args.follow) elif args.command == 'compose': @@ -1030,6 +1416,37 @@ def main(): args.vm_id, open(args.user_config, 'r').read()) elif args.command == 'update-app-compose': cli.update_vm_app_compose(args.vm_id, open(args.compose, 'r').read()) + elif args.command == "update-ports": + cli.update_vm_ports(args.vm_id, args.port) + elif args.command == "update": + compose_content = None + if args.compose: + compose_content = read_utf8(args.compose) + prelaunch_content = None + if hasattr(args, 'prelaunch_script') and args.prelaunch_script: + prelaunch_content = read_utf8(args.prelaunch_script) + user_config_content = None + if args.user_config: + user_config_content = read_utf8(args.user_config) + cli.update_vm( + args.vm_id, + vcpu=args.vcpu, + memory=args.memory, + disk_size=args.disk, + image=args.image, + docker_compose_content=compose_content, + prelaunch_script=prelaunch_content, + swap_size=args.swap if hasattr(args, 'swap') else None, + env_file=args.env_file, + user_config=user_config_content, + ports=args.port, + no_ports=args.no_ports if hasattr(args, 'no_ports') else False, + gpu_slots=args.gpu, + attach_all=args.ppcie, + 
no_gpus=args.no_gpus if hasattr(args, 'no_gpus') else False, + kms_urls=args.kms_url, + no_tee=args.no_tee, + ) elif args.command == 'kms': if not args.kms_action: kms_parser.print_help() diff --git a/vmm/ui/.gitignore b/vmm/ui/.gitignore new file mode 100644 index 00000000..82934168 --- /dev/null +++ b/vmm/ui/.gitignore @@ -0,0 +1,5 @@ +node_modules +dist +build +*.log +/src/proto/ diff --git a/vmm/ui/README.md b/vmm/ui/README.md new file mode 100644 index 00000000..55476d20 --- /dev/null +++ b/vmm/ui/README.md @@ -0,0 +1,26 @@ +# dstack Console UI + +This directory contains the source for the Vue-based VM management console. + +## Usage + +```bash +# Install dev dependencies (installs protobufjs CLI) +npm install + +# Build the console once +npm run build + +# Build continuously (writes console_v1 on changes) +npm run watch +``` + +The build step generates a single-file HTML artifact at `../src/console_v1.html` +which is served by `dstack-vmm` under `/` and `/v1`. The previous +`console_v0.html` remains untouched so the legacy UI stays available under `/v0`. + +The UI codebase is written in TypeScript. The build pipeline performs three steps: + +1. `scripts/build_proto.sh` (borrowed from `phala-blockchain`) uses `pbjs/pbts` to regenerate static JS bindings for `vmm_rpc.proto`. +2. `tsc` transpiles `src/**/*.ts` into `build/ts/`. +3. `build.mjs` bundles the transpiled output together with the runtime assets into a single HTML page `console_v1.html`. diff --git a/vmm/ui/build.mjs b/vmm/ui/build.mjs new file mode 100644 index 00000000..b1193a45 --- /dev/null +++ b/vmm/ui/build.mjs @@ -0,0 +1,257 @@ +// SPDX-FileCopyrightText: © 2025 Phala Network +// SPDX-License-Identifier: Apache-2.0 + +import fs from 'fs/promises'; +import path from 'path'; +import { spawn } from 'child_process'; +import { createRequire } from 'module'; + +const ROOT = path.resolve(new URL('.', import.meta.url).pathname); +const SOURCE_DIR = path.join(ROOT, 'src'); +const TS_OUT_DIR = path.join(ROOT, 'build', 'ts'); +const DIST_DIR = path.join(ROOT, 'dist'); +const ENTRY = 'main.js'; +const PBJS = path.join(ROOT, 'node_modules', '.bin', process.platform === 'win32' ? 'pbjs.cmd' : 'pbjs'); +const PBTS = path.join(ROOT, 'node_modules', '.bin', process.platform === 'win32' ? 'pbts.cmd' : 'pbts'); +const TSC = path.join(ROOT, 'node_modules', '.bin', process.platform === 'win32' ? 'tsc.cmd' : 'tsc'); +let MODULE_DIR = TS_OUT_DIR; +const nodeRequire = createRequire(path.join(ROOT, 'package.json')); + +function canonicalId(absPath) { + return path.relative(ROOT, absPath).split(path.sep).join('/'); +} + +function resolveModule(parentId, request) { + const base = parentId ? 
path.dirname(path.resolve(ROOT, parentId)) : MODULE_DIR; + const absPath = nodeRequire.resolve(request, { paths: [base] }); + return canonicalId(absPath); +} + +async function readFileCached(filePath) { + return fs.readFile(filePath, 'utf-8'); +} + +async function collectModules(entryId) { + const modules = new Map(); + + async function processModule(moduleId) { + if (modules.has(moduleId)) { + return; + } + const absPath = path.resolve(ROOT, moduleId); + const ext = path.extname(absPath); + if (ext === '.html') { + const content = await readFileCached(absPath); + modules.set(moduleId, { + type: 'raw', + code: `module.exports = ${JSON.stringify(content)};`, + dependencyMap: {}, + }); + return; + } + if (ext === '.json') { + const content = await readFileCached(absPath); + modules.set(moduleId, { + type: 'raw', + code: `module.exports = ${content};`, + dependencyMap: {}, + }); + return; + } + if (ext !== '.js' && ext !== '.cjs' && ext !== '.mjs') { + throw new Error(`Unsupported module extension: ${absPath}`); + } + const source = await readFileCached(absPath); + const dependencyMap = {}; + const requireRegex = /require\(['"](.+?)['"]\)/g; + let match; + while ((match = requireRegex.exec(source)) !== null) { + const lineStart = source.lastIndexOf('\n', match.index) + 1; + const trimmed = source.slice(lineStart, match.index).trim(); + if (trimmed.startsWith('//') || trimmed.startsWith('*')) { + continue; + } + const request = match[1]; + const resolved = resolveModule(moduleId, request); + dependencyMap[request] = resolved; + } + const dependencies = Array.from(new Set(Object.values(dependencyMap))); + modules.set(moduleId, { + type: 'js', + code: source, + dependencyMap, + dependencies, + }); + for (const dep of dependencies) { + await processModule(dep); + } + } + + await processModule(entryId); + return modules; +} + +function createBundle(modules, entryId) { + const moduleEntries = []; + for (const [id, info] of modules.entries()) { + const deps = JSON.stringify(info.dependencyMap || {}); + moduleEntries.push( + `'${id}': { factory: function(module, exports, require) {\n${info.code}\n}, map: ${deps} }`, + ); + } + return `(function(){\n const modules = {\n${moduleEntries.join(',\n')}\n };\n const cache = {};\n function load(id) {\n if (cache[id]) {\n return cache[id];\n }\n const entry = modules[id];\n if (!entry) {\n throw new Error('Unknown module ' + id);\n }\n const module = { exports: {} };\n cache[id] = module.exports;\n entry.factory(module, module.exports, createRequire(id));\n cache[id] = module.exports;\n return cache[id];\n }\n function createRequire(parentId) {\n return function(request) {\n const parent = modules[parentId];\n if (!parent) {\n throw new Error('Unknown parent module ' + parentId);\n }\n const resolved = parent.map && parent.map[request];\n if (!resolved) {\n throw new Error('Cannot resolve module ' + request + ' from ' + parentId);\n }\n return load(resolved);\n };\n }\n load('${entryId}');\n})();`; +} + +async function inlineStyles(html, baseDir) { + const linkRegex = /(?:\s*<\/link>)?/gi; + let result = html; + let match; + while ((match = linkRegex.exec(html)) !== null) { + const href = match[1]; + const cssPath = path.resolve(baseDir, href); + const cssContent = await fs.readFile(cssPath, 'utf-8'); + const styleTag = ``; + result = result.replace(match[0], styleTag); + } + return result; +} + +async function inlineScripts(html, scripts) { + let result = html; + for (const { placeholder, code } of scripts) { + result = result.replace(placeholder, ``); + } + 
return result; +} + +async function run(command, args) { + await new Promise((resolve, reject) => { + const proc = spawn(command, args, { stdio: 'inherit' }); + proc.on('close', (code) => { + if (code === 0) { + resolve(); + } else { + reject(new Error(`${command} exited with code ${code}`)); + } + }); + }); +} + +async function copyDir(src, dest) { + const entries = await fs.readdir(src, { withFileTypes: true }); + await fs.mkdir(dest, { recursive: true }); + await Promise.all( + entries.map((entry) => { + const srcPath = path.join(src, entry.name); + const destPath = path.join(dest, entry.name); + if (entry.isDirectory()) { + return copyDir(srcPath, destPath); + } + return fs.copyFile(srcPath, destPath); + }), + ); +} + +async function compileProto() { + await run('bash', [path.join(ROOT, 'scripts', 'build_proto.sh')]); +} + +async function compileTypeScript() { + await fs.rm(TS_OUT_DIR, { recursive: true, force: true }); + await run(TSC, ['--project', path.join(ROOT, 'tsconfig.json')]); + await copyDir(path.join(SOURCE_DIR, 'templates'), path.join(TS_OUT_DIR, 'templates')); +} + +async function build({ watch = false } = {}) { + await fs.mkdir(DIST_DIR, { recursive: true }); + MODULE_DIR = TS_OUT_DIR; + + await compileProto(); + await compileTypeScript(); + + const entryId = canonicalId(path.resolve(MODULE_DIR, ENTRY)); + const modules = await collectModules(entryId); + const bundle = createBundle(modules, entryId); + + const indexPath = path.join(SOURCE_DIR, 'index.html'); + let html = await fs.readFile(indexPath, 'utf-8'); + html = await inlineStyles(html, SOURCE_DIR); + + const vuePlaceholder = /<\/script>/i; + const vuePath = path.join(ROOT, 'vendor/vue.global.prod.js'); + let vueInlined = false; + try { + const vueCode = await fs.readFile(vuePath, 'utf-8'); + html = html.replace(vuePlaceholder, ``); + vueInlined = true; + } catch { + console.warn('Warning: vendor/vue.global.prod.js not found – using CDN fallback.'); + } + if (!vueInlined) { + html = html.replace( + vuePlaceholder, + '', + ); + } + + html = await inlineScripts(html, [ + { + placeholder: '', + code: bundle, + }, + ]); + + const distFile = path.join(DIST_DIR, 'index.html'); + await fs.writeFile(distFile, html); + + const targetFile = path.resolve(ROOT, '../src/console_v1.html'); + await fs.writeFile(targetFile, html); + + if (watch) { + console.log('Watching for changes...'); + const watcher = fs.watch(SOURCE_DIR, { recursive: true }, async () => { + try { + await compileProto(); + await compileTypeScript(); + const mods = await collectModules(entryId); + const rebundle = createBundle(mods, entryId); + let rehtml = await fs.readFile(indexPath, 'utf-8'); + rehtml = await inlineStyles(rehtml, SOURCE_DIR); + let vueEmbedded = false; + try { + const vueCode = await fs.readFile(vuePath, 'utf-8'); + rehtml = rehtml.replace(vuePlaceholder, ``); + vueEmbedded = true; + } catch { + console.warn('Warning: vendor/vue.global.prod.js not found – using CDN fallback.'); + } + if (!vueEmbedded) { + rehtml = rehtml.replace( + vuePlaceholder, + '', + ); + } + rehtml = await inlineScripts(rehtml, [ + { + placeholder: '', + code: rebundle, + }, + ]); + await fs.writeFile(distFile, rehtml); + const spdxHeader = '\n'; + await fs.writeFile(targetFile, spdxHeader + rehtml); + console.log('Rebuilt console'); + } catch (err) { + console.error('Build failed:', err); + } + }); + process.on('SIGINT', () => watcher.close()); + } +} + +const watchMode = process.argv.includes('--watch'); + +build({ watch: watchMode }).catch((error) => { + 
console.error(error); + process.exit(1); +}); diff --git a/vmm/ui/package-lock.json b/vmm/ui/package-lock.json new file mode 100644 index 00000000..22d204e9 --- /dev/null +++ b/vmm/ui/package-lock.json @@ -0,0 +1,778 @@ +{ + "name": "vmm-ui", + "version": "0.1.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "vmm-ui", + "version": "0.1.0", + "devDependencies": { + "@types/node": "^20.11.30", + "protobufjs": "^7.2.4", + "protobufjs-cli": "^1.1.3", + "typescript": "^5.4.5" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.28.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.5" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/types": { + "version": "7.28.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@jsdoc/salty": { + "version": "0.2.9", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "lodash": "^4.17.21" + }, + "engines": { + "node": ">=v12.0.0" + } + }, + "node_modules/@protobufjs/aspromise": { + "version": "1.1.2", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/base64": { + "version": "1.1.2", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/codegen": { + "version": "2.0.4", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/eventemitter": { + "version": "1.1.0", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/fetch": { + "version": "1.1.0", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@protobufjs/aspromise": "^1.1.1", + "@protobufjs/inquire": "^1.1.0" + } + }, + "node_modules/@protobufjs/float": { + "version": "1.0.2", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/inquire": { + "version": "1.1.0", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/path": { + "version": "1.1.2", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/pool": { + "version": "1.1.0", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/utf8": { + "version": "1.1.0", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@types/linkify-it": { + "version": "5.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/markdown-it": { + "version": "14.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/linkify-it": "^5", + "@types/mdurl": "^2" + } + }, + "node_modules/@types/mdurl": { + "version": "2.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "20.19.24", + "resolved": "https://registry.npmmirror.com/@types/node/-/node-20.19.24.tgz", + "integrity": "sha512-FE5u0ezmi6y9OZEzlJfg37mqqf6ZDSF2V/NLjUyGrR9uTZ7Sb9F7bLNZ03S4XVUNRWGA7Ck4c1kK+YnuWjl+DA==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/acorn": { + "version": "8.15.0", + "dev": true, + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + 
"node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "dev": true, + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "dev": true, + "license": "MIT" + }, + "node_modules/bluebird": { + "version": "3.7.2", + "dev": true, + "license": "MIT" + }, + "node_modules/brace-expansion": { + "version": "2.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/catharsis": { + "version": "0.9.0", + "dev": true, + "license": "MIT", + "dependencies": { + "lodash": "^4.17.15" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "dev": true, + "license": "MIT" + }, + "node_modules/deep-is": { + "version": "0.1.4", + "dev": true, + "license": "MIT" + }, + "node_modules/entities": { + "version": "4.5.0", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/escape-string-regexp": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/escodegen": { + "version": "1.14.3", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esprima": "^4.0.1", + "estraverse": "^4.2.0", + "esutils": "^2.0.2", + "optionator": "^0.8.1" + }, + "bin": { + "escodegen": "bin/escodegen.js", + "esgenerate": "bin/esgenerate.js" + }, + "engines": { + "node": ">=4.0" + }, + "optionalDependencies": { + "source-map": "~0.6.1" + } + }, + "node_modules/escodegen/node_modules/estraverse": { + "version": "4.3.0", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/espree": { + "version": "9.6.1", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "dev": true, + "license": "BSD-2-Clause", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + 
"node_modules/esutils": { + "version": "2.0.3", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "dev": true, + "license": "MIT" + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "dev": true, + "license": "ISC" + }, + "node_modules/glob": { + "version": "8.1.0", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^5.0.1", + "once": "^1.3.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "dev": true, + "license": "ISC" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "dev": true, + "license": "ISC" + }, + "node_modules/js2xmlparser": { + "version": "4.0.2", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "xmlcreate": "^2.0.4" + } + }, + "node_modules/jsdoc": { + "version": "4.0.5", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@babel/parser": "^7.20.15", + "@jsdoc/salty": "^0.2.1", + "@types/markdown-it": "^14.1.1", + "bluebird": "^3.7.2", + "catharsis": "^0.9.0", + "escape-string-regexp": "^2.0.0", + "js2xmlparser": "^4.0.2", + "klaw": "^3.0.0", + "markdown-it": "^14.1.0", + "markdown-it-anchor": "^8.6.7", + "marked": "^4.0.10", + "mkdirp": "^1.0.4", + "requizzle": "^0.2.3", + "strip-json-comments": "^3.1.0", + "underscore": "~1.13.2" + }, + "bin": { + "jsdoc": "jsdoc.js" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/klaw": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.1.9" + } + }, + "node_modules/levn": { + "version": "0.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/linkify-it": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "uc.micro": "^2.0.0" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "dev": true, + "license": "MIT" + }, + "node_modules/long": { + "version": "5.3.2", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/markdown-it": { + "version": "14.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1", + "entities": "^4.4.0", + "linkify-it": "^5.0.0", + "mdurl": "^2.0.0", + "punycode.js": "^2.3.1", + "uc.micro": "^2.1.0" + }, + "bin": { + "markdown-it": "bin/markdown-it.mjs" + } + }, + "node_modules/markdown-it-anchor": { + "version": "8.6.7", + "dev": true, + "license": "Unlicense", + "peerDependencies": { + "@types/markdown-it": "*", + "markdown-it": "*" + } + }, + "node_modules/marked": { + "version": "4.3.0", + "dev": true, + "license": "MIT", + "bin": { + "marked": "bin/marked.js" + }, + "engines": { + "node": ">= 12" + } + }, + "node_modules/mdurl": { + "version": "2.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/minimatch": { + "version": "5.1.6", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "dev": 
true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/mkdirp": { + "version": "1.0.4", + "dev": true, + "license": "MIT", + "bin": { + "mkdirp": "bin/cmd.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/once": { + "version": "1.4.0", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/optionator": { + "version": "0.8.3", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-is": "~0.1.3", + "fast-levenshtein": "~2.0.6", + "levn": "~0.3.0", + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2", + "word-wrap": "~1.2.3" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/prelude-ls": { + "version": "1.1.2", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/protobufjs": { + "version": "7.5.4", + "dev": true, + "hasInstallScript": true, + "license": "BSD-3-Clause", + "dependencies": { + "@protobufjs/aspromise": "^1.1.2", + "@protobufjs/base64": "^1.1.2", + "@protobufjs/codegen": "^2.0.4", + "@protobufjs/eventemitter": "^1.1.0", + "@protobufjs/fetch": "^1.1.0", + "@protobufjs/float": "^1.0.2", + "@protobufjs/inquire": "^1.1.0", + "@protobufjs/path": "^1.1.2", + "@protobufjs/pool": "^1.1.0", + "@protobufjs/utf8": "^1.1.0", + "@types/node": ">=13.7.0", + "long": "^5.0.0" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/protobufjs-cli": { + "version": "1.1.3", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "chalk": "^4.0.0", + "escodegen": "^1.13.0", + "espree": "^9.0.0", + "estraverse": "^5.1.0", + "glob": "^8.0.0", + "jsdoc": "^4.0.0", + "minimist": "^1.2.0", + "semver": "^7.1.2", + "tmp": "^0.2.1", + "uglify-js": "^3.7.7" + }, + "bin": { + "pbjs": "bin/pbjs", + "pbts": "bin/pbts" + }, + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "protobufjs": "^7.0.0" + } + }, + "node_modules/punycode.js": { + "version": "2.3.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/requizzle": { + "version": "0.2.4", + "dev": true, + "license": "MIT", + "dependencies": { + "lodash": "^4.17.21" + } + }, + "node_modules/semver": { + "version": "7.7.3", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/source-map": { + "version": "0.6.1", + "dev": true, + "license": "BSD-3-Clause", + "optional": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/tmp": { + "version": "0.2.5", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.14" + } + }, + "node_modules/type-check": { + "version": "0.3.2", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "~1.1.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmmirror.com/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + 
}, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/uc.micro": { + "version": "2.1.0", + "dev": true, + "license": "MIT" + }, + "node_modules/uglify-js": { + "version": "3.19.3", + "dev": true, + "license": "BSD-2-Clause", + "bin": { + "uglifyjs": "bin/uglifyjs" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/underscore": { + "version": "1.13.7", + "dev": true, + "license": "MIT" + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmmirror.com/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "dev": true, + "license": "ISC" + }, + "node_modules/xmlcreate": { + "version": "2.0.4", + "dev": true, + "license": "Apache-2.0" + } + } +} diff --git a/vmm/ui/package.json b/vmm/ui/package.json new file mode 100644 index 00000000..18165f32 --- /dev/null +++ b/vmm/ui/package.json @@ -0,0 +1,17 @@ +{ + "name": "vmm-ui", + "version": "0.1.0", + "private": true, + "type": "module", + "scripts": { + "generate:proto": "bash scripts/build_proto.sh", + "build": "node build.mjs", + "watch": "node build.mjs --watch" + }, + "devDependencies": { + "@types/node": "^20.11.30", + "protobufjs": "^7.2.4", + "protobufjs-cli": "^1.1.3", + "typescript": "^5.4.5" + } +} diff --git a/vmm/ui/scripts/build_proto.sh b/vmm/ui/scripts/build_proto.sh new file mode 100755 index 00000000..a39088db --- /dev/null +++ b/vmm/ui/scripts/build_proto.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# SPDX-FileCopyrightText: © 2024-2025 Phala Network +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +PROTO_DIR="${ROOT}/../rpc/proto" +OUT_DIR="${ROOT}/src/proto" +PBJS="${ROOT}/node_modules/.bin/pbjs" +JSDOC="${ROOT}/node_modules/.bin/jsdoc" +PBTS_CONFIG="${ROOT}/node_modules/protobufjs-cli/lib/tsd-jsdoc.json" + +if [ ! -x "${PBJS}" ] || [ ! -x "${JSDOC}" ]; then + echo "protobufjs CLI not found. Run 'npm install' first." >&2 + exit 1 +fi + +mkdir -p "${OUT_DIR}" + +generate_proto() { + local name="$1" + echo "[proto] Generating ${name} bindings..." + "${PBJS}" --keep-case -w commonjs -t static-module --path "${PROTO_DIR}" "${PROTO_DIR}/${name}.proto" -o "${OUT_DIR}/${name}.js" + tmp_file=$(mktemp) + "${JSDOC}" -c "${PBTS_CONFIG}" -q "module=null&comments=true" "${OUT_DIR}/${name}.js" > "${tmp_file}" + { + echo 'import * as $protobuf from "protobufjs";' + echo 'import Long = require("long");' + cat "${tmp_file}" + } > "${OUT_DIR}/${name}.d.ts" + rm -f "${tmp_file}" +} + +generate_proto "vmm_rpc" +generate_proto "prpc" + +echo "[proto] Done." 
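For orientation, the static modules that `pbjs` emits above are consumed like any protobufjs static binding. A minimal sketch of a round trip through the generated `vmm_rpc` bindings — assuming `vmm_rpc.proto` declares its messages under `package vmm` (the namespace that `composables/useVmManager.ts` imports) and that `StatusRequest` carries the fields the console later passes to it:

```ts
// Sketch only: encode/decode a StatusRequest via the pbjs-generated static
// module. Assumes `package vmm` in vmm_rpc.proto; field names stay snake_case
// because build_proto.sh runs pbjs with --keep-case.
import { vmm } from './src/proto/vmm_rpc';

// create() fills in defaults and returns a typed message instance.
const request = vmm.StatusRequest.create({
  brief: true,      // summary fields only, as the VM list view requests
  keyword: 'web',   // hypothetical server-side name filter
  page: 1,
  page_size: 50,
});

// encode() returns a protobuf Writer; finish() yields the wire-format bytes.
const bytes: Uint8Array = vmm.StatusRequest.encode(request).finish();

// decode() parses the bytes back into a typed message object.
const roundTripped = vmm.StatusRequest.decode(bytes);
console.log(roundTripped.page_size); // 50
```

The matching `.d.ts` that the script derives via `jsdoc` and the `tsd-jsdoc` template is what allows `useVmManager.ts` to do a type-only `import type { vmm as VmmTypes }` without pulling the runtime module in twice.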
diff --git a/vmm/ui/src/App.ts b/vmm/ui/src/App.ts new file mode 100644 index 00000000..d1f09ce6 --- /dev/null +++ b/vmm/ui/src/App.ts @@ -0,0 +1,29 @@ +// SPDX-FileCopyrightText: © 2025 Phala Network +// SPDX-License-Identifier: Apache-2.0 + +const EncryptedEnvEditor = require('./components/EncryptedEnvEditor'); +const PortMappingEditor = require('./components/PortMappingEditor'); +const GpuConfigEditor = require('./components/GpuConfigEditor'); +const CreateVmDialog = require('./components/CreateVmDialog'); +const UpdateVmDialog = require('./components/UpdateVmDialog'); +const ForkVmDialog = require('./components/ForkVmDialog'); +const { useVmManager } = require('./composables/useVmManager'); +const template: string = require('./templates/app.html'); + +const AppComponent = { + name: 'DstackConsoleApp', + components: { + 'encrypted-env-editor': EncryptedEnvEditor, + 'port-mapping-editor': PortMappingEditor, + 'gpu-config-editor': GpuConfigEditor, + 'create-vm-dialog': CreateVmDialog, + 'update-vm-dialog': UpdateVmDialog, + 'fork-vm-dialog': ForkVmDialog, + }, + setup() { + return useVmManager(); + }, + template, +}; + +export = AppComponent; diff --git a/vmm/ui/src/components/CreateVmDialog.ts b/vmm/ui/src/components/CreateVmDialog.ts new file mode 100644 index 00000000..21d2fa99 --- /dev/null +++ b/vmm/ui/src/components/CreateVmDialog.ts @@ -0,0 +1,165 @@ +// SPDX-FileCopyrightText: © 2025 Phala Network +// SPDX-License-Identifier: Apache-2.0 + +const EncryptedEnvEditor = require('./EncryptedEnvEditor'); +const PortMappingEditor = require('./PortMappingEditor'); +const GpuConfigEditor = require('./GpuConfigEditor'); + +const CreateVmDialogComponent = { + name: 'CreateVmDialog', + components: { + 'encrypted-env-editor': EncryptedEnvEditor, + 'port-mapping-editor': PortMappingEditor, + 'gpu-config-editor': GpuConfigEditor, + }, + props: { + visible: { type: Boolean, required: true }, + form: { type: Object, required: true }, + availableImages: { type: Array, required: true }, + availableGpus: { type: Array, required: true }, + allowAttachAllGpus: { type: Boolean, required: true }, + kmsAvailable: { type: Boolean, required: true }, + portMappingEnabled: { type: Boolean, required: true }, + }, + emits: ['close', 'submit', 'load-compose'], + template: /* html */ ` +
+      <!-- "Deploy a new instance" dialog markup: name/image/vCPU/memory/disk
+           fields, swap size ("Leave as 0 to disable swap."), docker-compose
+           loader ("or paste below"), encrypted-env / port-mapping / gpu-config
+           editors, advanced options, and deploy/cancel actions -->
+ `, +}; + +export = CreateVmDialogComponent; diff --git a/vmm/ui/src/components/EncryptedEnvEditor.ts b/vmm/ui/src/components/EncryptedEnvEditor.ts new file mode 100644 index 00000000..1a081bfd --- /dev/null +++ b/vmm/ui/src/components/EncryptedEnvEditor.ts @@ -0,0 +1,163 @@ +// SPDX-FileCopyrightText: © 2025 Phala Network +// SPDX-License-Identifier: Apache-2.0 + +declare const FileReader: any; + +type EnvVar = { key: string; value: string }; + +type ComponentInstance = { + envVars: EnvVar[]; + $refs: { envFileInput?: HTMLInputElement }; + $data: { + editMode: 'form' | 'text'; + textContent: string; + }; + parseTextContent(): void; +}; + +const EncryptedEnvEditorComponent = { + name: 'EncryptedEnvEditor', + props: { + envVars: { + type: Array, + required: true, + }, + }, + data() { + return { + editMode: 'form' as 'form' | 'text', + textContent: '', + }; + }, + template: /* html */ ` +
+      <!-- "Encrypted Environment Variables" editor markup: form/text mode
+           toggle, empty state ("No environment variables yet. Click "Add" to
+           create one."), key/value rows with add/remove controls, env-file
+           import, and the text-mode hint "Format: KEY=VALUE (one per line).
+           Lines starting with # are ignored." -->
+ `, + methods: { + addEnv(this: ComponentInstance) { + this.envVars.push({ key: '', value: '' }); + }, + removeEnv(this: ComponentInstance, index: number) { + this.envVars.splice(index, 1); + }, + triggerFileInput(this: ComponentInstance) { + this.$refs.envFileInput?.click(); + }, + switchToForm(this: ComponentInstance) { + this.parseTextContent(); + this.$data.editMode = 'form'; + }, + switchToText(this: ComponentInstance) { + this.$data.textContent = this.envVars + .map((env) => `${env.key}=${env.value}`) + .join('\n'); + this.$data.editMode = 'text'; + }, + parseTextContent(this: ComponentInstance) { + const content = this.$data.textContent; + if (!content.trim()) { + return; + } + const lines = content.split('\n'); + this.envVars.splice(0, this.envVars.length); + for (const line of lines) { + const trimmed = line.trim(); + if (!trimmed || trimmed.startsWith('#')) { + continue; + } + const equalIndex = trimmed.indexOf('='); + if (equalIndex === -1) { + continue; + } + const key = trimmed.substring(0, equalIndex).trim(); + const value = trimmed.substring(equalIndex + 1).trim(); + if (!key) { + continue; + } + this.envVars.push({ key, value }); + } + }, + loadEnvFromFile(this: ComponentInstance, event: Event) { + const input = event.target as HTMLInputElement | null; + const file = input?.files?.[0]; + if (!file) { + return; + } + const reader = new FileReader(); + reader.onload = (e: { target: { result: string } }) => { + const content = e.target.result || ''; + const lines = content.split('\n'); + this.envVars.splice(0, this.envVars.length); + for (const line of lines) { + const trimmed = line.trim(); + if (!trimmed || trimmed.startsWith('#')) { + continue; + } + const equalIndex = trimmed.indexOf('='); + if (equalIndex === -1) { + continue; + } + const key = trimmed.substring(0, equalIndex).trim(); + const value = trimmed.substring(equalIndex + 1).trim(); + if (!key) { + continue; + } + this.envVars.push({ key, value }); + } + }; + reader.readAsText(file); + if (input) { + input.value = ''; + } + }, + }, +}; + +export = EncryptedEnvEditorComponent; diff --git a/vmm/ui/src/components/ForkVmDialog.ts b/vmm/ui/src/components/ForkVmDialog.ts new file mode 100644 index 00000000..1a5b2d61 --- /dev/null +++ b/vmm/ui/src/components/ForkVmDialog.ts @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: © 2025 Phala Network +// SPDX-License-Identifier: Apache-2.0 + +const ForkVmDialogComponent = { + name: 'ForkVmDialog', + props: { + visible: { type: Boolean, required: true }, + dialog: { type: Object, required: true }, + availableImages: { type: Array, required: true }, + }, + emits: ['close', 'submit'], + template: /* html */ ` +
+      <!-- "Derive VM" dialog markup: warning "This will create a new VM
+           instance with the same app id, but the disk state will NOT migrate
+           to the new instance.", name/image/resource fields, and
+           submit/cancel actions -->
+ `, +}; + +export = ForkVmDialogComponent; diff --git a/vmm/ui/src/components/GpuConfigEditor.ts b/vmm/ui/src/components/GpuConfigEditor.ts new file mode 100644 index 00000000..11a142e0 --- /dev/null +++ b/vmm/ui/src/components/GpuConfigEditor.ts @@ -0,0 +1,79 @@ +// SPDX-FileCopyrightText: © 2025 Phala Network +// SPDX-License-Identifier: Apache-2.0 + +declare const Vue: any; +const { computed } = Vue; + +type ComponentInstance = { + availableGpus: Array<{ slot: string; description?: string; is_free?: boolean }>; + gpus: string[]; + attachAll: boolean; +}; + +const GpuConfigEditorComponent = { + name: 'GpuConfigEditor', + props: { + availableGpus: { + type: Array, + required: true, + }, + gpus: { + type: Array, + required: true, + }, + attachAll: { + type: Boolean, + required: true, + }, + allowAttachAll: { + type: Boolean, + required: true, + }, + }, + emits: ['update:gpus', 'update:attachAll'], + setup(props: any, { emit }: any) { + const selectedGpus = computed({ + get: () => props.gpus, + set: (value: string[]) => emit('update:gpus', value), + }); + + const attachAllComputed = computed({ + get: () => props.attachAll, + set: (value: boolean) => emit('update:attachAll', value), + }); + + return { + selectedGpus, + attachAllComputed, + }; + }, + template: /* html */ ` +
+      <!-- GPU selection markup: "Select GPUs to attach:" checkbox list bound
+           to selectedGpus, plus an attach-all mode noting "All NVIDIA GPUs and
+           NVSwitches will be attached to the VM" -->
+ `, +}; + +export = GpuConfigEditorComponent; diff --git a/vmm/ui/src/components/PortMappingEditor.ts b/vmm/ui/src/components/PortMappingEditor.ts new file mode 100644 index 00000000..8658b8bb --- /dev/null +++ b/vmm/ui/src/components/PortMappingEditor.ts @@ -0,0 +1,57 @@ +// SPDX-FileCopyrightText: © 2025 Phala Network +// SPDX-License-Identifier: Apache-2.0 + +type PortEntry = { + protocol: string; + host_address: string; + host_port: number | null; + vm_port: number | null; +}; + +type ComponentInstance = { + ports: PortEntry[]; +}; + +const PortMappingEditorComponent = { + name: 'PortMappingEditor', + props: { + ports: { + type: Array, + required: true, + }, + }, + template: /* html */ ` +
+      <!-- port-mapping table rows (protocol, host address, host port, VM
+           port) with add/remove controls -->
+ `, + methods: { + addPort(this: ComponentInstance) { + this.ports.push({ + protocol: 'tcp', + host_address: '127.0.0.1', + host_port: null, + vm_port: null, + }); + }, + removePort(this: ComponentInstance, index: number) { + this.ports.splice(index, 1); + }, + }, +}; + +export = PortMappingEditorComponent; diff --git a/vmm/ui/src/components/UpdateVmDialog.ts b/vmm/ui/src/components/UpdateVmDialog.ts new file mode 100644 index 00000000..f1c499ec --- /dev/null +++ b/vmm/ui/src/components/UpdateVmDialog.ts @@ -0,0 +1,143 @@ +// SPDX-FileCopyrightText: © 2025 Phala Network +// SPDX-License-Identifier: Apache-2.0 + +const EncryptedEnvEditor = require('./EncryptedEnvEditor'); +const PortMappingEditor = require('./PortMappingEditor'); +const GpuConfigEditor = require('./GpuConfigEditor'); + +const UpdateVmDialogComponent = { + name: 'UpdateVmDialog', + components: { + 'encrypted-env-editor': EncryptedEnvEditor, + 'port-mapping-editor': PortMappingEditor, + 'gpu-config-editor': GpuConfigEditor, + }, + props: { + visible: { type: Boolean, required: true }, + dialog: { type: Object, required: true }, + availableImages: { type: Array, required: true }, + availableGpus: { type: Array, required: true }, + allowAttachAllGpus: { type: Boolean, required: true }, + portMappingEnabled: { type: Boolean, required: true }, + kmsEnabled: { type: Boolean, required: true }, + composeHashPreview: { type: String, required: true }, + }, + emits: ['close', 'submit', 'load-compose'], + template: /* html */ ` +
+      <!-- "Update VM Config" dialog markup: resource/image fields, swap size
+           (hint: Enable "Update compose" to change swap size.), compose
+           loader ("or paste below"), live "Compose Hash: 0x{{ composeHashPreview }}"
+           preview, encrypted-env / port-mapping / gpu-config editors, and
+           update/cancel actions -->
+ `, +}; + +export = UpdateVmDialogComponent; diff --git a/vmm/ui/src/composables/useVmManager.ts b/vmm/ui/src/composables/useVmManager.ts new file mode 100644 index 00000000..ac598136 --- /dev/null +++ b/vmm/ui/src/composables/useVmManager.ts @@ -0,0 +1,1534 @@ +// SPDX-FileCopyrightText: © 2025 Phala Network +// SPDX-License-Identifier: Apache-2.0 + +declare const Vue: any; +const { ref, computed, watch, onMounted } = Vue; +import type { vmm as VmmTypes } from '../proto/vmm_rpc'; + +// Types based on Rust definitions +type VmConfiguration = VmmTypes.IVmConfiguration; + +type AppCompose = { + manifest_version: number; + name: string; + features: string[]; + runner: string; + docker_compose_file?: string; + public_logs: boolean; + public_sysinfo: boolean; + public_tcbinfo: boolean; + kms_enabled: boolean; + gateway_enabled: boolean; + tproxy_enabled?: boolean; + local_key_provider_enabled: boolean; + key_provider?: KeyProviderKind; + key_provider_id: string; + allowed_envs: string[]; + no_instance_id: boolean; + secure_time: boolean; + storage_fs?: string; + swap_size: number; + launch_token_hash?: string; + pre_launch_script?: string; +}; + +type KeyProviderKind = 'none' | 'kms' | 'local'; + +const x25519 = require('../lib/x25519.js'); +const { getVmmRpcClient } = require('../lib/vmmRpcClient'); + +const vmmRpc = getVmmRpcClient(); + +// System menu state +const systemMenu = ref({ + show: false, +}); +const devMode = ref(localStorage.getItem('devMode') === 'true'); + +type MemoryUnit = 'MB' | 'GB'; + +type JsonRpcCall = (method: string, params?: Record) => Promise; +type Ref = { value: T }; + +type VmListItem = { + id: string; + name: string; + app_id: string; + status: string; + app_url?: string; + uptime?: string; + boot_progress?: string; + shutdown_progress?: string; + image_version?: string; + configuration?: VmConfiguration; + appCompose?: AppCompose; +}; + +type EncryptedEnvEntry = { + key: string; + value: string; +}; + +type PortFormEntry = { + protocol: string; + host_address?: string; + host_port?: number | null; + vm_port?: number | null; +}; + +type VmFormState = { + name: string; + image: string; + dockerComposeFile: string; + preLaunchScript: string; + vcpu: number; + memory: number; + memoryValue: number; + memoryUnit: MemoryUnit; + swap_size: number; + swapValue: number; + swapUnit: MemoryUnit; + disk_size: number; + selectedGpus: string[]; + attachAllGpus: boolean; + ports: PortFormEntry[]; + encryptedEnvs: EncryptedEnvEntry[]; + storage_fs: string; + app_id: string | null; + kms_enabled: boolean; + local_key_provider_enabled: boolean; + key_provider_id: string; + gateway_enabled: boolean; + public_logs: boolean; + public_sysinfo: boolean; + public_tcbinfo: boolean; + no_tee: boolean; + pin_numa: boolean; + hugepages: boolean; + user_config: string; + kms_urls: string[]; + gateway_urls: string[]; + stopped: boolean; +}; + +type UpdateDialogState = { + show: boolean; + vm: VmListItem | null; + updateCompose: boolean; + dockerComposeFile: string; + preLaunchScript: string; + encryptedEnvs: EncryptedEnvEntry[]; + resetSecrets: boolean; + vcpu: number; + memory: number; + memoryValue: number; + memoryUnit: MemoryUnit; + swap_size: number; + swapValue: number; + swapUnit: MemoryUnit; + disk_size: number; + image: string; + ports: PortFormEntry[]; + attachAllGpus: boolean; + selectedGpus: string[]; + updateGpuConfig: boolean; + user_config: string; +}; + +type CloneConfigDialogState = { + show: boolean; + name: string; + compose_file: string; + image: string; + vcpu: number; + 
memory: number; + disk_size: number; + ports: PortFormEntry[]; + user_config: string; + gpus?: VmmTypes.IGpuConfig; + kms_urls?: string[]; + gateway_urls?: string[]; + hugepages: boolean; + pin_numa: boolean; + no_tee: boolean; + encrypted_env?: Uint8Array; + app_id?: string; + stopped: boolean; +}; + +function createVmFormState(preLaunchScript: string): VmFormState { + return { + name: '', + image: '', + dockerComposeFile: '', + preLaunchScript, + vcpu: 1, + memory: 2048, + memoryValue: 2, + memoryUnit: 'GB', + swap_size: 0, + swapValue: 0, + swapUnit: 'GB', + disk_size: 20, + selectedGpus: [], + attachAllGpus: false, + ports: [], + encryptedEnvs: [], + storage_fs: '', + app_id: null, + kms_enabled: true, + local_key_provider_enabled: false, + key_provider_id: '', + gateway_enabled: true, + public_logs: true, + public_sysinfo: true, + public_tcbinfo: true, + no_tee: false, + pin_numa: false, + hugepages: false, + user_config: '', + kms_urls: [], + gateway_urls: [], + stopped: false, + }; +} + +function createUpdateDialogState(): UpdateDialogState { + return { + show: false, + vm: null, + updateCompose: false, + dockerComposeFile: '', + preLaunchScript: '', + encryptedEnvs: [], + resetSecrets: false, + vcpu: 0, + memory: 0, + memoryValue: 0, + memoryUnit: 'MB', + swap_size: 0, + swapValue: 0, + swapUnit: 'GB', + disk_size: 0, + image: '', + ports: [], + attachAllGpus: false, + selectedGpus: [], + updateGpuConfig: false, + user_config: '', + }; +} + +function createCloneConfigDialogState(): CloneConfigDialogState { + return { + show: false, + name: '', + compose_file: '', + image: '', + vcpu: 0, + memory: 0, + disk_size: 0, + ports: [], + user_config: '', + gpus: undefined, + kms_urls: undefined, + gateway_urls: undefined, + hugepages: false, + pin_numa: false, + no_tee: false, + encrypted_env: undefined, + app_id: undefined, + stopped: false, + }; +} + +function useVmManager() { + const version = ref({ version: '-', commit: '' }); + const vms = ref([] as VmListItem[]); + const expandedVMs = ref(new Set() as Set); + const networkInfo = ref({} as Record); + const searchQuery = ref(''); + const currentPage = ref(1); + const pageInput = ref(1); + const pageSize = ref(Number.parseInt(localStorage.getItem('pageSize') || '50', 10)); + const totalVMs = ref(0); + const hasMorePages = ref(false); + const loadingVMDetails = ref(false); + const maxPage = computed(() => Math.ceil(totalVMs.value / pageSize.value) || 1); + + const preLaunchScript = ` +EXPECTED_TOKEN_HASH=$(jq -j .launch_token_hash app-compose.json) +if [ "$EXPECTED_TOKEN_HASH" == "null" ]; then + echo "Skipped APP_LAUNCH_TOKEN check" +else + ACTUAL_TOKEN_HASH=$(echo -n "$APP_LAUNCH_TOKEN" | sha256sum | cut -d' ' -f1) + if [ "$EXPECTED_TOKEN_HASH" != "$ACTUAL_TOKEN_HASH" ]; then + echo "Error: Incorrect APP_LAUNCH_TOKEN, please make sure set the correct APP_LAUNCH_TOKEN in env" + reboot + exit 1 + else + echo "APP_LAUNCH_TOKEN checked OK" + fi +fi +`; + + const vmForm: Ref = ref(createVmFormState(preLaunchScript)); + + const availableImages = ref([] as Array<{ name: string; version?: string }>); + const availableGpus = ref([] as Array); + const availableGpuProducts = ref([] as Array); + const allowAttachAllGpus = ref(false); + + const updateDialog: Ref = ref(createUpdateDialogState()); + + const updateMessage = ref(''); + const successMessage = ref(''); + const errorMessage = ref(''); + + const cloneConfigDialog: Ref = ref(createCloneConfigDialogState()); + + const showCreateDialog = ref(false); + const config = ref({ portMappingEnabled: 
false }); + const composeHashPreview = ref(''); + const updateComposeHashPreview = ref(''); + + const BYTES_PER_MB = 1024 * 1024; + + function convertMemoryToMB(value: number, unit: string) { + if (!Number.isFinite(value) || value < 0) { + return 0; + } + if (unit === 'GB') { + return value * 1024; + } + return value; + } + + function convertSwapToBytes(value: number, unit: string) { + const mb = convertMemoryToMB(value, unit); + if (!Number.isFinite(mb) || mb <= 0) { + return 0; + } + return Math.max(0, Math.round(mb * BYTES_PER_MB)); + } + + function bytesToMB(bytes: number) { + if (!bytes) { + return 0; + } + return bytes / BYTES_PER_MB; + } + + function hexToBytes(hex: string) { + if (!hex) { + return new Uint8Array(); + } + const normalized = hex.startsWith('0x') ? hex.slice(2) : hex; + const length = Math.floor(normalized.length / 2); + const result = new Uint8Array(length); + for (let i = 0; i < length; i += 1) { + const byte = normalized.slice(i * 2, i * 2 + 2); + result[i] = Number.parseInt(byte, 16); + } + return result; + } + + const clonePortMappings = (ports: VmmTypes.IPortMapping[] = []): PortFormEntry[] => + ports.map((port) => ({ + protocol: port.protocol || 'tcp', + host_address: port.host_address || '127.0.0.1', + host_port: typeof port.host_port === 'number' ? port.host_port : null, + vm_port: typeof port.vm_port === 'number' ? port.vm_port : null, + })); + + const normalizePorts = (ports: PortFormEntry[] = []): VmmTypes.IPortMapping[] => + ports + .map((port) => { + const protocol = (port.protocol || '').trim(); + const hostPort = + port.host_port === null || port.host_port === undefined ? Number.NaN : Number(port.host_port); + const vmPort = + port.vm_port === null || port.vm_port === undefined ? Number.NaN : Number(port.vm_port); + return { + protocol, + host_address: (port.host_address || '127.0.0.1').trim() || '127.0.0.1', + host_port: hostPort, + vm_port: vmPort, + }; + }) + .filter( + (port) => + port.protocol.length > 0 && + Number.isFinite(port.host_port) && + Number.isFinite(port.vm_port), + ) + .map((port) => ({ + protocol: port.protocol, + host_address: port.host_address, + host_port: port.host_port, + vm_port: port.vm_port, + })); + + function deriveGpuSelection(gpuConfig?: VmmTypes.IGpuConfig) { + if (!gpuConfig) { + return { attachAll: false, selected: [] as string[] }; + } + if (gpuConfig.attach_mode === 'all') { + return { attachAll: true, selected: [] as string[] }; + } + return { + attachAll: false, + selected: (gpuConfig.gpus || []).map((gpu) => gpu.slot).filter(Boolean) as string[], + }; + } + + function recordError(context: string, err: unknown) { + console.error(context, err); + if (err instanceof Error && err.message) { + errorMessage.value = err.message; + } else { + errorMessage.value = String(err); + } + } + + function configGpu(form: { attachAllGpus: boolean; selectedGpus: string[] }, isUpdate: boolean = false): VmmTypes.IGpuConfig | undefined { + if (form.attachAllGpus) { + return { attach_mode: 'all' }; + } + // For updates, always return a config when GPUs are being explicitly updated + // Empty array means no GPUs should be attached + if (isUpdate) { + return { + attach_mode: 'listed', + gpus: (form.selectedGpus || []).map((slot: string) => ({ slot })), + }; + } + // For creation, return undefined if no GPUs are selected + if (form.selectedGpus && form.selectedGpus.length > 0) { + return { + attach_mode: 'listed', + gpus: form.selectedGpus.map((slot: string) => ({ slot })), + }; + } + return undefined; + } + +type CreateVmPayloadSource = { 
+ name: string; + image: string; + compose_file: string; + vcpu: number; + memory: number; + disk_size: number; + ports: PortFormEntry[]; + encrypted_env?: Uint8Array; + app_id?: string | null; + user_config?: string; + hugepages?: boolean; + pin_numa?: boolean; + no_tee?: boolean; + gpus?: VmmTypes.IGpuConfig; + kms_urls?: string[]; + gateway_urls?: string[]; + stopped?: boolean; + }; + + function buildCreateVmPayload(source: CreateVmPayloadSource): VmmTypes.IVmConfiguration { + const normalizedPorts = normalizePorts(source.ports); + return { + name: source.name.trim(), + image: source.image.trim(), + compose_file: source.compose_file, + vcpu: Math.max(1, Number(source.vcpu) || 1), + memory: Math.max(0, Number(source.memory) || 0), + disk_size: Math.max(0, Number(source.disk_size) || 0), + ports: normalizedPorts, + encrypted_env: source.encrypted_env, + app_id: source.app_id || undefined, + user_config: source.user_config || '', + hugepages: !!source.hugepages, + pin_numa: !!source.pin_numa, + no_tee: source.no_tee ?? false, + gpus: source.gpus, + kms_urls: source.kms_urls?.filter((url) => url && url.trim().length) ?? [], + gateway_urls: source.gateway_urls?.filter((url) => url && url.trim().length) ?? [], + stopped: !!source.stopped, + }; + } + + const autoMemoryDisplay = (mb: number): { memoryValue: number; memoryUnit: MemoryUnit } => { + if (mb >= 1024) { + return { + memoryValue: Number((mb / 1024).toFixed(1)), + memoryUnit: 'GB', + }; + } + return { + memoryValue: mb, + memoryUnit: 'MB', + }; + }; + + watch([() => vmForm.value.memoryValue, () => vmForm.value.memoryUnit], () => { + vmForm.value.memory = convertMemoryToMB(vmForm.value.memoryValue, vmForm.value.memoryUnit); + }); + + watch([() => vmForm.value.swapValue, () => vmForm.value.swapUnit], () => { + vmForm.value.swap_size = convertSwapToBytes(vmForm.value.swapValue, vmForm.value.swapUnit); + }); + + watch([() => updateDialog.value.memoryValue, () => updateDialog.value.memoryUnit], () => { + updateDialog.value.memory = convertMemoryToMB(updateDialog.value.memoryValue, updateDialog.value.memoryUnit); + }); + + watch([() => updateDialog.value.swapValue, () => updateDialog.value.swapUnit], () => { + updateDialog.value.swap_size = convertSwapToBytes(updateDialog.value.swapValue, updateDialog.value.swapUnit); + }); + + function makeBaseUrl(pathname: string) { + return `${pathname}?json`; + } + + async function baseRpcCall(pathname: string, params: Record<string, unknown> = {}) { + const response = await fetch(makeBaseUrl(pathname), { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(params), + }); + if (!response.ok) { + const error = await response.text(); + errorMessage.value = error; + throw new Error(error); + } + return response; + } + + const guestRpcCall: JsonRpcCall = (method, params) => baseRpcCall(`/guest/${method}`, params); + + async function loadVMList() { + try { + const request: VmmTypes.IStatusRequest = { + brief: true, + keyword: searchQuery.value || undefined, + page: currentPage.value, + page_size: pageSize.value, + }; + const data = await vmmRpc.status(request); + totalVMs.value = data.total || data.vms.length; + hasMorePages.value = data.vms.length === pageSize.value && totalVMs.value > currentPage.value * pageSize.value; + + const previousVmMap = new Map(vms.value.map((vmItem: VmListItem) => [vmItem.id, vmItem])); + vms.value = (data.vms as VmListItem[]).map((vm) => { + const previousVm = previousVmMap.get(vm.id); + if (previousVm) { + return { + ...vm, + configuration:
previousVm.configuration, + appCompose: previousVm.appCompose, + }; + } + return vm; + }); + + config.value = { portMappingEnabled: data.port_mapping_enabled }; + + if (expandedVMs.value.size > 0) { + await refreshExpandedVMs(); + } + } catch (error) { + recordError('Error loading VM list', error); + } + } + + async function refreshExpandedVMs() { + try { + for (const vmId of Array.from(expandedVMs.value.values()) as string[]) { + await loadVMDetails(vmId); + } + } catch (error) { + recordError('Error refreshing expanded VMs', error); + } + } + + async function loadVMDetails(vmId: string) { + loadingVMDetails.value = true; + try { + const data = await vmmRpc.status({ + brief: false, + ids: [vmId], + }); + if (data.vms && data.vms.length > 0) { + const detailedVM: any = data.vms[0]; + const appCompose = (() => { + try { + return JSON.parse(detailedVM.configuration?.compose_file || '{}'); + } catch (err) { + console.error('Error parsing app config:', err); + return {}; + } + })(); + const index = vms.value.findIndex((vmItem) => vmItem.id === vmId); + if (index !== -1) { + vms.value[index] = { ...detailedVM, appCompose }; + } + } + } catch (error) { + recordError(`Error loading details for VM ${vmId}`, error); + } finally { + loadingVMDetails.value = false; + } + } + + async function ensureVmDetails(vm: VmListItem): Promise<VmListItem | null> { + if (vm.configuration?.compose_file && vm.appCompose) { + return vm; + } + await loadVMDetails(vm.id); + return vms.value.find((item) => item.id === vm.id) || null; + } + + async function loadImages() { + try { + const data = await vmmRpc.listImages({}); + availableImages.value = data.images || []; + } catch (error) { + recordError('Error loading images', error); + } + } + + async function loadGpus() { + try { + const data = await vmmRpc.listGpus({}); + const gpus = data.gpus || []; + availableGpus.value = gpus; + availableGpuProducts.value = []; + allowAttachAllGpus.value = data.allow_attach_all; + for (const gpu of gpus) { + if (!availableGpuProducts.value.find((product) => product.product_id === gpu.product_id)) { + availableGpuProducts.value.push(gpu); + } + } + } catch (error) { + recordError('Error loading GPUs', error); + } + } + + async function loadVersion() { + const data = await vmmRpc.version({}); + version.value = data; + } + + const imageVersion = (imageName: string) => { + const image = availableImages.value.find((img) => img.name === imageName); + return image?.version; + }; + + const verGE = (versionStr: string, otherVersionStr: string) => { + const versionParts = versionStr.split('.').map(Number); + const otherParts = otherVersionStr.split('.').map(Number); + return ( + versionParts[0] > otherParts[0] || + (versionParts[0] === otherParts[0] && versionParts[1] > otherParts[1]) || + (versionParts[0] === otherParts[0] && versionParts[1] === otherParts[1] && versionParts[2] >= otherParts[2]) + ); + }; + + const imageVersionFeatures = (versionStr: string | undefined) => { + const features = { + progress: false, + graceful_shutdown: false, + network_info: false, + compose_version: 1, + }; + if (!versionStr) { + return features; + } + if (verGE(versionStr, '0.3.3')) { + features.progress = true; + features.graceful_shutdown = true; + features.network_info = true; + features.compose_version = 2; + } + if (verGE(versionStr, '0.4.2')) { + features.compose_version = 3; + } + return features; + }; + + const imageFeatures = (vm: VmListItem) => imageVersionFeatures(vm.image_version); + + const vmStatus = (vm: VmListItem) => { + const features = imageFeatures(vm); +
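+ // Effective-status sketch for the checks below: images without progress
+ // reporting keep the raw status; otherwise shutdown_progress takes priority,
+ // then boot_progress distinguishes 'booting' from 'running'.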
if (!features.progress) { + return vm.status; + } + if (vm.status !== 'running') { + return vm.status; + } + if (vm.shutdown_progress) { + return 'shutting down'; + } + if (vm.boot_progress === 'running') { + return 'running'; + } + if (vm.boot_progress !== 'done') { + return 'booting'; + } + return 'running'; + }; + + const kmsEnabled = (vm: any) => vm.appCompose?.kms_enabled || vm.appCompose?.features?.includes('kms'); + + const gatewayEnabled = (vm: any) => + vm.appCompose?.gateway_enabled || vm.appCompose?.tproxy_enabled || vm.appCompose?.features?.includes('tproxy-net'); + + const defaultTrue = (v: boolean | undefined) => (v === undefined ? true : v); + + function formatMemory(memoryMB?: number) { + if (!memoryMB) { + return '0 MB'; + } + if (memoryMB >= 1024) { + const gbValue = (memoryMB / 1024).toFixed(1); + return `${parseFloat(gbValue)} GB`; + } + return `${memoryMB} MB`; + } + + async function calcComposeHash(appCompose: string) { + const buffer = new TextEncoder().encode(appCompose); + const hashBuffer = await crypto.subtle.digest('SHA-256', buffer); + return Array.from(new Uint8Array(hashBuffer)) + .map((b) => b.toString(16).padStart(2, '0')) + .join(''); + } + + async function makeAppComposeFile() { + const appCompose: Record<string, unknown> = { + manifest_version: 2, + name: vmForm.value.name, + runner: 'docker-compose', + docker_compose_file: vmForm.value.dockerComposeFile, + kms_enabled: vmForm.value.kms_enabled, + gateway_enabled: vmForm.value.gateway_enabled, + public_logs: vmForm.value.public_logs, + public_sysinfo: vmForm.value.public_sysinfo, + public_tcbinfo: vmForm.value.public_tcbinfo, + local_key_provider_enabled: vmForm.value.local_key_provider_enabled, + key_provider_id: vmForm.value.key_provider_id, + allowed_envs: vmForm.value.encryptedEnvs.map((env) => env.key), + no_instance_id: !vmForm.value.gateway_enabled, + secure_time: false, + }; + + if (vmForm.value.storage_fs) { + appCompose.storage_fs = vmForm.value.storage_fs; + } + + if (vmForm.value.preLaunchScript?.trim()) { + appCompose.pre_launch_script = vmForm.value.preLaunchScript; + } + + const swapBytes = Math.max(0, Math.round(vmForm.value.swap_size || 0)); + if (swapBytes > 0) { + appCompose.swap_size = swapBytes; + } + + const launchToken = vmForm.value.encryptedEnvs.find((env) => env.key === 'APP_LAUNCH_TOKEN'); + if (launchToken) { + appCompose.launch_token_hash = await calcComposeHash(launchToken.value); + } + + const imgFeatures = imageVersionFeatures(imageVersion(vmForm.value.image)); + if (imgFeatures.compose_version < 2) { + const features: string[] = []; + if (vmForm.value.kms_enabled) features.push('kms'); + if (vmForm.value.gateway_enabled) features.push('tproxy-net'); + appCompose.features = features; + appCompose.manifest_version = 1; + appCompose.version = '1.0.0'; + } + if (imgFeatures.compose_version < 3) { + appCompose.tproxy_enabled = appCompose.gateway_enabled; + delete appCompose.gateway_enabled; + } + return JSON.stringify(appCompose); + } + + async function makeUpdateComposeFile() { + const currentAppCompose = updateDialog.value.vm.appCompose; + const appCompose = { + ...currentAppCompose, + docker_compose_file: updateDialog.value.dockerComposeFile || currentAppCompose.docker_compose_file, + }; + if (updateDialog.value.resetSecrets) { + // Update allowed_envs with the new environment variable keys + appCompose.allowed_envs = updateDialog.value.encryptedEnvs.map((env) => env.key); + + const launchToken = updateDialog.value.encryptedEnvs.find((env) => env.key === 'APP_LAUNCH_TOKEN'); + if
(launchToken) { + appCompose.launch_token_hash = await calcComposeHash(launchToken.value); + } + } + appCompose.pre_launch_script = updateDialog.value.preLaunchScript?.trim(); + + const swapBytes = Math.max(0, Math.round(updateDialog.value.swap_size || 0)); + if (swapBytes > 0) { + appCompose.swap_size = swapBytes; + } else { + delete appCompose.swap_size; + } + return JSON.stringify(appCompose); + } + + watch( + [ + () => vmForm.value.name, + () => vmForm.value.dockerComposeFile, + () => vmForm.value.preLaunchScript, + () => vmForm.value.kms_enabled, + () => vmForm.value.gateway_enabled, + () => vmForm.value.public_logs, + () => vmForm.value.public_sysinfo, + () => vmForm.value.public_tcbinfo, + () => vmForm.value.local_key_provider_enabled, + () => vmForm.value.key_provider_id, + () => vmForm.value.encryptedEnvs, + () => vmForm.value.storage_fs, + ], + async () => { + try { + const appCompose = await makeAppComposeFile(); + composeHashPreview.value = await calcComposeHash(appCompose); + } catch (error) { + composeHashPreview.value = 'Error calculating hash'; + console.error('Failed to calculate compose hash', error); + } + }, + { deep: true }, + ); + + watch( + [ + () => updateDialog.value.dockerComposeFile, + () => updateDialog.value.preLaunchScript, + () => updateDialog.value.encryptedEnvs, + ], + async () => { + if (!updateDialog.value.updateCompose) { + updateComposeHashPreview.value = ''; + return; + } + try { + const upgradedCompose = await makeUpdateComposeFile(); + updateComposeHashPreview.value = await calcComposeHash(upgradedCompose); + } catch (error) { + updateComposeHashPreview.value = 'Error calculating hash'; + console.error('Failed to calculate compose hash', error); + } + }, + { deep: true }, + ); + + watch(pageSize, (newValue) => { + localStorage.setItem('pageSize', String(newValue)); + }); + + function showDeployDialog() { + showCreateDialog.value = true; + vmForm.value.encryptedEnvs = []; + vmForm.value.app_id = null; + vmForm.value.swapValue = 0; + vmForm.value.swapUnit = 'GB'; + vmForm.value.swap_size = 0; + loadGpus(); + } + + async function showUpdateDialog(vm: VmListItem) { + const detailedVm = await ensureVmDetails(vm); + if (!detailedVm?.configuration?.compose_file || !detailedVm.appCompose) { + alert('Compose file not available for this VM. 
Please expand its details first.'); + return; + } + const config = detailedVm.configuration; + const memoryDisplay = autoMemoryDisplay(config.memory || 0); + const swapDisplay = autoMemoryDisplay(bytesToMB(detailedVm.appCompose?.swap_size || 0)); + const gpuSelection = deriveGpuSelection(config.gpus); + updateDialog.value = { + show: true, + vm: detailedVm, + updateCompose: false, + dockerComposeFile: detailedVm.appCompose.docker_compose_file || '', + preLaunchScript: detailedVm.appCompose.pre_launch_script || '', + encryptedEnvs: [], + resetSecrets: false, + vcpu: config.vcpu || 0, + memory: config.memory || 0, + memoryValue: memoryDisplay.memoryValue, + memoryUnit: memoryDisplay.memoryUnit, + swap_size: detailedVm.appCompose?.swap_size || 0, + swapValue: swapDisplay.memoryValue, + swapUnit: swapDisplay.memoryUnit, + disk_size: config.disk_size || 0, + image: config.image || '', + ports: clonePortMappings(config.ports || []), + attachAllGpus: gpuSelection.attachAll, + selectedGpus: gpuSelection.selected, + updateGpuConfig: false, + user_config: config.user_config || '', + }; + } + + function parseEnvFile(content: string) { + const lines = content + .split('\n') + .map((line) => line.trim()) + .filter((line) => line && !line.startsWith('#')); + const envs: Record = {}; + for (const line of lines) { + const [key, ...parts] = line.split('='); + if (!key || parts.length === 0) { + continue; + } + envs[key.trim()] = parts.join('=').trim(); + } + return envs; + } + + async function calcAppId(compose: string) { + const composeHash = await calcComposeHash(compose); + return composeHash.slice(0, 40); + } + + async function encryptEnv(envs: EncryptedEnvEntry[], kmsEnabled: boolean, appId: string | null) { + if (!kmsEnabled || envs.length === 0) { + return undefined; + } + let appIdToUse = appId; + if (!appIdToUse) { + const appCompose = await makeAppComposeFile(); + appIdToUse = await calcAppId(appCompose); + } + const keyBytes = hexToBytes(appIdToUse); + const response = await vmmRpc.getAppEnvEncryptPubKey({ app_id: keyBytes }); + return encryptEnvWithKey(envs, response.public_key); + } + + async function encryptEnvWithKey(envs: EncryptedEnvEntry[], publicKeyBytes: Uint8Array) { + const envsJson = JSON.stringify({ env: envs }); + const remotePubkey = publicKeyBytes && publicKeyBytes.length ? 
publicKeyBytes : new Uint8Array(); + + const seed = crypto.getRandomValues(new Uint8Array(32)); + const keyPair = x25519.generateKeyPair(seed); + const shared = x25519.sharedKey(keyPair.private, remotePubkey); + + const importedShared = await crypto.subtle.importKey( + 'raw', + shared, + { name: 'AES-GCM', length: 256 }, + true, + ['encrypt'], + ); + const iv = crypto.getRandomValues(new Uint8Array(12)); + const encrypted = await crypto.subtle.encrypt( + { name: 'AES-GCM', iv }, + importedShared, + new TextEncoder().encode(envsJson), + ); + + const result = new Uint8Array(iv.length + keyPair.public.byteLength + encrypted.byteLength); + result.set(keyPair.public, 0); + result.set(iv, keyPair.public.byteLength); + result.set(new Uint8Array(encrypted), keyPair.public.byteLength + iv.length); + + return result; + } + + async function createVm() { + try { + vmForm.value.memory = convertMemoryToMB(vmForm.value.memoryValue, vmForm.value.memoryUnit); + const composeFile = await makeAppComposeFile(); + const encryptedEnv = await encryptEnv( + vmForm.value.encryptedEnvs, + vmForm.value.kms_enabled, + vmForm.value.app_id, + ); + const payload = buildCreateVmPayload({ + name: vmForm.value.name, + image: vmForm.value.image, + compose_file: composeFile, + vcpu: vmForm.value.vcpu, + memory: vmForm.value.memory, + disk_size: vmForm.value.disk_size, + ports: vmForm.value.ports, + encrypted_env: encryptedEnv || undefined, + app_id: vmForm.value.app_id || undefined, + user_config: vmForm.value.user_config, + hugepages: vmForm.value.hugepages, + pin_numa: vmForm.value.pin_numa, + no_tee: vmForm.value.no_tee, + gpus: configGpu(vmForm.value) || undefined, + kms_urls: vmForm.value.kms_urls, + gateway_urls: vmForm.value.gateway_urls, + stopped: vmForm.value.stopped, + }); + + await vmmRpc.createVm(payload); + leaveCreateDialog(); + loadVMList(); + } catch (error) { + recordError('Error creating VM', error); + alert('Failed to create VM'); + } + } + + function leaveCreateDialog() { + showCreateDialog.value = false; + } + + function loadComposeFile(event: Event) { + const input = event.target as HTMLInputElement | null; + const file = input?.files?.[0]; + if (!file) { + return; + } + const reader = new FileReader(); + reader.onload = (e: any) => { + vmForm.value.dockerComposeFile = e.target.result; + }; + reader.readAsText(file); + if (input) { + input.value = ''; + } + } + + function loadUpdateFile(event: Event) { + const input = event.target as HTMLInputElement | null; + const file = input?.files?.[0]; + if (!file) { + return; + } + const reader = new FileReader(); + reader.onload = (e: any) => { + updateDialog.value.dockerComposeFile = e.target.result; + }; + reader.readAsText(file); + if (input) { + input.value = ''; + } + } + + async function updateVM() { + try { + const vm = updateDialog.value.vm; + const original = vm.configuration; + const updated = updateDialog.value; + + const body: VmmTypes.IUpdateVmRequest = { + id: vm.id, + }; + + const fieldsToCompare = ['vcpu', 'memory', 'disk_size', 'image']; + if (fieldsToCompare.some((field) => updated[field] !== original[field])) { + body.vcpu = updated.vcpu; + body.memory = updated.memory; + body.disk_size = updated.disk_size; + body.image = updated.image; + } + + const composeWasExplicitlyUpdated = updateDialog.value.updateCompose; + let composeNeedsUpdate = composeWasExplicitlyUpdated; + let encryptedEnvPayload; + if (updateDialog.value.resetSecrets) { + const keyResponse = await vmmRpc.getAppEnvEncryptPubKey({ app_id: hexToBytes(vm.app_id || '') }); + 
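+ // Layout produced by encryptEnvWithKey above: ephemeral X25519 public key
+ // (32 bytes) || AES-GCM IV (12 bytes) || ciphertext of {"env":[...]}; the
+ // guest side is assumed to derive the same AES key via ECDH with that pubkey.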
encryptedEnvPayload = await encryptEnvWithKey(updateDialog.value.encryptedEnvs, keyResponse.public_key); + composeNeedsUpdate = true; + } + body.compose_file = composeNeedsUpdate ? await makeUpdateComposeFile() : undefined; + body.encrypted_env = encryptedEnvPayload; + body.user_config = updated.user_config; + body.update_ports = true; + body.ports = normalizePorts(updated.ports); + body.gpus = updateDialog.value.updateGpuConfig ? configGpu(updated, true) : undefined; + + await vmmRpc.updateVm(body); + updateDialog.value.encryptedEnvs = []; + updateDialog.value.show = false; + if (composeWasExplicitlyUpdated) { + updateMessage.value = '✅ Compose file updated!'; + } + loadVMList(); + } catch (error) { + recordError('error upgrading VM', error); + alert('failed to upgrade VM'); + } + } + + async function showCloneConfig(vm: VmListItem) { + const theVm = await ensureVmDetails(vm); + if (!theVm?.configuration?.compose_file) { + alert('Compose file not available for this VM. Please open its details first.'); + return; + } + const config = theVm.configuration; + + // Populate vmForm with current VM data, but clear envs and ports + vmForm.value = { + name: `${config.name || vm.name}-cloned`, + image: config.image || '', + dockerComposeFile: theVm.appCompose?.docker_compose_file || '', + preLaunchScript: theVm.appCompose?.pre_launch_script || '', + vcpu: config.vcpu || 1, + memory: config.memory || 0, + memoryValue: autoMemoryDisplay(config.memory || 0).memoryValue, + memoryUnit: autoMemoryDisplay(config.memory || 0).memoryUnit, + swap_size: theVm.appCompose?.swap_size || 0, + swapValue: autoMemoryDisplay(bytesToMB(theVm.appCompose?.swap_size || 0)).memoryValue, + swapUnit: autoMemoryDisplay(bytesToMB(theVm.appCompose?.swap_size || 0)).memoryUnit, + disk_size: config.disk_size || 0, + selectedGpus: [], + attachAllGpus: false, + encryptedEnvs: [], // Clear environment variables + ports: [], // Clear port mappings + storage_fs: theVm.appCompose?.storage_fs || 'ext4', + app_id: config.app_id || '', + kms_enabled: !!theVm.appCompose?.kms_enabled, + kms_urls: config.kms_urls || [], + local_key_provider_enabled: !!theVm.appCompose?.local_key_provider_enabled, + key_provider_id: theVm.appCompose?.key_provider_id || '', + gateway_enabled: !!theVm.appCompose?.gateway_enabled, + gateway_urls: config.gateway_urls || [], + public_logs: !!theVm.appCompose?.public_logs, + public_sysinfo: !!theVm.appCompose?.public_sysinfo, + public_tcbinfo: !!theVm.appCompose?.public_tcbinfo, + pin_numa: !!config.pin_numa, + hugepages: !!config.hugepages, + no_tee: !!config.no_tee, + user_config: config.user_config || '', + stopped: !!config.stopped, + }; + + // Show Create VM dialog instead of Clone Config dialog + showCreateDialog.value = true; + } + + async function cloneConfig() { + try { + const source = cloneConfigDialog.value; + if (!source.compose_file) { + alert('Compose file not available for this VM. 
Please open its details first.'); + return; + } + const payload = buildCreateVmPayload({ + name: source.name, + image: source.image, + compose_file: source.compose_file, + vcpu: source.vcpu, + memory: source.memory, + disk_size: source.disk_size, + ports: source.ports, + encrypted_env: source.encrypted_env, + app_id: source.app_id, + user_config: source.user_config, + hugepages: source.hugepages, + pin_numa: source.pin_numa, + no_tee: source.no_tee, + gpus: source.gpus, + kms_urls: source.kms_urls, + gateway_urls: source.gateway_urls, + stopped: source.stopped, + }); + await vmmRpc.createVm(payload); + cloneConfigDialog.value.show = false; + loadVMList(); + } catch (error) { + recordError('Error creating VM', error); + alert('Failed to create VM'); + } + } + + function toggleDetails(vm: VmListItem) { + if (expandedVMs.value.has(vm.id)) { + expandedVMs.value.delete(vm.id); + } else { + // Close all other expanded VMs + expandedVMs.value.clear(); + expandedVMs.value.add(vm.id); + loadVMDetails(vm.id); + refreshNetworkInfo(vm); + } + } + + async function refreshNetworkInfo(vm: VmListItem) { + if (vm.status !== 'running' || !imageFeatures(vm).network_info) { + return; + } + const response = await guestRpcCall('NetworkInfo', { id: vm.id }); + const data = await response.json(); + networkInfo.value[vm.id] = data; + } + + function nextPage() { + if (hasMorePages.value) { + currentPage.value += 1; + pageInput.value = currentPage.value; + loadVMList(); + } + } + + function prevPage() { + if (currentPage.value > 1) { + currentPage.value -= 1; + pageInput.value = currentPage.value; + loadVMList(); + } + } + + function goToPage() { + let page = Number.parseInt(String(pageInput.value), 10); + if (Number.isNaN(page) || page < 1) { + page = 1; + } else if (page > maxPage.value) { + page = maxPage.value; + } + pageInput.value = page; + currentPage.value = page; + loadVMList(); + } + + function closeAllDropdowns() { + document.querySelectorAll('.dropdown-content').forEach((dropdown) => dropdown.classList.remove('show')); + systemMenu.value.show = false; + document.removeEventListener('click', closeAllDropdowns); + } + + function toggleSystemMenu(event: Event) { + event.stopPropagation(); + systemMenu.value.show = !systemMenu.value.show; + + // Close all other dropdowns + document.querySelectorAll('.dropdown-content').forEach((dropdown) => { + dropdown.classList.remove('show'); + }); + + if (systemMenu.value.show) { + document.addEventListener('click', closeAllDropdowns); + } else { + document.removeEventListener('click', closeAllDropdowns); + } + } + + function closeSystemMenu() { + systemMenu.value.show = false; + } + + function openApiDocs() { + closeSystemMenu(); + window.open('/api-docs/docs', '_blank', 'noopener'); + } + + function openLegacyUi() { + closeSystemMenu(); + window.open('/v0', '_blank', 'noopener'); + } + + function shortUptime(uptime?: string | null) { + if (!uptime) { + return '-'; + } + const parts = uptime.split(/\s+/).filter(Boolean); + if (parts.length === 0) { + return uptime; + } + return parts.slice(0, Math.min(2, parts.length)).join(' '); + } + function toggleDevMode() { + devMode.value = !devMode.value; + localStorage.setItem('devMode', devMode.value ? 'true' : 'false'); + closeSystemMenu(); + successMessage.value = devMode.value ? 
'✅ Dev mode enabled' : 'Dev mode disabled'; + setTimeout(() => { + successMessage.value = ''; + }, 2000); + } + + async function reloadVMs() { + try { + errorMessage.value = ''; + successMessage.value = ''; + + const response = await vmmRpc.reloadVms({}); + + // Show success message with statistics + if (response.loaded > 0 || response.updated > 0 || response.removed > 0) { + let message = 'VM reload completed: '; + const parts = []; + if (response.loaded > 0) parts.push(`${response.loaded} loaded`); + if (response.updated > 0) parts.push(`${response.updated} updated`); + if (response.removed > 0) parts.push(`${response.removed} removed`); + + successMessage.value = message + parts.join(', '); + } else { + successMessage.value = 'VM reload completed: no changes detected'; + } + + // Reload the VM list to show updated data + await loadVMList(); + + // Hide message after 5 seconds + setTimeout(() => { + successMessage.value = ''; + }, 5000); + + } catch (error: any) { + console.error('Failed to reload VMs:', error); + errorMessage.value = `Failed to reload VMs: ${error.message || error.toString()}`; + + // Hide error message after 10 seconds + setTimeout(() => { + errorMessage.value = ''; + }, 10000); + } + } + + function toggleDropdown(event: Event, vm: VmListItem) { + document.querySelectorAll('.dropdown-content').forEach((dropdown) => { + if (dropdown.id !== `dropdown-${vm.id}`) { + dropdown.classList.remove('show'); + } + }); + const dropdownContent = document.getElementById(`dropdown-${vm.id}`); + dropdownContent?.classList.toggle('show'); + + event.stopPropagation(); + + document.addEventListener('click', closeAllDropdowns); + } + + function onPageSizeChange() { + currentPage.value = 1; + pageInput.value = 1; + loadVMList(); + } + + async function startVm(id: string) { + try { + await vmmRpc.startVm({ id }); + loadVMList(); + } catch (error) { + recordError('Failed to start VM', error); + } + } + + async function shutdownVm(id: string) { + try { + await vmmRpc.shutdownVm({ id }); + loadVMList(); + } catch (error) { + recordError('Failed to shutdown VM', error); + } + } + + const dangerConfirmEnabled = () => !devMode.value; + + async function stopVm(vm: VmListItem) { + if (dangerConfirmEnabled() && + !confirm(`You are killing "${vm.name}". This might cause data corruption.`)) { + return; + } + try { + await vmmRpc.stopVm({ id: vm.id }); + loadVMList(); + } catch (error) { + recordError(`Failed to stop ${vm.name}`, error); + } + } + + async function removeVm(vm: VmListItem) { + if (dangerConfirmEnabled() && + !confirm('Remove VM? 
This action cannot be undone.')) { + return; + } + + try { + if (devMode.value && vm.status === 'running') { + try { + await vmmRpc.stopVm({ id: vm.id }); + } catch (error) { + recordError(`Failed to stop ${vm.name} before removal`, error); + return; + } + } + + await vmmRpc.removeVm({ id: vm.id }); + loadVMList(); + } catch (error) { + recordError(`Failed to remove ${vm.name}`, error); + } + } + + function showLogs(id: string, channel: string) { + window.open(`/logs?id=${encodeURIComponent(id)}&follow=true&ansi=false&lines=200&ch=${channel}`, '_blank'); + } + + function showDashboard(vm: VmListItem) { + if (vm.app_url) { + window.open(vm.app_url, '_blank'); + } else { + alert('No guest agent dashboard URL'); + } + } + + async function watchVmList() { + while (true) { + try { + await loadVMList(); + } catch (error) { + recordError('Error loading VM list', error); + } + await new Promise((resolve) => setTimeout(resolve, 3000)); + } + } + + async function copyToClipboard(text: string) { + try { + await navigator.clipboard.writeText(text); + successMessage.value = '✅ Copied to clipboard!'; + setTimeout(() => { + successMessage.value = ''; + }, 2000); + } catch (error) { + console.error('Failed to copy to clipboard', error); + errorMessage.value = 'Failed to copy to clipboard'; + setTimeout(() => { + errorMessage.value = ''; + }, 3000); + } + } + + function downloadFile(filename: string, content: string) { + const blob = new Blob([content], { type: 'text/plain' }); + const url = URL.createObjectURL(blob); + const link = document.createElement('a'); + link.href = url; + link.download = filename; + document.body.appendChild(link); + link.click(); + document.body.removeChild(link); + URL.revokeObjectURL(url); + } + + function downloadAppCompose(vm: VmListItem) { + if (vm.configuration?.compose_file) { + downloadFile(`${vm.name}-app-compose.json`, vm.configuration.compose_file); + } + } + + function downloadUserConfig(vm: VmListItem) { + if (vm.configuration?.user_config) { + downloadFile(`${vm.name}-user-config.txt`, vm.configuration.user_config); + } + } + + function getVmFeatures(vm: VmListItem) { + const features: string[] = []; + + // Check KMS + const kmsEnabled = vm.appCompose?.kms_enabled || vm.appCompose?.features?.includes('kms') || + (vm.configuration?.kms_urls?.length ?? 0) > 0; + if (kmsEnabled) features.push('kms'); + + // Check Gateway/TProxy + const gatewayEnabled = vm.appCompose?.gateway_enabled || vm.appCompose?.tproxy_enabled || + vm.appCompose?.features?.includes('tproxy-net') || (vm.configuration?.gateway_urls?.length ?? 0) > 0; + if (gatewayEnabled) features.push('gateway'); + + // Check other features from appCompose + if (vm.appCompose?.public_logs) features.push('logs'); + if (vm.appCompose?.public_sysinfo) features.push('sysinfo'); + if (vm.appCompose?.public_tcbinfo) features.push('tcbinfo'); + + return features.length > 0 ?
features.join(', ') : 'None'; + } + + onMounted(() => { + watchVmList(); + loadImages(); + loadGpus(); + loadVersion(); + }); + + return { + version, + vms, + expandedVMs, + networkInfo, + searchQuery, + currentPage, + pageInput, + pageSize, + totalVMs, + hasMorePages, + loadingVMDetails, + maxPage, + vmForm, + availableImages, + availableGpus, + availableGpuProducts, + allowAttachAllGpus, + updateDialog, + updateMessage, + successMessage, + errorMessage, + cloneConfigDialog, + showCreateDialog, + config, + composeHashPreview, + updateComposeHashPreview, + showDeployDialog, + leaveCreateDialog, + loadComposeFile, + loadUpdateFile, + createVm, + updateVM, + cloneConfig, + loadVMList, + toggleDetails, + toggleDropdown, + closeAllDropdowns, + showLogs, + showDashboard, + stopVm, + shutdownVm, + startVm, + removeVm, + showUpdateDialog, + showCloneConfig, + formatMemory, + bytesToMB, + vmStatus, + kmsEnabled, + gatewayEnabled, + goToPage, + nextPage, + prevPage, + onPageSizeChange, + copyToClipboard, + downloadAppCompose, + downloadUserConfig, + getVmFeatures, + systemMenu, + toggleSystemMenu, + closeSystemMenu, + openApiDocs, + openLegacyUi, + reloadVMs, + devMode, + toggleDevMode, + shortUptime, + }; +} + +export { useVmManager }; diff --git a/vmm/ui/src/index.html b/vmm/ui/src/index.html new file mode 100644 index 00000000..2093efb3 --- /dev/null +++ b/vmm/ui/src/index.html @@ -0,0 +1,21 @@ + + + + + + + + {{TITLE}} + + + + +
+ + + + + diff --git a/vmm/ui/src/lib/vmmRpcClient.ts b/vmm/ui/src/lib/vmmRpcClient.ts new file mode 100644 index 00000000..683f499f --- /dev/null +++ b/vmm/ui/src/lib/vmmRpcClient.ts @@ -0,0 +1,91 @@ +// SPDX-FileCopyrightText: © 2025 Phala Network +// SPDX-License-Identifier: Apache-2.0 + +const { vmm } = require('../proto/vmm_rpc.js'); +const { prpc } = require('../proto/prpc.js'); + +const textDecoder = new TextDecoder(); +const EMPTY_BODY = new Uint8Array(); +let cachedClient: any; + +function decodePrpcError(buffer: Uint8Array) { + try { + if (buffer && buffer.length > 0) { + const err = prpc.PrpcError.decode(buffer); + if (err?.message) { + return err.message; + } + } + } catch { + // Ignore decode failures; fall through to text decoding. + } + try { + const text = buffer && buffer.length > 0 ? textDecoder.decode(buffer) : ''; + return text || 'Unknown RPC error'; + } catch { + return 'Unknown RPC error'; + } +} + +function normalizeRequestData(data?: Uint8Array | ArrayBuffer | null) { + if (!data) { + return EMPTY_BODY; + } + if (data instanceof Uint8Array) { + return data; + } + return new Uint8Array(data); +} + +function resolveMethodName(method: any) { + if (!method) { + return ''; + } + const type = typeof method; + if (type === 'string') { + return method; + } + if (type === 'function' || type === 'object') { + if (method.name) { + return method.name.charAt(0).toUpperCase() + method.name.slice(1); + } + if (method.fullName) { + const parts = String(method.fullName).split('.'); + return parts[parts.length - 1]; + } + } + return String(method); +} + +export function getVmmRpcClient(basePath = '/prpc') { + if (cachedClient) { + return cachedClient; + } + + const rpcImpl = (method: any, requestData: Uint8Array, callback: (err?: Error | null, data?: Uint8Array) => void) => { + const methodName = resolveMethodName(method); + const payload = normalizeRequestData(requestData); + fetch(`${basePath}/${methodName}`, { + method: 'POST', + headers: { + 'Content-Type': 'application/octet-stream', + }, + body: payload as unknown as BodyInit, + credentials: 'same-origin', + }) + .then(async (response) => { + const buffer = new Uint8Array(await response.arrayBuffer()); + if (!response.ok) { + callback(new Error(decodePrpcError(buffer))); + return; + } + callback(null, buffer); + }) + .catch((error) => { + callback(error); + }); + }; + + cachedClient = vmm.Vmm.create(rpcImpl, false, false); + return cachedClient; +} diff --git a/vmm/ui/src/lib/x25519.js b/vmm/ui/src/lib/x25519.js new file mode 100644 index 00000000..76311675 --- /dev/null +++ b/vmm/ui/src/lib/x25519.js @@ -0,0 +1,1672 @@ +// SPDX-FileCopyrightText: © 2016 Dmitry Chestnykh, © 2019 Harvey Connor +// SPDX-License-Identifier: MIT + + let _0 = new Uint8Array(16); + let _9 = new Uint8Array(32); + _9[0] = 9; + function gf(init) { + var i, r = new Float64Array(16); + if (init) + for (i = 0; i < init.length; i++) + r[i] = init[i]; + return r; + } + ; + const gf0 = gf(), gf1 = gf([1]), _121665 = gf([0xdb41, 1]), D = gf([ + 0x78a3, + 0x1359, + 0x4dca, + 0x75eb, + 0xd8ab, + 0x4141, + 0x0a4d, + 0x0070, + 0xe898, + 0x7779, + 0x4079, + 0x8cc7, + 0xfe73, + 0x2b6f, + 0x6cee, + 0x5203, + ]), D2 = gf([ + 0xf159, + 0x26b2, + 0x9b94, + 0xebd6, + 0xb156, + 0x8283, + 0x149a, + 0x00e0, + 0xd130, + 0xeef3, + 0x80f2, + 0x198e, + 0xfce7, + 0x56df, + 0xd9dc, + 0x2406, + ]), X = gf([ + 0xd51a, + 0x8f25, + 0x2d60, + 0xc956, + 0xa7b2, + 0x9525, + 0xc760, + 0x692c, + 0xdc5c, + 0xfdd6, + 0xe231, + 0xc0a4, + 0x53fe, + 0xcd6e, + 0x36d3, + 0x2169, + ]), Y = gf([ 
+ 0x6658, + 0x6666, + 0x6666, + 0x6666, + 0x6666, + 0x6666, + 0x6666, + 0x6666, + 0x6666, + 0x6666, + 0x6666, + 0x6666, + 0x6666, + 0x6666, + 0x6666, + 0x6666, + ]), I = gf([ + 0xa0b0, + 0x4a0e, + 0x1b27, + 0xc4ee, + 0xe478, + 0xad2f, + 0x1806, + 0x2f43, + 0xd7a7, + 0x3dfb, + 0x0099, + 0x2b4d, + 0xdf0b, + 0x4fc1, + 0x2480, + 0x2b83, + ]); + function ts64(x, i, h, l) { + x[i] = (h >> 24) & 0xff; + x[i + 1] = (h >> 16) & 0xff; + x[i + 2] = (h >> 8) & 0xff; + x[i + 3] = h & 0xff; + x[i + 4] = (l >> 24) & 0xff; + x[i + 5] = (l >> 16) & 0xff; + x[i + 6] = (l >> 8) & 0xff; + x[i + 7] = l & 0xff; + } + function vn(x, xi, y, yi, n) { + var i, d = 0; + for (i = 0; i < n; i++) + d |= x[xi + i] ^ y[yi + i]; + return (1 & ((d - 1) >>> 8)) - 1; + } + function crypto_verify_32(x, xi, y, yi) { + return vn(x, xi, y, yi, 32); + } + function set25519(r, a) { + var i; + for (i = 0; i < 16; i++) + r[i] = a[i] | 0; + } + function car25519(o) { + var i, v, c = 1; + for (i = 0; i < 16; i++) { + v = o[i] + c + 65535; + c = Math.floor(v / 65536); + o[i] = v - c * 65536; + } + o[0] += c - 1 + 37 * (c - 1); + } + function sel25519(p, q, b) { + var t, c = ~(b - 1); + for (var i = 0; i < 16; i++) { + t = c & (p[i] ^ q[i]); + p[i] ^= t; + q[i] ^= t; + } + } + function pack25519(o, n) { + var i, j, b; + var m = gf(), t = gf(); + for (i = 0; i < 16; i++) + t[i] = n[i]; + car25519(t); + car25519(t); + car25519(t); + for (j = 0; j < 2; j++) { + m[0] = t[0] - 0xffed; + for (i = 1; i < 15; i++) { + m[i] = t[i] - 0xffff - ((m[i - 1] >> 16) & 1); + m[i - 1] &= 0xffff; + } + m[15] = t[15] - 0x7fff - ((m[14] >> 16) & 1); + b = (m[15] >> 16) & 1; + m[14] &= 0xffff; + sel25519(t, m, 1 - b); + } + for (i = 0; i < 16; i++) { + o[2 * i] = t[i] & 0xff; + o[2 * i + 1] = t[i] >> 8; + } + } + function neq25519(a, b) { + var c = new Uint8Array(32), d = new Uint8Array(32); + pack25519(c, a); + pack25519(d, b); + return crypto_verify_32(c, 0, d, 0); + } + function par25519(a) { + var d = new Uint8Array(32); + pack25519(d, a); + return d[0] & 1; + } + function unpack25519(o, n) { + var i; + for (i = 0; i < 16; i++) + o[i] = n[2 * i] + (n[2 * i + 1] << 8); + o[15] &= 0x7fff; + } + function A(o, a, b) { + for (var i = 0; i < 16; i++) + o[i] = a[i] + b[i]; + } + function Z(o, a, b) { + for (var i = 0; i < 16; i++) + o[i] = a[i] - b[i]; + } + function M(o, a, b) { + var v, c, t0 = 0, t1 = 0, t2 = 0, t3 = 0, t4 = 0, t5 = 0, t6 = 0, t7 = 0, t8 = 0, t9 = 0, t10 = 0, t11 = 0, t12 = 0, t13 = 0, t14 = 0, t15 = 0, t16 = 0, t17 = 0, t18 = 0, t19 = 0, t20 = 0, t21 = 0, t22 = 0, t23 = 0, t24 = 0, t25 = 0, t26 = 0, t27 = 0, t28 = 0, t29 = 0, t30 = 0, b0 = b[0], b1 = b[1], b2 = b[2], b3 = b[3], b4 = b[4], b5 = b[5], b6 = b[6], b7 = b[7], b8 = b[8], b9 = b[9], b10 = b[10], b11 = b[11], b12 = b[12], b13 = b[13], b14 = b[14], b15 = b[15]; + v = a[0]; + t0 += v * b0; + t1 += v * b1; + t2 += v * b2; + t3 += v * b3; + t4 += v * b4; + t5 += v * b5; + t6 += v * b6; + t7 += v * b7; + t8 += v * b8; + t9 += v * b9; + t10 += v * b10; + t11 += v * b11; + t12 += v * b12; + t13 += v * b13; + t14 += v * b14; + t15 += v * b15; + v = a[1]; + t1 += v * b0; + t2 += v * b1; + t3 += v * b2; + t4 += v * b3; + t5 += v * b4; + t6 += v * b5; + t7 += v * b6; + t8 += v * b7; + t9 += v * b8; + t10 += v * b9; + t11 += v * b10; + t12 += v * b11; + t13 += v * b12; + t14 += v * b13; + t15 += v * b14; + t16 += v * b15; + v = a[2]; + t2 += v * b0; + t3 += v * b1; + t4 += v * b2; + t5 += v * b3; + t6 += v * b4; + t7 += v * b5; + t8 += v * b6; + t9 += v * b7; + t10 += v * b8; + t11 += v * b9; 
+ t12 += v * b10; + t13 += v * b11; + t14 += v * b12; + t15 += v * b13; + t16 += v * b14; + t17 += v * b15; + v = a[3]; + t3 += v * b0; + t4 += v * b1; + t5 += v * b2; + t6 += v * b3; + t7 += v * b4; + t8 += v * b5; + t9 += v * b6; + t10 += v * b7; + t11 += v * b8; + t12 += v * b9; + t13 += v * b10; + t14 += v * b11; + t15 += v * b12; + t16 += v * b13; + t17 += v * b14; + t18 += v * b15; + v = a[4]; + t4 += v * b0; + t5 += v * b1; + t6 += v * b2; + t7 += v * b3; + t8 += v * b4; + t9 += v * b5; + t10 += v * b6; + t11 += v * b7; + t12 += v * b8; + t13 += v * b9; + t14 += v * b10; + t15 += v * b11; + t16 += v * b12; + t17 += v * b13; + t18 += v * b14; + t19 += v * b15; + v = a[5]; + t5 += v * b0; + t6 += v * b1; + t7 += v * b2; + t8 += v * b3; + t9 += v * b4; + t10 += v * b5; + t11 += v * b6; + t12 += v * b7; + t13 += v * b8; + t14 += v * b9; + t15 += v * b10; + t16 += v * b11; + t17 += v * b12; + t18 += v * b13; + t19 += v * b14; + t20 += v * b15; + v = a[6]; + t6 += v * b0; + t7 += v * b1; + t8 += v * b2; + t9 += v * b3; + t10 += v * b4; + t11 += v * b5; + t12 += v * b6; + t13 += v * b7; + t14 += v * b8; + t15 += v * b9; + t16 += v * b10; + t17 += v * b11; + t18 += v * b12; + t19 += v * b13; + t20 += v * b14; + t21 += v * b15; + v = a[7]; + t7 += v * b0; + t8 += v * b1; + t9 += v * b2; + t10 += v * b3; + t11 += v * b4; + t12 += v * b5; + t13 += v * b6; + t14 += v * b7; + t15 += v * b8; + t16 += v * b9; + t17 += v * b10; + t18 += v * b11; + t19 += v * b12; + t20 += v * b13; + t21 += v * b14; + t22 += v * b15; + v = a[8]; + t8 += v * b0; + t9 += v * b1; + t10 += v * b2; + t11 += v * b3; + t12 += v * b4; + t13 += v * b5; + t14 += v * b6; + t15 += v * b7; + t16 += v * b8; + t17 += v * b9; + t18 += v * b10; + t19 += v * b11; + t20 += v * b12; + t21 += v * b13; + t22 += v * b14; + t23 += v * b15; + v = a[9]; + t9 += v * b0; + t10 += v * b1; + t11 += v * b2; + t12 += v * b3; + t13 += v * b4; + t14 += v * b5; + t15 += v * b6; + t16 += v * b7; + t17 += v * b8; + t18 += v * b9; + t19 += v * b10; + t20 += v * b11; + t21 += v * b12; + t22 += v * b13; + t23 += v * b14; + t24 += v * b15; + v = a[10]; + t10 += v * b0; + t11 += v * b1; + t12 += v * b2; + t13 += v * b3; + t14 += v * b4; + t15 += v * b5; + t16 += v * b6; + t17 += v * b7; + t18 += v * b8; + t19 += v * b9; + t20 += v * b10; + t21 += v * b11; + t22 += v * b12; + t23 += v * b13; + t24 += v * b14; + t25 += v * b15; + v = a[11]; + t11 += v * b0; + t12 += v * b1; + t13 += v * b2; + t14 += v * b3; + t15 += v * b4; + t16 += v * b5; + t17 += v * b6; + t18 += v * b7; + t19 += v * b8; + t20 += v * b9; + t21 += v * b10; + t22 += v * b11; + t23 += v * b12; + t24 += v * b13; + t25 += v * b14; + t26 += v * b15; + v = a[12]; + t12 += v * b0; + t13 += v * b1; + t14 += v * b2; + t15 += v * b3; + t16 += v * b4; + t17 += v * b5; + t18 += v * b6; + t19 += v * b7; + t20 += v * b8; + t21 += v * b9; + t22 += v * b10; + t23 += v * b11; + t24 += v * b12; + t25 += v * b13; + t26 += v * b14; + t27 += v * b15; + v = a[13]; + t13 += v * b0; + t14 += v * b1; + t15 += v * b2; + t16 += v * b3; + t17 += v * b4; + t18 += v * b5; + t19 += v * b6; + t20 += v * b7; + t21 += v * b8; + t22 += v * b9; + t23 += v * b10; + t24 += v * b11; + t25 += v * b12; + t26 += v * b13; + t27 += v * b14; + t28 += v * b15; + v = a[14]; + t14 += v * b0; + t15 += v * b1; + t16 += v * b2; + t17 += v * b3; + t18 += v * b4; + t19 += v * b5; + t20 += v * b6; + t21 += v * b7; + t22 += v * b8; + t23 += v * b9; + t24 += v * b10; + t25 += v * b11; + t26 += v * b12; + t27 += v * b13; + t28 += v * b14; + t29 
+= v * b15; + v = a[15]; + t15 += v * b0; + t16 += v * b1; + t17 += v * b2; + t18 += v * b3; + t19 += v * b4; + t20 += v * b5; + t21 += v * b6; + t22 += v * b7; + t23 += v * b8; + t24 += v * b9; + t25 += v * b10; + t26 += v * b11; + t27 += v * b12; + t28 += v * b13; + t29 += v * b14; + t30 += v * b15; + t0 += 38 * t16; + t1 += 38 * t17; + t2 += 38 * t18; + t3 += 38 * t19; + t4 += 38 * t20; + t5 += 38 * t21; + t6 += 38 * t22; + t7 += 38 * t23; + t8 += 38 * t24; + t9 += 38 * t25; + t10 += 38 * t26; + t11 += 38 * t27; + t12 += 38 * t28; + t13 += 38 * t29; + t14 += 38 * t30; + // t15 left as is + // first car + c = 1; + v = t0 + c + 65535; + c = Math.floor(v / 65536); + t0 = v - c * 65536; + v = t1 + c + 65535; + c = Math.floor(v / 65536); + t1 = v - c * 65536; + v = t2 + c + 65535; + c = Math.floor(v / 65536); + t2 = v - c * 65536; + v = t3 + c + 65535; + c = Math.floor(v / 65536); + t3 = v - c * 65536; + v = t4 + c + 65535; + c = Math.floor(v / 65536); + t4 = v - c * 65536; + v = t5 + c + 65535; + c = Math.floor(v / 65536); + t5 = v - c * 65536; + v = t6 + c + 65535; + c = Math.floor(v / 65536); + t6 = v - c * 65536; + v = t7 + c + 65535; + c = Math.floor(v / 65536); + t7 = v - c * 65536; + v = t8 + c + 65535; + c = Math.floor(v / 65536); + t8 = v - c * 65536; + v = t9 + c + 65535; + c = Math.floor(v / 65536); + t9 = v - c * 65536; + v = t10 + c + 65535; + c = Math.floor(v / 65536); + t10 = v - c * 65536; + v = t11 + c + 65535; + c = Math.floor(v / 65536); + t11 = v - c * 65536; + v = t12 + c + 65535; + c = Math.floor(v / 65536); + t12 = v - c * 65536; + v = t13 + c + 65535; + c = Math.floor(v / 65536); + t13 = v - c * 65536; + v = t14 + c + 65535; + c = Math.floor(v / 65536); + t14 = v - c * 65536; + v = t15 + c + 65535; + c = Math.floor(v / 65536); + t15 = v - c * 65536; + t0 += c - 1 + 37 * (c - 1); + // second car + c = 1; + v = t0 + c + 65535; + c = Math.floor(v / 65536); + t0 = v - c * 65536; + v = t1 + c + 65535; + c = Math.floor(v / 65536); + t1 = v - c * 65536; + v = t2 + c + 65535; + c = Math.floor(v / 65536); + t2 = v - c * 65536; + v = t3 + c + 65535; + c = Math.floor(v / 65536); + t3 = v - c * 65536; + v = t4 + c + 65535; + c = Math.floor(v / 65536); + t4 = v - c * 65536; + v = t5 + c + 65535; + c = Math.floor(v / 65536); + t5 = v - c * 65536; + v = t6 + c + 65535; + c = Math.floor(v / 65536); + t6 = v - c * 65536; + v = t7 + c + 65535; + c = Math.floor(v / 65536); + t7 = v - c * 65536; + v = t8 + c + 65535; + c = Math.floor(v / 65536); + t8 = v - c * 65536; + v = t9 + c + 65535; + c = Math.floor(v / 65536); + t9 = v - c * 65536; + v = t10 + c + 65535; + c = Math.floor(v / 65536); + t10 = v - c * 65536; + v = t11 + c + 65535; + c = Math.floor(v / 65536); + t11 = v - c * 65536; + v = t12 + c + 65535; + c = Math.floor(v / 65536); + t12 = v - c * 65536; + v = t13 + c + 65535; + c = Math.floor(v / 65536); + t13 = v - c * 65536; + v = t14 + c + 65535; + c = Math.floor(v / 65536); + t14 = v - c * 65536; + v = t15 + c + 65535; + c = Math.floor(v / 65536); + t15 = v - c * 65536; + t0 += c - 1 + 37 * (c - 1); + o[0] = t0; + o[1] = t1; + o[2] = t2; + o[3] = t3; + o[4] = t4; + o[5] = t5; + o[6] = t6; + o[7] = t7; + o[8] = t8; + o[9] = t9; + o[10] = t10; + o[11] = t11; + o[12] = t12; + o[13] = t13; + o[14] = t14; + o[15] = t15; + } + function S(o, a) { + M(o, a, a); + } + function inv25519(o, i) { + var c = gf(); + var a; + for (a = 0; a < 16; a++) + c[a] = i[a]; + for (a = 253; a >= 0; a--) { + S(c, c); + if (a !== 2 && a !== 4) + M(c, c, i); + } + for (a = 0; a < 16; a++) + o[a] = c[a]; + 
} + function pow2523(o, i) { + var c = gf(); + var a; + for (a = 0; a < 16; a++) + c[a] = i[a]; + for (a = 250; a >= 0; a--) { + S(c, c); + if (a !== 1) + M(c, c, i); + } + for (a = 0; a < 16; a++) + o[a] = c[a]; + } + function crypto_scalarmult(q, n, p) { + var z = new Uint8Array(32); + var x = new Float64Array(80), r, i; + var a = gf(), b = gf(), c = gf(), d = gf(), e = gf(), f = gf(); + for (i = 0; i < 31; i++) + z[i] = n[i]; + z[31] = (n[31] & 127) | 64; + z[0] &= 248; + unpack25519(x, p); + for (i = 0; i < 16; i++) { + b[i] = x[i]; + d[i] = a[i] = c[i] = 0; + } + a[0] = d[0] = 1; + for (i = 254; i >= 0; --i) { + r = (z[i >>> 3] >>> (i & 7)) & 1; + sel25519(a, b, r); + sel25519(c, d, r); + A(e, a, c); + Z(a, a, c); + A(c, b, d); + Z(b, b, d); + S(d, e); + S(f, a); + M(a, c, a); + M(c, b, e); + A(e, a, c); + Z(a, a, c); + S(b, a); + Z(c, d, f); + M(a, c, _121665); + A(a, a, d); + M(c, c, a); + M(a, d, f); + M(d, b, x); + S(b, e); + sel25519(a, b, r); + sel25519(c, d, r); + } + for (i = 0; i < 16; i++) { + x[i + 16] = a[i]; + x[i + 32] = c[i]; + x[i + 48] = b[i]; + x[i + 64] = d[i]; + } + var x32 = x.subarray(32); + var x16 = x.subarray(16); + inv25519(x32, x32); + M(x16, x16, x32); + pack25519(q, x16); + return 0; + } + function crypto_scalarmult_base(q, n) { + return crypto_scalarmult(q, n, _9); + } + var K = [ + 0x428a2f98, + 0xd728ae22, + 0x71374491, + 0x23ef65cd, + 0xb5c0fbcf, + 0xec4d3b2f, + 0xe9b5dba5, + 0x8189dbbc, + 0x3956c25b, + 0xf348b538, + 0x59f111f1, + 0xb605d019, + 0x923f82a4, + 0xaf194f9b, + 0xab1c5ed5, + 0xda6d8118, + 0xd807aa98, + 0xa3030242, + 0x12835b01, + 0x45706fbe, + 0x243185be, + 0x4ee4b28c, + 0x550c7dc3, + 0xd5ffb4e2, + 0x72be5d74, + 0xf27b896f, + 0x80deb1fe, + 0x3b1696b1, + 0x9bdc06a7, + 0x25c71235, + 0xc19bf174, + 0xcf692694, + 0xe49b69c1, + 0x9ef14ad2, + 0xefbe4786, + 0x384f25e3, + 0x0fc19dc6, + 0x8b8cd5b5, + 0x240ca1cc, + 0x77ac9c65, + 0x2de92c6f, + 0x592b0275, + 0x4a7484aa, + 0x6ea6e483, + 0x5cb0a9dc, + 0xbd41fbd4, + 0x76f988da, + 0x831153b5, + 0x983e5152, + 0xee66dfab, + 0xa831c66d, + 0x2db43210, + 0xb00327c8, + 0x98fb213f, + 0xbf597fc7, + 0xbeef0ee4, + 0xc6e00bf3, + 0x3da88fc2, + 0xd5a79147, + 0x930aa725, + 0x06ca6351, + 0xe003826f, + 0x14292967, + 0x0a0e6e70, + 0x27b70a85, + 0x46d22ffc, + 0x2e1b2138, + 0x5c26c926, + 0x4d2c6dfc, + 0x5ac42aed, + 0x53380d13, + 0x9d95b3df, + 0x650a7354, + 0x8baf63de, + 0x766a0abb, + 0x3c77b2a8, + 0x81c2c92e, + 0x47edaee6, + 0x92722c85, + 0x1482353b, + 0xa2bfe8a1, + 0x4cf10364, + 0xa81a664b, + 0xbc423001, + 0xc24b8b70, + 0xd0f89791, + 0xc76c51a3, + 0x0654be30, + 0xd192e819, + 0xd6ef5218, + 0xd6990624, + 0x5565a910, + 0xf40e3585, + 0x5771202a, + 0x106aa070, + 0x32bbd1b8, + 0x19a4c116, + 0xb8d2d0c8, + 0x1e376c08, + 0x5141ab53, + 0x2748774c, + 0xdf8eeb99, + 0x34b0bcb5, + 0xe19b48a8, + 0x391c0cb3, + 0xc5c95a63, + 0x4ed8aa4a, + 0xe3418acb, + 0x5b9cca4f, + 0x7763e373, + 0x682e6ff3, + 0xd6b2b8a3, + 0x748f82ee, + 0x5defb2fc, + 0x78a5636f, + 0x43172f60, + 0x84c87814, + 0xa1f0ab72, + 0x8cc70208, + 0x1a6439ec, + 0x90befffa, + 0x23631e28, + 0xa4506ceb, + 0xde82bde9, + 0xbef9a3f7, + 0xb2c67915, + 0xc67178f2, + 0xe372532b, + 0xca273ece, + 0xea26619c, + 0xd186b8c7, + 0x21c0c207, + 0xeada7dd6, + 0xcde0eb1e, + 0xf57d4f7f, + 0xee6ed178, + 0x06f067aa, + 0x72176fba, + 0x0a637dc5, + 0xa2c898a6, + 0x113f9804, + 0xbef90dae, + 0x1b710b35, + 0x131c471b, + 0x28db77f5, + 0x23047d84, + 0x32caab7b, + 0x40c72493, + 0x3c9ebe0a, + 0x15c9bebc, + 0x431d67c4, + 0x9c100d4c, + 0x4cc5d4be, + 0xcb3e42b6, + 0x597f299c, + 0xfc657e2a, + 0x5fcb6fab, + 0x3ad6faec, + 
0x6c44198c, + 0x4a475817, + ]; + function crypto_hashblocks_hl(hh, hl, m, n) { + var wh = new Int32Array(16), wl = new Int32Array(16), bh0, bh1, bh2, bh3, bh4, bh5, bh6, bh7, bl0, bl1, bl2, bl3, bl4, bl5, bl6, bl7, th, tl, i, j, h, l, a, b, c, d; + var ah0 = hh[0], ah1 = hh[1], ah2 = hh[2], ah3 = hh[3], ah4 = hh[4], ah5 = hh[5], ah6 = hh[6], ah7 = hh[7], al0 = hl[0], al1 = hl[1], al2 = hl[2], al3 = hl[3], al4 = hl[4], al5 = hl[5], al6 = hl[6], al7 = hl[7]; + var pos = 0; + while (n >= 128) { + for (i = 0; i < 16; i++) { + j = 8 * i + pos; + wh[i] = (m[j + 0] << 24) | (m[j + 1] << 16) | (m[j + 2] << 8) | m[j + 3]; + wl[i] = (m[j + 4] << 24) | (m[j + 5] << 16) | (m[j + 6] << 8) | m[j + 7]; + } + for (i = 0; i < 80; i++) { + bh0 = ah0; + bh1 = ah1; + bh2 = ah2; + bh3 = ah3; + bh4 = ah4; + bh5 = ah5; + bh6 = ah6; + bh7 = ah7; + bl0 = al0; + bl1 = al1; + bl2 = al2; + bl3 = al3; + bl4 = al4; + bl5 = al5; + bl6 = al6; + bl7 = al7; + // add + h = ah7; + l = al7; + a = l & 0xffff; + b = l >>> 16; + c = h & 0xffff; + d = h >>> 16; + // Sigma1 + h = + ((ah4 >>> 14) | (al4 << (32 - 14))) ^ + ((ah4 >>> 18) | (al4 << (32 - 18))) ^ + ((al4 >>> (41 - 32)) | (ah4 << (32 - (41 - 32)))); + l = + ((al4 >>> 14) | (ah4 << (32 - 14))) ^ + ((al4 >>> 18) | (ah4 << (32 - 18))) ^ + ((ah4 >>> (41 - 32)) | (al4 << (32 - (41 - 32)))); + a += l & 0xffff; + b += l >>> 16; + c += h & 0xffff; + d += h >>> 16; + // Ch + h = (ah4 & ah5) ^ (~ah4 & ah6); + l = (al4 & al5) ^ (~al4 & al6); + a += l & 0xffff; + b += l >>> 16; + c += h & 0xffff; + d += h >>> 16; + // K + h = K[i * 2]; + l = K[i * 2 + 1]; + a += l & 0xffff; + b += l >>> 16; + c += h & 0xffff; + d += h >>> 16; + // w + h = wh[i % 16]; + l = wl[i % 16]; + a += l & 0xffff; + b += l >>> 16; + c += h & 0xffff; + d += h >>> 16; + b += a >>> 16; + c += b >>> 16; + d += c >>> 16; + th = (c & 0xffff) | (d << 16); + tl = (a & 0xffff) | (b << 16); + // add + h = th; + l = tl; + a = l & 0xffff; + b = l >>> 16; + c = h & 0xffff; + d = h >>> 16; + // Sigma0 + h = + ((ah0 >>> 28) | (al0 << (32 - 28))) ^ + ((al0 >>> (34 - 32)) | (ah0 << (32 - (34 - 32)))) ^ + ((al0 >>> (39 - 32)) | (ah0 << (32 - (39 - 32)))); + l = + ((al0 >>> 28) | (ah0 << (32 - 28))) ^ + ((ah0 >>> (34 - 32)) | (al0 << (32 - (34 - 32)))) ^ + ((ah0 >>> (39 - 32)) | (al0 << (32 - (39 - 32)))); + a += l & 0xffff; + b += l >>> 16; + c += h & 0xffff; + d += h >>> 16; + // Maj + h = (ah0 & ah1) ^ (ah0 & ah2) ^ (ah1 & ah2); + l = (al0 & al1) ^ (al0 & al2) ^ (al1 & al2); + a += l & 0xffff; + b += l >>> 16; + c += h & 0xffff; + d += h >>> 16; + b += a >>> 16; + c += b >>> 16; + d += c >>> 16; + bh7 = (c & 0xffff) | (d << 16); + bl7 = (a & 0xffff) | (b << 16); + // add + h = bh3; + l = bl3; + a = l & 0xffff; + b = l >>> 16; + c = h & 0xffff; + d = h >>> 16; + h = th; + l = tl; + a += l & 0xffff; + b += l >>> 16; + c += h & 0xffff; + d += h >>> 16; + b += a >>> 16; + c += b >>> 16; + d += c >>> 16; + bh3 = (c & 0xffff) | (d << 16); + bl3 = (a & 0xffff) | (b << 16); + ah1 = bh0; + ah2 = bh1; + ah3 = bh2; + ah4 = bh3; + ah5 = bh4; + ah6 = bh5; + ah7 = bh6; + ah0 = bh7; + al1 = bl0; + al2 = bl1; + al3 = bl2; + al4 = bl3; + al5 = bl4; + al6 = bl5; + al7 = bl6; + al0 = bl7; + if (i % 16 === 15) { + for (j = 0; j < 16; j++) { + // add + h = wh[j]; + l = wl[j]; + a = l & 0xffff; + b = l >>> 16; + c = h & 0xffff; + d = h >>> 16; + h = wh[(j + 9) % 16]; + l = wl[(j + 9) % 16]; + a += l & 0xffff; + b += l >>> 16; + c += h & 0xffff; + d += h >>> 16; + // sigma0 + th = wh[(j + 1) % 16]; + tl = wl[(j + 1) % 16]; + h = ((th >>> 1) | (tl 
<< (32 - 1))) ^ ((th >>> 8) | (tl << (32 - 8))) ^ (th >>> 7); + l = ((tl >>> 1) | (th << (32 - 1))) ^ ((tl >>> 8) | (th << (32 - 8))) ^ ((tl >>> 7) | (th << (32 - 7))); + a += l & 0xffff; + b += l >>> 16; + c += h & 0xffff; + d += h >>> 16; + // sigma1 + th = wh[(j + 14) % 16]; + tl = wl[(j + 14) % 16]; + h = ((th >>> 19) | (tl << (32 - 19))) ^ ((tl >>> (61 - 32)) | (th << (32 - (61 - 32)))) ^ (th >>> 6); + l = + ((tl >>> 19) | (th << (32 - 19))) ^ + ((th >>> (61 - 32)) | (tl << (32 - (61 - 32)))) ^ + ((tl >>> 6) | (th << (32 - 6))); + a += l & 0xffff; + b += l >>> 16; + c += h & 0xffff; + d += h >>> 16; + b += a >>> 16; + c += b >>> 16; + d += c >>> 16; + wh[j] = (c & 0xffff) | (d << 16); + wl[j] = (a & 0xffff) | (b << 16); + } + } + } + // add + h = ah0; + l = al0; + a = l & 0xffff; + b = l >>> 16; + c = h & 0xffff; + d = h >>> 16; + h = hh[0]; + l = hl[0]; + a += l & 0xffff; + b += l >>> 16; + c += h & 0xffff; + d += h >>> 16; + b += a >>> 16; + c += b >>> 16; + d += c >>> 16; + hh[0] = ah0 = (c & 0xffff) | (d << 16); + hl[0] = al0 = (a & 0xffff) | (b << 16); + h = ah1; + l = al1; + a = l & 0xffff; + b = l >>> 16; + c = h & 0xffff; + d = h >>> 16; + h = hh[1]; + l = hl[1]; + a += l & 0xffff; + b += l >>> 16; + c += h & 0xffff; + d += h >>> 16; + b += a >>> 16; + c += b >>> 16; + d += c >>> 16; + hh[1] = ah1 = (c & 0xffff) | (d << 16); + hl[1] = al1 = (a & 0xffff) | (b << 16); + h = ah2; + l = al2; + a = l & 0xffff; + b = l >>> 16; + c = h & 0xffff; + d = h >>> 16; + h = hh[2]; + l = hl[2]; + a += l & 0xffff; + b += l >>> 16; + c += h & 0xffff; + d += h >>> 16; + b += a >>> 16; + c += b >>> 16; + d += c >>> 16; + hh[2] = ah2 = (c & 0xffff) | (d << 16); + hl[2] = al2 = (a & 0xffff) | (b << 16); + h = ah3; + l = al3; + a = l & 0xffff; + b = l >>> 16; + c = h & 0xffff; + d = h >>> 16; + h = hh[3]; + l = hl[3]; + a += l & 0xffff; + b += l >>> 16; + c += h & 0xffff; + d += h >>> 16; + b += a >>> 16; + c += b >>> 16; + d += c >>> 16; + hh[3] = ah3 = (c & 0xffff) | (d << 16); + hl[3] = al3 = (a & 0xffff) | (b << 16); + h = ah4; + l = al4; + a = l & 0xffff; + b = l >>> 16; + c = h & 0xffff; + d = h >>> 16; + h = hh[4]; + l = hl[4]; + a += l & 0xffff; + b += l >>> 16; + c += h & 0xffff; + d += h >>> 16; + b += a >>> 16; + c += b >>> 16; + d += c >>> 16; + hh[4] = ah4 = (c & 0xffff) | (d << 16); + hl[4] = al4 = (a & 0xffff) | (b << 16); + h = ah5; + l = al5; + a = l & 0xffff; + b = l >>> 16; + c = h & 0xffff; + d = h >>> 16; + h = hh[5]; + l = hl[5]; + a += l & 0xffff; + b += l >>> 16; + c += h & 0xffff; + d += h >>> 16; + b += a >>> 16; + c += b >>> 16; + d += c >>> 16; + hh[5] = ah5 = (c & 0xffff) | (d << 16); + hl[5] = al5 = (a & 0xffff) | (b << 16); + h = ah6; + l = al6; + a = l & 0xffff; + b = l >>> 16; + c = h & 0xffff; + d = h >>> 16; + h = hh[6]; + l = hl[6]; + a += l & 0xffff; + b += l >>> 16; + c += h & 0xffff; + d += h >>> 16; + b += a >>> 16; + c += b >>> 16; + d += c >>> 16; + hh[6] = ah6 = (c & 0xffff) | (d << 16); + hl[6] = al6 = (a & 0xffff) | (b << 16); + h = ah7; + l = al7; + a = l & 0xffff; + b = l >>> 16; + c = h & 0xffff; + d = h >>> 16; + h = hh[7]; + l = hl[7]; + a += l & 0xffff; + b += l >>> 16; + c += h & 0xffff; + d += h >>> 16; + b += a >>> 16; + c += b >>> 16; + d += c >>> 16; + hh[7] = ah7 = (c & 0xffff) | (d << 16); + hl[7] = al7 = (a & 0xffff) | (b << 16); + pos += 128; + n -= 128; + } + return n; + } + function crypto_hash(out, m, n) { + var hh = new Int32Array(8), hl = new Int32Array(8), x = new Uint8Array(256), i, b = n; + hh[0] = 0x6a09e667; + hh[1] = 0xbb67ae85; 
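+ // hh/hl carry the SHA-512 initial hash values as (high, low) 32-bit halves;
+ // e.g. H0 = 0x6a09e667f3bcc908 is split across hh[0] and hl[0].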
+ hh[2] = 0x3c6ef372; + hh[3] = 0xa54ff53a; + hh[4] = 0x510e527f; + hh[5] = 0x9b05688c; + hh[6] = 0x1f83d9ab; + hh[7] = 0x5be0cd19; + hl[0] = 0xf3bcc908; + hl[1] = 0x84caa73b; + hl[2] = 0xfe94f82b; + hl[3] = 0x5f1d36f1; + hl[4] = 0xade682d1; + hl[5] = 0x2b3e6c1f; + hl[6] = 0xfb41bd6b; + hl[7] = 0x137e2179; + crypto_hashblocks_hl(hh, hl, m, n); + n %= 128; + for (i = 0; i < n; i++) + x[i] = m[b - n + i]; + x[n] = 128; + n = 256 - 128 * (n < 112 ? 1 : 0); + x[n - 9] = 0; + ts64(x, n - 8, (b / 0x20000000) | 0, b << 3); + crypto_hashblocks_hl(hh, hl, x, n); + for (i = 0; i < 8; i++) + ts64(out, 8 * i, hh[i], hl[i]); + return 0; + } + function add(p, q) { + var a = gf(), b = gf(), c = gf(), d = gf(), e = gf(), f = gf(), g = gf(), h = gf(), t = gf(); + Z(a, p[1], p[0]); + Z(t, q[1], q[0]); + M(a, a, t); + A(b, p[0], p[1]); + A(t, q[0], q[1]); + M(b, b, t); + M(c, p[3], q[3]); + M(c, c, D2); + M(d, p[2], q[2]); + A(d, d, d); + Z(e, b, a); + Z(f, d, c); + A(g, d, c); + A(h, b, a); + M(p[0], e, f); + M(p[1], h, g); + M(p[2], g, f); + M(p[3], e, h); + } + function cswap(p, q, b) { + var i; + for (i = 0; i < 4; i++) { + sel25519(p[i], q[i], b); + } + } + function pack(r, p) { + var tx = gf(), ty = gf(), zi = gf(); + inv25519(zi, p[2]); + M(tx, p[0], zi); + M(ty, p[1], zi); + pack25519(r, ty); + r[31] ^= par25519(tx) << 7; + } + function scalarmult(p, q, s) { + var b, i; + set25519(p[0], gf0); + set25519(p[1], gf1); + set25519(p[2], gf1); + set25519(p[3], gf0); + for (i = 255; i >= 0; --i) { + b = (s[(i / 8) | 0] >> (i & 7)) & 1; + cswap(p, q, b); + add(q, p); + add(p, p); + cswap(p, q, b); + } + } + function scalarbase(p, s) { + var q = [gf(), gf(), gf(), gf()]; + set25519(q[0], X); + set25519(q[1], Y); + set25519(q[2], gf1); + M(q[3], X, Y); + scalarmult(p, q, s); + } + var L = new Float64Array([ + 0xed, + 0xd3, + 0xf5, + 0x5c, + 0x1a, + 0x63, + 0x12, + 0x58, + 0xd6, + 0x9c, + 0xf7, + 0xa2, + 0xde, + 0xf9, + 0xde, + 0x14, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0x10, + ]); + function modL(r, x) { + var carry, i, j, k; + for (i = 63; i >= 32; --i) { + carry = 0; + for (j = i - 32, k = i - 12; j < k; ++j) { + x[j] += carry - 16 * x[i] * L[j - (i - 32)]; + carry = (x[j] + 128) >> 8; + x[j] -= carry * 256; + } + x[j] += carry; + x[i] = 0; + } + carry = 0; + for (j = 0; j < 32; j++) { + x[j] += carry - (x[31] >> 4) * L[j]; + carry = x[j] >> 8; + x[j] &= 255; + } + for (j = 0; j < 32; j++) + x[j] -= carry * L[j]; + for (i = 0; i < 32; i++) { + x[i + 1] += x[i] >> 8; + r[i] = x[i] & 255; + } + } + function reduce(r) { + var x = new Float64Array(64), i; + for (i = 0; i < 64; i++) + x[i] = r[i]; + for (i = 0; i < 64; i++) + r[i] = 0; + modL(r, x); + } + // Like crypto_sign, but uses secret key directly in hash. + function crypto_sign_direct(sm, m, n, sk) { + var h = new Uint8Array(64), r = new Uint8Array(64); + var i, j, x = new Float64Array(64); + var p = [gf(), gf(), gf(), gf()]; + for (i = 0; i < n; i++) + sm[64 + i] = m[i]; + for (i = 0; i < 32; i++) + sm[32 + i] = sk[i]; + crypto_hash(r, sm.subarray(32), n + 32); + reduce(r); + scalarbase(p, r); + pack(sm, p); + for (i = 0; i < 32; i++) + sm[i + 32] = sk[32 + i]; + crypto_hash(h, sm, n + 64); + reduce(h); + for (i = 0; i < 64; i++) + x[i] = 0; + for (i = 0; i < 32; i++) + x[i] = r[i]; + for (i = 0; i < 32; i++) { + for (j = 0; j < 32; j++) { + x[i + j] += h[i] * sk[j]; + } + } + modL(sm.subarray(32), x); + return n + 64; + } + // Note: sm must be n+128. 
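+ // Buffer layout, as written by the assignments below: sm[0..31] is a
+ // hash-separation prefix (0xfe then 31 bytes of 0xff), sm[32..63] the secret
+ // key, sm[64..64+n) the message, then 64 random bytes wiped after hashing.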
+ function crypto_sign_direct_rnd(sm, m, n, sk, rnd) { + var h = new Uint8Array(64), r = new Uint8Array(64); + var i, j, x = new Float64Array(64); + var p = [gf(), gf(), gf(), gf()]; + // Hash separation. + sm[0] = 0xfe; + for (i = 1; i < 32; i++) + sm[i] = 0xff; + // Secret key. + for (i = 0; i < 32; i++) + sm[32 + i] = sk[i]; + // Message. + for (i = 0; i < n; i++) + sm[64 + i] = m[i]; + // Random suffix. + for (i = 0; i < 64; i++) + sm[n + 64 + i] = rnd[i]; + crypto_hash(r, sm, n + 128); + reduce(r); + scalarbase(p, r); + pack(sm, p); + for (i = 0; i < 32; i++) + sm[i + 32] = sk[32 + i]; + crypto_hash(h, sm, n + 64); + reduce(h); + // Wipe out random suffix. + for (i = 0; i < 64; i++) + sm[n + 64 + i] = 0; + for (i = 0; i < 64; i++) + x[i] = 0; + for (i = 0; i < 32; i++) + x[i] = r[i]; + for (i = 0; i < 32; i++) { + for (j = 0; j < 32; j++) { + x[i + j] += h[i] * sk[j]; + } + } + modL(sm.subarray(32, n + 64), x); + return n + 64; + } + function curve25519_sign(sm, m, n, sk, opt_rnd) { + // If opt_rnd is provided, sm must have n + 128, + // otherwise it must have n + 64 bytes. + // Convert Curve25519 secret key into Ed25519 secret key (includes pub key). + var edsk = new Uint8Array(64); + var p = [gf(), gf(), gf(), gf()]; + for (var i = 0; i < 32; i++) + edsk[i] = sk[i]; + // Ensure private key is in the correct format. + edsk[0] &= 248; + edsk[31] &= 127; + edsk[31] |= 64; + scalarbase(p, edsk); + pack(edsk.subarray(32), p); + // Remember sign bit. + var signBit = edsk[63] & 128; + var smlen; + if (opt_rnd) { + smlen = crypto_sign_direct_rnd(sm, m, n, edsk, opt_rnd); + } + else { + smlen = crypto_sign_direct(sm, m, n, edsk); + } + // Copy sign bit from public key into signature. + sm[63] |= signBit; + return smlen; + } + function unpackneg(r, p) { + var t = gf(), chk = gf(), num = gf(), den = gf(), den2 = gf(), den4 = gf(), den6 = gf(); + set25519(r[2], gf1); + unpack25519(r[1], p); + S(num, r[1]); + M(den, num, D); + Z(num, num, r[2]); + A(den, r[2], den); + S(den2, den); + S(den4, den2); + M(den6, den4, den2); + M(t, den6, num); + M(t, t, den); + pow2523(t, t); + M(t, t, num); + M(t, t, den); + M(t, t, den); + M(r[0], t, den); + S(chk, r[0]); + M(chk, chk, den); + if (neq25519(chk, num)) + M(r[0], r[0], I); + S(chk, r[0]); + M(chk, chk, den); + if (neq25519(chk, num)) + return -1; + if (par25519(r[0]) === p[31] >> 7) + Z(r[0], gf0, r[0]); + M(r[3], r[0], r[1]); + return 0; + } + function crypto_sign_open(m, sm, n, pk) { + var i, mlen; + var t = new Uint8Array(32), h = new Uint8Array(64); + var p = [gf(), gf(), gf(), gf()], q = [gf(), gf(), gf(), gf()]; + mlen = -1; + if (n < 64) + return -1; + if (unpackneg(q, pk)) + return -1; + for (i = 0; i < n; i++) + m[i] = sm[i]; + for (i = 0; i < 32; i++) + m[i + 32] = pk[i]; + crypto_hash(h, m, n); + reduce(h); + scalarmult(p, q, h); + scalarbase(q, sm.subarray(32)); + add(p, q); + pack(t, p); + n -= 64; + if (crypto_verify_32(sm, 0, t, 0)) { + for (i = 0; i < n; i++) + m[i] = 0; + return -1; + } + for (i = 0; i < n; i++) + m[i] = sm[i + 64]; + mlen = n; + return mlen; + } + // Converts Curve25519 public key back to Ed25519 public key. + // edwardsY = (montgomeryX - 1) / (montgomeryX + 1) + function convertPublicKey(pk) { + var z = new Uint8Array(32), x = gf(), a = gf(), b = gf(); + unpack25519(x, pk); + A(a, x, gf1); + Z(b, x, gf1); + inv25519(a, a); + M(a, a, b); + pack25519(z, a); + return z; + } + function curve25519_sign_open(m, sm, n, pk) { + // Convert Curve25519 public key into Ed25519 public key. 
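+ // (convertPublicKey below applies the birational map y = (u - 1)/(u + 1)
+ // from the Montgomery u-coordinate to the Edwards y-coordinate; the sign
+ // of the Edwards x-coordinate is not representable there, so it travels
+ // in the top bit of sm[63] and is restored here.)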
+ var edpk = convertPublicKey(pk); + // Restore sign bit from signature. + edpk[31] |= sm[63] & 128; + // Remove sign bit from signature. + sm[63] &= 127; + // Verify signed message. + return crypto_sign_open(m, sm, n, edpk); + } + /* High-level API */ + function checkArrayTypes(...args) { + var t, i; + for (i = 0; i < arguments.length; i++) { + if ((t = Object.prototype.toString.call(arguments[i])) !== '[object Uint8Array]') + throw new TypeError('unexpected type ' + t + ', use Uint8Array'); + } + } + /** + * Returns a raw shared key between own private key and peer's public key (in other words, this is an ECC Diffie-Hellman function X25519, performing scalar multiplication). + * + * The result should not be used directly as a key, but should be processed with a one-way function (e.g. HSalsa20 as in NaCl, or any secure cryptographic hash function, such as SHA-256, or key derivation function, such as HKDF). + * + * @export + * @param {Uint8Array} secretKey + * @param {Uint8Array} publicKey + * @returns Uint8Array + */ + function sharedKey(secretKey, publicKey) { + checkArrayTypes(publicKey, secretKey); + if (publicKey.length !== 32) + throw new Error('wrong public key length'); + if (secretKey.length !== 32) + throw new Error('wrong secret key length'); + var sharedKey = new Uint8Array(32); + crypto_scalarmult(sharedKey, secretKey, publicKey); + return sharedKey; + } + /** + * Signs the given message using the private key and returns a signed message (signature concatenated with the message copy). + * + * Optional random data argument (which must have 64 random bytes) turns on hash separation and randomization to make signatures non-deterministic. + * + * @export + * @param {Uint8Array} secretKey + * @param {*} msg + * @param {Uint8Array} opt_random + * @returns + */ + function signMessage(secretKey, msg, opt_random) { + checkArrayTypes(msg, secretKey); + if (secretKey.length !== 32) + throw new Error('wrong secret key length'); + if (opt_random) { + checkArrayTypes(opt_random); + if (opt_random.length !== 64) + throw new Error('wrong random data length'); + var buf = new Uint8Array(128 + msg.length); + curve25519_sign(buf, msg, msg.length, secretKey, opt_random); + return new Uint8Array(buf.subarray(0, 64 + msg.length)); + } + else { + var signedMsg = new Uint8Array(64 + msg.length); + curve25519_sign(signedMsg, msg, msg.length, secretKey); + return signedMsg; + } + } + /** + * Verifies signed message with the public key and returns the original message without signature if it's correct or null if verification fails. + * + * @export + * @param {Uint8Array} publicKey + * @param {*} signedMsg + * @returns Message + */ + function openMessage(publicKey, signedMsg) { + checkArrayTypes(signedMsg, publicKey); + if (publicKey.length !== 32) + throw new Error('wrong public key length'); + var tmp = new Uint8Array(signedMsg.length); + var mlen = curve25519_sign_open(tmp, signedMsg, signedMsg.length, publicKey); + if (mlen < 0) + return null; + var m = new Uint8Array(mlen); + for (var i = 0; i < m.length; i++) + m[i] = tmp[i]; + return m; + } + /** + * Signs the given message using the private key and returns signature. + * + * Optional random data argument (which must have 64 random bytes) turns on hash separation and randomization to make signatures non-deterministic. 
* + * @export + * @param {Uint8Array} secretKey + * @param {Uint8Array} msg + * @param {Uint8Array} opt_random + * @returns + */ + function sign(secretKey, msg, opt_random) { + checkArrayTypes(secretKey, msg); + if (secretKey.length !== 32) + throw new Error('wrong secret key length'); + if (opt_random) { + checkArrayTypes(opt_random); + if (opt_random.length !== 64) + throw new Error('wrong random data length'); + } + var buf = new Uint8Array((opt_random ? 128 : 64) + msg.length); + curve25519_sign(buf, msg, msg.length, secretKey, opt_random); + var signature = new Uint8Array(64); + for (var i = 0; i < signature.length; i++) + signature[i] = buf[i]; + return signature; + } + /** + * Verifies the given signature for the message using the given public key. Returns true if the signature is valid, false otherwise. + * + * @export + * @param {Uint8Array} publicKey + * @param {Uint8Array} msg + * @param {Uint8Array} signature + * @returns + */ + function verify(publicKey, msg, signature) { + checkArrayTypes(msg, signature, publicKey); + if (signature.length !== 64) + throw new Error('wrong signature length'); + if (publicKey.length !== 32) + throw new Error('wrong public key length'); + var sm = new Uint8Array(64 + msg.length); + var m = new Uint8Array(64 + msg.length); + var i; + for (i = 0; i < 64; i++) + sm[i] = signature[i]; + for (i = 0; i < msg.length; i++) + sm[i + 64] = msg[i]; + return curve25519_sign_open(m, sm, sm.length, publicKey) >= 0; + } + /** + * Generates a new key pair from the given 32-byte secret seed (which should be generated with a CSPRNG) and returns it as an object. + * + * The returned keys can be used for signing and key agreement. + * + * @export + * @param {Uint8Array} seed required + * @returns + */ + function generateKeyPair(seed) { + checkArrayTypes(seed); + if (seed.length !== 32) + throw new Error('wrong seed length'); + var sk = new Uint8Array(32); + var pk = new Uint8Array(32); + for (var i = 0; i < 32; i++) + sk[i] = seed[i]; + crypto_scalarmult_base(pk, sk); + // Turn secret key into the correct format. + sk[0] &= 248; + sk[31] &= 127; + sk[31] |= 64; + // Remove sign bit from public key.
+ pk[31] &= 127; + return { + public: pk, + private: sk, + }; + } + +module.exports = { + sharedKey, + signMessage, + openMessage, + sign, + verify, + generateKeyPair, +}; diff --git a/vmm/ui/src/main.ts b/vmm/ui/src/main.ts new file mode 100644 index 00000000..c2dbfe9d --- /dev/null +++ b/vmm/ui/src/main.ts @@ -0,0 +1,9 @@ +// SPDX-FileCopyrightText: © 2025 Phala Network +// SPDX-License-Identifier: Apache-2.0 + +declare const Vue: any; + +const { createApp } = Vue; +const App = require('./App'); + +createApp(App).mount('#app'); diff --git a/vmm/ui/src/styles/main.css b/vmm/ui/src/styles/main.css new file mode 100644 index 00000000..45b267a9 --- /dev/null +++ b/vmm/ui/src/styles/main.css @@ -0,0 +1,1553 @@ +/* SPDX-FileCopyrightText: © 2025 Phala Network + SPDX-License-Identifier: Apache-2.0 */ + +:root { + --color-primary: #2563eb; + --color-primary-hover: #1d4ed8; + --color-success: #16a34a; + --color-warning: #ea580c; + --color-danger: #dc2626; + --color-text-primary: #0f172a; + --color-text-secondary: #475569; + --color-text-tertiary: #94a3b8; + --color-bg-primary: #ffffff; + --color-bg-secondary: #f8fafc; + --color-bg-tertiary: #f1f5f9; + --color-border: #e2e8f0; + --color-border-light: #f1f5f9; + --shadow-sm: 0 1px 2px 0 rgb(0 0 0 / 0.05); + --shadow-md: 0 4px 6px -1px rgb(0 0 0 / 0.1); + --shadow-lg: 0 10px 15px -3px rgb(0 0 0 / 0.1); + --shadow-xl: 0 20px 25px -5px rgb(0 0 0 / 0.1); + --radius-sm: 6px; + --radius-md: 8px; + --radius-lg: 12px; + color-scheme: light; + font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 'Ubuntu', 'Cantarell', sans-serif; +} + +* { + box-sizing: border-box; +} + +body { + margin: 0; + padding: 0; + background: var(--color-bg-secondary); + color: var(--color-text-primary); + line-height: 1.6; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; +} + +h1, h2, h3, h4, h5, h6 { + margin: 0; + font-weight: 600; + color: var(--color-text-primary); +} + +.console-root { + min-height: 100vh; + display: flex; + flex-direction: column; +} + +.app-header { + background: var(--color-bg-primary); + border-bottom: 1px solid var(--color-border); + box-shadow: var(--shadow-sm); + position: sticky; + top: 0; + z-index: 100; +} + +.header-content { + max-width: 1400px; + margin: 0 auto; + padding: 16px 24px; + display: flex; + justify-content: space-between; + align-items: center; + gap: 24px; +} + +.header-left { + display: flex; + align-items: center; + gap: 12px; +} + +.app-title { + font-size: 20px; + font-weight: 700; + color: var(--color-text-primary); +} + +.version-badge { + background: var(--color-bg-tertiary); + color: var(--color-text-secondary); + padding: 4px 10px; + border-radius: 12px; + font-size: 12px; + font-weight: 600; +} + +.header-right { + display: flex; + align-items: center; + gap: 12px; +} + +.btn-primary { + background: var(--color-primary); + color: white; + border: none; + padding: 10px 20px; + border-radius: var(--radius-md); + font-size: 14px; + font-weight: 600; + cursor: pointer; + display: inline-flex; + align-items: center; + gap: 8px; + transition: all 0.2s ease; + box-shadow: var(--shadow-sm); +} + +.btn-primary:hover { + background: var(--color-primary-hover); + box-shadow: var(--shadow-md); + transform: translateY(-1px); +} + +.btn-primary:active { + transform: translateY(0); +} + +.btn-secondary { + background: var(--color-bg-primary); + color: var(--color-text-secondary); + border: 1px solid var(--color-border); + padding: 8px 16px; + border-radius: var(--radius-md); + 
font-size: 14px; + font-weight: 500; + cursor: pointer; + display: inline-flex; + align-items: center; + gap: 6px; + transition: all 0.2s ease; +} + +.btn-secondary:hover { + background: var(--color-bg-tertiary); + border-color: var(--color-text-tertiary); +} + +.btn-secondary:active { + transform: scale(0.98); +} + +.toolbar { + max-width: 1400px; + margin: 24px auto; + padding: 0 24px; + display: flex; + flex-wrap: wrap; + gap: 16px; + justify-content: space-between; + align-items: center; +} + +.toolbar-section { + display: flex; + align-items: center; + gap: 16px; + flex-wrap: wrap; +} + +.search-box { + position: relative; + display: flex; + align-items: center; + background: var(--color-bg-primary); + border: 1px solid var(--color-border); + border-radius: var(--radius-md); + padding: 0 12px; + gap: 8px; + min-width: 320px; + box-shadow: var(--shadow-sm); +} + +.search-icon { + color: var(--color-text-tertiary); + flex-shrink: 0; +} + +.search-box input { + border: none; + outline: none; + padding: 10px 0; + font-size: 14px; + background: transparent; + flex: 1; + min-width: 0; + color: var(--color-text-primary); +} + +.search-box input::placeholder { + color: var(--color-text-tertiary); +} + +.btn-search { + background: var(--color-primary); + color: white; + border: none; + padding: 6px 14px; + border-radius: var(--radius-sm); + font-size: 13px; + font-weight: 600; + cursor: pointer; + white-space: nowrap; + transition: background 0.2s ease; +} + +.btn-search:hover { + background: var(--color-primary-hover); +} + +.vm-count { + display: flex; + align-items: center; + gap: 8px; + padding: 10px 16px; + background: var(--color-bg-primary); + border: 1px solid var(--color-border); + border-radius: var(--radius-md); + box-shadow: var(--shadow-sm); +} + +.count-label { + color: var(--color-text-secondary); + font-size: 13px; + font-weight: 500; +} + +.count-value { + color: var(--color-primary); + font-size: 15px; + font-weight: 700; +} + +.pagination-controls { + display: flex; + align-items: center; + gap: 8px; + background: var(--color-bg-primary); + border: 1px solid var(--color-border); + border-radius: var(--radius-md); + padding: 6px; + box-shadow: var(--shadow-sm); +} + +.btn-pagination { + background: transparent; + border: none; + padding: 6px 8px; + border-radius: var(--radius-sm); + cursor: pointer; + display: flex; + align-items: center; + justify-content: center; + color: var(--color-text-secondary); + transition: all 0.2s ease; +} + +.btn-pagination:hover:not(:disabled) { + background: var(--color-bg-tertiary); + color: var(--color-text-primary); +} + +.btn-pagination:disabled { + opacity: 0.4; + cursor: not-allowed; +} + +.page-display { + display: flex; + align-items: center; + gap: 6px; + padding: 0 8px; +} + +.page-input { + width: 50px; + padding: 4px 8px; + border: 1px solid var(--color-border); + border-radius: var(--radius-sm); + font-size: 14px; + text-align: center; + outline: none; +} + +.page-input:focus { + border-color: var(--color-primary); +} + +.page-separator { + color: var(--color-text-tertiary); + font-weight: 500; +} + +.page-total { + color: var(--color-text-secondary); + font-weight: 600; + min-width: 24px; + text-align: center; +} + +.page-size-select { + border: 1px solid var(--color-border); + border-radius: var(--radius-sm); + padding: 6px 10px; + font-size: 13px; + outline: none; + cursor: pointer; + background: var(--color-bg-primary); + color: var(--color-text-secondary); + font-weight: 500; +} + +.page-size-select:hover { + border-color: 
var(--color-text-tertiary); +} + +.vm-table { + max-width: 900px; + width: 900px; + margin: 0 auto 24px; + padding: 0 24px; +} + +.vm-table-header { + display: grid; + grid-template-columns: 24px 2fr 100px 120px 180px 60px; + gap: 16px; + padding: 12px 16px; + background: var(--color-bg-primary); + border-bottom: 2px solid var(--color-border); + font-weight: 600; + font-size: 13px; + color: var(--color-text-secondary); + text-transform: uppercase; + letter-spacing: 0.5px; +} + +.vm-row { + background: var(--color-bg-primary); + border-bottom: 1px solid var(--color-border-light); + transition: background 0.15s ease; +} + +.vm-row:hover { + background: var(--color-bg-secondary); +} + +.vm-row-main { + display: grid; + grid-template-columns: 24px 2fr 100px 120px 180px 60px; + gap: 16px; + padding: 16px; + align-items: center; + cursor: pointer; + transition: background-color 0.15s ease; +} + +.vm-col-expand, +.vm-col-name, +.vm-col-status, +.vm-col-uptime, +.vm-col-view, +.vm-col-actions { + display: flex; + align-items: center; +} + +.vm-col-view, +.vm-col-actions { + cursor: default; +} + +.vm-col-expand { + justify-content: flex-start; +} + +.vm-col-view { + gap: 12px; +} + +.btn-expand { + width: 24px; + height: 24px; + border: 1px solid transparent; + background: transparent; + border-radius: var(--radius-sm); + cursor: pointer; + display: flex; + align-items: center; + justify-content: center; + color: var(--color-text-tertiary); + transition: all 0.2s ease; + opacity: 0.7; +} + +.btn-expand:hover { + background: var(--color-bg-tertiary); + color: var(--color-text-primary); + opacity: 1; +} + +.btn-expand.expanded { + background: var(--color-primary); + color: white; + opacity: 1; +} + +.vm-name { + font-size: 15px; + font-weight: 600; + color: var(--color-text-primary); +} + +.view-link { + color: var(--color-primary); + text-decoration: none; + font-size: 14px; + font-weight: 500; + transition: color 0.15s ease; +} + +.view-link:hover { + color: var(--color-primary-hover); + text-decoration: underline; +} + +.btn-actions { + width: 36px; + height: 36px; + border: 1px solid var(--color-border); + background: var(--color-bg-primary); + border-radius: var(--radius-sm); + cursor: pointer; + display: flex; + align-items: center; + justify-content: center; + color: var(--color-text-secondary); + transition: all 0.2s ease; +} + +.btn-actions:hover { + background: var(--color-bg-tertiary); + border-color: var(--color-text-tertiary); + color: var(--color-text-primary); +} + +.status-badge { + display: inline-flex; + align-items: center; + gap: 6px; + padding: 6px 12px; + border-radius: 16px; + font-size: 13px; + font-weight: 600; + text-transform: capitalize; +} + +.status-dot { + width: 8px; + height: 8px; + border-radius: 50%; + animation: pulse 2s infinite; +} + +@keyframes pulse { + 0%, 100% { opacity: 1; } + 50% { opacity: 0.5; } +} + +.status-running { + background: #dcfce7; + color: var(--color-success); +} + +.status-running .status-dot { + background: var(--color-success); +} + +.status-stopping, +.status-shutting-down { + background: #fed7aa; + color: var(--color-warning); +} + +.status-stopping .status-dot, +.status-shutting-down .status-dot { + background: var(--color-warning); +} + +.status-exited { + background: var(--color-bg-tertiary); + color: var(--color-text-tertiary); +} + +.status-exited .status-dot { + background: var(--color-text-tertiary); + animation: none; +} + +.status-stopped { + background: #fee2e2; + color: var(--color-danger); +} + +.status-stopped .status-dot { 
+ background: var(--color-danger); + animation: none; +} + +.vm-details { + padding: 24px; + background: var(--color-bg-secondary); + border-top: 1px solid var(--color-border); + display: flex; + flex-direction: column; + gap: 20px; +} + +.details-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(240px, 1fr)); + gap: 16px; +} + +.detail-item { + display: flex; + flex-direction: column; + gap: 6px; + min-width: 0; +} + +.detail-label { + font-size: 12px; + font-weight: 600; + color: var(--color-text-tertiary); + text-transform: uppercase; + letter-spacing: 0.5px; +} + +.detail-value { + font-size: 14px; + color: var(--color-text-primary); + font-weight: 500; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; + cursor: help; +} + +.gpu-chip-list { + display: flex; + flex-wrap: wrap; + gap: 6px; +} + +.gpu-chip { + font-size: 12px; + line-height: 1.4; + padding: 4px 10px; + border-radius: 999px; + border: 1px solid var(--color-border); + background: var(--color-bg-primary); + color: var(--color-text-secondary); + white-space: nowrap; +} + +.gpu-chip--all { + font-weight: 600; + color: var(--color-text-primary); + border-style: dashed; +} + +.port-mappings { + display: flex; + flex-direction: column; + gap: 12px; +} + +.port-mappings h4 { + font-size: 14px; + font-weight: 600; + color: var(--color-text-primary); + margin-bottom: 4px; +} + +.port-item { + padding: 12px 16px; + background: var(--color-bg-primary); + border: 1px solid var(--color-border); + border-radius: var(--radius-md); + display: flex; + justify-content: space-between; + align-items: center; + font-size: 14px; + font-family: 'SF Mono', 'Monaco', 'Cascadia Code', monospace; +} + +.features-section { + margin-top: 24px; +} + +.features-section h4 { + font-size: 14px; + font-weight: 600; + color: var(--color-text-primary); + margin-bottom: 8px; +} + +.features-text { + font-size: 14px; + color: var(--color-text-secondary); + font-family: 'SF Mono', 'Monaco', 'Cascadia Code', monospace; + background: var(--color-bg-primary); + border: 1px solid var(--color-border); + border-radius: var(--radius-md); + padding: 12px 16px; + display: inline-block; +} + +.detail-value-with-copy { + display: flex; + align-items: center; + gap: 8px; + flex: 1; + min-width: 0; +} + +.detail-value-with-copy .detail-value { + flex: 1; + min-width: 0; +} + +.copy-btn { + flex-shrink: 0; + padding: 4px; + background: transparent; + border: 1px solid var(--color-border); + border-radius: 4px; + cursor: pointer; + color: var(--color-text-secondary); + transition: all 0.15s ease; + display: flex; + align-items: center; + justify-content: center; +} + +.copy-btn:hover { + background: var(--color-bg-tertiary); + border-color: var(--color-primary); + color: var(--color-primary); +} + +.section-header { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 12px; +} + +.section-actions { + display: flex; + align-items: center; + gap: 8px; +} + +.copy-btn-small { + background: var(--color-bg-primary); + border: 1px solid var(--color-border); + padding: 6px 8px; + border-radius: var(--radius-sm); + cursor: pointer; + color: var(--color-text-secondary); + transition: all 0.15s ease; + display: flex; + align-items: center; + justify-content: center; + min-width: 32px; + height: 32px; +} + +.copy-btn-small:hover { + background: var(--color-bg-tertiary); + border-color: var(--color-primary); + color: var(--color-primary); +} + +.section-header h4 { + font-size: 14px; + font-weight: 600; + 
color: var(--color-text-primary); + margin: 0; +} + +.compose-section, +.user-config-section { + margin-top: 24px; +} + +.compose-content { + width: 100%; + max-width: 100%; + overflow-x: auto; +} + +.compose-content pre, +.user-config-content { + background: var(--color-bg-primary); + border: 1px solid var(--color-border); + border-radius: var(--radius-md); + padding: 16px; + font-size: 13px; + font-family: 'SF Mono', 'Monaco', 'Cascadia Code', monospace; + line-height: 1.6; + max-height: 400px; + overflow-y: auto; + margin: 0; + white-space: pre-wrap; + word-break: break-all; + color: var(--color-text-primary); + width: 100%; + box-sizing: border-box; +} + +.network-section { + display: flex; + flex-direction: column; + gap: 16px; +} + +.section-title { + font-size: 15px; + font-weight: 600; + color: var(--color-text-primary); + margin: 0; + display: flex; + align-items: center; + gap: 8px; +} + +.network-interfaces { + display: grid; + grid-template-columns: repeat(auto-fill, minmax(320px, 360px)); + gap: 12px; +} + +.network-interface-card { + background: var(--color-bg-primary); + border: 1px solid var(--color-border); + border-radius: var(--radius-md); + overflow: hidden; +} + +.interface-header { + background: var(--color-bg-tertiary); + padding: 12px 16px; + border-bottom: 1px solid var(--color-border); +} + +.interface-name { + display: flex; + align-items: center; + gap: 8px; + font-weight: 600; + color: var(--color-text-primary); + font-size: 14px; +} + +.interface-name svg { + color: var(--color-primary); + flex-shrink: 0; +} + +.interface-details { + padding: 16px; + display: flex; + flex-direction: column; + gap: 12px; +} + +.interface-detail-row { + display: flex; + justify-content: space-between; + align-items: baseline; + gap: 12px; + padding-bottom: 8px; + border-bottom: 1px solid var(--color-border-light); +} + +.interface-detail-row:last-of-type { + border-bottom: none; + padding-bottom: 0; +} + +.interface-detail-row .detail-label { + font-size: 12px; + font-weight: 600; + color: var(--color-text-tertiary); + text-transform: uppercase; + letter-spacing: 0.5px; + flex-shrink: 0; +} + +.interface-detail-row .detail-value { + font-size: 13px; + color: var(--color-text-primary); + font-family: 'SF Mono', 'Monaco', 'Cascadia Code', monospace; + word-break: break-all; + text-align: right; +} + +.interface-stats { + display: grid; + grid-template-columns: 1fr 1fr; + gap: 12px; + margin-top: 8px; + padding-top: 12px; + border-top: 1px solid var(--color-border-light); +} + +.stat-item { + display: flex; + gap: 10px; + align-items: flex-start; +} + +.stat-icon { + width: 28px; + height: 28px; + border-radius: var(--radius-sm); + display: flex; + align-items: center; + justify-content: center; + flex-shrink: 0; +} + +.stat-icon.rx { + background: #dcfce7; + color: var(--color-success); +} + +.stat-icon.tx { + background: #dbeafe; + color: var(--color-primary); +} + +.stat-content { + display: flex; + flex-direction: column; + gap: 2px; + min-width: 0; +} + +.stat-label { + font-size: 11px; + font-weight: 700; + color: var(--color-text-tertiary); + text-transform: uppercase; + letter-spacing: 0.5px; +} + +.stat-value { + font-size: 13px; + font-weight: 600; + color: var(--color-text-primary); + font-family: 'SF Mono', 'Monaco', 'Cascadia Code', monospace; +} + +.stat-errors { + font-size: 11px; + color: var(--color-danger); + font-weight: 500; +} + +.wireguard-section { + display: flex; + flex-direction: column; + gap: 12px; +} + +.wireguard-info-text { + background: 
var(--color-bg-primary); + border: 1px solid var(--color-border); + border-radius: var(--radius-md); + padding: 16px; + font-family: 'SF Mono', 'Monaco', 'Cascadia Code', monospace; + font-size: 12px; + overflow-x: auto; + line-height: 1.6; + color: var(--color-text-secondary); + margin: 0; +} + +.vm-log-tabs { + display: flex; + gap: 8px; + flex-wrap: wrap; +} + +.vm-log-button { + background: var(--color-bg-primary); + border: 1px solid var(--color-border); + padding: 8px 16px; + border-radius: var(--radius-md); + font-size: 14px; + cursor: pointer; + transition: all 0.2s ease; +} + +.vm-log-button:hover { + background: var(--color-bg-tertiary); + border-color: var(--color-text-tertiary); +} + +.dropdown { + position: relative; + display: inline-block; +} + +.dropdown-content { + display: none; + position: absolute; + right: 0; + top: calc(100% + 4px); + z-index: 300; + min-width: 200px; + background: var(--color-bg-primary); + border: 1px solid var(--color-border); + border-radius: var(--radius-md); + box-shadow: var(--shadow-xl); + overflow: hidden; +} + +.dropdown-content.show { + display: block; + animation: fadeIn 0.15s ease; +} + +@keyframes fadeIn { + from { + opacity: 0; + transform: translateY(-8px); + } + to { + opacity: 1; + transform: translateY(0); + } +} + +.dropdown-content button { + width: 100%; + padding: 12px 16px; + border: none; + background: transparent; + text-align: left; + cursor: pointer; + font-size: 14px; + font-weight: 500; + color: var(--color-text-primary); + transition: all 0.15s ease; + display: flex; + align-items: center; + gap: 12px; + border-bottom: 1px solid var(--color-border); +} + +.dropdown-content button:last-child { + border-bottom: none; +} + +.dropdown-content button:hover { + background: var(--color-bg-tertiary); + padding-left: 20px; + color: var(--color-primary); +} + +.dropdown-content button svg { + flex-shrink: 0; + opacity: 0.7; + transition: opacity 0.15s ease; +} + +.dropdown-content button:hover svg { + opacity: 1; +} + +/* System Menu Styles */ +.system-menu { + position: relative; + display: inline-block; + margin-right: 12px; +} + +.btn-icon { + background: transparent; + color: var(--color-text-secondary); + border: 1px solid var(--color-border); + padding: 8px; + border-radius: var(--radius-md); + font-size: 14px; + font-weight: 500; + cursor: pointer; + display: inline-flex; + align-items: center; + justify-content: center; + transition: all 0.15s ease; + min-width: 36px; + min-height: 36px; +} + +.btn-icon:hover { + background: var(--color-bg-tertiary); + color: var(--color-primary); + border-color: var(--color-primary); + box-shadow: var(--shadow-sm); + transform: translateY(-1px); +} + +.btn-icon:active { + transform: translateY(0); +} + +.system-dropdown { + display: none; + position: absolute; + right: 0; + top: calc(100% + 8px); + z-index: 300; + min-width: 180px; + background: var(--color-bg-primary); + border: 1px solid var(--color-border); + border-radius: var(--radius-md); + box-shadow: var(--shadow-xl); + animation: fadeIn 0.15s ease; +} + +.system-dropdown.show { + display: block; +} + +.dropdown-item { + width: 100%; + padding: 12px 16px; + border: none; + background: transparent; + text-align: left; + cursor: pointer; + font-size: 14px; + font-weight: 500; + color: var(--color-text-primary); + transition: all 0.15s ease; + display: flex; + align-items: center; + gap: 12px; + border-bottom: 1px solid var(--color-border-light); +} + +.dropdown-item:last-child { + border-bottom: none; +} + +.dropdown-item:hover { + 
background: var(--color-bg-tertiary); + padding-left: 20px; + color: var(--color-primary); +} + +.dropdown-item svg { + flex-shrink: 0; + opacity: 0.7; + transition: opacity 0.15s ease; +} + +.dropdown-item:hover svg { + opacity: 1; +} + +.message-container { + position: fixed; + top: 20px; + right: 20px; + z-index: 1001; + display: flex; + flex-direction: column; + gap: 8px; + max-width: 400px; +} + +.message { + position: relative; + padding: 12px 40px 12px 16px; + border-radius: var(--radius-md); + font-size: 13px; + box-shadow: var(--shadow-lg); + animation: slideInRight 0.3s ease; + word-wrap: break-word; +} + +@keyframes slideInRight { + from { + opacity: 0; + transform: translateX(20px); + } + to { + opacity: 1; + transform: translateX(0); + } +} + +.success-message { + background: #dcfce7; + color: #166534; + border-left: 4px solid var(--color-success); +} + +.error-message { + background: #fee2e2; + color: #991b1b; + border-left: 4px solid var(--color-danger); +} + +.close-btn { + position: absolute; + top: 8px; + right: 8px; + border: none; + background: transparent; + font-size: 18px; + cursor: pointer; + color: inherit; + opacity: 0.6; + transition: opacity 0.2s ease; + width: 20px; + height: 20px; + display: flex; + align-items: center; + justify-content: center; + border-radius: 50%; +} + +.close-btn:hover { + opacity: 1; + background: rgba(0, 0, 0, 0.1); +} + +.dialog-overlay { + position: fixed; + inset: 0; + background: rgba(0, 0, 0, 0.5); + display: flex; + align-items: center; + justify-content: center; + padding: 24px; + z-index: 1000; + backdrop-filter: blur(4px); +} + +.dialog { + background: var(--color-bg-primary); + border-radius: var(--radius-lg); + max-width: 960px; + width: 100%; + max-height: 90vh; + overflow-y: auto; + padding: 32px; + box-shadow: var(--shadow-xl); + animation: dialogIn 0.2s ease; +} + +@keyframes dialogIn { + from { + opacity: 0; + transform: scale(0.95); + } + to { + opacity: 1; + transform: scale(1); + } +} + +.dialog h2 { + margin-bottom: 24px; + font-size: 24px; +} + +.dialog-footer { + display: flex; + justify-content: flex-end; + gap: 12px; + margin-top: 32px; + padding-top: 24px; + border-top: 1px solid var(--color-border); +} + +.form-grid { + display: grid; + grid-template-columns: repeat(auto-fill, minmax(280px, 1fr)); + gap: 20px; + margin-bottom: 24px; +} + +.form-group { + display: flex; + flex-direction: column; + gap: 8px; +} + +.form-group.full-width { + grid-column: 1 / -1; +} + +.form-group label { + font-weight: 600; + color: var(--color-text-primary); + font-size: 14px; +} + +.form-group input, +.form-group select, +.form-group textarea { + width: 100%; + padding: 10px 12px; + border: 1px solid var(--color-border); + border-radius: var(--radius-md); + font-size: 14px; + outline: none; + background: var(--color-bg-primary); + color: var(--color-text-primary); + transition: all 0.2s ease; +} + +.form-group input:focus, +.form-group select:focus, +.form-group textarea:focus { + border-color: var(--color-primary); + box-shadow: 0 0 0 3px rgba(37, 99, 235, 0.1); +} + +.form-group textarea { + min-height: 120px; + resize: vertical; + font-family: 'SF Mono', 'Monaco', 'Cascadia Code', monospace; +} + +.hint { + color: var(--color-text-tertiary); + font-size: 13px; +} + +.help-icon { + display: inline-flex; + justify-content: center; + align-items: center; + width: 18px; + height: 18px; + font-size: 12px; + font-weight: 700; + color: white; + background: var(--color-text-tertiary); + border-radius: 50%; + cursor: help; +} + 
+.action-btn { + background: var(--color-primary); + color: white; + border: none; + padding: 10px 18px; + border-radius: var(--radius-md); + font-size: 14px; + font-weight: 600; + cursor: pointer; + display: inline-flex; + align-items: center; + gap: 8px; + transition: all 0.2s ease; +} + +.action-btn:hover { + background: var(--color-primary-hover); + transform: translateY(-1px); + box-shadow: var(--shadow-md); +} + +.action-btn:disabled { + background: var(--color-text-tertiary); + cursor: not-allowed; + transform: none; + opacity: 0.6; +} + +.action-btn.primary { + background: var(--color-success); +} + +.action-btn.primary:hover { + background: #15803d; +} + +.action-btn.danger { + background: var(--color-danger); +} + +.action-btn.danger:hover { + background: #b91c1c; +} + +.inline-field { + display: flex; + align-items: center; + gap: 12px; +} + +.file-input-row { + display: flex; + flex-direction: column; + gap: 12px; +} + +.file-input-actions { + display: flex; + align-items: center; + gap: 12px; +} + +.file-input-actions input[type="file"] { + display: none; +} + +.help-text { + color: var(--color-text-tertiary); + font-size: 14px; +} + +.feature-checkboxes, +.checkbox-grid { + display: grid; + grid-template-columns: repeat(3, 1fr); + gap: 12px; + row-gap: 16px; +} + +.gpu-checkbox-grid { + display: grid; + grid-template-columns: 1fr; + gap: 12px; + row-gap: 16px; +} + +.feature-checkboxes label, +.checkbox-grid label, +.gpu-checkbox-grid label { + display: flex; + align-items: center; + gap: 8px; + cursor: pointer; + white-space: nowrap; +} + +.feature-checkboxes input[type="checkbox"], +.checkbox-grid input[type="checkbox"], +.gpu-checkbox-grid input[type="checkbox"] { + flex-shrink: 0; + cursor: pointer; + margin: 0; + width: 16px; + height: 16px; +} + +.encrypted-env-editor, +.port-mapping-editor, +.gpu-config-editor { + display: flex; + flex-direction: column; + gap: 16px; +} + +.port-mapping-editor > button { + width: fit-content; + min-width: 120px; +} + +.gpu-section-label { + font-size: 14px; + font-weight: 600; + color: var(--color-text-primary); + margin-bottom: -8px; +} + +.gpu-config-list-header { + font-size: 13px; + font-weight: 500; + color: var(--color-text-secondary); + margin-bottom: 8px; +} + +.gpu-config-hint { + font-size: 13px; + color: var(--color-text-secondary); + padding: 12px 16px; + background: var(--color-bg-secondary); + border-radius: var(--radius-md); + border-left: 3px solid var(--color-primary); +} + +.env-editor-header { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 12px; +} + +.env-editor-title { + font-size: 14px; + font-weight: 600; + color: var(--color-text-primary); + margin: 0; +} + +.env-mode-toggle { + display: flex; + gap: 0; + border: 1px solid var(--color-border); + border-radius: var(--radius-sm); + overflow: hidden; +} + +.mode-btn { + padding: 6px 16px; + border: none; + background: var(--color-bg-primary); + color: var(--color-text-secondary); + font-size: 13px; + font-weight: 500; + cursor: pointer; + transition: all 0.2s ease; + border-right: 1px solid var(--color-border); +} + +.mode-btn:last-child { + border-right: none; +} + +.mode-btn:hover { + background: var(--color-bg-tertiary); +} + +.mode-btn.active { + background: var(--color-primary); + color: white; +} + +.env-form-mode { + display: flex; + flex-direction: column; + gap: 12px; +} + +.env-text-mode { + display: flex; + flex-direction: column; + gap: 8px; +} + +.env-text-mode textarea { + width: 100%; + padding: 12px; + 
border: 1px solid var(--color-border); + border-radius: var(--radius-md); + font-size: 14px; + font-family: 'SF Mono', 'Monaco', 'Cascadia Code', monospace; + resize: vertical; + line-height: 1.5; +} + +.env-text-mode textarea:focus { + outline: none; + border-color: var(--color-primary); + box-shadow: 0 0 0 3px rgba(37, 99, 235, 0.1); +} + +.env-editor-empty { + padding: 16px; + background: var(--color-bg-tertiary); + border: 1px dashed var(--color-border); + border-radius: var(--radius-md); +} + +.env-editor-empty .hint { + margin: 0; + color: var(--color-text-secondary); + font-size: 14px; + line-height: 1.5; +} + +.encrypted-env-row, +.port-row { + display: grid; + gap: 12px; + align-items: center; +} + +.encrypted-env-row { + grid-template-columns: 200px 1fr auto; +} + +.encrypted-env-actions { + display: flex; + align-items: center; + gap: 12px; +} + +.encrypted-env-actions input[type="file"] { + display: none; +} + +.port-row { + grid-template-columns: 90px 100px 120px 120px 100px; +} + +.gpu-config-items { + max-height: 240px; + overflow-y: auto; + border: 1px solid var(--color-border); + border-radius: var(--radius-md); + padding: 16px; + background: var(--color-bg-secondary); +} + +.gpu-checkbox-grid label { + font-size: 14px; +} + +.warning-text { + color: var(--color-danger); + font-weight: 600; +} + +.app-id-preview { + font-family: 'SF Mono', 'Monaco', 'Cascadia Code', monospace; + font-size: 13px; + color: var(--color-text-secondary); + background: var(--color-bg-tertiary); + padding: 8px 12px; + border-radius: var(--radius-sm); +} diff --git a/vmm/ui/src/templates/app.html b/vmm/ui/src/templates/app.html new file mode 100644 index 00000000..b55550fa --- /dev/null +++ b/vmm/ui/src/templates/app.html @@ -0,0 +1,457 @@ + + +
+ <!-- [457-line Vue 3 template; only its text nodes and bindings survive in
+      this copy of the diff. Recoverable structure of the dstack-vmm console:
+      - Header: "dstack-vmm" title, version badge "v{{ version.version }}",
+        system menu dropdown and primary action button.
+      - Toolbar: search box, "Total Instances: {{ totalVMs }}" counter,
+        pagination (page input "/ {{ maxPage || 1 }}", page-size select).
+      - VM table, columns: Name | Status | Uptime | View | Actions. Rows
+        show {{ vm.name }}, a status badge ("{{ vmStatus(vm) }}"), uptime
+        via shortUptime(vm.uptime) ('-' when stopped), view links
+        "Logs / Stderr / Board", and a per-row actions dropdown.
+      - Expandable details: VM ID {{ vm.id }}, Instance ID, App ID (each
+        with a copy button), Image, vCPUs, Memory/Swap via formatMemory,
+        Disk Size (GB), Disk Type (default "virtio-pci"), TEE
+        Enabled/Disabled, GPU chips ("All GPUs" / per-slot / "None").
+      - Port Mappings ("Local"/"Public" {{ port.protocol }}:
+        {{ port.host_port }} → {{ port.vm_port }}), Features
+        ({{ getVmFeatures(vm) }}), Network Interfaces ({{ iface.name }},
+        MAC, IP addresses, RX/TX byte and error counters), WireGuard Info
+        ({{ networkInfo[vm.id].wg_info }}).
+      - App Compose viewer ({{ vm.appCompose?.docker_compose_file }}),
+        User Config viewer ({{ vm.configuration.user_config }}), dialogs,
+        and toast messages ({{ errorMessage }}).] -->
+
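For orientation, a minimal usage sketch of the high-level API exported by the vendored signing module above (generateKeyPair / sign / verify / signMessage / openMessage / sharedKey). The module path is an assumption — the diff shows the file's contents but not a usage site; the calls mirror the module's own doc comments:

```ts
// Minimal sketch, assuming the vendored module lives at ./lib/axlsign and is
// loaded via CommonJS (path is hypothetical, not part of the patch).
import { randomBytes } from 'node:crypto';

const axlsign = require('./lib/axlsign');

// Both parties derive sign/ECDH key pairs from 32-byte CSPRNG seeds.
const alice = axlsign.generateKeyPair(new Uint8Array(randomBytes(32)));
const bob = axlsign.generateKeyPair(new Uint8Array(randomBytes(32)));

const msg = new TextEncoder().encode('hello dstack');

// Detached 64-byte signature; passing 64 random bytes as a third argument
// would switch on the randomized (crypto_sign_direct_rnd) mode instead.
const sig = axlsign.sign(alice.private, msg);
console.assert(axlsign.verify(alice.public, msg, sig));

// Attached form: signature || message, opened in one call (null on failure).
const sm = axlsign.signMessage(alice.private, msg);
console.assert(axlsign.openMessage(alice.public, sm) !== null);

// Raw X25519 shared secret — per the doc comment, feed it through a KDF
// (e.g. HKDF-SHA-256) before using it as a symmetric key.
const shared = axlsign.sharedKey(alice.private, bob.public);
console.assert(shared.length === 32);
```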
diff --git a/vmm/ui/tsconfig.json b/vmm/ui/tsconfig.json new file mode 100644 index 00000000..92257638 --- /dev/null +++ b/vmm/ui/tsconfig.json @@ -0,0 +1,18 @@ +{ + "compilerOptions": { + "target": "ES2018", + "module": "commonjs", + "moduleResolution": "node", + "resolveJsonModule": true, + "lib": ["ES2018", "DOM"], + "strict": false, + "esModuleInterop": false, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "outDir": "build/ts", + "rootDir": "src", + "allowJs": true, + "types": ["node"] + }, + "include": ["src/**/*.ts", "src/proto/**/*.js", "src/lib/**/*.js"] +} diff --git a/vmm/ui/vendor/README.md b/vmm/ui/vendor/README.md new file mode 100644 index 00000000..d2b21367 --- /dev/null +++ b/vmm/ui/vendor/README.md @@ -0,0 +1,4 @@ +Place `vue.global.prod.js` from a matching Vue 3 release in this directory. + +The build script inlines the file when present; otherwise the generated +HTML keeps the external CDN reference. diff --git a/vmm/vmm.toml b/vmm/vmm.toml index 4058ed74..f4d504e0 100644 --- a/vmm/vmm.toml +++ b/vmm/vmm.toml @@ -11,6 +11,8 @@ log_level = "debug" address = "unix:./vmm.sock" reuse = true kms_url = "http://127.0.0.1:8081" +event_buffer_size = 20 +node_name = "" [cvm] qemu_path = "" @@ -30,8 +32,9 @@ user = "" use_mrconfigid = true # QEMU flags -qemu_single_pass_add_pages = false -qemu_pic = true +#qemu_single_pass_add_pages = false +#qemu_pic = true +#qemu_version = "" qemu_pci_hole64_size = 0 qemu_hotplug_off = false
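A sketch of the inlining behavior described in vmm/ui/vendor/README.md; the build script itself is not part of this diff, so the file locations and the CDN script tag matched below are assumptions:

```ts
// Inline the vendored Vue build into the generated HTML when present;
// otherwise leave the external CDN reference untouched.
import * as fs from 'node:fs';
import * as path from 'node:path';

const VENDORED = path.join(__dirname, 'vendor', 'vue.global.prod.js');
// Hypothetical tag the generated HTML would otherwise keep:
const CDN_TAG = /<script\s+src="[^"]*vue\.global\.prod\.js"><\/script>/;

export function inlineVue(html: string): string {
  if (!fs.existsSync(VENDORED)) {
    return html; // no vendored copy: keep the CDN reference
  }
  const js = fs.readFileSync(VENDORED, 'utf8');
  return html.replace(CDN_TAG, `<script>\n${js}\n</script>`);
}
```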