diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
new file mode 100644
index 0000000000..9704601ae9
--- /dev/null
+++ b/.devcontainer/Dockerfile
@@ -0,0 +1,11 @@
+FROM golang:1.24
+
+RUN apt-get update && apt-get install -y sudo
+RUN curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash - && \
+ apt-get install -y nodejs
+
+ADD scripts /scripts
+RUN bash /scripts/install.sh
+RUN bash /scripts/godeps.sh
+
+ENV ENCORE_GOROOT=/encore-release/encore-go
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 0000000000..e5a5e6c09d
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,10 @@
+{
+ "build": {"dockerfile": "Dockerfile"},
+ "containerEnv": {
+ "ENCORE_DAEMON_DEV": "1",
+ "ENCORE_RUNTIMES_PATH": "${containerWorkspaceFolder}/runtimes"
+ },
+ "extensions": ["golang.go"],
+ "postCreateCommand": "bash /scripts/prepare.sh",
+ "forwardPorts": [4000, 9400]
+}
diff --git a/.devcontainer/scripts/godeps.sh b/.devcontainer/scripts/godeps.sh
new file mode 100644
index 0000000000..f887beb36a
--- /dev/null
+++ b/.devcontainer/scripts/godeps.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+set -ex
+
+go install github.com/uudashr/gopkgs/v2/cmd/gopkgs@latest
+go install github.com/ramya-rao-a/go-outline@latest
+go install github.com/cweill/gotests/gotests@latest
+go install github.com/fatih/gomodifytags@latest
+go install github.com/josharian/impl@latest
+go install github.com/haya14busa/goplay/cmd/goplay@latest
+go install github.com/go-delve/delve/cmd/dlv@latest
+go install honnef.co/go/tools/cmd/staticcheck@master
+go install golang.org/x/tools/gopls@latest
+
+GOBIN=/tmp/ go install github.com/go-delve/delve/cmd/dlv@master
+mv /tmp/dlv $GOPATH/bin/dlv-dap
diff --git a/.devcontainer/scripts/install.sh b/.devcontainer/scripts/install.sh
new file mode 100644
index 0000000000..b9b9fcf314
--- /dev/null
+++ b/.devcontainer/scripts/install.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+set -e
+
+target="$(go env GOOS)_$(go env GOARCH)"
+
+encore_uri=$(curl -sSf -N "https://encore.dev/api/releases?target=${target}&show=url")
+if [ ! "$encore_uri" ]; then
+ echo "Error: Unable to determine latest Encore release." 1>&2
+ exit 1
+fi
+
+encore_install="/encore-release"
+bin_dir="$encore_install/bin"
+exe="$bin_dir/encore"
+tar="$encore_install/encore.tar.gz"
+
+if [ ! -d "$bin_dir" ]; then
+ mkdir -p "$bin_dir"
+fi
+
+curl --fail --location --progress-bar --output "$tar" "$encore_uri"
+cd "$encore_install"
+tar -C "$encore_install" -xzf "$tar"
+chmod +x "$bin_dir"/*
+rm "$tar"
+
+"$exe" version
+
+echo "Encore was installed successfully to $exe"
+if command -v encore >/dev/null; then
+ echo "Run 'encore --help' to get started"
+else
+ case $SHELL in
+ /bin/zsh) shell_profile=".zshrc" ;;
+ *) shell_profile=".bash_profile" ;;
+ esac
+ echo "Manually add the directory to your \$HOME/$shell_profile (or similar)"
+ echo " export ENCORE_INSTALL=\"$encore_install\""
+ echo " export PATH=\"\$ENCORE_INSTALL/bin:\$PATH\""
+ echo "Run '$exe --help' to get started"
+fi
\ No newline at end of file
diff --git a/.devcontainer/scripts/prepare.sh b/.devcontainer/scripts/prepare.sh
new file mode 100644
index 0000000000..dce83401f9
--- /dev/null
+++ b/.devcontainer/scripts/prepare.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+set -e
+set -x
+
+go mod download
diff --git a/.github/dockerimg/Dockerfile b/.github/dockerimg/Dockerfile
new file mode 100644
index 0000000000..34ccf199e6
--- /dev/null
+++ b/.github/dockerimg/Dockerfile
@@ -0,0 +1,17 @@
+# syntax=docker/dockerfile:1.4
+FROM --platform=$TARGETPLATFORM ubuntu:22.04 AS build
+ARG TARGETPLATFORM
+ARG RELEASE_VERSION
+RUN mkdir /encore
+ADD rename-binary-if-needed.bash rename-binary-if-needed.bash
+ADD artifacts /artifacts
+RUN /bin/bash -c 'SRC=encore-$(echo $TARGETPLATFORM | tr '/' '_'); tar -C /encore -xzf /artifacts/$SRC.tar.gz'
+RUN /bin/bash rename-binary-if-needed.bash
+
+FROM --platform=$TARGETPLATFORM ubuntu:22.04
+RUN apt-get update && apt-get install -y -f ca-certificates
+ENV PATH="/encore/bin:${PATH}"
+WORKDIR /src
+ADD encore-entrypoint.bash /bin/encore-entrypoint.bash
+ENTRYPOINT ["/bin/encore-entrypoint.bash"]
+COPY --from=build /encore /encore
diff --git a/.github/dockerimg/encore-entrypoint.bash b/.github/dockerimg/encore-entrypoint.bash
new file mode 100755
index 0000000000..00ede15a7d
--- /dev/null
+++ b/.github/dockerimg/encore-entrypoint.bash
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+set -eo pipefail
+
+# If the ENCORE_AUTHKEY environment variable is set, log in with it.
+if [ -n "$ENCORE_AUTHKEY" ]; then
+ echo "Logging in to Encore using provided auth key..."
+ encore auth login --auth-key "$ENCORE_AUTHKEY"
+fi
+
+# Run the encore command.
+encore "$@"
diff --git a/.github/dockerimg/rename-binary-if-needed.bash b/.github/dockerimg/rename-binary-if-needed.bash
new file mode 100755
index 0000000000..8dfca402ce
--- /dev/null
+++ b/.github/dockerimg/rename-binary-if-needed.bash
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+set -eo pipefail
+
+# Check if `encore-nightly`, `encore-beta` or `encore-develop` is present, and if one of them is, rename it to `encore`.
+for binary in encore-nightly encore-beta encore-develop; do
+ if [ -f "/encore/bin/$binary" ]; then
+ echo "Renaming $binary to encore..."
+ mv /encore/bin/$binary /encore/bin/encore
+ fi
+done
+
+# Sanity check that /encore/bin/encore exists.
+if [ ! -f "/encore/bin/encore" ]; then
+ echo "ERROR: /encore/bin/encore does not exist. Did you mount the Encore binary directory to /encore/bin?"
+ exit 1
+fi
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000000..b382785bd0
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,274 @@
+name: CI
+
+on:
+ push:
+ branches:
+ - main
+ pull_request:
+ branches:
+ - main
+ schedule:
+ - cron: "30 2 * * *" # Every night at 2:30am UTC (if you change this schedule, also change the if statement in the test steps)
+
+jobs:
+ build:
+ name: "Build"
+ runs-on: ubuntu-24.04
+
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ path: encr.dev
+
+ - name: Set up Node
+ uses: actions/setup-node@v3
+
+ - name: Set up Go
+ uses: actions/setup-go@v4
+ with:
+ go-version-file: "encr.dev/go.mod"
+ check-latest: true
+ cache-dependency-path: "encr.dev/go.sum"
+
+ - name: Build
+ run: cd encr.dev && go build ./...
+
+ - name: Build for Windows
+ run: cd encr.dev && go build ./...
+ env:
+ GOOS: windows
+
+ test:
+ name: "Test"
+ runs-on: ubuntu-24.04
+
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ path: encr.dev
+
+ - name: Set up Node
+ uses: actions/setup-node@v3
+
+ - name: Set up Go
+ uses: actions/setup-go@v4
+ with:
+ go-version-file: "encr.dev/go.mod"
+ check-latest: true
+ cache-dependency-path: "encr.dev/go.sum"
+
+ - name: Set up Rust
+ uses: dtolnay/rust-toolchain@stable
+ with:
+ toolchain: stable
+ - name: Install Protoc
+ uses: arduino/setup-protoc@a8b67ba40b37d35169e222f3bb352603327985b6 # v2
+ - name: Set up cargo cache
+ uses: actions/cache@v3
+ continue-on-error: false
+ with:
+ path: |
+ ~/.cargo/bin/
+ ~/.cargo/registry/index/
+ ~/.cargo/registry/cache/
+ ~/.cargo/git/db/
+ target/
+ key: ${{ runner.os }}-cargo
+
+ - name: Install encore-go
+ run: |
+ URL=$(curl -s https://api.github.com/repos/encoredev/go/releases/latest | grep "browser_download_url.*linux_x86-64.tar.gz" | cut -d : -f 2,3 | tr -d \" | tr -d '[:space:]')
+ curl --fail -L -o encore-go.tar.gz $URL && tar -C . -xzf ./encore-go.tar.gz
+
+ - name: Install tsparser
+ run: cargo install --path encr.dev/tsparser --force --debug
+
+ # If we're not running on a schedule, we only want to run tests on changed code
+ - name: Run tests on changed code on the CLI
+ run: cd encr.dev && go test -short -tags=dev_build 2>&1 ./...
+ if: github.event.schedule != '30 2 * * *'
+ env:
+ ENCORE_GOROOT: ${{ github.workspace }}/encore-go
+ ENCORE_RUNTIMES_PATH: ${{ github.workspace }}/encr.dev/runtimes
+
+ - name: Run tests on changed runtime code
+ run: cd encr.dev/runtimes/go && go test -short -tags=dev_build ./...
+ if: github.event.schedule != '30 2 * * *'
+
+ # Each night we want to run all tests multiple times to catch any flaky tests
+ # We will shuffle the order in which tests are run and run them 5 times looking
+ # for failures. We will also fail fast so that we don't waste time running tests
+ # that are already failing.
+ - name: Run all tests multiple times on the CLI
+ run: cd encr.dev && go test -v --count=5 -failfast -shuffle=on -timeout=30m -tags=dev_build ./...
+ if: github.event.schedule == '30 2 * * *'
+ env:
+ ENCORE_GOROOT: ${{ github.workspace }}/encore-go
+ ENCORE_RUNTIMES_PATH: ${{ github.workspace }}/encr.dev/runtimes
+
+ - name: Run all tests multiple times on the runtime
+ run: cd encr.dev/runtimes/go && go test -v --count=5 -failfast -shuffle=on -timeout=30m -tags=dev_build ./...
+ if: github.event.schedule == '30 2 * * *'
+
+ - name: Report Nightly Failure
+ uses: ravsamhq/notify-slack-action@bca2d7f5660b833a27bda4f6b8bef389ebfefd25
+ if: ${{ failure() && github.event.schedule == '30 2 * * *' }}
+ with:
+ status: ${{ job.status }} # required
+ notification_title: "{workflow} has {status_message}"
+ message_format: "{emoji} *{workflow}* {status_message} in <{repo_url}|{repo}>"
+ footer: "Linked Repo <{repo_url}|{repo}> | <{workflow_url}|View Workflow>"
+ env:
+ SLACK_WEBHOOK_URL: ${{ secrets.SLACK_ALERT_WEBHOOK_URL }} # required
+
+ test-e2e:
+ name: "Test e2e"
+ runs-on: ubuntu-24.04
+
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ path: encr.dev
+
+ - name: Set up Node
+ uses: actions/setup-node@v3
+
+ - name: Set up Go
+ uses: actions/setup-go@v4
+ with:
+ go-version-file: "encr.dev/go.mod"
+ check-latest: true
+ cache-dependency-path: "encr.dev/go.sum"
+
+ - name: Set up Rust
+ uses: dtolnay/rust-toolchain@stable
+ with:
+ toolchain: stable
+ - name: Install Protoc
+ uses: arduino/setup-protoc@a8b67ba40b37d35169e222f3bb352603327985b6 # v2
+ - name: Set up cargo cache
+ uses: actions/cache@v3
+ continue-on-error: false
+ with:
+ path: |
+ ~/.cargo/bin/
+ ~/.cargo/registry/index/
+ ~/.cargo/registry/cache/
+ ~/.cargo/git/db/
+ target/
+ key: ${{ runner.os }}-cargo
+
+ - name: Install encore-go
+ run: |
+ URL=$(curl -s https://api.github.com/repos/encoredev/go/releases/latest | grep "browser_download_url.*linux_x86-64.tar.gz" | cut -d : -f 2,3 | tr -d \" | tr -d '[:space:]')
+ curl --fail -L -o encore-go.tar.gz $URL && tar -C . -xzf ./encore-go.tar.gz
+
+ - name: Install tsparser
+ run: cargo install --path encr.dev/tsparser --force --debug
+
+ - name: Install tsbundler
+ run: cd encr.dev && go install ./cli/cmd/tsbundler-encore
+
+ - name: Build jsruntime
+ run: cd encr.dev && go run ./pkg/encorebuild/cmd/build-local-binary encore-runtime.node
+
+ # If we're not running on a schedule, we only want to run tests on changed code
+ - name: Run tests on changed code on the CLI
+ run: cd encr.dev && go test -short -tags=e2e 2>&1 ./e2e-tests
+ if: github.event.schedule != '30 2 * * *'
+ env:
+ ENCORE_GOROOT: ${{ github.workspace }}/encore-go
+ ENCORE_RUNTIMES_PATH: ${{ github.workspace }}/encr.dev/runtimes
+
+ # Each night we want to run all tests multiple times to catch any flaky tests
+ # We will shuffle the order in which tests are run and run them 5 times looking
+ # for failures. We will also fail fast so that we don't waste time running tests
+ # that are already failing.
+ - name: Run all tests multiple times on the CLI
+ run: cd encr.dev && go test -v --count=5 -failfast -shuffle=on -timeout=30m -tags=e2e ./e2e-tests
+ if: github.event.schedule == '30 2 * * *'
+ env:
+ ENCORE_GOROOT: ${{ github.workspace }}/encore-go
+ ENCORE_RUNTIMES_PATH: ${{ github.workspace }}/encr.dev/runtimes
+
+ - name: Report Nightly Failure
+ uses: ravsamhq/notify-slack-action@bca2d7f5660b833a27bda4f6b8bef389ebfefd25
+ if: ${{ failure() && github.event.schedule == '30 2 * * *' }}
+ with:
+ status: ${{ job.status }} # required
+ notification_title: "{workflow} has {status_message}"
+ message_format: "{emoji} *{workflow}* {status_message} in <{repo_url}|{repo}>"
+ footer: "Linked Repo <{repo_url}|{repo}> | <{workflow_url}|View Workflow>"
+ env:
+ SLACK_WEBHOOK_URL: ${{ secrets.SLACK_ALERT_WEBHOOK_URL }} # required
+
+ # Run static analysis on the PR
+ static-analysis:
+ name: "Static Analysis"
+ # We're using BuildJet for this, as it's very slow on GitHub's own runners
+ runs-on: buildjet-4vcpu-ubuntu-2204
+
+ # Skip any PR created by dependabot to avoid permission issues:
+ if: (github.actor != 'dependabot[bot]')
+
+ permissions:
+ checks: write
+ contents: read
+ pull-requests: write
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Install jq
+ uses: dcarbone/install-jq-action@91d8da7268538e8a0ae0c8b72af44f1763228455
+
+ - name: Install semgrep
+ run: |
+ python3 -m pip install semgrep
+ python3 -m pip install --upgrade requests
+
+ - name: Set up Go
+ uses: actions/setup-go@v4
+ with:
+ go-version-file: "go.mod"
+ cache: false
+
+ - name: Install ci tools
+ run: |
+ go install honnef.co/go/tools/cmd/staticcheck@master
+ go install github.com/kisielk/errcheck@latest
+ go install github.com/gordonklaus/ineffassign@latest
+
+ rust_core:
+ name: "Test core runtime"
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout codebase
+ uses: actions/checkout@v4
+ - name: Set up Rust
+ uses: dtolnay/rust-toolchain@stable
+ with:
+ toolchain: stable
+ components: rustfmt,clippy
+ - name: Install Protoc
+ uses: arduino/setup-protoc@a8b67ba40b37d35169e222f3bb352603327985b6 # v2
+ - name: Set up cargo cache
+ uses: actions/cache@v3
+ continue-on-error: false
+ with:
+ path: |
+ ~/.cargo/bin/
+ ~/.cargo/registry/index/
+ ~/.cargo/registry/cache/
+ ~/.cargo/git/db/
+ target/
+ key: ${{ runner.os }}-cargo
+ - uses: taiki-e/install-action@nextest
+ - name: Run test
+ run: cargo nextest run
+ env:
+ CARGO_TERM_COLOR: always
+ - name: Run rustfmt
+ run: cargo fmt --all --check
+ - name: Run clippy
+ run: cargo clippy --all-targets --all-features -- -D warnings
diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
deleted file mode 100644
index c94b3bd520..0000000000
--- a/.github/workflows/go.yml
+++ /dev/null
@@ -1,55 +0,0 @@
-name: Go
-
-on:
- push:
- branches: [ main ]
- pull_request:
- branches: [ main ]
-
-jobs:
-
- build:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
- with:
- path: encr.dev
-
- - name: Set up Node
- uses: actions/setup-node@v2.1.5
-
- - name: Set up Go
- uses: actions/setup-go@v2
- with:
- go-version: 1.16
-
- - id: go-cache-paths
- run: |
- echo "::set-output name=go-build::$(go env GOCACHE)"
- echo "::set-output name=go-mod::$(go env GOMODCACHE)"
-
- - name: Install encore-go
- run: curl -o encore-go.tar.gz https://d2f391esomvqpi.cloudfront.net/encore-go-0.9.6-linux_amd64.tar.gz && tar -C . -xzf ./encore-go.tar.gz
-
- - name: Go Build Cache
- uses: actions/cache@v2
- with:
- path: ${{ steps.go-cache-paths.outputs.go-build }}
- key: ${{ runner.os }}-go-build-${{ hashFiles('**/go.sum') }}
-
- - name: Go Mod Cache
- uses: actions/cache@v2
- with:
- path: ${{ steps.go-cache-paths.outputs.go-mod }}
- key: ${{ runner.os }}-go-mod-${{ hashFiles('**/go.sum') }}
-
- - name: Build dashapp
- run: cd encr.dev/cli/daemon/dash/dashapp && npm install && npm run build
-
- - name: Build
- run: cd encr.dev && go build ./...
-
- - name: Test
- run: cd encr.dev && go test -short ./...
- env:
- ENCORE_GOROOT: ${{ github.workspace }}/encore-go
diff --git a/.github/workflows/makefile b/.github/workflows/makefile
new file mode 100644
index 0000000000..da16fb58b7
--- /dev/null
+++ b/.github/workflows/makefile
@@ -0,0 +1,59 @@
+# This makefile is used in conjunction with the .reviewdog.yml file in the root of the repo
+.PHONY: list-modules go-vet staticcheck errcheck ineffassign semgrep go-fmt
+
+# Automatically gather all information
+ALL_SRC := $(shell find ../../ -name "*.go")
+ALL_MODS = $(shell find ../../ -name go.mod)
+MOD_DIRS = $(sort $(realpath $(dir $(ALL_MODS))))
+REPO_DIR := $(realpath ../../)
+SEMGREP_DIR := "$(REPO_DIR)/tools/semgrep-rules"
+
+# List modules reports all found Go modules within the repository
+list-modules:
+ @echo $(MOD_DIRS)
+
+# Function to run a command in each Go module with appropriate build tags
+#
+# REL_DIR is the relative path to the file from the repository root.
+# It is computed by removing the REPO_DIR prefix from the $dir variable,
+# then stripping the leading "/" to make it relative,
+# and finally escaping the slashes so we can use it in sed.
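+#
+# For example (hypothetical paths): with REPO_DIR=/work/encore and
+# dir=/work/encore/tsparser, REL_DIR becomes "tsparser\/", so a finding
+# reported as "./foo.go" is rewritten to "tsparser/foo.go".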
+define run_for_each_module
+ @for dir in $(MOD_DIRS); do \
+ TAGS=""; \
+ if [ "$$dir" != "$(REPO_DIR)" ]; then \
+ TAGS="-tags encore,encore_internal,encore_app"; \
+ fi; \
+ REL_DIR=$$(echo "$${dir#$(REPO_DIR)}/" | sed 's/^\///' | sed 's/\//\\\//g'); \
+ (cd "$$dir" && $(1) $$TAGS $(2) | sed "s/^\.\//$$REL_DIR/"); \
+ done;
+endef
+
+# Run Go vet
+go-vet: $(ALL_SRC)
+ # The sed statements are:
+ #
+ # 1. Remove any lines starting with "#" (go vet uses these for each package)
+ # 2. Remove any "vet: " prefix from the output (sometimes we get this, sometimes we don't)
+ # 3. Remove any "./" prefix from the output (we'll get this for files which exist directly in the module root folder - this is done so we don't double up next)
+ # 4. Add a "./" prefix to the output (this is so the sed within the run_for_each_module function can add the module path to each line)
+ $(call run_for_each_module,go vet,./... 2>&1 | sed '/^#/d' | sed 's/^vet: //' | sed 's/^\.\///' | sed "s/^/\.\//")
+
+# Run staticcheck
+staticcheck: $(ALL_SRC)
+ $(call run_for_each_module,staticcheck -tests=false -f=json,./... | jq -f "$(REPO_DIR)/.github/workflows/staticcheck-to-rdjsonl.jq" -c)
+
+# Run errcheck
+errcheck: $(ALL_SRC)
+ $(call run_for_each_module,errcheck -abspath,./...)
+
+# Run ineffassign
+ineffassign: $(ALL_SRC)
+ $(call run_for_each_module,ineffassign,./... 2>&1)
+
+semgrep: $(ALL_SRC)
+ @cd $(REPO_DIR) && semgrep scan --quiet --config=auto --config=$(SEMGREP_DIR) --json | jq -f "$(REPO_DIR)/.github/workflows/semgrep-to-rdjson.jq" -c
+
+go-fmt: $(ALL_SRC)
+ @cd $(REPO_DIR) && gofmt -s -d . || exit 0
diff --git a/.github/workflows/release-2.yml b/.github/workflows/release-2.yml
new file mode 100644
index 0000000000..a56791f8ba
--- /dev/null
+++ b/.github/workflows/release-2.yml
@@ -0,0 +1,119 @@
+name: Release (2.0)
+
+on:
+ workflow_dispatch:
+ inputs:
+ version:
+ description: 'Version to build ("v1.2.3", "v1.2.3-nightly.20231231", "v1.2.3-beta.1" or "v0.0.0-develop+[commitHash]")'
+ type: string
+ required: true
+
+jobs:
+ release:
+ name: "Run Release Script"
+ runs-on: self-hosted
+ env:
+ GOROOT: /usr/local/go-1.21.4
+ RUSTUP_HOME: /usr/local/rust/rustup
+
+ steps:
+ - name: Checkout the repo
+ uses: actions/checkout@v4
+ with:
+ path: encr.dev
+
+ - name: Trigger release script
+ env:
+ NPM_PUBLISH_TOKEN: ${{ secrets.NPM_PUBLISH_TOKEN }}
+ run: |
+ cd ${{ github.workspace }}/encr.dev
+ go run ./pkg/encorebuild/cmd/make-release/ -dst "${{ github.workspace }}/build" -v "${{ github.event.inputs.version }}" -publish-npm=true
+
+ - name: Publish artifact (darwin_amd64)
+ uses: actions/upload-artifact@v3
+ with:
+ name: encore-${{ github.event.inputs.version }}-darwin_amd64
+ path: ${{ github.workspace }}/build/artifacts/encore-${{ github.event.inputs.version }}-darwin_amd64.tar.gz
+
+ - name: Publish artifact (darwin_arm64)
+ uses: actions/upload-artifact@v3
+ with:
+ name: encore-${{ github.event.inputs.version }}-darwin_arm64
+ path: ${{ github.workspace }}/build/artifacts/encore-${{ github.event.inputs.version }}-darwin_arm64.tar.gz
+
+ - name: Publish artifact (linux_amd64)
+ uses: actions/upload-artifact@v3
+ with:
+ name: encore-${{ github.event.inputs.version }}-linux_amd64
+ path: ${{ github.workspace }}/build/artifacts/encore-${{ github.event.inputs.version }}-linux_amd64.tar.gz
+
+ - name: Publish artifact (linux_arm64)
+ uses: actions/upload-artifact@v3
+ with:
+ name: encore-${{ github.event.inputs.version }}-linux_arm64
+ path: ${{ github.workspace }}/build/artifacts/encore-${{ github.event.inputs.version }}-linux_arm64.tar.gz
+
+ - name: Publish artifact (windows_amd64)
+ uses: actions/upload-artifact@v3
+ with:
+ name: encore-${{ github.event.inputs.version }}-windows_amd64
+ path: ${{ github.workspace }}/build/artifacts/encore-${{ github.event.inputs.version }}-windows_amd64.tar.gz
+
+ - name: Setup Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Login to Docker Registry
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+ - name: Copy linux artifacts to docker context folder
+ run: |
+ mkdir -p ${{ github.workspace }}/encr.dev/.github/dockerimg/artifacts
+ cp ${{ github.workspace }}/build/artifacts/encore-${{ github.event.inputs.version }}-linux_amd64.tar.gz ${{ github.workspace }}/encr.dev/.github/dockerimg/artifacts/encore-linux_amd64.tar.gz
+ cp ${{ github.workspace }}/build/artifacts/encore-${{ github.event.inputs.version }}-linux_arm64.tar.gz ${{ github.workspace }}/encr.dev/.github/dockerimg/artifacts/encore-linux_arm64.tar.gz
+
+ - name: Create metadata (tags, labels) for Docker image
+ id: docker-meta
+ uses: docker/metadata-action@v5
+ with:
+ images: encoredotdev/encore
+ labels: |
+ org.opencontainers.image.title=Encore
+ org.opencontainers.image.vendor=encore.dev
+ org.opencontainers.image.authors=support@encore.dev
+ org.opencontainers.image.description=Encore is the end-to-end Backend Development Platform that lets you escape cloud complexity.
+ tags: |
+ type=raw,value=latest,enable=${{ !contains(github.event.inputs.version, '-') }}
+ type=semver,pattern={{version}},value=${{ github.event.inputs.version }}
+ type=sha
+ type=schedule,pattern=nightly,enable=${{ contains(github.event.inputs.version, '-nightly.') }}
+ type=semver,pattern={{major}}.{{minor}},value=${{ github.event.inputs.version }},enable=${{ !contains(github.event.inputs.version, '-') }}
+ type=semver,pattern={{major}},value=${{ github.event.inputs.version }},enable=${{ !contains(github.event.inputs.version, '-') }}
+
+ - name: Build and push docker images
+ uses: docker/build-push-action@v4
+ with:
+ context: encr.dev/.github/dockerimg
+ platforms: linux/amd64,linux/arm64
+ push: true
+ tags: ${{ steps.docker-meta.outputs.tags }}
+ labels: ${{ steps.docker-meta.outputs.labels }}
+ cache-from: type=inline
+ cache-to: type=inline
+ build-args: |
+ RELEASE_VERSION=${{ github.event.inputs.version }}
+
+ notify_release_success:
+ name: "Notify release system of successful build"
+ runs-on: self-hosted
+ needs:
+ - release
+ steps:
+ - name: Webhook
+ uses: distributhor/workflow-webhook@f5a294e144d6ef44cfac4d3d5e20b613bcee0d4b # v3.0.7
+ env:
+ webhook_type: "json"
+ webhook_url: ${{ secrets.RELEASE_WEBHOOK }}
+ data: '{ "version": "${{ github.event.inputs.version }}", "run_id": "${{ github.run_id }}" }'
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 08f419b8b2..765ba0ec72 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -7,7 +7,7 @@ on:
description: 'Version to build ("1.2.3")'
required: true
encorego_version:
- description: 'Encore-Go version to use ("1.2.3")'
+ description: 'Encore-Go version to use ("encore-go1.17.7")'
required: true
jobs:
@@ -15,62 +15,142 @@ jobs:
strategy:
matrix:
include:
- - builder: ubuntu-latest
+ - builder: ubuntu-24.04
goos: linux
goarch: amd64
- - builder: macos-latest
+ release_key: linux_x86-64
+ - builder: ubuntu-24.04
+ goos: linux
+ goarch: arm64
+ release_key: linux_arm64
+ - builder: macos-11
goos: darwin
goarch: amd64
- - builder: macos-latest
+ release_key: macos_x86-64
+ - builder: macos-11
goos: darwin
goarch: arm64
+ release_key: macos_arm64
- builder: windows-latest
goos: windows
goarch: amd64
-
+ release_key: windows_x86-64
+
runs-on: ${{ matrix.builder }}
steps:
- - name: Check out repo
- uses: actions/checkout@v2
- with:
- path: encr.dev
-
- - name: Set up Go
- uses: actions/setup-go@v2
- with:
- go-version: 1.16
-
- - id: go-cache-paths
- run: |
- echo "::set-output name=go-mod::$(go env GOMODCACHE)"
-
- - name: Install encore-go
- run: curl -o encore-go.tar.gz https://d2f391esomvqpi.cloudfront.net/encore-go-${{ github.event.inputs.encorego_version }}-${{ matrix.goos }}_${{ matrix.goarch }}.tar.gz && tar -C ${{ github.workspace }} -xzf ./encore-go.tar.gz
-
- - name: Go Mod Cache
- uses: actions/cache@v2
- with:
- path: ${{ steps.go-cache-paths.outputs.go-mod }}
- key: ${{ matrix.goos }}-${{ matrix.goarch }}-go-mod-${{ hashFiles('**/go.sum') }}
-
- - name: Build
- run: cd encr.dev && go run ./pkg/make-release/make-release.go -v="${{ github.event.inputs.version }}" -dst=dist -goos=${{ matrix.goos }} -goarch=${{ matrix.goarch }} -encore-go="../encore-go"
- env:
- GO111MODULE: "on"
- if: runner.os != 'windows'
-
- - name: Build
- run: cd encr.dev && .\pkg\make-release\windows\build.bat
- env:
- GO111MODULE: "on"
- ENCORE_VERSION: "${{ github.event.inputs.version }}"
- ENCORE_GOROOT: "../encore-go"
- if: runner.os == 'windows'
-
- - name: 'Tar artifacts'
- run: tar -czvf encore-${{ github.event.inputs.version }}-${{ matrix.goos }}_${{ matrix.goarch }}.tar.gz -C encr.dev/dist/${{ matrix.goos }}_${{ matrix.goarch }} .
- - name: Publish artifact
- uses: actions/upload-artifact@v2
- with:
- name: encore-${{ github.event.inputs.version }}-${{ matrix.goos }}_${{ matrix.goarch }}
- path: encore-${{ github.event.inputs.version }}-${{ matrix.goos }}_${{ matrix.goarch }}.tar.gz
+ - name: Check out repo
+ uses: actions/checkout@v4
+ with:
+ path: encr.dev
+
+ - name: Set up Go
+ uses: actions/setup-go@v4
+ with:
+ go-version-file: "encr.dev/go.mod"
+ check-latest: true
+ cache-dependency-path: "encr.dev/go.sum"
+
+ - name: Set up Zig
+ uses: goto-bus-stop/setup-zig@7ab2955eb728f5440978d5824358023be3a2802d # v2.2.0
+ with:
+ version: 0.10.1
+
+ - name: Install encore-go
+ run: curl --fail -o encore-go.tar.gz -L https://github.com/encoredev/go/releases/download/${{ github.event.inputs.encorego_version }}/${{ matrix.release_key }}.tar.gz && tar -C ${{ github.workspace }} -xzf ./encore-go.tar.gz
+
+ - name: Build
+ run: cd encr.dev && go run ./pkg/make-release/make-release.go -v="${{ github.event.inputs.version }}" -dst=dist -goos=${{ matrix.goos }} -goarch=${{ matrix.goarch }} -encore-go="../encore-go"
+ env:
+ GO111MODULE: "on"
+ if: runner.os != 'windows'
+
+ - name: Build
+ run: cd encr.dev && .\pkg\make-release\windows\build.bat
+ env:
+ GO111MODULE: "on"
+ ENCORE_VERSION: "${{ github.event.inputs.version }}"
+ ENCORE_GOROOT: "../encore-go"
+ if: runner.os == 'windows'
+
+ - name: "Tar artifacts"
+ run: tar -czvf encore-${{ github.event.inputs.version }}-${{ matrix.goos }}_${{ matrix.goarch }}.tar.gz -C encr.dev/dist/${{ matrix.goos }}_${{ matrix.goarch }} .
+ - name: Publish artifact
+ uses: actions/upload-artifact@v3
+ with:
+ name: encore-${{ github.event.inputs.version }}-${{ matrix.goos }}_${{ matrix.goarch }}
+ path: encore-${{ github.event.inputs.version }}-${{ matrix.goos }}_${{ matrix.goarch }}.tar.gz
+
+ publish-docker-images:
+ name: "publish docker images"
+ runs-on: ubuntu-24.04
+ needs: build
+ permissions:
+ contents: read
+ packages: write
+
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ sparse-checkout: .github
+ - name: Download Artifacts
+ uses: actions/download-artifact@v3
+ with:
+ path: .github/dockerimg/artifacts
+ - name: Setup Docker Buildx
+ uses: docker/setup-buildx-action@v1
+
+ - name: Login to Docker Registry
+ uses: docker/login-action@v2
+ with:
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+ - name: Cache Docker layers
+ uses: actions/cache@v2
+ with:
+ path: /tmp/.buildx-cache
+ key: ${{ runner.os }}-buildx-${{ github.sha }}
+ restore-keys: |
+ ${{ runner.os }}-buildx-
+
+ - name: Extract metadata (tags, labels) for Docker
+ id: meta
+ uses: docker/metadata-action@v4
+ with:
+ images: encoredotdev/encore
+ labels: |
+ org.opencontainers.image.title=Encore
+ org.opencontainers.image.vendor=encore.dev
+ org.opencontainers.image.authors=support@encore.dev
+ org.opencontainers.image.description=Encore is the end-to-end Backend Development Platform that lets you escape cloud complexity.
+ tags: |
+ type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', github.event.repository.default_branch) }}
+ type=semver,pattern={{version}},value=v${{ github.event.inputs.version }}
+ type=semver,pattern={{major}}.{{minor}},value=v${{ github.event.inputs.version }}
+ type=semver,pattern={{major}},value=v${{ github.event.inputs.version }}
+
+ - name: Build and push
+ uses: docker/build-push-action@v4
+ with:
+ context: .github/dockerimg
+ platforms: linux/amd64,linux/arm64
+ push: true
+ tags: ${{ steps.meta.outputs.tags }}
+ labels: ${{ steps.meta.outputs.labels }}
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
+ build-args: |
+ RELEASE_VERSION=${{ github.event.inputs.version }}
+
+ notify_release_success:
+ needs:
+ - build
+ - publish-docker-images
+ runs-on: ubuntu-24.04
+ steps:
+ - name: Webhook
+ uses: distributhor/workflow-webhook@v3.0.7
+ env:
+ webhook_type: "json"
+ webhook_url: ${{ secrets.RELEASE_WEBHOOK }}
+ data: '{ "version": "${{ github.event.inputs.version }}", "run_id": "${{ github.run_id }}" }'
diff --git a/.github/workflows/semgrep-to-rdjson.jq b/.github/workflows/semgrep-to-rdjson.jq
new file mode 100644
index 0000000000..e374fb476e
--- /dev/null
+++ b/.github/workflows/semgrep-to-rdjson.jq
@@ -0,0 +1,43 @@
+# See https://github.com/reviewdog/reviewdog/tree/master/proto/rdf
+{
+ source: {
+ name: "semgrep",
+ url: "https://semgrep.dev/",
+ },
+ diagnostics: [
+ .results[] | {
+ code: {
+ value: .check_id,
+ url: [
+ .extra.metadata.shortlink?,
+ .extra.metadata.source?,
+ .extra."semgrep.dev".rule.url?,
+ "https://github.com/encoredev/encore/blob/main/\(.check_id | gsub("\\."; "/")).yml"
+ ] | map(select(. != null)) | first,
+ },
+ message: .extra.message,
+ location: {
+ path: .path,
+ range: {
+ start: {
+ line: .start.line,
+ column: .start.col
+ },
+ end: {
+ line: .end.line,
+ column: .end.col
+ },
+ },
+ },
+ severity: .extra.severity,
+
+ # Temporary variable we store to track the fix
+ _res: .
+ } |
+ if ._res.extra.fix then .suggestions = [{
+ range: .location.range,
+ text: ._res.extra.fix,
+ }] else . end |
+ del(._res)
+ ]
+}
diff --git a/.github/workflows/staticcheck-to-rdjsonl.jq b/.github/workflows/staticcheck-to-rdjsonl.jq
new file mode 100644
index 0000000000..2ca6946eae
--- /dev/null
+++ b/.github/workflows/staticcheck-to-rdjsonl.jq
@@ -0,0 +1,19 @@
+# See https://github.com/reviewdog/reviewdog/tree/master/proto/rdf
+{
+ source: {
+ name: "staticcheck",
+ url: "https://staticcheck.io"
+ },
+ message: .message,
+ code: {value: .code, url: "https://staticcheck.io/docs/checks#\(.code)"},
+ location: {
+ path: .location.file,
+ range: {
+ start: {
+ line: .location.line,
+ column: .location.column
+ }
+ }
+ },
+ severity: ((.severity|ascii_upcase|select(match("ERROR|WARNING|INFO")))//null)
+}
diff --git a/.gitignore b/.gitignore
index 30a3d5eb4c..8ef7422ae3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,10 +2,25 @@
/dist
/encore
/git-remote-encore
+/target
# Don't commit dotfiles
/.encore
/.vscode
# Build artifact that must be placed alongside go files for Windows
-*.syso
\ No newline at end of file
+*.syso
+
+# JetBrains
+.idea
+.fleet
+.run
+
+# macOS
+.DS_Store
+
+runtimes/supervisor-encore
+runtimes/supervisor-encore-linux-amd64
+encore-runtime.node-linux-amd64
diff --git a/.prettierrc.toml b/.prettierrc.toml
new file mode 100644
index 0000000000..66c04fd783
--- /dev/null
+++ b/.prettierrc.toml
@@ -0,0 +1 @@
+trailingComma = "none"
diff --git a/.reviewdog.yml b/.reviewdog.yml
new file mode 100644
index 0000000000..74a5d9e16a
--- /dev/null
+++ b/.reviewdog.yml
@@ -0,0 +1,33 @@
+# Encore's reviewdog configuration file.
+#
+# This runs in our CI pipeline when you open a PR. To run this locally
+# and get the same results as our CI pipeline, run: `./check.bash`
+#
+# We use a makefile rather than the commands directly as this repo
+# has multiple Go modules within it and most tools only look at the
+# module in the current directory. Thus our makefile runs the tool
+# for each module, combining the results into a single standardised
+# format that reviewdog can then parse and display as a single "run"
+# for each tool.
+runner:
+ go-vet:
+ cmd: make -s -C .github/workflows go-vet
+ format: govet
+ go-fmt:
+ cmd: make -s -C .github/workflows go-fmt
+ format: diff
+# Disable staticcheck until it supports Go 1.21: https://github.com/dominikh/go-tools/issues/1431
+# staticcheck:
+# cmd: make -s -C .github/workflows staticcheck
+# format: rdjsonl
+ errcheck:
+ cmd: make -s -C .github/workflows errcheck
+ errorformat:
+ - "%f:%l:%c:\t%m"
+ ineffassign:
+ cmd: make -s -C .github/workflows ineffassign
+ errorformat:
+ - "%f:%l:%c: %m"
+ semgrep:
+ cmd: make -s -C .github/workflows semgrep
+ format: rdjson
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000..1304d55a1c
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,92 @@
+# Contributing to Encore
+
+We're so excited that you are interested in contributing to Encore!
+All contributions are welcome, and there are several valuable ways to contribute.
+
+Below is a technical walkthrough of developing the `encore` command for contributing code
+to the Encore project. Head over to the community section for [more ways to contribute](https://encore.dev/docs/community/contribute)!
+
+## GitHub Codespaces / VS Code Remote Containers
+The easiest way to get started with developing Encore is using
+GitHub Codespaces. Simply open this repository in a new Codespace
+and your development environment will be set up with everything preconfigured for building the `encore` CLI and running applications with it.
+
+This also works just as well with [Visual Studio Code's Remote Development](https://code.visualstudio.com/docs/remote/remote-overview).
+
+
+## Building the encore command from source
+To build from source, run `go build ./cli/cmd/encore` and `go install ./cli/cmd/git-remote-encore`.
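+
+For example, from the repository root:
+
+```bash
+# Build the encore CLI binary into the current directory
+go build ./cli/cmd/encore
+# Install the git-remote-encore helper into your GOBIN
+go install ./cli/cmd/git-remote-encore
+```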
+
+Running an Encore application requires both the Encore runtime (the `encore.dev` package) as well as a custom-built
+[Go runtime](https://github.com/encoredev/go) to implement Encore's request semantics and automated instrumentation.
+
+As a result, the Encore Daemon must know where these two things exist on the filesystem to compile the Encore application properly.
+
+This can be done in one of two ways: embedding the installation paths at compile time (similar to `GOROOT`)
+or setting environment variables at runtime.
+
+The environment variables are:
+- `ENCORE_RUNTIMES_PATH` – the path to the `encore.dev` runtime implementation.
+- `ENCORE_GOROOT` – the path to the Encore Go runtime (encore-go) on disk.
+
+**ENCORE_RUNTIMES_PATH**
+
+This must be set to the location of the `encore.dev` runtime package.
+It's located in this Git repository in the `runtimes` directory:
+
+```bash
+export ENCORE_RUNTIMES_PATH=/path/to/encore/runtimes
+```
+
+**ENCORE_GOROOT**
+
+The `ENCORE_GOROOT` must be set to the path to the [Encore Go runtime](https://github.com/encoredev/go).
+Unless you want to make changes to the Go runtime, it's easiest to point this to an existing Encore installation.
+
+To do that, run `encore daemon env` and grab the value of `ENCORE_GOROOT`. For example (yours is probably different):
+
+```bash
+export ENCORE_GOROOT=/opt/homebrew/Cellar/encore/0.16.2/libexec/encore-go
+```
+
+### Running applications when building from source
+Once you've built your own `encore` binary and set the environment variables above, you're ready to go!
+
+Start the daemon with the built binary: `./encore daemon -f`
+
+Note that commands like `encore run` must be executed with the same `encore` binary that the daemon is running.
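+
+A minimal session might look like this (a sketch; it assumes the environment variables above are exported and that an Encore app exists at `~/myapp`):
+
+```bash
+# Terminal 1: start the daemon in the foreground with the locally built binary
+./encore daemon -f
+
+# Terminal 2: run an app using the *same* binary (hypothetical paths)
+cd ~/myapp
+/path/to/encore-repo/encore run
+```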
+
+
+### Testing the Daemon run logic
+The codegen tests in the `internal/clientgen/client_test.go` file use many auto-generated files from the
+`e2e-tests/testdata` directory. To generate the client files and other test files, run `go test -golden-update` from
+the `e2e-tests` directory. This will generate client files for all the supported client generation languages.
+
+Running `go test ./internal/clientgen` will now work and use the most recently generated client files. If
+you change the client generator or the contents of the `testdata` folder, you may need to regenerate the client files.
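+
+A sketch of that flow, run from the repository root:
+
+```bash
+# Regenerate the golden files (generated clients and other test fixtures)
+cd e2e-tests
+go test -golden-update
+cd ..
+
+# Run the codegen tests against the freshly generated files
+go test ./internal/clientgen
+```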
+
+## Architecture
+
+The code base is divided into several parts:
+
+### cli
+The `encore` command-line interface. The Encore background daemon
+is located at `cli/daemon` and is responsible for managing processes,
+setting up databases and talking with the Encore servers for operations like
+fetching production logs.
+
+### parser
+The Encore Parser statically analyzes Encore apps to build up a model
+of the application dubbed the Encore Syntax Tree (EST) that lives in
+`parser/est`.
+
+For speed, the parser does not perform traditional type-checking; it does
+limited type-checking to enforce Encore-specific rules but otherwise
+relies on the underlying Go compiler to perform type-checking as part of
+building the application.
+
+### compiler
+The Encore Compiler rewrites the source code based on the parsed
+Encore Syntax Tree to create a fully functioning application.
+It rewrites API calls & API handlers, injects instrumentation
+and secret values, and more.
diff --git a/Cargo.lock b/Cargo.lock
new file mode 100644
index 0000000000..462758d190
--- /dev/null
+++ b/Cargo.lock
@@ -0,0 +1,7647 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 4
+
+[[package]]
+name = "Inflector"
+version = "0.11.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3"
+dependencies = [
+ "lazy_static",
+ "regex",
+]
+
+[[package]]
+name = "addr2line"
+version = "0.24.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
+dependencies = [
+ "gimli",
+]
+
+[[package]]
+name = "adler2"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
+
+[[package]]
+name = "ahash"
+version = "0.8.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011"
+dependencies = [
+ "cfg-if",
+ "getrandom 0.2.15",
+ "once_cell",
+ "version_check",
+ "zerocopy",
+]
+
+[[package]]
+name = "aho-corasick"
+version = "1.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "alloc-no-stdlib"
+version = "2.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3"
+
+[[package]]
+name = "alloc-stdlib"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece"
+dependencies = [
+ "alloc-no-stdlib",
+]
+
+[[package]]
+name = "allocator-api2"
+version = "0.2.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
+
+[[package]]
+name = "android-tzdata"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
+
+[[package]]
+name = "android_system_properties"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "anstream"
+version = "0.6.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b"
+dependencies = [
+ "anstyle",
+ "anstyle-parse",
+ "anstyle-query",
+ "anstyle-wincon",
+ "colorchoice",
+ "is_terminal_polyfill",
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle"
+version = "1.0.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"
+
+[[package]]
+name = "anstyle-parse"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9"
+dependencies = [
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle-query"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c"
+dependencies = [
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "anstyle-wincon"
+version = "3.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125"
+dependencies = [
+ "anstyle",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "anyhow"
+version = "1.0.95"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04"
+dependencies = [
+ "backtrace",
+]
+
+[[package]]
+name = "approx"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "arc-swap"
+version = "1.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457"
+
+[[package]]
+name = "array-init"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3d62b7694a562cdf5a74227903507c56ab2cc8bdd1f781ed5cb4cf9c9f810bfc"
+
+[[package]]
+name = "arrayvec"
+version = "0.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
+
+[[package]]
+name = "assert_fs"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7efdb1fdb47602827a342857666feb372712cbc64b414172bd6b167a02927674"
+dependencies = [
+ "anstyle",
+ "doc-comment",
+ "globwalk",
+ "predicates 3.1.3",
+ "predicates-core",
+ "predicates-tree",
+ "tempfile",
+]
+
+[[package]]
+name = "assert_matches"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9"
+
+[[package]]
+name = "ast_node"
+version = "0.9.5"
+source = "git+https://github.com/encoredev/swc?branch=node-resolve-exports#3ccddcb7d70380b6952296717b2d9f2056f4c2ac"
+dependencies = [
+ "pmutil",
+ "proc-macro2",
+ "quote",
+ "swc_macros_common",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "async-channel"
+version = "1.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35"
+dependencies = [
+ "concurrent-queue",
+ "event-listener",
+ "futures-core",
+]
+
+[[package]]
+name = "async-compression"
+version = "0.3.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "942c7cd7ae39e91bde4820d74132e9862e62c2f386c3aa90ccf55949f5bad63a"
+dependencies = [
+ "flate2",
+ "futures-core",
+ "memchr",
+ "pin-project-lite",
+ "tokio",
+]
+
+[[package]]
+name = "async-stream"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476"
+dependencies = [
+ "async-stream-impl",
+ "futures-core",
+ "pin-project-lite",
+]
+
+[[package]]
+name = "async-stream-impl"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "async-trait"
+version = "0.1.85"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "atomic-waker"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
+
+[[package]]
+name = "atty"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
+dependencies = [
+ "hermit-abi 0.1.19",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
+
+[[package]]
+name = "aws-config"
+version = "1.5.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c03a50b30228d3af8865ce83376b4e99e1ffa34728220fe2860e4df0bb5278d6"
+dependencies = [
+ "aws-credential-types",
+ "aws-runtime",
+ "aws-sdk-sso",
+ "aws-sdk-ssooidc",
+ "aws-sdk-sts",
+ "aws-smithy-async",
+ "aws-smithy-http",
+ "aws-smithy-json",
+ "aws-smithy-runtime",
+ "aws-smithy-runtime-api",
+ "aws-smithy-types",
+ "aws-types",
+ "bytes",
+ "fastrand",
+ "hex",
+ "http 0.2.12",
+ "ring 0.17.8",
+ "time",
+ "tokio",
+ "tracing",
+ "url",
+ "zeroize",
+]
+
+[[package]]
+name = "aws-credential-types"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "60e8f6b615cb5fc60a98132268508ad104310f0cfb25a1c22eee76efdf9154da"
+dependencies = [
+ "aws-smithy-async",
+ "aws-smithy-runtime-api",
+ "aws-smithy-types",
+ "zeroize",
+]
+
+[[package]]
+name = "aws-runtime"
+version = "1.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b16d1aa50accc11a4b4d5c50f7fb81cc0cf60328259c587d0e6b0f11385bde46"
+dependencies = [
+ "aws-credential-types",
+ "aws-sigv4",
+ "aws-smithy-async",
+ "aws-smithy-eventstream",
+ "aws-smithy-http",
+ "aws-smithy-runtime",
+ "aws-smithy-runtime-api",
+ "aws-smithy-types",
+ "aws-types",
+ "bytes",
+ "fastrand",
+ "http 0.2.12",
+ "http-body 0.4.6",
+ "once_cell",
+ "percent-encoding",
+ "pin-project-lite",
+ "tracing",
+ "uuid",
+]
+
+[[package]]
+name = "aws-sdk-s3"
+version = "1.68.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bc5ddf1dc70287dc9a2f953766a1fe15e3e74aef02fd1335f2afa475c9b4f4fc"
+dependencies = [
+ "aws-credential-types",
+ "aws-runtime",
+ "aws-sigv4",
+ "aws-smithy-async",
+ "aws-smithy-checksums",
+ "aws-smithy-eventstream",
+ "aws-smithy-http",
+ "aws-smithy-json",
+ "aws-smithy-runtime",
+ "aws-smithy-runtime-api",
+ "aws-smithy-types",
+ "aws-smithy-xml",
+ "aws-types",
+ "bytes",
+ "fastrand",
+ "hex",
+ "hmac",
+ "http 0.2.12",
+ "http-body 0.4.6",
+ "lru",
+ "once_cell",
+ "percent-encoding",
+ "regex-lite",
+ "sha2",
+ "tracing",
+ "url",
+]
+
+[[package]]
+name = "aws-sdk-sns"
+version = "1.54.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b71d976a6c87d15fd3ceab6fcaa17e0d02a05a06d30f5e268e4160b25dbde26d"
+dependencies = [
+ "aws-credential-types",
+ "aws-runtime",
+ "aws-smithy-async",
+ "aws-smithy-http",
+ "aws-smithy-json",
+ "aws-smithy-query",
+ "aws-smithy-runtime",
+ "aws-smithy-runtime-api",
+ "aws-smithy-types",
+ "aws-smithy-xml",
+ "aws-types",
+ "http 0.2.12",
+ "once_cell",
+ "regex-lite",
+ "tracing",
+]
+
+[[package]]
+name = "aws-sdk-sqs"
+version = "1.53.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6493ce2b27a2687b0d8a2453bf6ad2499012e9720c3367cb1206496ede475443"
+dependencies = [
+ "aws-credential-types",
+ "aws-runtime",
+ "aws-smithy-async",
+ "aws-smithy-http",
+ "aws-smithy-json",
+ "aws-smithy-runtime",
+ "aws-smithy-runtime-api",
+ "aws-smithy-types",
+ "aws-types",
+ "bytes",
+ "http 0.2.12",
+ "once_cell",
+ "regex-lite",
+ "tracing",
+]
+
+[[package]]
+name = "aws-sdk-sso"
+version = "1.53.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1605dc0bf9f0a4b05b451441a17fcb0bda229db384f23bf5cead3adbab0664ac"
+dependencies = [
+ "aws-credential-types",
+ "aws-runtime",
+ "aws-smithy-async",
+ "aws-smithy-http",
+ "aws-smithy-json",
+ "aws-smithy-runtime",
+ "aws-smithy-runtime-api",
+ "aws-smithy-types",
+ "aws-types",
+ "bytes",
+ "http 0.2.12",
+ "once_cell",
+ "regex-lite",
+ "tracing",
+]
+
+[[package]]
+name = "aws-sdk-ssooidc"
+version = "1.54.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "59f3f73466ff24f6ad109095e0f3f2c830bfb4cd6c8b12f744c8e61ebf4d3ba1"
+dependencies = [
+ "aws-credential-types",
+ "aws-runtime",
+ "aws-smithy-async",
+ "aws-smithy-http",
+ "aws-smithy-json",
+ "aws-smithy-runtime",
+ "aws-smithy-runtime-api",
+ "aws-smithy-types",
+ "aws-types",
+ "bytes",
+ "http 0.2.12",
+ "once_cell",
+ "regex-lite",
+ "tracing",
+]
+
+[[package]]
+name = "aws-sdk-sts"
+version = "1.54.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "249b2acaa8e02fd4718705a9494e3eb633637139aa4bb09d70965b0448e865db"
+dependencies = [
+ "aws-credential-types",
+ "aws-runtime",
+ "aws-smithy-async",
+ "aws-smithy-http",
+ "aws-smithy-json",
+ "aws-smithy-query",
+ "aws-smithy-runtime",
+ "aws-smithy-runtime-api",
+ "aws-smithy-types",
+ "aws-smithy-xml",
+ "aws-types",
+ "http 0.2.12",
+ "once_cell",
+ "regex-lite",
+ "tracing",
+]
+
+[[package]]
+name = "aws-sigv4"
+version = "1.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7d3820e0c08d0737872ff3c7c1f21ebbb6693d832312d6152bf18ef50a5471c2"
+dependencies = [
+ "aws-credential-types",
+ "aws-smithy-eventstream",
+ "aws-smithy-http",
+ "aws-smithy-runtime-api",
+ "aws-smithy-types",
+ "bytes",
+ "crypto-bigint 0.5.5",
+ "form_urlencoded",
+ "hex",
+ "hmac",
+ "http 0.2.12",
+ "http 1.2.0",
+ "once_cell",
+ "p256",
+ "percent-encoding",
+ "ring 0.17.8",
+ "sha2",
+ "subtle",
+ "time",
+ "tracing",
+ "zeroize",
+]
+
+[[package]]
+name = "aws-smithy-async"
+version = "1.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "427cb637d15d63d6f9aae26358e1c9a9c09d5aa490d64b09354c8217cfef0f28"
+dependencies = [
+ "futures-util",
+ "pin-project-lite",
+ "tokio",
+]
+
+[[package]]
+name = "aws-smithy-checksums"
+version = "0.60.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba1a71073fca26775c8b5189175ea8863afb1c9ea2cceb02a5de5ad9dfbaa795"
+dependencies = [
+ "aws-smithy-http",
+ "aws-smithy-types",
+ "bytes",
+ "crc32c",
+ "crc32fast",
+ "hex",
+ "http 0.2.12",
+ "http-body 0.4.6",
+ "md-5",
+ "pin-project-lite",
+ "sha1",
+ "sha2",
+ "tracing",
+]
+
+[[package]]
+name = "aws-smithy-eventstream"
+version = "0.60.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cef7d0a272725f87e51ba2bf89f8c21e4df61b9e49ae1ac367a6d69916ef7c90"
+dependencies = [
+ "aws-smithy-types",
+ "bytes",
+ "crc32fast",
+]
+
+[[package]]
+name = "aws-smithy-http"
+version = "0.60.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c8bc3e8fdc6b8d07d976e301c02fe553f72a39b7a9fea820e023268467d7ab6"
+dependencies = [
+ "aws-smithy-eventstream",
+ "aws-smithy-runtime-api",
+ "aws-smithy-types",
+ "bytes",
+ "bytes-utils",
+ "futures-core",
+ "http 0.2.12",
+ "http-body 0.4.6",
+ "once_cell",
+ "percent-encoding",
+ "pin-project-lite",
+ "pin-utils",
+ "tracing",
+]
+
+[[package]]
+name = "aws-smithy-json"
+version = "0.61.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee4e69cc50921eb913c6b662f8d909131bb3e6ad6cb6090d3a39b66fc5c52095"
+dependencies = [
+ "aws-smithy-types",
+]
+
+[[package]]
+name = "aws-smithy-query"
+version = "0.60.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f2fbd61ceb3fe8a1cb7352e42689cec5335833cd9f94103a61e98f9bb61c64bb"
+dependencies = [
+ "aws-smithy-types",
+ "urlencoding",
+]
+
+[[package]]
+name = "aws-smithy-runtime"
+version = "1.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a05dd41a70fc74051758ee75b5c4db2c0ca070ed9229c3df50e9475cda1cb985"
+dependencies = [
+ "aws-smithy-async",
+ "aws-smithy-http",
+ "aws-smithy-runtime-api",
+ "aws-smithy-types",
+ "bytes",
+ "fastrand",
+ "h2 0.3.26",
+ "http 0.2.12",
+ "http-body 0.4.6",
+ "http-body 1.0.1",
+ "httparse",
+ "hyper 0.14.32",
+ "hyper-rustls 0.24.2",
+ "once_cell",
+ "pin-project-lite",
+ "pin-utils",
+ "rustls 0.21.12",
+ "tokio",
+ "tracing",
+]
+
+[[package]]
+name = "aws-smithy-runtime-api"
+version = "1.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "92165296a47a812b267b4f41032ff8069ab7ff783696d217f0994a0d7ab585cd"
+dependencies = [
+ "aws-smithy-async",
+ "aws-smithy-types",
+ "bytes",
+ "http 0.2.12",
+ "http 1.2.0",
+ "pin-project-lite",
+ "tokio",
+ "tracing",
+ "zeroize",
+]
+
+[[package]]
+name = "aws-smithy-types"
+version = "1.2.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38ddc9bd6c28aeb303477170ddd183760a956a03e083b3902a990238a7e3792d"
+dependencies = [
+ "base64-simd",
+ "bytes",
+ "bytes-utils",
+ "futures-core",
+ "http 0.2.12",
+ "http 1.2.0",
+ "http-body 0.4.6",
+ "http-body 1.0.1",
+ "http-body-util",
+ "itoa",
+ "num-integer",
+ "pin-project-lite",
+ "pin-utils",
+ "ryu",
+ "serde",
+ "time",
+ "tokio",
+ "tokio-util",
+]
+
+[[package]]
+name = "aws-smithy-xml"
+version = "0.60.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ab0b0166827aa700d3dc519f72f8b3a91c35d0b8d042dc5d643a91e6f80648fc"
+dependencies = [
+ "xmlparser",
+]
+
+[[package]]
+name = "aws-types"
+version = "1.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5221b91b3e441e6675310829fd8984801b772cb1546ef6c0e54dec9f1ac13fef"
+dependencies = [
+ "aws-credential-types",
+ "aws-smithy-async",
+ "aws-smithy-runtime-api",
+ "aws-smithy-types",
+ "rustc_version",
+ "tracing",
+]
+
+[[package]]
+name = "axum"
+version = "0.6.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf"
+dependencies = [
+ "async-trait",
+ "axum-core 0.3.4",
+ "bitflags 1.3.2",
+ "bytes",
+ "futures-util",
+ "http 0.2.12",
+ "http-body 0.4.6",
+ "hyper 0.14.32",
+ "itoa",
+ "matchit",
+ "memchr",
+ "mime",
+ "percent-encoding",
+ "pin-project-lite",
+ "rustversion",
+ "serde",
+ "sync_wrapper 0.1.2",
+ "tower 0.4.13",
+ "tower-layer",
+ "tower-service",
+]
+
+[[package]]
+name = "axum"
+version = "0.7.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f"
+dependencies = [
+ "async-trait",
+ "axum-core 0.4.5",
+ "base64 0.22.1",
+ "bytes",
+ "futures-util",
+ "http 1.2.0",
+ "http-body 1.0.1",
+ "http-body-util",
+ "hyper 1.5.2",
+ "hyper-util",
+ "itoa",
+ "matchit",
+ "memchr",
+ "mime",
+ "percent-encoding",
+ "pin-project-lite",
+ "rustversion",
+ "serde",
+ "serde_json",
+ "serde_path_to_error",
+ "serde_urlencoded",
+ "sha1",
+ "sync_wrapper 1.0.2",
+ "tokio",
+ "tokio-tungstenite 0.24.0",
+ "tower 0.5.2",
+ "tower-layer",
+ "tower-service",
+ "tracing",
+]
+
+[[package]]
+name = "axum-core"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c"
+dependencies = [
+ "async-trait",
+ "bytes",
+ "futures-util",
+ "http 0.2.12",
+ "http-body 0.4.6",
+ "mime",
+ "rustversion",
+ "tower-layer",
+ "tower-service",
+]
+
+[[package]]
+name = "axum-core"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199"
+dependencies = [
+ "async-trait",
+ "bytes",
+ "futures-util",
+ "http 1.2.0",
+ "http-body 1.0.1",
+ "http-body-util",
+ "mime",
+ "pin-project-lite",
+ "rustversion",
+ "sync_wrapper 1.0.2",
+ "tower-layer",
+ "tower-service",
+ "tracing",
+]
+
+[[package]]
+name = "backoff"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "721c249ab59cbc483ad4294c9ee2671835c1e43e9ffc277e6b4ecfef733cfdc5"
+dependencies = [
+ "instant",
+ "rand 0.7.3",
+]
+
+[[package]]
+name = "backtrace"
+version = "0.3.74"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a"
+dependencies = [
+ "addr2line",
+ "cfg-if",
+ "libc",
+ "miniz_oxide",
+ "object",
+ "rustc-demangle",
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "base16ct"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce"
+
+[[package]]
+name = "base32"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "23ce669cd6c8588f79e15cf450314f9638f967fc5770ff1c7c1deb0925ea7cfa"
+
+[[package]]
+name = "base64"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"
+
+[[package]]
+name = "base64"
+version = "0.21.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
+
+[[package]]
+name = "base64"
+version = "0.22.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
+
+[[package]]
+name = "base64-simd"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "339abbe78e73178762e23bea9dfd08e697eb3f3301cd4be981c0f78ba5859195"
+dependencies = [
+ "outref",
+ "vsimd",
+]
+
+[[package]]
+name = "base64ct"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"
+
+[[package]]
+name = "bb8"
+version = "0.8.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d89aabfae550a5c44b43ab941844ffcd2e993cb6900b342debf59e9ea74acdb8"
+dependencies = [
+ "async-trait",
+ "futures-util",
+ "parking_lot",
+ "tokio",
+]
+
+[[package]]
+name = "bb8-postgres"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "56ac82c42eb30889b5c4ee4763a24b8c566518171ebea648cd7e3bc532c60680"
+dependencies = [
+ "async-trait",
+ "bb8",
+ "tokio",
+ "tokio-postgres",
+]
+
+[[package]]
+name = "better_scoped_tls"
+version = "0.1.1"
+source = "git+https://github.com/encoredev/swc?branch=node-resolve-exports#3ccddcb7d70380b6952296717b2d9f2056f4c2ac"
+dependencies = [
+ "scoped-tls",
+]
+
+[[package]]
+name = "bindgen"
+version = "0.66.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f2b84e06fc203107bfbad243f4aba2af864eb7db3b1cf46ea0a023b0b433d2a7"
+dependencies = [
+ "bitflags 2.9.4",
+ "cexpr",
+ "clang-sys",
+ "lazy_static",
+ "lazycell",
+ "log",
+ "peeking_take_while",
+ "prettyplease",
+ "proc-macro2",
+ "quote",
+ "regex",
+ "rustc-hash",
+ "shlex",
+ "syn 2.0.95",
+ "which",
+]
+
+[[package]]
+name = "bit-set"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3"
+dependencies = [
+ "bit-vec",
+]
+
+[[package]]
+name = "bit-vec"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7"
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "bitflags"
+version = "2.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394"
+
+[[package]]
+name = "blake2"
+version = "0.10.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe"
+dependencies = [
+ "digest",
+]
+
+[[package]]
+name = "block-buffer"
+version = "0.10.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71"
+dependencies = [
+ "generic-array",
+]
+
+[[package]]
+name = "brotli"
+version = "3.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d640d25bc63c50fb1f0b545ffd80207d2e10a4c965530809b40ba3386825c391"
+dependencies = [
+ "alloc-no-stdlib",
+ "alloc-stdlib",
+ "brotli-decompressor",
+]
+
+[[package]]
+name = "brotli-decompressor"
+version = "2.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4e2e4afe60d7dd600fdd3de8d0f08c2b7ec039712e3b6137ff98b7004e82de4f"
+dependencies = [
+ "alloc-no-stdlib",
+ "alloc-stdlib",
+]
+
+[[package]]
+name = "bstr"
+version = "1.11.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "531a9155a481e2ee699d4f98f43c0ca4ff8ee1bfd55c31e9e98fb29d2b176fe0"
+dependencies = [
+ "memchr",
+ "regex-automata 0.4.9",
+ "serde",
+]
+
+[[package]]
+name = "built"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b99c4cdc7b2c2364182331055623bdf45254fcb679fea565c40c3c11c101889a"
+dependencies = [
+ "cargo-lock",
+]
+
+[[package]]
+name = "bumpalo"
+version = "3.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
+
+[[package]]
+name = "bytemuck"
+version = "1.23.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3995eaeebcdf32f91f980d360f78732ddc061097ab4e39991ae7a6ace9194677"
+
+[[package]]
+name = "byteorder"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
+
+[[package]]
+name = "bytes"
+version = "1.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b"
+
+[[package]]
+name = "bytes-utils"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7dafe3a8757b027e2be6e4e5601ed563c55989fcf1546e933c66c8eb3a058d35"
+dependencies = [
+ "bytes",
+ "either",
+]
+
+[[package]]
+name = "cargo-lock"
+version = "9.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e11c675378efb449ed3ce8de78d75d0d80542fc98487c26aba28eb3b82feac72"
+dependencies = [
+ "semver",
+ "serde",
+ "toml",
+ "url",
+]
+
+[[package]]
+name = "cc"
+version = "1.2.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a012a0df96dd6d06ba9a1b29d6402d1a5d77c6befd2566afdc26e10603dc93d7"
+dependencies = [
+ "jobserver",
+ "libc",
+ "shlex",
+]
+
+[[package]]
+name = "cexpr"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766"
+dependencies = [
+ "nom",
+]
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "chrono"
+version = "0.4.39"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825"
+dependencies = [
+ "android-tzdata",
+ "iana-time-zone",
+ "js-sys",
+ "num-traits",
+ "serde",
+ "wasm-bindgen",
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "cidr"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bd1b64030216239a2e7c364b13cd96a2097ebf0dfe5025f2dedee14a23f2ab60"
+
+[[package]]
+name = "clang-sys"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4"
+dependencies = [
+ "glob",
+ "libc",
+ "libloading",
+]
+
+[[package]]
+name = "clap"
+version = "3.2.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123"
+dependencies = [
+ "atty",
+ "bitflags 1.3.2",
+ "clap_derive",
+ "clap_lex",
+ "indexmap 1.9.3",
+ "once_cell",
+ "strsim 0.10.0",
+ "termcolor",
+ "textwrap",
+]
+
+[[package]]
+name = "clap_derive"
+version = "3.2.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008"
+dependencies = [
+ "heck 0.4.1",
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "clap_lex"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5"
+dependencies = [
+ "os_str_bytes",
+]
+
+[[package]]
+name = "clean-path"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aaa6b4b263a5d737e9bf6b7c09b72c41a5480aec4d7219af827f6564e950b6a5"
+
+[[package]]
+name = "cmake"
+version = "0.1.52"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c682c223677e0e5b6b7f63a64b9351844c3f1b1678a68b7ee617e30fb082620e"
+dependencies = [
+ "cc",
+]
+
+[[package]]
+name = "colorchoice"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990"
+
+[[package]]
+name = "colored"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c"
+dependencies = [
+ "lazy_static",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "concurrent-queue"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973"
+dependencies = [
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "console"
+version = "0.15.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ea3c6ecd8059b57859df5c69830340ed3c41d30e3da0c1cbed90a96ac853041b"
+dependencies = [
+ "encode_unicode",
+ "libc",
+ "once_cell",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "const-oid"
+version = "0.9.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8"
+
+[[package]]
+name = "constant_time_eq"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6"
+
+[[package]]
+name = "convert_case"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca"
+dependencies = [
+ "unicode-segmentation",
+]
+
+[[package]]
+name = "cookie"
+version = "0.18.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ddef33a339a91ea89fb53151bd0a4689cfce27055c291dfa69945475d22c747"
+dependencies = [
+ "time",
+ "version_check",
+]
+
+[[package]]
+name = "core-foundation"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f"
+dependencies = [
+ "core-foundation-sys",
+ "libc",
+]
+
+[[package]]
+name = "core-foundation-sys"
+version = "0.8.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
+
+[[package]]
+name = "cpufeatures"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "crc32c"
+version = "0.6.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3a47af21622d091a8f0fb295b88bc886ac74efcc613efc19f5d0b21de5c89e47"
+dependencies = [
+ "rustc_version",
+]
+
+[[package]]
+name = "crc32fast"
+version = "1.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "cron-parser"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fa8b40ccc59e09cc54bec675bea0e79dae34f5b900c8c5e06d8d6db884107adb"
+dependencies = [
+ "chrono",
+]
+
+[[package]]
+name = "crossbeam-channel"
+version = "0.5.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471"
+dependencies = [
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-deque"
+version = "0.8.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51"
+dependencies = [
+ "crossbeam-epoch",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.9.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
+dependencies = [
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-queue"
+version = "0.3.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115"
+dependencies = [
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
+
+[[package]]
+name = "crypto-bigint"
+version = "0.4.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef"
+dependencies = [
+ "generic-array",
+ "rand_core 0.6.4",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "crypto-bigint"
+version = "0.5.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76"
+dependencies = [
+ "rand_core 0.6.4",
+ "subtle",
+]
+
+[[package]]
+name = "crypto-common"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
+dependencies = [
+ "generic-array",
+ "typenum",
+]
+
+[[package]]
+name = "ctor"
+version = "0.2.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501"
+dependencies = [
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "daemonize"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ab8bfdaacb3c887a54d41bdf48d3af8873b3f5566469f8ba21b92057509f116e"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "darling"
+version = "0.20.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989"
+dependencies = [
+ "darling_core",
+ "darling_macro",
+]
+
+[[package]]
+name = "darling_core"
+version = "0.20.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5"
+dependencies = [
+ "fnv",
+ "ident_case",
+ "proc-macro2",
+ "quote",
+ "strsim 0.11.1",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "darling_macro"
+version = "0.20.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806"
+dependencies = [
+ "darling_core",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "dashmap"
+version = "5.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856"
+dependencies = [
+ "cfg-if",
+ "hashbrown 0.14.5",
+ "lock_api",
+ "once_cell",
+ "parking_lot_core",
+]
+
+[[package]]
+name = "data-encoding"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2"
+
+[[package]]
+name = "der"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de"
+dependencies = [
+ "const-oid",
+ "zeroize",
+]
+
+[[package]]
+name = "der"
+version = "0.7.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0"
+dependencies = [
+ "const-oid",
+ "pem-rfc7468",
+ "zeroize",
+]
+
+[[package]]
+name = "deranged"
+version = "0.3.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4"
+dependencies = [
+ "powerfmt",
+ "serde",
+]
+
+[[package]]
+name = "derivative"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "difflib"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8"
+
+[[package]]
+name = "digest"
+version = "0.10.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
+dependencies = [
+ "block-buffer",
+ "const-oid",
+ "crypto-common",
+ "subtle",
+]
+
+[[package]]
+name = "displaydoc"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "doc-comment"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10"
+
+[[package]]
+name = "duct"
+version = "0.13.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e4ab5718d1224b63252cd0c6f74f6480f9ffeb117438a2e0f5cf6d9a4798929c"
+dependencies = [
+ "libc",
+ "once_cell",
+ "os_pipe",
+ "shared_child",
+]
+
+[[package]]
+name = "duration-string"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6fcc1d9ae294a15ed05aeae8e11ee5f2b3fe971c077d45a42fb20825fba6ee13"
+
+[[package]]
+name = "ecdsa"
+version = "0.14.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c"
+dependencies = [
+ "der 0.6.1",
+ "elliptic-curve",
+ "rfc6979",
+ "signature 1.6.4",
+]
+
+[[package]]
+name = "either"
+version = "1.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"
+
+[[package]]
+name = "elliptic-curve"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3"
+dependencies = [
+ "base16ct",
+ "crypto-bigint 0.4.9",
+ "der 0.6.1",
+ "digest",
+ "ff",
+ "generic-array",
+ "group",
+ "pkcs8 0.9.0",
+ "rand_core 0.6.4",
+ "sec1",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "email_address"
+version = "0.2.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e079f19b08ca6239f47f8ba8509c11cf3ea30095831f7fed61441475edd8c449"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "encode_unicode"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0"
+
+[[package]]
+name = "encoding_rs"
+version = "0.8.35"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "encore-js-runtime"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "axum 0.7.9",
+ "bytes",
+ "chrono",
+ "encore-runtime-core",
+ "futures",
+ "log",
+ "malachite",
+ "mappable-rc",
+ "napi",
+ "napi-build",
+ "napi-derive",
+ "num_cpus",
+ "prost 0.12.6",
+ "prost-types 0.12.6",
+ "serde_json",
+ "tokio",
+ "tokio-util",
+]
+
+[[package]]
+name = "encore-runtime-core"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "assert_matches",
+ "async-stream",
+ "aws-config",
+ "aws-credential-types",
+ "aws-sdk-s3",
+ "aws-sdk-sns",
+ "aws-sdk-sqs",
+ "aws-smithy-types",
+ "axum 0.7.9",
+ "backtrace",
+ "base32",
+ "base64 0.21.7",
+ "bb8",
+ "bb8-postgres",
+ "byteorder",
+ "bytes",
+ "chrono",
+ "cidr",
+ "colored",
+ "cookie",
+ "duct",
+ "email_address",
+ "env_logger 0.10.2",
+ "flate2",
+ "form_urlencoded",
+ "futures",
+ "futures-core",
+ "futures-util",
+ "gjson",
+ "google-cloud-gax",
+ "google-cloud-googleapis",
+ "google-cloud-pubsub",
+ "google-cloud-storage",
+ "hex",
+ "hmac",
+ "http 1.2.0",
+ "http-body-util",
+ "httpdate",
+ "hyper 1.5.2",
+ "indexmap 2.7.0",
+ "insta",
+ "jsonwebtoken",
+ "log",
+ "malachite",
+ "matchit",
+ "md5",
+ "mime",
+ "native-tls",
+ "once_cell",
+ "openssl",
+ "openssl-probe",
+ "percent-encoding",
+ "pgvector",
+ "pingora",
+ "postgres-native-tls",
+ "postgres-protocol",
+ "proptest",
+ "prost 0.12.6",
+ "prost-build 0.12.6",
+ "prost-types 0.12.6",
+ "quickcheck",
+ "radix_fmt",
+ "rand 0.8.5",
+ "regex",
+ "reqwest 0.12.12",
+ "rsa",
+ "serde",
+ "serde_json",
+ "serde_path_to_error",
+ "serde_urlencoded",
+ "serde_with",
+ "sha2",
+ "sha3",
+ "subtle",
+ "thiserror 1.0.69",
+ "tokio",
+ "tokio-nsq",
+ "tokio-postgres",
+ "tokio-retry",
+ "tokio-stream",
+ "tokio-tungstenite 0.21.0",
+ "tokio-util",
+ "tower-http",
+ "tower-service",
+ "tracing",
+ "tracing-subscriber",
+ "url",
+ "urlencoding",
+ "uuid",
+ "xid",
+]
+
+[[package]]
+name = "encore-supervisor"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "axum 0.7.9",
+ "base64 0.21.7",
+ "bytes",
+ "env_logger 0.11.6",
+ "flate2",
+ "futures",
+ "http 1.2.0",
+ "hyper 1.5.2",
+ "libc",
+ "log",
+ "openssl",
+ "pingora",
+ "prost 0.12.6",
+ "prost-build 0.12.6",
+ "prost-types 0.12.6",
+ "reqwest 0.12.12",
+ "serde",
+ "serde_json",
+ "tokio",
+ "tokio-retry",
+ "tokio-util",
+ "url",
+]
+
+[[package]]
+name = "encore-tsparser"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "assert_fs",
+ "assert_matches",
+ "chrono",
+ "clean-path",
+ "convert_case",
+ "cron-parser",
+ "duct",
+ "env_logger 0.10.2",
+ "handlebars",
+ "indexmap 2.7.0",
+ "insta",
+ "itertools 0.13.0",
+ "junction",
+ "litparser",
+ "litparser-derive",
+ "log",
+ "matchit",
+ "once_cell",
+ "pg_query",
+ "prost 0.12.6",
+ "prost-build 0.12.6",
+ "regex",
+ "semver",
+ "serde",
+ "serde_json",
+ "serde_yaml 0.9.34+deprecated",
+ "swc_common",
+ "swc_ecma_ast",
+ "swc_ecma_loader",
+ "swc_ecma_parser",
+ "swc_ecma_transforms_base",
+ "swc_ecma_visit",
+ "symlink",
+ "tempdir",
+ "thiserror 1.0.69",
+ "tracing",
+ "tracing-subscriber",
+ "txtar",
+ "url",
+ "walkdir",
+]
+
+[[package]]
+name = "env_filter"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "186e05a59d4c50738528153b83b0b0194d3a29507dfec16eccd4b342903397d0"
+dependencies = [
+ "log",
+ "regex",
+]
+
+[[package]]
+name = "env_logger"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3"
+dependencies = [
+ "log",
+ "regex",
+]
+
+[[package]]
+name = "env_logger"
+version = "0.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580"
+dependencies = [
+ "humantime",
+ "is-terminal",
+ "log",
+ "regex",
+ "termcolor",
+]
+
+[[package]]
+name = "env_logger"
+version = "0.11.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcaee3d8e3cfc3fd92428d477bc97fc29ec8716d180c0d74c643bb26166660e0"
+dependencies = [
+ "anstream",
+ "anstyle",
+ "env_filter",
+ "humantime",
+ "log",
+]
+
+[[package]]
+name = "equivalent"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
+
+[[package]]
+name = "erased-serde"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24e2389d65ab4fab27dc2a5de7b191e1f6617d1f1c8855c0dc569c94a4cbb18d"
+dependencies = [
+ "serde",
+ "typeid",
+]
+
+[[package]]
+name = "errno"
+version = "0.3.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d"
+dependencies = [
+ "libc",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "event-listener"
+version = "2.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0"
+
+[[package]]
+name = "fallible-iterator"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7"
+
+[[package]]
+name = "fastrand"
+version = "2.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
+
+[[package]]
+name = "ff"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160"
+dependencies = [
+ "rand_core 0.6.4",
+ "subtle",
+]
+
+[[package]]
+name = "fixedbitset"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
+
+[[package]]
+name = "flate2"
+version = "1.0.35"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c"
+dependencies = [
+ "crc32fast",
+ "libz-ng-sys",
+ "miniz_oxide",
+]
+
+[[package]]
+name = "float-cmp"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "fnv"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
+
+[[package]]
+name = "foldhash"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f"
+
+[[package]]
+name = "foreign-types"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
+dependencies = [
+ "foreign-types-shared",
+]
+
+[[package]]
+name = "foreign-types-shared"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
+
+[[package]]
+name = "form_urlencoded"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456"
+dependencies = [
+ "percent-encoding",
+]
+
+[[package]]
+name = "from_variant"
+version = "0.1.6"
+source = "git+https://github.com/encoredev/swc?branch=node-resolve-exports#3ccddcb7d70380b6952296717b2d9f2056f4c2ac"
+dependencies = [
+ "pmutil",
+ "proc-macro2",
+ "swc_macros_common",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "fs_extra"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c"
+
+[[package]]
+name = "fuchsia-cprng"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
+
+[[package]]
+name = "futures"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-executor",
+ "futures-io",
+ "futures-sink",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-channel"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
+dependencies = [
+ "futures-core",
+ "futures-sink",
+]
+
+[[package]]
+name = "futures-core"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
+
+[[package]]
+name = "futures-executor"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f"
+dependencies = [
+ "futures-core",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-io"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6"
+
+[[package]]
+name = "futures-macro"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "futures-sink"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7"
+
+[[package]]
+name = "futures-task"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988"
+
+[[package]]
+name = "futures-util"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-io",
+ "futures-macro",
+ "futures-sink",
+ "futures-task",
+ "memchr",
+ "pin-project-lite",
+ "pin-utils",
+ "slab",
+]
+
+[[package]]
+name = "generic-array"
+version = "0.14.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
+dependencies = [
+ "typenum",
+ "version_check",
+]
+
+[[package]]
+name = "geo-types"
+version = "0.7.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b6f47c611187777bbca61ea7aba780213f5f3441fd36294ab333e96cfa791b65"
+dependencies = [
+ "approx",
+ "num-traits",
+ "serde",
+]
+
+[[package]]
+name = "gethostname"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c1ebd34e35c46e00bb73e81363248d627782724609fe1b6396f553f68fe3862e"
+dependencies = [
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.1.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "wasi 0.9.0+wasi-snapshot-preview1",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
+dependencies = [
+ "cfg-if",
+ "js-sys",
+ "libc",
+ "wasi 0.11.0+wasi-snapshot-preview1",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "r-efi",
+ "wasi 0.14.2+wasi-0.2.4",
+]
+
+[[package]]
+name = "gimli"
+version = "0.31.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
+
+[[package]]
+name = "gjson"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "43503cc176394dd30a6525f5f36e838339b8b5619be33ed9a7783841580a97b6"
+
+[[package]]
+name = "glob"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280"
+
+[[package]]
+name = "globset"
+version = "0.4.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "15f1ce686646e7f1e19bf7d5533fe443a45dbfb990e00629110797578b42fb19"
+dependencies = [
+ "aho-corasick",
+ "bstr",
+ "log",
+ "regex-automata 0.4.9",
+ "regex-syntax 0.8.5",
+]
+
+[[package]]
+name = "globwalk"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0bf760ebf69878d9fd8f110c89703d90ce35095324d1f1edcb595c63945ee757"
+dependencies = [
+ "bitflags 2.9.4",
+ "ignore",
+ "walkdir",
+]
+
+[[package]]
+name = "google-cloud-auth"
+version = "0.13.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3bf7cb7864f08a92e77c26bb230d021ea57691788fb5dd51793f96965d19e7f9"
+dependencies = [
+ "async-trait",
+ "base64 0.21.7",
+ "google-cloud-metadata 0.4.0",
+ "google-cloud-token",
+ "home",
+ "jsonwebtoken",
+ "reqwest 0.11.27",
+ "serde",
+ "serde_json",
+ "thiserror 1.0.69",
+ "time",
+ "tokio",
+ "tracing",
+ "urlencoding",
+]
+
+[[package]]
+name = "google-cloud-auth"
+version = "0.17.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e57a13fbacc5e9c41ded3ad8d0373175a6b7a6ad430d99e89d314ac121b7ab06"
+dependencies = [
+ "async-trait",
+ "base64 0.21.7",
+ "google-cloud-metadata 0.5.0",
+ "google-cloud-token",
+ "home",
+ "jsonwebtoken",
+ "reqwest 0.12.12",
+ "serde",
+ "serde_json",
+ "thiserror 1.0.69",
+ "time",
+ "tokio",
+ "tracing",
+ "urlencoding",
+]
+
+[[package]]
+name = "google-cloud-gax"
+version = "0.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8cb60314136e37de9e2a05ddb427b9c5a39c3d188de2e2f026c6af74425eef44"
+dependencies = [
+ "google-cloud-token",
+ "http 0.2.12",
+ "thiserror 1.0.69",
+ "tokio",
+ "tokio-retry",
+ "tonic",
+ "tower 0.4.13",
+ "tracing",
+]
+
+[[package]]
+name = "google-cloud-googleapis"
+version = "0.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "db8a478015d079296167e3f08e096dc99cffc2cb50fa203dd38aaa9dd37f8354"
+dependencies = [
+ "prost 0.12.6",
+ "prost-types 0.12.6",
+ "tonic",
+]
+
+[[package]]
+name = "google-cloud-metadata"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc279bfb50487d7bcd900e8688406475fc750fe474a835b2ab9ade9eb1fc90e2"
+dependencies = [
+ "reqwest 0.11.27",
+ "thiserror 1.0.69",
+ "tokio",
+]
+
+[[package]]
+name = "google-cloud-metadata"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "04f945a208886a13d07636f38fb978da371d0abc3e34bad338124b9f8c135a8f"
+dependencies = [
+ "reqwest 0.12.12",
+ "thiserror 1.0.69",
+ "tokio",
+]
+
+[[package]]
+name = "google-cloud-pubsub"
+version = "0.22.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0f6e4fdcd8303ad0d0cdb8b5722aa3a57de9534af27d4da71fc4d3179174a896"
+dependencies = [
+ "async-channel",
+ "async-stream",
+ "google-cloud-auth 0.13.2",
+ "google-cloud-gax",
+ "google-cloud-googleapis",
+ "google-cloud-token",
+ "prost-types 0.12.6",
+ "thiserror 1.0.69",
+ "tokio",
+ "tokio-util",
+ "tracing",
+]
+
+[[package]]
+name = "google-cloud-storage"
+version = "0.22.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7347a3d65cd64db51e5b4aebf0c68c484042948c6d53f856f58269bc9816360"
+dependencies = [
+ "anyhow",
+ "async-stream",
+ "async-trait",
+ "base64 0.21.7",
+ "bytes",
+ "futures-util",
+ "google-cloud-auth 0.17.2",
+ "google-cloud-metadata 0.5.0",
+ "google-cloud-token",
+ "hex",
+ "once_cell",
+ "percent-encoding",
+ "pkcs8 0.10.2",
+ "regex",
+ "reqwest 0.12.12",
+ "reqwest-middleware",
+ "ring 0.17.8",
+ "serde",
+ "serde_json",
+ "sha2",
+ "thiserror 1.0.69",
+ "time",
+ "tokio",
+ "tracing",
+ "url",
+]
+
+[[package]]
+name = "google-cloud-token"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f49c12ba8b21d128a2ce8585955246977fbce4415f680ebf9199b6f9d6d725f"
+dependencies = [
+ "async-trait",
+]
+
+[[package]]
+name = "group"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7"
+dependencies = [
+ "ff",
+ "rand_core 0.6.4",
+ "subtle",
+]
+
+[[package]]
+name = "h2"
+version = "0.3.26"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8"
+dependencies = [
+ "bytes",
+ "fnv",
+ "futures-core",
+ "futures-sink",
+ "futures-util",
+ "http 0.2.12",
+ "indexmap 2.7.0",
+ "slab",
+ "tokio",
+ "tokio-util",
+ "tracing",
+]
+
+[[package]]
+name = "h2"
+version = "0.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e"
+dependencies = [
+ "atomic-waker",
+ "bytes",
+ "fnv",
+ "futures-core",
+ "futures-sink",
+ "http 1.2.0",
+ "indexmap 2.7.0",
+ "slab",
+ "tokio",
+ "tokio-util",
+ "tracing",
+]
+
+[[package]]
+name = "handlebars"
+version = "4.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "faa67bab9ff362228eb3d00bd024a4965d8231bbb7921167f0cfa66c6626b225"
+dependencies = [
+ "log",
+ "pest",
+ "pest_derive",
+ "serde",
+ "serde_json",
+ "thiserror 1.0.69",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+
+[[package]]
+name = "hashbrown"
+version = "0.14.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"
+
+[[package]]
+name = "hashbrown"
+version = "0.15.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
+dependencies = [
+ "allocator-api2",
+ "equivalent",
+ "foldhash",
+]
+
+[[package]]
+name = "heck"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
+
+[[package]]
+name = "heck"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"
+
+[[package]]
+name = "hermit-abi"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc"
+
+[[package]]
+name = "hex"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
+
+[[package]]
+name = "hmac"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e"
+dependencies = [
+ "digest",
+]
+
+[[package]]
+name = "home"
+version = "0.5.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf"
+dependencies = [
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "hostname"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867"
+dependencies = [
+ "libc",
+ "match_cfg",
+ "winapi",
+]
+
+[[package]]
+name = "hstr"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "63d6824358c0fd9a68bb23999ed2ef76c84f79408a26ef7ae53d5f370c94ad36"
+dependencies = [
+ "hashbrown 0.14.5",
+ "new_debug_unreachable",
+ "once_cell",
+ "phf 0.11.3",
+ "rustc-hash",
+ "triomphe",
+]
+
+[[package]]
+name = "http"
+version = "0.2.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1"
+dependencies = [
+ "bytes",
+ "fnv",
+ "itoa",
+]
+
+[[package]]
+name = "http"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea"
+dependencies = [
+ "bytes",
+ "fnv",
+ "itoa",
+]
+
+[[package]]
+name = "http-body"
+version = "0.4.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"
+dependencies = [
+ "bytes",
+ "http 0.2.12",
+ "pin-project-lite",
+]
+
+[[package]]
+name = "http-body"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184"
+dependencies = [
+ "bytes",
+ "http 1.2.0",
+]
+
+[[package]]
+name = "http-body-util"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f"
+dependencies = [
+ "bytes",
+ "futures-util",
+ "http 1.2.0",
+ "http-body 1.0.1",
+ "pin-project-lite",
+]
+
+[[package]]
+name = "http-range-header"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9171a2ea8a68358193d15dd5d70c1c10a2afc3e7e4c5bc92bc9f025cebd7359c"
+
+[[package]]
+name = "httparse"
+version = "1.9.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946"
+
+[[package]]
+name = "httpdate"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
+
+[[package]]
+name = "humantime"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
+
+[[package]]
+name = "hyper"
+version = "0.14.32"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7"
+dependencies = [
+ "bytes",
+ "futures-channel",
+ "futures-core",
+ "futures-util",
+ "h2 0.3.26",
+ "http 0.2.12",
+ "http-body 0.4.6",
+ "httparse",
+ "httpdate",
+ "itoa",
+ "pin-project-lite",
+ "socket2",
+ "tokio",
+ "tower-service",
+ "tracing",
+ "want",
+]
+
+[[package]]
+name = "hyper"
+version = "1.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "256fb8d4bd6413123cc9d91832d78325c48ff41677595be797d90f42969beae0"
+dependencies = [
+ "bytes",
+ "futures-channel",
+ "futures-util",
+ "h2 0.4.7",
+ "http 1.2.0",
+ "http-body 1.0.1",
+ "httparse",
+ "httpdate",
+ "itoa",
+ "pin-project-lite",
+ "smallvec",
+ "tokio",
+ "want",
+]
+
+[[package]]
+name = "hyper-rustls"
+version = "0.24.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590"
+dependencies = [
+ "futures-util",
+ "http 0.2.12",
+ "hyper 0.14.32",
+ "log",
+ "rustls 0.21.12",
+ "rustls-native-certs 0.6.3",
+ "tokio",
+ "tokio-rustls 0.24.1",
+]
+
+[[package]]
+name = "hyper-rustls"
+version = "0.27.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2"
+dependencies = [
+ "futures-util",
+ "http 1.2.0",
+ "hyper 1.5.2",
+ "hyper-util",
+ "rustls 0.23.20",
+ "rustls-pki-types",
+ "tokio",
+ "tokio-rustls 0.26.1",
+ "tower-service",
+]
+
+[[package]]
+name = "hyper-timeout"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1"
+dependencies = [
+ "hyper 0.14.32",
+ "pin-project-lite",
+ "tokio",
+ "tokio-io-timeout",
+]
+
+[[package]]
+name = "hyper-tls"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905"
+dependencies = [
+ "bytes",
+ "hyper 0.14.32",
+ "native-tls",
+ "tokio",
+ "tokio-native-tls",
+]
+
+[[package]]
+name = "hyper-tls"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0"
+dependencies = [
+ "bytes",
+ "http-body-util",
+ "hyper 1.5.2",
+ "hyper-util",
+ "native-tls",
+ "tokio",
+ "tokio-native-tls",
+ "tower-service",
+]
+
+[[package]]
+name = "hyper-util"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4"
+dependencies = [
+ "bytes",
+ "futures-channel",
+ "futures-util",
+ "http 1.2.0",
+ "http-body 1.0.1",
+ "hyper 1.5.2",
+ "pin-project-lite",
+ "socket2",
+ "tokio",
+ "tower-service",
+ "tracing",
+]
+
+[[package]]
+name = "iana-time-zone"
+version = "0.1.61"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220"
+dependencies = [
+ "android_system_properties",
+ "core-foundation-sys",
+ "iana-time-zone-haiku",
+ "js-sys",
+ "wasm-bindgen",
+ "windows-core",
+]
+
+[[package]]
+name = "iana-time-zone-haiku"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
+dependencies = [
+ "cc",
+]
+
+[[package]]
+name = "icu_collections"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526"
+dependencies = [
+ "displaydoc",
+ "yoke",
+ "zerofrom",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_locid"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637"
+dependencies = [
+ "displaydoc",
+ "litemap",
+ "tinystr",
+ "writeable",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_locid_transform"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e"
+dependencies = [
+ "displaydoc",
+ "icu_locid",
+ "icu_locid_transform_data",
+ "icu_provider",
+ "tinystr",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_locid_transform_data"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e"
+
+[[package]]
+name = "icu_normalizer"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f"
+dependencies = [
+ "displaydoc",
+ "icu_collections",
+ "icu_normalizer_data",
+ "icu_properties",
+ "icu_provider",
+ "smallvec",
+ "utf16_iter",
+ "utf8_iter",
+ "write16",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_normalizer_data"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516"
+
+[[package]]
+name = "icu_properties"
+version = "1.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5"
+dependencies = [
+ "displaydoc",
+ "icu_collections",
+ "icu_locid_transform",
+ "icu_properties_data",
+ "icu_provider",
+ "tinystr",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_properties_data"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569"
+
+[[package]]
+name = "icu_provider"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9"
+dependencies = [
+ "displaydoc",
+ "icu_locid",
+ "icu_provider_macros",
+ "stable_deref_trait",
+ "tinystr",
+ "writeable",
+ "yoke",
+ "zerofrom",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_provider_macros"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "ident_case"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
+
+[[package]]
+name = "idna"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e"
+dependencies = [
+ "idna_adapter",
+ "smallvec",
+ "utf8_iter",
+]
+
+[[package]]
+name = "idna_adapter"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71"
+dependencies = [
+ "icu_normalizer",
+ "icu_properties",
+]
+
+[[package]]
+name = "ignore"
+version = "0.4.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d89fd380afde86567dfba715db065673989d6253f42b88179abd3eae47bda4b"
+dependencies = [
+ "crossbeam-deque",
+ "globset",
+ "log",
+ "memchr",
+ "regex-automata 0.4.9",
+ "same-file",
+ "walkdir",
+ "winapi-util",
+]
+
+[[package]]
+name = "indexmap"
+version = "1.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
+dependencies = [
+ "autocfg",
+ "hashbrown 0.12.3",
+ "serde",
+]
+
+[[package]]
+name = "indexmap"
+version = "2.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f"
+dependencies = [
+ "equivalent",
+ "hashbrown 0.15.2",
+ "serde",
+]
+
+[[package]]
+name = "insta"
+version = "1.42.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6513e4067e16e69ed1db5ab56048ed65db32d10ba5fc1217f5393f8f17d8b5a5"
+dependencies = [
+ "console",
+ "globset",
+ "linked-hash-map",
+ "once_cell",
+ "serde",
+ "similar",
+ "walkdir",
+]
+
+[[package]]
+name = "instant"
+version = "0.1.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "ipnet"
+version = "2.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708"
+
+[[package]]
+name = "is-macro"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d57a3e447e24c22647738e4607f1df1e0ec6f72e16182c4cd199f647cdfb0e4"
+dependencies = [
+ "heck 0.5.0",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "is-terminal"
+version = "0.4.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b"
+dependencies = [
+ "hermit-abi 0.4.0",
+ "libc",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "is_terminal_polyfill"
+version = "1.70.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf"
+
+[[package]]
+name = "itertools"
+version = "0.10.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
+dependencies = [
+ "either",
+]
+
+[[package]]
+name = "itertools"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569"
+dependencies = [
+ "either",
+]
+
+[[package]]
+name = "itertools"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186"
+dependencies = [
+ "either",
+]
+
+[[package]]
+name = "itertools"
+version = "0.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285"
+dependencies = [
+ "either",
+]
+
+[[package]]
+name = "itoa"
+version = "1.0.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674"
+
+[[package]]
+name = "jobserver"
+version = "0.1.32"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "js-sys"
+version = "0.3.76"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7"
+dependencies = [
+ "once_cell",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "jsonwebtoken"
+version = "9.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f"
+dependencies = [
+ "base64 0.21.7",
+ "js-sys",
+ "pem",
+ "ring 0.17.8",
+ "serde",
+ "serde_json",
+ "simple_asn1",
+]
+
+[[package]]
+name = "junction"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72bbdfd737a243da3dfc1f99ee8d6e166480f17ab4ac84d7c34aacd73fc7bd16"
+dependencies = [
+ "scopeguard",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "keccak"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654"
+dependencies = [
+ "cpufeatures",
+]
+
+[[package]]
+name = "lazy_static"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
+dependencies = [
+ "spin 0.9.8",
+]
+
+[[package]]
+name = "lazycell"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
+
+[[package]]
+name = "libc"
+version = "0.2.169"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a"
+
+[[package]]
+name = "libloading"
+version = "0.8.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34"
+dependencies = [
+ "cfg-if",
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "libm"
+version = "0.2.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa"
+
+[[package]]
+name = "libz-ng-sys"
+version = "1.1.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7cee1488e961a80d172564fd6fcda11d8a4ac6672c06fe008e9213fa60520c2b"
+dependencies = [
+ "cmake",
+ "libc",
+]
+
+[[package]]
+name = "linked-hash-map"
+version = "0.5.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f"
+
+[[package]]
+name = "linux-raw-sys"
+version = "0.4.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab"
+
+[[package]]
+name = "litemap"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104"
+
+[[package]]
+name = "litparser"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "clean-path",
+ "duration-string",
+ "num-bigint",
+ "num-integer",
+ "num-traits",
+ "swc_common",
+ "swc_ecma_ast",
+]
+
+[[package]]
+name = "litparser-derive"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "litparser",
+ "prettyplease",
+ "proc-macro2",
+ "quote",
+ "swc_common",
+ "swc_ecma_ast",
+ "swc_ecma_parser",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "lock_api"
+version = "0.4.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
+dependencies = [
+ "autocfg",
+ "scopeguard",
+]
+
+[[package]]
+name = "log"
+version = "0.4.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
+dependencies = [
+ "serde",
+ "value-bag",
+]
+
+[[package]]
+name = "lru"
+version = "0.12.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38"
+dependencies = [
+ "hashbrown 0.15.2",
+]
+
+[[package]]
+name = "malachite"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec410515e231332b14cd986a475d1c3323bcfa4c7efc038bfa1d5b410b1c57e4"
+dependencies = [
+ "malachite-base",
+ "malachite-nz",
+ "malachite-q",
+]
+
+[[package]]
+name = "malachite-base"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c738d3789301e957a8f7519318fcbb1b92bb95863b28f6938ae5a05be6259f34"
+dependencies = [
+ "hashbrown 0.15.2",
+ "itertools 0.14.0",
+ "libm",
+ "ryu",
+]
+
+[[package]]
+name = "malachite-nz"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1707c9a1fa36ce21749b35972bfad17bbf34cf5a7c96897c0491da321e387d3b"
+dependencies = [
+ "itertools 0.14.0",
+ "libm",
+ "malachite-base",
+ "wide",
+]
+
+[[package]]
+name = "malachite-q"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d764801aa4e96bbb69b389dcd03b50075345131cd63ca2e380bca71cc37a3675"
+dependencies = [
+ "itertools 0.14.0",
+ "malachite-base",
+ "malachite-nz",
+]
+
+[[package]]
+name = "mappable-rc"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "204651f31b0a6a7b2128d2b92c372cd94607b210c3a6b6e542c57a8cfd4db996"
+
+[[package]]
+name = "match_cfg"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4"
+
+[[package]]
+name = "matchers"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
+dependencies = [
+ "regex-automata 0.1.10",
+]
+
+[[package]]
+name = "matches"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5"
+
+[[package]]
+name = "matchit"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94"
+
+[[package]]
+name = "md-5"
+version = "0.10.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf"
+dependencies = [
+ "cfg-if",
+ "digest",
+]
+
+[[package]]
+name = "md5"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"
+
+[[package]]
+name = "memchr"
+version = "2.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
+
+[[package]]
+name = "memoffset"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "mime"
+version = "0.3.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
+
+[[package]]
+name = "mime_guess"
+version = "2.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e"
+dependencies = [
+ "mime",
+ "unicase",
+]
+
+[[package]]
+name = "minimal-lexical"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
+
+[[package]]
+name = "miniz_oxide"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ffbe83022cedc1d264172192511ae958937694cd57ce297164951b8b3568394"
+dependencies = [
+ "adler2",
+]
+
+[[package]]
+name = "mio"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd"
+dependencies = [
+ "libc",
+ "wasi 0.11.0+wasi-snapshot-preview1",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "multimap"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03"
+
+[[package]]
+name = "napi"
+version = "2.16.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "214f07a80874bb96a8433b3cdfc84980d56c7b02e1a0d7ba4ba0db5cef785e2b"
+dependencies = [
+ "anyhow",
+ "bitflags 2.9.4",
+ "ctor",
+ "napi-derive",
+ "napi-sys",
+ "once_cell",
+ "serde",
+ "serde_json",
+ "tokio",
+]
+
+[[package]]
+name = "napi-build"
+version = "2.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "db836caddef23662b94e16bf1f26c40eceb09d6aee5d5b06a7ac199320b69b19"
+
+[[package]]
+name = "napi-derive"
+version = "2.16.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7cbe2585d8ac223f7d34f13701434b9d5f4eb9c332cccce8dee57ea18ab8ab0c"
+dependencies = [
+ "cfg-if",
+ "convert_case",
+ "napi-derive-backend",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "napi-derive-backend"
+version = "1.0.75"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1639aaa9eeb76e91c6ae66da8ce3e89e921cd3885e99ec85f4abacae72fc91bf"
+dependencies = [
+ "convert_case",
+ "once_cell",
+ "proc-macro2",
+ "quote",
+ "regex",
+ "semver",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "napi-sys"
+version = "2.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "427802e8ec3a734331fec1035594a210ce1ff4dc5bc1950530920ab717964ea3"
+dependencies = [
+ "libloading",
+]
+
+[[package]]
+name = "native-tls"
+version = "0.2.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466"
+dependencies = [
+ "libc",
+ "log",
+ "openssl",
+ "openssl-probe",
+ "openssl-sys",
+ "schannel",
+ "security-framework",
+ "security-framework-sys",
+ "tempfile",
+]
+
+[[package]]
+name = "new_debug_unreachable"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086"
+
+[[package]]
+name = "nix"
+version = "0.24.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069"
+dependencies = [
+ "bitflags 1.3.2",
+ "cfg-if",
+ "libc",
+ "memoffset",
+]
+
+[[package]]
+name = "nom"
+version = "7.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a"
+dependencies = [
+ "memchr",
+ "minimal-lexical",
+]
+
+[[package]]
+name = "normalize-line-endings"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be"
+
+[[package]]
+name = "normpath"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2a9da8c9922c35a1033d76f7272dfc2e7ee20392083d75aeea6ced23c6266578"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "nu-ansi-term"
+version = "0.46.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
+dependencies = [
+ "overload",
+ "winapi",
+]
+
+[[package]]
+name = "num-bigint"
+version = "0.4.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9"
+dependencies = [
+ "num-integer",
+ "num-traits",
+ "serde",
+]
+
+[[package]]
+name = "num-bigint-dig"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151"
+dependencies = [
+ "byteorder",
+ "lazy_static",
+ "libm",
+ "num-integer",
+ "num-iter",
+ "num-traits",
+ "rand 0.8.5",
+ "smallvec",
+ "zeroize",
+]
+
+[[package]]
+name = "num-conv"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9"
+
+[[package]]
+name = "num-integer"
+version = "0.1.46"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "num-iter"
+version = "0.1.45"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf"
+dependencies = [
+ "autocfg",
+ "num-integer",
+ "num-traits",
+]
+
+[[package]]
+name = "num-traits"
+version = "0.2.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
+dependencies = [
+ "autocfg",
+ "libm",
+]
+
+[[package]]
+name = "num_cpus"
+version = "1.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43"
+dependencies = [
+ "hermit-abi 0.3.9",
+ "libc",
+]
+
+[[package]]
+name = "object"
+version = "0.36.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.20.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775"
+
+[[package]]
+name = "openssl"
+version = "0.10.68"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5"
+dependencies = [
+ "bitflags 2.9.4",
+ "cfg-if",
+ "foreign-types",
+ "libc",
+ "once_cell",
+ "openssl-macros",
+ "openssl-sys",
+]
+
+[[package]]
+name = "openssl-macros"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "openssl-probe"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
+
+[[package]]
+name = "openssl-src"
+version = "300.4.1+3.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "faa4eac4138c62414b5622d1b31c5c304f34b406b013c079c2bbc652fdd6678c"
+dependencies = [
+ "cc",
+]
+
+[[package]]
+name = "openssl-sys"
+version = "0.9.104"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741"
+dependencies = [
+ "cc",
+ "libc",
+ "openssl-src",
+ "pkg-config",
+ "vcpkg",
+]
+
+[[package]]
+name = "os_pipe"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5ffd2b0a5634335b135d5728d84c5e0fd726954b87111f7506a61c502280d982"
+dependencies = [
+ "libc",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "os_str_bytes"
+version = "6.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1"
+
+[[package]]
+name = "outref"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4030760ffd992bef45b0ae3f10ce1aba99e33464c90d14dd7c039884963ddc7a"
+
+[[package]]
+name = "overload"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"
+
+[[package]]
+name = "p256"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594"
+dependencies = [
+ "ecdsa",
+ "elliptic-curve",
+ "sha2",
+]
+
+[[package]]
+name = "parking_lot"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27"
+dependencies = [
+ "lock_api",
+ "parking_lot_core",
+]
+
+[[package]]
+name = "parking_lot_core"
+version = "0.9.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "redox_syscall",
+ "smallvec",
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "paste"
+version = "1.0.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
+
+[[package]]
+name = "path-clean"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ecba01bf2678719532c5e3059e0b5f0811273d94b397088b82e3bd0a78c78fdd"
+
+[[package]]
+name = "pathdiff"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3"
+
+[[package]]
+name = "peeking_take_while"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099"
+
+[[package]]
+name = "pem"
+version = "3.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae"
+dependencies = [
+ "base64 0.22.1",
+ "serde",
+]
+
+[[package]]
+name = "pem-rfc7468"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412"
+dependencies = [
+ "base64ct",
+]
+
+[[package]]
+name = "percent-encoding"
+version = "2.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
+
+[[package]]
+name = "pest"
+version = "2.7.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc"
+dependencies = [
+ "memchr",
+ "thiserror 2.0.10",
+ "ucd-trie",
+]
+
+[[package]]
+name = "pest_derive"
+version = "2.7.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "816518421cfc6887a0d62bf441b6ffb4536fcc926395a69e1a85852d4363f57e"
+dependencies = [
+ "pest",
+ "pest_generator",
+]
+
+[[package]]
+name = "pest_generator"
+version = "2.7.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7d1396fd3a870fc7838768d171b4616d5c91f6cc25e377b673d714567d99377b"
+dependencies = [
+ "pest",
+ "pest_meta",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "pest_meta"
+version = "2.7.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e1e58089ea25d717bfd31fb534e4f3afcc2cc569c70de3e239778991ea3b7dea"
+dependencies = [
+ "once_cell",
+ "pest",
+ "sha2",
+]
+
+[[package]]
+name = "petgraph"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db"
+dependencies = [
+ "fixedbitset",
+ "indexmap 2.7.0",
+]
+
+[[package]]
+name = "pg_query"
+version = "6.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ca6fdb8f9d32182abf17328789f87f305dd8c8ce5bf48c5aa2b5cffc94e1c04"
+dependencies = [
+ "bindgen",
+ "cc",
+ "fs_extra",
+ "glob",
+ "itertools 0.10.5",
+ "prost 0.13.5",
+ "prost-build 0.13.5",
+ "serde",
+ "serde_json",
+ "thiserror 1.0.69",
+]
+
+[[package]]
+name = "pgvector"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fc58e2d255979a31caa7cabfa7aac654af0354220719ab7a68520ae7a91e8c0b"
+dependencies = [
+ "bytes",
+ "postgres-types",
+ "serde",
+]
+
+[[package]]
+name = "phf"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fabbf1ead8a5bcbc20f5f8b939ee3f5b0f6f281b6ad3468b84656b658b455259"
+dependencies = [
+ "phf_macros 0.10.0",
+ "phf_shared 0.10.0",
+ "proc-macro-hack",
+]
+
+[[package]]
+name = "phf"
+version = "0.11.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078"
+dependencies = [
+ "phf_macros 0.11.3",
+ "phf_shared 0.11.3",
+]
+
+[[package]]
+name = "phf_generator"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5d5285893bb5eb82e6aaf5d59ee909a06a16737a8970984dd7746ba9283498d6"
+dependencies = [
+ "phf_shared 0.10.0",
+ "rand 0.8.5",
+]
+
+[[package]]
+name = "phf_generator"
+version = "0.11.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d"
+dependencies = [
+ "phf_shared 0.11.3",
+ "rand 0.8.5",
+]
+
+[[package]]
+name = "phf_macros"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "58fdf3184dd560f160dd73922bea2d5cd6e8f064bf4b13110abd81b03697b4e0"
+dependencies = [
+ "phf_generator 0.10.0",
+ "phf_shared 0.10.0",
+ "proc-macro-hack",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "phf_macros"
+version = "0.11.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216"
+dependencies = [
+ "phf_generator 0.11.3",
+ "phf_shared 0.11.3",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "phf_shared"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096"
+dependencies = [
+ "siphasher 0.3.11",
+]
+
+[[package]]
+name = "phf_shared"
+version = "0.11.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5"
+dependencies = [
+ "siphasher 1.0.1",
+]
+
+[[package]]
+name = "pin-project"
+version = "1.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916"
+dependencies = [
+ "pin-project-internal",
+]
+
+[[package]]
+name = "pin-project-internal"
+version = "1.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "pin-project-lite"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b"
+
+[[package]]
+name = "pin-utils"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
+
+[[package]]
+name = "pingora"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "79c9fc7098dc3e7d09d2d1647921005be9301cf68536826195dc5369e05124bd"
+dependencies = [
+ "pingora-cache",
+ "pingora-core",
+ "pingora-http",
+ "pingora-load-balancing",
+ "pingora-proxy",
+ "pingora-timeout",
+]
+
+[[package]]
+name = "pingora-cache"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "35ee62f28526d8d484621e77f8d6a1807f1bd07558a06ab5a204b4834d6be056"
+dependencies = [
+ "ahash",
+ "async-trait",
+ "blake2",
+ "bytes",
+ "hex",
+ "http 1.2.0",
+ "httparse",
+ "httpdate",
+ "indexmap 1.9.3",
+ "log",
+ "lru",
+ "once_cell",
+ "parking_lot",
+ "pingora-core",
+ "pingora-error",
+ "pingora-header-serde",
+ "pingora-http",
+ "pingora-lru",
+ "pingora-timeout",
+ "regex",
+ "rmp",
+ "rmp-serde",
+ "rustracing",
+ "rustracing_jaeger",
+ "serde",
+ "strum",
+ "tokio",
+]
+
+[[package]]
+name = "pingora-core"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d123320b69bd06e897fc16bd1dde962a7b488c4d2ae825683fbca0198fad8669"
+dependencies = [
+ "ahash",
+ "async-trait",
+ "brotli",
+ "bytes",
+ "chrono",
+ "clap",
+ "daemonize",
+ "flate2",
+ "futures",
+ "h2 0.4.7",
+ "http 1.2.0",
+ "httparse",
+ "httpdate",
+ "libc",
+ "log",
+ "lru",
+ "nix",
+ "once_cell",
+ "openssl-probe",
+ "parking_lot",
+ "percent-encoding",
+ "pingora-error",
+ "pingora-http",
+ "pingora-openssl",
+ "pingora-pool",
+ "pingora-runtime",
+ "pingora-timeout",
+ "prometheus",
+ "rand 0.8.5",
+ "regex",
+ "serde",
+ "serde_yaml 0.8.26",
+ "sfv",
+ "socket2",
+ "strum",
+ "strum_macros",
+ "thread_local",
+ "tokio",
+ "tokio-test",
+ "unicase",
+ "windows-sys 0.59.0",
+ "zstd",
+]
+
+[[package]]
+name = "pingora-error"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6389511530152c535a554f592ae4a9691b1246cff20eb4564f2a34fc921195c0"
+
+[[package]]
+name = "pingora-header-serde"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bcb3f62d852da015e76ced56e93e6d52941679a9825281c90f2897841129e59d"
+dependencies = [
+ "bytes",
+ "http 1.2.0",
+ "httparse",
+ "pingora-error",
+ "pingora-http",
+ "thread_local",
+ "zstd",
+ "zstd-safe",
+]
+
+[[package]]
+name = "pingora-http"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70202f126056f366549afc804741e12dd9f419cfc79a0063ab15653007a0f4c6"
+dependencies = [
+ "bytes",
+ "http 1.2.0",
+ "pingora-error",
+]
+
+[[package]]
+name = "pingora-ketama"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c1bb6c2e11823a05ec9140fc8827f112b8380d78b837535f284e0a98f24cc0a"
+dependencies = [
+ "crc32fast",
+]
+
+[[package]]
+name = "pingora-load-balancing"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "84d558167ecb05cea487a6479700390a67fe414724f203e10c3912584a0f2cb1"
+dependencies = [
+ "arc-swap",
+ "async-trait",
+ "derivative",
+ "fnv",
+ "futures",
+ "http 1.2.0",
+ "log",
+ "pingora-core",
+ "pingora-error",
+ "pingora-http",
+ "pingora-ketama",
+ "pingora-runtime",
+ "rand 0.9.1",
+ "tokio",
+]
+
+[[package]]
+name = "pingora-lru"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cb50f65f06c4b81ccb3edcceaa54bb9439608506b0b3b8c048798169a64aad8e"
+dependencies = [
+ "arrayvec",
+ "hashbrown 0.15.2",
+ "parking_lot",
+ "rand 0.9.1",
+]
+
+[[package]]
+name = "pingora-openssl"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4f18158b901a02289f2a2a954a531c96e4d0703c94f7c9291981c9e53fddc6c1"
+dependencies = [
+ "foreign-types",
+ "libc",
+ "openssl",
+ "openssl-src",
+ "openssl-sys",
+ "tokio-openssl",
+]
+
+[[package]]
+name = "pingora-pool"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bacdd5dbdec690d468856d988b170c8bb4ab62e0edefc0f432ba5e326489f421"
+dependencies = [
+ "crossbeam-queue",
+ "log",
+ "lru",
+ "parking_lot",
+ "pingora-timeout",
+ "thread_local",
+ "tokio",
+]
+
+[[package]]
+name = "pingora-proxy"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5031783d6743bd31e4de7d7c7a19e9eecf369174c3cbd8a57eb52bc6bf882d92"
+dependencies = [
+ "async-trait",
+ "bytes",
+ "clap",
+ "futures",
+ "h2 0.4.7",
+ "http 1.2.0",
+ "log",
+ "once_cell",
+ "pingora-cache",
+ "pingora-core",
+ "pingora-error",
+ "pingora-http",
+ "pingora-timeout",
+ "regex",
+ "tokio",
+]
+
+[[package]]
+name = "pingora-runtime"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "31a7c445ca224630961045684201e3cf8da9af0b01f286ed54ff8b2403aaabff"
+dependencies = [
+ "once_cell",
+ "rand 0.8.5",
+ "thread_local",
+ "tokio",
+]
+
+[[package]]
+name = "pingora-timeout"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "685bb8808cc1919c63a06ab14fdac9b84a4887ced49259a5c0adc8bfb2ffe558"
+dependencies = [
+ "once_cell",
+ "parking_lot",
+ "pin-project-lite",
+ "thread_local",
+ "tokio",
+]
+
+[[package]]
+name = "pkcs1"
+version = "0.7.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f"
+dependencies = [
+ "der 0.7.9",
+ "pkcs8 0.10.2",
+ "spki 0.7.3",
+]
+
+[[package]]
+name = "pkcs8"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba"
+dependencies = [
+ "der 0.6.1",
+ "spki 0.6.0",
+]
+
+[[package]]
+name = "pkcs8"
+version = "0.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7"
+dependencies = [
+ "der 0.7.9",
+ "spki 0.7.3",
+]
+
+[[package]]
+name = "pkg-config"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2"
+
+[[package]]
+name = "pmutil"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "52a40bc70c2c58040d2d8b167ba9a5ff59fc9dab7ad44771cfde3dcfde7a09c6"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "postgres-native-tls"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2d442770e2b1e244bb5eb03b31c79b65bb2568f413b899eaba850fa945a65954"
+dependencies = [
+ "futures",
+ "native-tls",
+ "tokio",
+ "tokio-native-tls",
+ "tokio-postgres",
+]
+
+[[package]]
+name = "postgres-protocol"
+version = "0.6.8"
+source = "git+https://github.com/encoredev/rust-postgres?branch=encore-patches-sync#e71577eea1d14769a3021b6dd214448e3de3ffb6"
+dependencies = [
+ "base64 0.22.1",
+ "byteorder",
+ "bytes",
+ "fallible-iterator",
+ "hmac",
+ "log",
+ "md-5",
+ "memchr",
+ "rand 0.9.1",
+ "sha2",
+ "stringprep",
+]
+
+[[package]]
+name = "postgres-types"
+version = "0.2.9"
+source = "git+https://github.com/encoredev/rust-postgres?branch=encore-patches-sync#e71577eea1d14769a3021b6dd214448e3de3ffb6"
+dependencies = [
+ "array-init",
+ "bytes",
+ "chrono",
+ "cidr",
+ "fallible-iterator",
+ "geo-types",
+ "postgres-protocol",
+ "serde",
+ "serde_json",
+ "uuid",
+]
+
+[[package]]
+name = "powerfmt"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
+
+[[package]]
+name = "ppv-lite86"
+version = "0.2.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04"
+dependencies = [
+ "zerocopy",
+]
+
+[[package]]
+name = "precomputed-hash"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c"
+
+[[package]]
+name = "predicates"
+version = "2.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd"
+dependencies = [
+ "difflib",
+ "float-cmp",
+ "itertools 0.10.5",
+ "normalize-line-endings",
+ "predicates-core",
+ "regex",
+]
+
+[[package]]
+name = "predicates"
+version = "3.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a5d19ee57562043d37e82899fade9a22ebab7be9cef5026b07fda9cdd4293573"
+dependencies = [
+ "anstyle",
+ "difflib",
+ "predicates-core",
+]
+
+[[package]]
+name = "predicates-core"
+version = "1.0.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa"
+
+[[package]]
+name = "predicates-tree"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c"
+dependencies = [
+ "predicates-core",
+ "termtree",
+]
+
+[[package]]
+name = "prettyplease"
+version = "0.2.27"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "483f8c21f64f3ea09fe0f30f5d48c3e8eefe5dac9129f0075f76593b4c1da705"
+dependencies = [
+ "proc-macro2",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "proc-macro-error"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
+dependencies = [
+ "proc-macro-error-attr",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro-error-attr"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro-hack"
+version = "0.5.20+deprecated"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068"
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.92"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "prometheus"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3d33c28a30771f7f96db69893f78b857f7450d7e0237e9c8fc6427a81bae7ed1"
+dependencies = [
+ "cfg-if",
+ "fnv",
+ "lazy_static",
+ "memchr",
+ "parking_lot",
+ "protobuf",
+ "thiserror 1.0.69",
+]
+
+[[package]]
+name = "proptest"
+version = "1.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6fcdab19deb5195a31cf7726a210015ff1496ba1464fd42cb4f537b8b01b471f"
+dependencies = [
+ "bit-set",
+ "bit-vec",
+ "bitflags 2.9.4",
+ "lazy_static",
+ "num-traits",
+ "rand 0.9.1",
+ "rand_chacha 0.9.0",
+ "rand_xorshift",
+ "regex-syntax 0.8.5",
+ "rusty-fork",
+ "tempfile",
+ "unarray",
+]
+
+[[package]]
+name = "prost"
+version = "0.12.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29"
+dependencies = [
+ "bytes",
+ "prost-derive 0.12.6",
+]
+
+[[package]]
+name = "prost"
+version = "0.13.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5"
+dependencies = [
+ "bytes",
+ "prost-derive 0.13.5",
+]
+
+[[package]]
+name = "prost-build"
+version = "0.12.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4"
+dependencies = [
+ "bytes",
+ "heck 0.5.0",
+ "itertools 0.12.1",
+ "log",
+ "multimap",
+ "once_cell",
+ "petgraph",
+ "prettyplease",
+ "prost 0.12.6",
+ "prost-types 0.12.6",
+ "regex",
+ "syn 2.0.95",
+ "tempfile",
+]
+
+[[package]]
+name = "prost-build"
+version = "0.13.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf"
+dependencies = [
+ "heck 0.5.0",
+ "itertools 0.14.0",
+ "log",
+ "multimap",
+ "once_cell",
+ "petgraph",
+ "prettyplease",
+ "prost 0.13.5",
+ "prost-types 0.13.5",
+ "regex",
+ "syn 2.0.95",
+ "tempfile",
+]
+
+[[package]]
+name = "prost-derive"
+version = "0.12.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1"
+dependencies = [
+ "anyhow",
+ "itertools 0.12.1",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "prost-derive"
+version = "0.13.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d"
+dependencies = [
+ "anyhow",
+ "itertools 0.14.0",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "prost-types"
+version = "0.12.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0"
+dependencies = [
+ "prost 0.12.6",
+]
+
+[[package]]
+name = "prost-types"
+version = "0.13.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16"
+dependencies = [
+ "prost 0.13.5",
+]
+
+[[package]]
+name = "protobuf"
+version = "2.28.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94"
+
+[[package]]
+name = "quick-error"
+version = "1.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
+
+[[package]]
+name = "quickcheck"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6"
+dependencies = [
+ "env_logger 0.8.4",
+ "log",
+ "rand 0.8.5",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.38"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "r-efi"
+version = "5.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5"
+
+[[package]]
+name = "radix_fmt"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ce082a9940a7ace2ad4a8b7d0b1eac6aa378895f18be598230c5f2284ac05426"
+
+[[package]]
+name = "rand"
+version = "0.4.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293"
+dependencies = [
+ "fuchsia-cprng",
+ "libc",
+ "rand_core 0.3.1",
+ "rdrand",
+ "winapi",
+]
+
+[[package]]
+name = "rand"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
+dependencies = [
+ "getrandom 0.1.16",
+ "libc",
+ "rand_chacha 0.2.2",
+ "rand_core 0.5.1",
+ "rand_hc",
+]
+
+[[package]]
+name = "rand"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
+dependencies = [
+ "libc",
+ "rand_chacha 0.3.1",
+ "rand_core 0.6.4",
+]
+
+[[package]]
+name = "rand"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97"
+dependencies = [
+ "rand_chacha 0.9.0",
+ "rand_core 0.9.3",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
+dependencies = [
+ "ppv-lite86",
+ "rand_core 0.5.1",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
+dependencies = [
+ "ppv-lite86",
+ "rand_core 0.6.4",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb"
+dependencies = [
+ "ppv-lite86",
+ "rand_core 0.9.3",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
+dependencies = [
+ "rand_core 0.4.2",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
+
+[[package]]
+name = "rand_core"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
+dependencies = [
+ "getrandom 0.1.16",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
+dependencies = [
+ "getrandom 0.2.15",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38"
+dependencies = [
+ "getrandom 0.3.3",
+]
+
+[[package]]
+name = "rand_hc"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
+dependencies = [
+ "rand_core 0.5.1",
+]
+
+[[package]]
+name = "rand_xorshift"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a"
+dependencies = [
+ "rand_core 0.9.3",
+]
+
+[[package]]
+name = "rdrand"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
+dependencies = [
+ "rand_core 0.3.1",
+]
+
+[[package]]
+name = "redox_syscall"
+version = "0.5.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834"
+dependencies = [
+ "bitflags 2.9.4",
+]
+
+[[package]]
+name = "regex"
+version = "1.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-automata 0.4.9",
+ "regex-syntax 0.8.5",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
+dependencies = [
+ "regex-syntax 0.6.29",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.4.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax 0.8.5",
+]
+
+[[package]]
+name = "regex-lite"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a"
+
+[[package]]
+name = "regex-syntax"
+version = "0.6.29"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
+
+[[package]]
+name = "regex-syntax"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
+
+[[package]]
+name = "remove_dir_all"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "reqwest"
+version = "0.11.27"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62"
+dependencies = [
+ "base64 0.21.7",
+ "bytes",
+ "encoding_rs",
+ "futures-core",
+ "futures-util",
+ "h2 0.3.26",
+ "http 0.2.12",
+ "http-body 0.4.6",
+ "hyper 0.14.32",
+ "hyper-tls 0.5.0",
+ "ipnet",
+ "js-sys",
+ "log",
+ "mime",
+ "native-tls",
+ "once_cell",
+ "percent-encoding",
+ "pin-project-lite",
+ "rustls-pemfile 1.0.4",
+ "serde",
+ "serde_json",
+ "serde_urlencoded",
+ "sync_wrapper 0.1.2",
+ "system-configuration 0.5.1",
+ "tokio",
+ "tokio-native-tls",
+ "tower-service",
+ "url",
+ "wasm-bindgen",
+ "wasm-bindgen-futures",
+ "web-sys",
+ "winreg 0.50.0",
+]
+
+[[package]]
+name = "reqwest"
+version = "0.12.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "43e734407157c3c2034e0258f5e4473ddb361b1e85f95a66690d67264d7cd1da"
+dependencies = [
+ "base64 0.22.1",
+ "bytes",
+ "encoding_rs",
+ "futures-channel",
+ "futures-core",
+ "futures-util",
+ "h2 0.4.7",
+ "http 1.2.0",
+ "http-body 1.0.1",
+ "http-body-util",
+ "hyper 1.5.2",
+ "hyper-rustls 0.27.5",
+ "hyper-tls 0.6.0",
+ "hyper-util",
+ "ipnet",
+ "js-sys",
+ "log",
+ "mime",
+ "mime_guess",
+ "native-tls",
+ "once_cell",
+ "percent-encoding",
+ "pin-project-lite",
+ "rustls-pemfile 2.2.0",
+ "serde",
+ "serde_json",
+ "serde_urlencoded",
+ "sync_wrapper 1.0.2",
+ "system-configuration 0.6.1",
+ "tokio",
+ "tokio-native-tls",
+ "tokio-util",
+ "tower 0.5.2",
+ "tower-service",
+ "url",
+ "wasm-bindgen",
+ "wasm-bindgen-futures",
+ "wasm-streams",
+ "web-sys",
+ "windows-registry",
+]
+
+[[package]]
+name = "reqwest-middleware"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "562ceb5a604d3f7c885a792d42c199fd8af239d0a51b2fa6a78aafa092452b04"
+dependencies = [
+ "anyhow",
+ "async-trait",
+ "http 1.2.0",
+ "reqwest 0.12.12",
+ "serde",
+ "thiserror 1.0.69",
+ "tower-service",
+]
+
+[[package]]
+name = "rfc6979"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb"
+dependencies = [
+ "crypto-bigint 0.4.9",
+ "hmac",
+ "zeroize",
+]
+
+[[package]]
+name = "ring"
+version = "0.16.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc"
+dependencies = [
+ "cc",
+ "libc",
+ "once_cell",
+ "spin 0.5.2",
+ "untrusted 0.7.1",
+ "web-sys",
+ "winapi",
+]
+
+[[package]]
+name = "ring"
+version = "0.17.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d"
+dependencies = [
+ "cc",
+ "cfg-if",
+ "getrandom 0.2.15",
+ "libc",
+ "spin 0.9.8",
+ "untrusted 0.9.0",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "rmp"
+version = "0.8.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "228ed7c16fa39782c3b3468e974aec2795e9089153cd08ee2e9aefb3613334c4"
+dependencies = [
+ "byteorder",
+ "num-traits",
+ "paste",
+]
+
+[[package]]
+name = "rmp-serde"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "52e599a477cf9840e92f2cde9a7189e67b42c57532749bf90aea6ec10facd4db"
+dependencies = [
+ "byteorder",
+ "rmp",
+ "serde",
+]
+
+[[package]]
+name = "rsa"
+version = "0.9.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "47c75d7c5c6b673e58bf54d8544a9f432e3a925b0e80f7cd3602ab5c50c55519"
+dependencies = [
+ "const-oid",
+ "digest",
+ "num-bigint-dig",
+ "num-integer",
+ "num-traits",
+ "pkcs1",
+ "pkcs8 0.10.2",
+ "rand_core 0.6.4",
+ "signature 2.2.0",
+ "spki 0.7.3",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "rust_decimal"
+version = "1.36.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b082d80e3e3cc52b2ed634388d436fe1f4de6af5786cc2de9ba9737527bdf555"
+dependencies = [
+ "arrayvec",
+ "num-traits",
+]
+
+[[package]]
+name = "rustc-demangle"
+version = "0.1.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"
+
+[[package]]
+name = "rustc-hash"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
+
+[[package]]
+name = "rustc_version"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92"
+dependencies = [
+ "semver",
+]
+
+[[package]]
+name = "rustix"
+version = "0.38.43"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6"
+dependencies = [
+ "bitflags 2.9.4",
+ "errno",
+ "libc",
+ "linux-raw-sys",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "rustls"
+version = "0.19.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7"
+dependencies = [
+ "base64 0.13.1",
+ "log",
+ "ring 0.16.20",
+ "sct 0.6.1",
+ "webpki",
+]
+
+[[package]]
+name = "rustls"
+version = "0.21.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e"
+dependencies = [
+ "log",
+ "ring 0.17.8",
+ "rustls-webpki 0.101.7",
+ "sct 0.7.1",
+]
+
+[[package]]
+name = "rustls"
+version = "0.22.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432"
+dependencies = [
+ "log",
+ "ring 0.17.8",
+ "rustls-pki-types",
+ "rustls-webpki 0.102.8",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "rustls"
+version = "0.23.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5065c3f250cbd332cd894be57c40fa52387247659b14a2d6041d121547903b1b"
+dependencies = [
+ "once_cell",
+ "rustls-pki-types",
+ "rustls-webpki 0.102.8",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "rustls-native-certs"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00"
+dependencies = [
+ "openssl-probe",
+ "rustls-pemfile 1.0.4",
+ "schannel",
+ "security-framework",
+]
+
+[[package]]
+name = "rustls-native-certs"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5"
+dependencies = [
+ "openssl-probe",
+ "rustls-pemfile 2.2.0",
+ "rustls-pki-types",
+ "schannel",
+ "security-framework",
+]
+
+[[package]]
+name = "rustls-pemfile"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c"
+dependencies = [
+ "base64 0.21.7",
+]
+
+[[package]]
+name = "rustls-pemfile"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50"
+dependencies = [
+ "rustls-pki-types",
+]
+
+[[package]]
+name = "rustls-pki-types"
+version = "1.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d2bf47e6ff922db3825eb750c4e2ff784c6ff8fb9e13046ef6a1d1c5401b0b37"
+
+[[package]]
+name = "rustls-webpki"
+version = "0.101.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765"
+dependencies = [
+ "ring 0.17.8",
+ "untrusted 0.9.0",
+]
+
+[[package]]
+name = "rustls-webpki"
+version = "0.102.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9"
+dependencies = [
+ "ring 0.17.8",
+ "rustls-pki-types",
+ "untrusted 0.9.0",
+]
+
+[[package]]
+name = "rustracing"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a44822b10c095e574869de2b891e40c724fef42cadaea040d1cd3bdbb13d36a5"
+dependencies = [
+ "backtrace",
+ "crossbeam-channel",
+ "rand 0.8.5",
+ "trackable 0.2.24",
+]
+
+[[package]]
+name = "rustracing_jaeger"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a6c2fe9411ef5f43ac773f0e84ad735804c55719346a7aad52de2d9162db97c8"
+dependencies = [
+ "crossbeam-channel",
+ "hostname",
+ "percent-encoding",
+ "rand 0.8.5",
+ "rustracing",
+ "thrift_codec",
+ "trackable 0.2.24",
+]
+
+[[package]]
+name = "rustversion"
+version = "1.0.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4"
+
+[[package]]
+name = "rusty-fork"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f"
+dependencies = [
+ "fnv",
+ "quick-error",
+ "tempfile",
+ "wait-timeout",
+]
+
+[[package]]
+name = "ryu"
+version = "1.0.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"
+
+[[package]]
+name = "safe_arch"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96b02de82ddbe1b636e6170c21be622223aea188ef2e139be0a5b219ec215323"
+dependencies = [
+ "bytemuck",
+]
+
+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "schannel"
+version = "0.1.27"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d"
+dependencies = [
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "scoped-tls"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294"
+
+[[package]]
+name = "scopeguard"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
+
+[[package]]
+name = "sct"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce"
+dependencies = [
+ "ring 0.16.20",
+ "untrusted 0.7.1",
+]
+
+[[package]]
+name = "sct"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414"
+dependencies = [
+ "ring 0.17.8",
+ "untrusted 0.9.0",
+]
+
+[[package]]
+name = "sec1"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928"
+dependencies = [
+ "base16ct",
+ "der 0.6.1",
+ "generic-array",
+ "pkcs8 0.9.0",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "security-framework"
+version = "2.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02"
+dependencies = [
+ "bitflags 2.9.4",
+ "core-foundation",
+ "core-foundation-sys",
+ "libc",
+ "security-framework-sys",
+]
+
+[[package]]
+name = "security-framework-sys"
+version = "2.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32"
+dependencies = [
+ "core-foundation-sys",
+ "libc",
+]
+
+[[package]]
+name = "semver"
+version = "1.0.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "serde"
+version = "1.0.217"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.217"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "serde_fmt"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e1d4ddca14104cd60529e8c7f7ba71a2c8acd8f7f5cfcdc2faf97eeb7c3010a4"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.135"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b0d7ba2887406110130a978386c4e1befb98c674b4fba677954e4db976630d9"
+dependencies = [
+ "indexmap 2.7.0",
+ "itoa",
+ "memchr",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "serde_path_to_error"
+version = "0.1.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6"
+dependencies = [
+ "itoa",
+ "serde",
+]
+
+[[package]]
+name = "serde_spanned"
+version = "0.6.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "serde_urlencoded"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd"
+dependencies = [
+ "form_urlencoded",
+ "itoa",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "serde_with"
+version = "3.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d6b6f7f2fcb69f747921f79f3926bd1e203fce4fef62c268dd3abfb6d86029aa"
+dependencies = [
+ "base64 0.22.1",
+ "chrono",
+ "hex",
+ "indexmap 1.9.3",
+ "indexmap 2.7.0",
+ "serde",
+ "serde_derive",
+ "serde_json",
+ "serde_with_macros",
+ "time",
+]
+
+[[package]]
+name = "serde_with_macros"
+version = "3.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8d00caa5193a3c8362ac2b73be6b9e768aa5a4b2f721d8f4b339600c3cb51f8e"
+dependencies = [
+ "darling",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "serde_yaml"
+version = "0.8.26"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b"
+dependencies = [
+ "indexmap 1.9.3",
+ "ryu",
+ "serde",
+ "yaml-rust",
+]
+
+[[package]]
+name = "serde_yaml"
+version = "0.9.34+deprecated"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
+dependencies = [
+ "indexmap 2.7.0",
+ "itoa",
+ "ryu",
+ "serde",
+ "unsafe-libyaml",
+]
+
+[[package]]
+name = "sfv"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f27daf6ed3fc7ffd5ea3ce9f684fe351c47e50f2fdbb6236e2bad0b440dbe408"
+dependencies = [
+ "data-encoding",
+ "indexmap 2.7.0",
+ "rust_decimal",
+]
+
+[[package]]
+name = "sha1"
+version = "0.10.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba"
+dependencies = [
+ "cfg-if",
+ "cpufeatures",
+ "digest",
+]
+
+[[package]]
+name = "sha2"
+version = "0.10.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8"
+dependencies = [
+ "cfg-if",
+ "cpufeatures",
+ "digest",
+]
+
+[[package]]
+name = "sha3"
+version = "0.10.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60"
+dependencies = [
+ "digest",
+ "keccak",
+]
+
+[[package]]
+name = "sharded-slab"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6"
+dependencies = [
+ "lazy_static",
+]
+
+[[package]]
+name = "shared_child"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09fa9338aed9a1df411814a5b2252f7cd206c55ae9bf2fa763f8de84603aa60c"
+dependencies = [
+ "libc",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "shlex"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
+
+[[package]]
+name = "signal-hook-registry"
+version = "1.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "signature"
+version = "1.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c"
+dependencies = [
+ "digest",
+ "rand_core 0.6.4",
+]
+
+[[package]]
+name = "signature"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de"
+dependencies = [
+ "digest",
+ "rand_core 0.6.4",
+]
+
+[[package]]
+name = "similar"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e"
+dependencies = [
+ "bstr",
+ "unicode-segmentation",
+]
+
+[[package]]
+name = "similar-asserts"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cfe85670573cd6f0fa97940f26e7e6601213c3b0555246c24234131f88c5709e"
+dependencies = [
+ "console",
+ "similar",
+]
+
+[[package]]
+name = "simple_asn1"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085"
+dependencies = [
+ "num-bigint",
+ "num-traits",
+ "thiserror 1.0.69",
+ "time",
+]
+
+[[package]]
+name = "siphasher"
+version = "0.3.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d"
+
+[[package]]
+name = "siphasher"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d"
+
+[[package]]
+name = "slab"
+version = "0.4.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "smallvec"
+version = "1.13.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
+
+[[package]]
+name = "smartstring"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3fb72c633efbaa2dd666986505016c32c3044395ceaf881518399d2f4127ee29"
+dependencies = [
+ "autocfg",
+ "static_assertions",
+ "version_check",
+]
+
+[[package]]
+name = "snap"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b"
+
+[[package]]
+name = "socket2"
+version = "0.5.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8"
+dependencies = [
+ "libc",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "spin"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
+
+[[package]]
+name = "spin"
+version = "0.9.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
+
+[[package]]
+name = "spki"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b"
+dependencies = [
+ "base64ct",
+ "der 0.6.1",
+]
+
+[[package]]
+name = "spki"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d"
+dependencies = [
+ "base64ct",
+ "der 0.7.9",
+]
+
+[[package]]
+name = "stable_deref_trait"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
+
+[[package]]
+name = "static_assertions"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
+
+[[package]]
+name = "string_cache"
+version = "0.8.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b"
+dependencies = [
+ "new_debug_unreachable",
+ "once_cell",
+ "parking_lot",
+ "phf_shared 0.10.0",
+ "precomputed-hash",
+ "serde",
+]
+
+[[package]]
+name = "string_enum"
+version = "0.4.1"
+source = "git+https://github.com/encoredev/swc?branch=node-resolve-exports#3ccddcb7d70380b6952296717b2d9f2056f4c2ac"
+dependencies = [
+ "pmutil",
+ "proc-macro2",
+ "quote",
+ "swc_macros_common",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "stringprep"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1"
+dependencies = [
+ "unicode-bidi",
+ "unicode-normalization",
+ "unicode-properties",
+]
+
+[[package]]
+name = "strsim"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
+
+[[package]]
+name = "strsim"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
+
+[[package]]
+name = "strum"
+version = "0.26.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06"
+dependencies = [
+ "strum_macros",
+]
+
+[[package]]
+name = "strum_macros"
+version = "0.26.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be"
+dependencies = [
+ "heck 0.5.0",
+ "proc-macro2",
+ "quote",
+ "rustversion",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "subtle"
+version = "2.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
+
+[[package]]
+name = "sval"
+version = "2.13.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6dc0f9830c49db20e73273ffae9b5240f63c42e515af1da1fceefb69fceafd8"
+
+[[package]]
+name = "sval_buffer"
+version = "2.13.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "429922f7ad43c0ef8fd7309e14d750e38899e32eb7e8da656ea169dd28ee212f"
+dependencies = [
+ "sval",
+ "sval_ref",
+]
+
+[[package]]
+name = "sval_dynamic"
+version = "2.13.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "68f16ff5d839396c11a30019b659b0976348f3803db0626f736764c473b50ff4"
+dependencies = [
+ "sval",
+]
+
+[[package]]
+name = "sval_fmt"
+version = "2.13.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c01c27a80b6151b0557f9ccbe89c11db571dc5f68113690c1e028d7e974bae94"
+dependencies = [
+ "itoa",
+ "ryu",
+ "sval",
+]
+
+[[package]]
+name = "sval_json"
+version = "2.13.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0deef63c70da622b2a8069d8600cf4b05396459e665862e7bdb290fd6cf3f155"
+dependencies = [
+ "itoa",
+ "ryu",
+ "sval",
+]
+
+[[package]]
+name = "sval_nested"
+version = "2.13.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a39ce5976ae1feb814c35d290cf7cf8cd4f045782fe1548d6bc32e21f6156e9f"
+dependencies = [
+ "sval",
+ "sval_buffer",
+ "sval_ref",
+]
+
+[[package]]
+name = "sval_ref"
+version = "2.13.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bb7c6ee3751795a728bc9316a092023529ffea1783499afbc5c66f5fabebb1fa"
+dependencies = [
+ "sval",
+]
+
+[[package]]
+name = "sval_serde"
+version = "2.13.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2a5572d0321b68109a343634e3a5d576bf131b82180c6c442dee06349dfc652a"
+dependencies = [
+ "serde",
+ "sval",
+ "sval_nested",
+]
+
+[[package]]
+name = "swc_atoms"
+version = "0.6.4"
+source = "git+https://github.com/encoredev/swc?branch=node-resolve-exports#3ccddcb7d70380b6952296717b2d9f2056f4c2ac"
+dependencies = [
+ "hstr",
+ "once_cell",
+ "rustc-hash",
+ "serde",
+]
+
+[[package]]
+name = "swc_cached"
+version = "0.3.18"
+source = "git+https://github.com/encoredev/swc?branch=node-resolve-exports#3ccddcb7d70380b6952296717b2d9f2056f4c2ac"
+dependencies = [
+ "ahash",
+ "anyhow",
+ "dashmap",
+ "once_cell",
+ "regex",
+ "serde",
+]
+
+[[package]]
+name = "swc_common"
+version = "0.33.8"
+source = "git+https://github.com/encoredev/swc?branch=node-resolve-exports#3ccddcb7d70380b6952296717b2d9f2056f4c2ac"
+dependencies = [
+ "ast_node",
+ "atty",
+ "better_scoped_tls",
+ "cfg-if",
+ "either",
+ "from_variant",
+ "new_debug_unreachable",
+ "num-bigint",
+ "once_cell",
+ "rustc-hash",
+ "serde",
+ "siphasher 0.3.11",
+ "string_cache",
+ "swc_atoms",
+ "swc_eq_ignore_macros",
+ "swc_visit",
+ "termcolor",
+ "tracing",
+ "unicode-width",
+ "url",
+]
+
+[[package]]
+name = "swc_ecma_ast"
+version = "0.110.9"
+source = "git+https://github.com/encoredev/swc?branch=node-resolve-exports#3ccddcb7d70380b6952296717b2d9f2056f4c2ac"
+dependencies = [
+ "bitflags 2.9.4",
+ "is-macro",
+ "num-bigint",
+ "phf 0.11.3",
+ "scoped-tls",
+ "string_enum",
+ "swc_atoms",
+ "swc_common",
+ "unicode-id",
+]
+
+[[package]]
+name = "swc_ecma_loader"
+version = "0.45.9"
+source = "git+https://github.com/encoredev/swc?branch=node-resolve-exports#3ccddcb7d70380b6952296717b2d9f2056f4c2ac"
+dependencies = [
+ "anyhow",
+ "dashmap",
+ "indexmap 1.9.3",
+ "normpath",
+ "once_cell",
+ "path-clean",
+ "pathdiff",
+ "serde",
+ "serde_json",
+ "swc_cached",
+ "swc_common",
+ "tracing",
+]
+
+[[package]]
+name = "swc_ecma_parser"
+version = "0.141.21"
+source = "git+https://github.com/encoredev/swc?branch=node-resolve-exports#3ccddcb7d70380b6952296717b2d9f2056f4c2ac"
+dependencies = [
+ "either",
+ "new_debug_unreachable",
+ "num-bigint",
+ "num-traits",
+ "phf 0.11.3",
+ "serde",
+ "smallvec",
+ "smartstring",
+ "swc_atoms",
+ "swc_common",
+ "swc_ecma_ast",
+ "tracing",
+ "typed-arena",
+]
+
+[[package]]
+name = "swc_ecma_transforms_base"
+version = "0.134.30"
+source = "git+https://github.com/encoredev/swc?branch=node-resolve-exports#3ccddcb7d70380b6952296717b2d9f2056f4c2ac"
+dependencies = [
+ "better_scoped_tls",
+ "bitflags 2.9.4",
+ "indexmap 1.9.3",
+ "once_cell",
+ "phf 0.10.1",
+ "rustc-hash",
+ "serde",
+ "smallvec",
+ "swc_atoms",
+ "swc_common",
+ "swc_ecma_ast",
+ "swc_ecma_parser",
+ "swc_ecma_utils",
+ "swc_ecma_visit",
+ "tracing",
+]
+
+[[package]]
+name = "swc_ecma_utils"
+version = "0.124.26"
+source = "git+https://github.com/encoredev/swc?branch=node-resolve-exports#3ccddcb7d70380b6952296717b2d9f2056f4c2ac"
+dependencies = [
+ "indexmap 1.9.3",
+ "num_cpus",
+ "once_cell",
+ "rustc-hash",
+ "swc_atoms",
+ "swc_common",
+ "swc_ecma_ast",
+ "swc_ecma_visit",
+ "tracing",
+ "unicode-id",
+]
+
+[[package]]
+name = "swc_ecma_visit"
+version = "0.96.9"
+source = "git+https://github.com/encoredev/swc?branch=node-resolve-exports#3ccddcb7d70380b6952296717b2d9f2056f4c2ac"
+dependencies = [
+ "num-bigint",
+ "swc_atoms",
+ "swc_common",
+ "swc_ecma_ast",
+ "swc_visit",
+ "tracing",
+]
+
+[[package]]
+name = "swc_eq_ignore_macros"
+version = "0.1.2"
+source = "git+https://github.com/encoredev/swc?branch=node-resolve-exports#3ccddcb7d70380b6952296717b2d9f2056f4c2ac"
+dependencies = [
+ "pmutil",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "swc_macros_common"
+version = "0.3.8"
+source = "git+https://github.com/encoredev/swc?branch=node-resolve-exports#3ccddcb7d70380b6952296717b2d9f2056f4c2ac"
+dependencies = [
+ "pmutil",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "swc_visit"
+version = "0.5.7"
+source = "git+https://github.com/encoredev/swc?branch=node-resolve-exports#3ccddcb7d70380b6952296717b2d9f2056f4c2ac"
+dependencies = [
+ "either",
+ "swc_visit_macros",
+]
+
+[[package]]
+name = "swc_visit_macros"
+version = "0.5.8"
+source = "git+https://github.com/encoredev/swc?branch=node-resolve-exports#3ccddcb7d70380b6952296717b2d9f2056f4c2ac"
+dependencies = [
+ "Inflector",
+ "pmutil",
+ "proc-macro2",
+ "quote",
+ "swc_macros_common",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "symlink"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a7973cce6668464ea31f176d85b13c7ab3bba2cb3b77a2ed26abd7801688010a"
+
+[[package]]
+name = "syn"
+version = "1.0.109"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.95"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "46f71c0377baf4ef1cc3e3402ded576dccc315800fbc62dfc7fe04b009773b4a"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "sync_wrapper"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160"
+
+[[package]]
+name = "sync_wrapper"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263"
+dependencies = [
+ "futures-core",
+]
+
+[[package]]
+name = "synstructure"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "sysctl"
+version = "0.4.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "225e483f02d0ad107168dc57381a8a40c3aeea6abe47f37506931f861643cfa8"
+dependencies = [
+ "bitflags 1.3.2",
+ "byteorder",
+ "libc",
+ "thiserror 1.0.69",
+ "walkdir",
+]
+
+[[package]]
+name = "system-configuration"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7"
+dependencies = [
+ "bitflags 1.3.2",
+ "core-foundation",
+ "system-configuration-sys 0.5.0",
+]
+
+[[package]]
+name = "system-configuration"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b"
+dependencies = [
+ "bitflags 2.9.4",
+ "core-foundation",
+ "system-configuration-sys 0.6.0",
+]
+
+[[package]]
+name = "system-configuration-sys"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9"
+dependencies = [
+ "core-foundation-sys",
+ "libc",
+]
+
+[[package]]
+name = "system-configuration-sys"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4"
+dependencies = [
+ "core-foundation-sys",
+ "libc",
+]
+
+[[package]]
+name = "tempdir"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8"
+dependencies = [
+ "rand 0.4.6",
+ "remove_dir_all",
+]
+
+[[package]]
+name = "tempfile"
+version = "3.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a8a559c81686f576e8cd0290cd2a24a2a9ad80c98b3478856500fcbd7acd704"
+dependencies = [
+ "cfg-if",
+ "fastrand",
+ "getrandom 0.2.15",
+ "once_cell",
+ "rustix",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "termcolor"
+version = "1.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "termtree"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683"
+
+[[package]]
+name = "textwrap"
+version = "0.16.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9"
+
+[[package]]
+name = "thiserror"
+version = "1.0.69"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
+dependencies = [
+ "thiserror-impl 1.0.69",
+]
+
+[[package]]
+name = "thiserror"
+version = "2.0.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a3ac7f54ca534db81081ef1c1e7f6ea8a3ef428d2fc069097c079443d24124d3"
+dependencies = [
+ "thiserror-impl 2.0.10",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "1.0.69"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "2.0.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9e9465d30713b56a37ede7185763c3492a91be2f5fa68d958c44e41ab9248beb"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "thread_local"
+version = "1.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c"
+dependencies = [
+ "cfg-if",
+ "once_cell",
+]
+
+[[package]]
+name = "thrift_codec"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8fb61fb3d0a0af14949f3a6949b2639112e13226647112824f4d081533f9b1a8"
+dependencies = [
+ "byteorder",
+ "trackable 0.2.24",
+]
+
+[[package]]
+name = "time"
+version = "0.3.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21"
+dependencies = [
+ "deranged",
+ "itoa",
+ "num-conv",
+ "powerfmt",
+ "serde",
+ "time-core",
+ "time-macros",
+]
+
+[[package]]
+name = "time-core"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
+
+[[package]]
+name = "time-macros"
+version = "0.2.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de"
+dependencies = [
+ "num-conv",
+ "time-core",
+]
+
+[[package]]
+name = "tinystr"
+version = "0.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f"
+dependencies = [
+ "displaydoc",
+ "zerovec",
+]
+
+[[package]]
+name = "tinyvec"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8"
+dependencies = [
+ "tinyvec_macros",
+]
+
+[[package]]
+name = "tinyvec_macros"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
+
+[[package]]
+name = "tokio"
+version = "1.43.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e"
+dependencies = [
+ "backtrace",
+ "bytes",
+ "libc",
+ "mio",
+ "parking_lot",
+ "pin-project-lite",
+ "signal-hook-registry",
+ "socket2",
+ "tokio-macros",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "tokio-io-timeout"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf"
+dependencies = [
+ "pin-project-lite",
+ "tokio",
+]
+
+[[package]]
+name = "tokio-macros"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "tokio-native-tls"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2"
+dependencies = [
+ "native-tls",
+ "tokio",
+]
+
+[[package]]
+name = "tokio-nsq"
+version = "0.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "289e54c5548b30d6fd1edb525812fa26c745ba0dccdf5fc552ffe7f8b0f7991e"
+dependencies = [
+ "anyhow",
+ "async-compression",
+ "backoff",
+ "built",
+ "byteorder",
+ "futures",
+ "futures-util",
+ "gethostname",
+ "hyper 0.14.32",
+ "lazy_static",
+ "log",
+ "matches",
+ "regex",
+ "rustls 0.19.1",
+ "serde",
+ "serde_json",
+ "snap",
+ "thiserror 1.0.69",
+ "tokio",
+ "tokio-io-timeout",
+ "tokio-rustls 0.22.0",
+]
+
+[[package]]
+name = "tokio-openssl"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "59df6849caa43bb7567f9a36f863c447d95a11d5903c9cc334ba32576a27eadd"
+dependencies = [
+ "openssl",
+ "openssl-sys",
+ "tokio",
+]
+
+[[package]]
+name = "tokio-postgres"
+version = "0.7.13"
+source = "git+https://github.com/encoredev/rust-postgres?branch=encore-patches-sync#e71577eea1d14769a3021b6dd214448e3de3ffb6"
+dependencies = [
+ "async-trait",
+ "byteorder",
+ "bytes",
+ "constant_time_eq",
+ "fallible-iterator",
+ "futures-channel",
+ "futures-util",
+ "log",
+ "parking_lot",
+ "percent-encoding",
+ "phf 0.11.3",
+ "pin-project-lite",
+ "postgres-protocol",
+ "postgres-types",
+ "rand 0.9.1",
+ "socket2",
+ "tokio",
+ "tokio-util",
+ "whoami",
+]
+
+[[package]]
+name = "tokio-retry"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f"
+dependencies = [
+ "pin-project",
+ "rand 0.8.5",
+ "tokio",
+]
+
+[[package]]
+name = "tokio-rustls"
+version = "0.22.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6"
+dependencies = [
+ "rustls 0.19.1",
+ "tokio",
+ "webpki",
+]
+
+[[package]]
+name = "tokio-rustls"
+version = "0.24.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081"
+dependencies = [
+ "rustls 0.21.12",
+ "tokio",
+]
+
+[[package]]
+name = "tokio-rustls"
+version = "0.25.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f"
+dependencies = [
+ "rustls 0.22.4",
+ "rustls-pki-types",
+ "tokio",
+]
+
+[[package]]
+name = "tokio-rustls"
+version = "0.26.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37"
+dependencies = [
+ "rustls 0.23.20",
+ "tokio",
+]
+
+[[package]]
+name = "tokio-stream"
+version = "0.1.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047"
+dependencies = [
+ "futures-core",
+ "pin-project-lite",
+ "tokio",
+]
+
+[[package]]
+name = "tokio-test"
+version = "0.4.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7"
+dependencies = [
+ "async-stream",
+ "bytes",
+ "futures-core",
+ "tokio",
+ "tokio-stream",
+]
+
+[[package]]
+name = "tokio-tungstenite"
+version = "0.21.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c83b561d025642014097b66e6c1bb422783339e0909e4429cde4749d1990bc38"
+dependencies = [
+ "futures-util",
+ "log",
+ "rustls 0.22.4",
+ "rustls-native-certs 0.7.3",
+ "rustls-pki-types",
+ "tokio",
+ "tokio-rustls 0.25.0",
+ "tungstenite 0.21.0",
+]
+
+[[package]]
+name = "tokio-tungstenite"
+version = "0.24.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "edc5f74e248dc973e0dbb7b74c7e0d6fcc301c694ff50049504004ef4d0cdcd9"
+dependencies = [
+ "futures-util",
+ "log",
+ "tokio",
+ "tungstenite 0.24.0",
+]
+
+[[package]]
+name = "tokio-util"
+version = "0.7.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078"
+dependencies = [
+ "bytes",
+ "futures-core",
+ "futures-sink",
+ "futures-util",
+ "hashbrown 0.14.5",
+ "pin-project-lite",
+ "tokio",
+]
+
+[[package]]
+name = "toml"
+version = "0.7.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257"
+dependencies = [
+ "serde",
+ "serde_spanned",
+ "toml_datetime",
+ "toml_edit",
+]
+
+[[package]]
+name = "toml_datetime"
+version = "0.6.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "toml_edit"
+version = "0.19.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
+dependencies = [
+ "indexmap 2.7.0",
+ "serde",
+ "serde_spanned",
+ "toml_datetime",
+ "winnow",
+]
+
+[[package]]
+name = "tonic"
+version = "0.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e"
+dependencies = [
+ "async-stream",
+ "async-trait",
+ "axum 0.6.20",
+ "base64 0.21.7",
+ "bytes",
+ "flate2",
+ "h2 0.3.26",
+ "http 0.2.12",
+ "http-body 0.4.6",
+ "hyper 0.14.32",
+ "hyper-timeout",
+ "percent-encoding",
+ "pin-project",
+ "prost 0.12.6",
+ "rustls 0.21.12",
+ "rustls-pemfile 1.0.4",
+ "tokio",
+ "tokio-rustls 0.24.1",
+ "tokio-stream",
+ "tower 0.4.13",
+ "tower-layer",
+ "tower-service",
+ "tracing",
+ "webpki-roots",
+]
+
+[[package]]
+name = "tower"
+version = "0.4.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c"
+dependencies = [
+ "futures-core",
+ "futures-util",
+ "indexmap 1.9.3",
+ "pin-project",
+ "pin-project-lite",
+ "rand 0.8.5",
+ "slab",
+ "tokio",
+ "tokio-util",
+ "tower-layer",
+ "tower-service",
+ "tracing",
+]
+
+[[package]]
+name = "tower"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9"
+dependencies = [
+ "futures-core",
+ "futures-util",
+ "pin-project-lite",
+ "sync_wrapper 1.0.2",
+ "tokio",
+ "tower-layer",
+ "tower-service",
+ "tracing",
+]
+
+[[package]]
+name = "tower-http"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5"
+dependencies = [
+ "bitflags 2.9.4",
+ "bytes",
+ "futures-util",
+ "http 1.2.0",
+ "http-body 1.0.1",
+ "http-body-util",
+ "http-range-header",
+ "httpdate",
+ "mime",
+ "mime_guess",
+ "percent-encoding",
+ "pin-project-lite",
+ "tokio",
+ "tokio-util",
+ "tower-layer",
+ "tower-service",
+ "tracing",
+]
+
+[[package]]
+name = "tower-layer"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e"
+
+[[package]]
+name = "tower-service"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3"
+
+[[package]]
+name = "tracing"
+version = "0.1.41"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
+dependencies = [
+ "log",
+ "pin-project-lite",
+ "tracing-attributes",
+ "tracing-core",
+]
+
+[[package]]
+name = "tracing-attributes"
+version = "0.1.28"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "tracing-core"
+version = "0.1.33"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c"
+dependencies = [
+ "once_cell",
+ "valuable",
+]
+
+[[package]]
+name = "tracing-log"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3"
+dependencies = [
+ "log",
+ "once_cell",
+ "tracing-core",
+]
+
+[[package]]
+name = "tracing-subscriber"
+version = "0.3.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008"
+dependencies = [
+ "matchers",
+ "nu-ansi-term",
+ "once_cell",
+ "regex",
+ "sharded-slab",
+ "smallvec",
+ "thread_local",
+ "tracing",
+ "tracing-core",
+ "tracing-log",
+]
+
+[[package]]
+name = "trackable"
+version = "0.2.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b98abb9e7300b9ac902cc04920945a874c1973e08c310627cc4458c04b70dd32"
+dependencies = [
+ "trackable 1.3.0",
+ "trackable_derive",
+]
+
+[[package]]
+name = "trackable"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b15bd114abb99ef8cee977e517c8f37aee63f184f2d08e3e6ceca092373369ae"
+dependencies = [
+ "trackable_derive",
+]
+
+[[package]]
+name = "trackable_derive"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ebeb235c5847e2f82cfe0f07eb971d1e5f6804b18dac2ae16349cc604380f82f"
+dependencies = [
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "triomphe"
+version = "0.1.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ef8f7726da4807b58ea5c96fdc122f80702030edc33b35aff9190a51148ccc85"
+dependencies = [
+ "serde",
+ "stable_deref_trait",
+]
+
+[[package]]
+name = "try-lock"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"
+
+[[package]]
+name = "tungstenite"
+version = "0.21.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1"
+dependencies = [
+ "byteorder",
+ "bytes",
+ "data-encoding",
+ "http 1.2.0",
+ "httparse",
+ "log",
+ "rand 0.8.5",
+ "rustls 0.22.4",
+ "rustls-pki-types",
+ "sha1",
+ "thiserror 1.0.69",
+ "url",
+ "utf-8",
+]
+
+[[package]]
+name = "tungstenite"
+version = "0.24.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "18e5b8366ee7a95b16d32197d0b2604b43a0be89dc5fac9f8e96ccafbaedda8a"
+dependencies = [
+ "byteorder",
+ "bytes",
+ "data-encoding",
+ "http 1.2.0",
+ "httparse",
+ "log",
+ "rand 0.8.5",
+ "sha1",
+ "thiserror 1.0.69",
+ "utf-8",
+]
+
+[[package]]
+name = "txtar"
+version = "1.0.0"
+dependencies = [
+ "assert_fs",
+ "clean-path",
+ "predicates 2.1.5",
+ "similar-asserts",
+ "thiserror 1.0.69",
+]
+
+[[package]]
+name = "typed-arena"
+version = "2.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6af6ae20167a9ece4bcb41af5b80f8a1f1df981f6391189ce00fd257af04126a"
+
+[[package]]
+name = "typeid"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0e13db2e0ccd5e14a544e8a246ba2312cd25223f616442d7f2cb0e3db614236e"
+
+[[package]]
+name = "typenum"
+version = "1.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
+
+[[package]]
+name = "ucd-trie"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971"
+
+[[package]]
+name = "unarray"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94"
+
+[[package]]
+name = "unicase"
+version = "2.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539"
+
+[[package]]
+name = "unicode-bidi"
+version = "0.3.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5"
+
+[[package]]
+name = "unicode-id"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "10103c57044730945224467c09f71a4db0071c123a0648cc3e818913bde6b561"
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83"
+
+[[package]]
+name = "unicode-normalization"
+version = "0.1.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956"
+dependencies = [
+ "tinyvec",
+]
+
+[[package]]
+name = "unicode-properties"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0"
+
+[[package]]
+name = "unicode-segmentation"
+version = "1.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493"
+
+[[package]]
+name = "unicode-width"
+version = "0.1.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af"
+
+[[package]]
+name = "unsafe-libyaml"
+version = "0.2.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861"
+
+[[package]]
+name = "untrusted"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a"
+
+[[package]]
+name = "untrusted"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
+
+[[package]]
+name = "url"
+version = "2.5.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60"
+dependencies = [
+ "form_urlencoded",
+ "idna",
+ "percent-encoding",
+]
+
+[[package]]
+name = "urlencoding"
+version = "2.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da"
+
+[[package]]
+name = "utf-8"
+version = "0.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9"
+
+[[package]]
+name = "utf16_iter"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246"
+
+[[package]]
+name = "utf8_iter"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
+
+[[package]]
+name = "utf8parse"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
+
+[[package]]
+name = "uuid"
+version = "1.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a"
+
+[[package]]
+name = "valuable"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d"
+
+[[package]]
+name = "value-bag"
+version = "1.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3ef4c4aa54d5d05a279399bfa921ec387b7aba77caf7a682ae8d86785b8fdad2"
+dependencies = [
+ "value-bag-serde1",
+ "value-bag-sval2",
+]
+
+[[package]]
+name = "value-bag-serde1"
+version = "1.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4bb773bd36fd59c7ca6e336c94454d9c66386416734817927ac93d81cb3c5b0b"
+dependencies = [
+ "erased-serde",
+ "serde",
+ "serde_fmt",
+]
+
+[[package]]
+name = "value-bag-sval2"
+version = "1.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "53a916a702cac43a88694c97657d449775667bcd14b70419441d05b7fea4a83a"
+dependencies = [
+ "sval",
+ "sval_buffer",
+ "sval_dynamic",
+ "sval_fmt",
+ "sval_json",
+ "sval_ref",
+ "sval_serde",
+]
+
+[[package]]
+name = "vcpkg"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
+
+[[package]]
+name = "version_check"
+version = "0.9.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
+
+[[package]]
+name = "vsimd"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c3082ca00d5a5ef149bb8b555a72ae84c9c59f7250f013ac822ac2e49b19c64"
+
+[[package]]
+name = "wait-timeout"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "walkdir"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
+dependencies = [
+ "same-file",
+ "winapi-util",
+]
+
+[[package]]
+name = "want"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e"
+dependencies = [
+ "try-lock",
+]
+
+[[package]]
+name = "wasi"
+version = "0.9.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
+
+[[package]]
+name = "wasi"
+version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+
+[[package]]
+name = "wasi"
+version = "0.14.2+wasi-0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3"
+dependencies = [
+ "wit-bindgen-rt",
+]
+
+[[package]]
+name = "wasite"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b"
+
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.99"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396"
+dependencies = [
+ "cfg-if",
+ "once_cell",
+ "wasm-bindgen-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-backend"
+version = "0.2.99"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79"
+dependencies = [
+ "bumpalo",
+ "log",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-futures"
+version = "0.4.49"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38176d9b44ea84e9184eff0bc34cc167ed044f816accfe5922e54d84cf48eca2"
+dependencies = [
+ "cfg-if",
+ "js-sys",
+ "once_cell",
+ "wasm-bindgen",
+ "web-sys",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.99"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe"
+dependencies = [
+ "quote",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.99"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.99"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6"
+
+[[package]]
+name = "wasm-streams"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65"
+dependencies = [
+ "futures-util",
+ "js-sys",
+ "wasm-bindgen",
+ "wasm-bindgen-futures",
+ "web-sys",
+]
+
+[[package]]
+name = "web-sys"
+version = "0.3.76"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "04dd7223427d52553d3702c004d3b2fe07c148165faa56313cb00211e31c12bc"
+dependencies = [
+ "js-sys",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "webpki"
+version = "0.21.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea"
+dependencies = [
+ "ring 0.16.20",
+ "untrusted 0.7.1",
+]
+
+[[package]]
+name = "webpki-roots"
+version = "0.25.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1"
+
+[[package]]
+name = "which"
+version = "4.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7"
+dependencies = [
+ "either",
+ "home",
+ "once_cell",
+ "rustix",
+]
+
+[[package]]
+name = "whoami"
+version = "1.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d"
+dependencies = [
+ "redox_syscall",
+ "wasite",
+ "web-sys",
+]
+
+[[package]]
+name = "wide"
+version = "0.7.33"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ce5da8ecb62bcd8ec8b7ea19f69a51275e91299be594ea5cc6ef7819e16cd03"
+dependencies = [
+ "bytemuck",
+ "safe_arch",
+]
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-util"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
+dependencies = [
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+
+[[package]]
+name = "windows-core"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9"
+dependencies = [
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "windows-registry"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0"
+dependencies = [
+ "windows-result",
+ "windows-strings",
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "windows-result"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e"
+dependencies = [
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "windows-strings"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10"
+dependencies = [
+ "windows-result",
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
+dependencies = [
+ "windows-targets 0.48.5",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
+dependencies = [
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.59.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
+dependencies = [
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
+dependencies = [
+ "windows_aarch64_gnullvm 0.48.5",
+ "windows_aarch64_msvc 0.48.5",
+ "windows_i686_gnu 0.48.5",
+ "windows_i686_msvc 0.48.5",
+ "windows_x86_64_gnu 0.48.5",
+ "windows_x86_64_gnullvm 0.48.5",
+ "windows_x86_64_msvc 0.48.5",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
+dependencies = [
+ "windows_aarch64_gnullvm 0.52.6",
+ "windows_aarch64_msvc 0.52.6",
+ "windows_i686_gnu 0.52.6",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc 0.52.6",
+ "windows_x86_64_gnu 0.52.6",
+ "windows_x86_64_gnullvm 0.52.6",
+ "windows_x86_64_msvc 0.52.6",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.48.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
+
+[[package]]
+name = "winnow"
+version = "0.5.40"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "winreg"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d107f8c6e916235c4c01cabb3e8acf7bea8ef6a63ca2e7fa0527c049badfc48c"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "winreg"
+version = "0.50.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1"
+dependencies = [
+ "cfg-if",
+ "windows-sys 0.48.0",
+]
+
+[[package]]
+name = "wit-bindgen-rt"
+version = "0.39.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1"
+dependencies = [
+ "bitflags 2.9.4",
+]
+
+[[package]]
+name = "write16"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936"
+
+[[package]]
+name = "writeable"
+version = "0.5.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51"
+
+[[package]]
+name = "xid"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3752a194518cdee5d019812fb7978c51d8f0b7cfe9ace5983df1780964bb84c0"
+dependencies = [
+ "crc32fast",
+ "hostname",
+ "md5",
+ "once_cell",
+ "rand 0.8.5",
+ "sysctl",
+ "thiserror 1.0.69",
+ "winreg 0.8.0",
+]
+
+[[package]]
+name = "xmlparser"
+version = "0.13.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "66fee0b777b0f5ac1c69bb06d361268faafa61cd4682ae064a171c16c433e9e4"
+
+[[package]]
+name = "yaml-rust"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85"
+dependencies = [
+ "linked-hash-map",
+]
+
+[[package]]
+name = "yoke"
+version = "0.7.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40"
+dependencies = [
+ "serde",
+ "stable_deref_trait",
+ "yoke-derive",
+ "zerofrom",
+]
+
+[[package]]
+name = "yoke-derive"
+version = "0.7.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+ "synstructure",
+]
+
+[[package]]
+name = "zerocopy"
+version = "0.7.35"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0"
+dependencies = [
+ "byteorder",
+ "zerocopy-derive",
+]
+
+[[package]]
+name = "zerocopy-derive"
+version = "0.7.35"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "zerofrom"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e"
+dependencies = [
+ "zerofrom-derive",
+]
+
+[[package]]
+name = "zerofrom-derive"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+ "synstructure",
+]
+
+[[package]]
+name = "zeroize"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde"
+
+[[package]]
+name = "zerovec"
+version = "0.10.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079"
+dependencies = [
+ "yoke",
+ "zerofrom",
+ "zerovec-derive",
+]
+
+[[package]]
+name = "zerovec-derive"
+version = "0.10.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.95",
+]
+
+[[package]]
+name = "zstd"
+version = "0.13.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9"
+dependencies = [
+ "zstd-safe",
+]
+
+[[package]]
+name = "zstd-safe"
+version = "7.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059"
+dependencies = [
+ "zstd-sys",
+]
+
+[[package]]
+name = "zstd-sys"
+version = "2.0.13+zstd.1.5.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa"
+dependencies = [
+ "cc",
+ "pkg-config",
+]
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000000..fed5c568c6
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,21 @@
+[workspace]
+resolver = "2"
+members = ["runtimes/core", "runtimes/js", "tsparser", "supervisor"]
+
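+# insta's docs recommend compiling it with optimizations even in dev builds, for faster snapshot testing.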
+[profile.dev.package]
+insta.opt-level = 3
+
+[profile.release]
+lto = true
+
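+# Use Encore's patched forks of these crates (on the branches named below) instead of the crates.io releases.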
+[patch.crates-io]
+tokio-postgres = { git = "https://github.com/encoredev/rust-postgres", branch = "encore-patches-sync" }
+postgres-protocol = { git = "https://github.com/encoredev/rust-postgres", branch = "encore-patches-sync" }
+postgres-types = { git = "https://github.com/encoredev/rust-postgres", branch = "encore-patches-sync" }
+swc_ecma_parser = { git = "https://github.com/encoredev/swc", branch = "node-resolve-exports" }
+swc_ecma_ast = { git = "https://github.com/encoredev/swc", branch = "node-resolve-exports" }
+swc_ecma_transforms_base = { git = "https://github.com/encoredev/swc", branch = "node-resolve-exports" }
+swc_atoms = { git = "https://github.com/encoredev/swc", branch = "node-resolve-exports" }
+swc_common = { git = "https://github.com/encoredev/swc", branch = "node-resolve-exports" }
+swc_ecma_loader = { git = "https://github.com/encoredev/swc", branch = "node-resolve-exports" }
+swc_ecma_visit = { git = "https://github.com/encoredev/swc", branch = "node-resolve-exports" }
diff --git a/Cross.toml b/Cross.toml
new file mode 100644
index 0000000000..4a264df650
--- /dev/null
+++ b/Cross.toml
@@ -0,0 +1,12 @@
+[build]
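+# Fetch and install protoc into the build image before compiling.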
+pre-build = [
+ "apt-get install unzip &&",
+ "curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v24.4/protoc-24.4-linux-x86_64.zip &&",
+ "unzip protoc-24.4-linux-x86_64.zip -d /usr/local &&",
+ "rm protoc-24.4-linux-x86_64.zip &&",
+ "export PATH=$PATH:/usr/local/bin",
+]
+
+[build.env]
+volumes = ["ENCORE_WORKDIR"]
+passthrough = ["TYPE_DEF_TMP_PATH", "ENCORE_VERSION"]
\ No newline at end of file
diff --git a/DEVELOPING.md b/DEVELOPING.md
deleted file mode 100644
index b7e6cf1947..0000000000
--- a/DEVELOPING.md
+++ /dev/null
@@ -1,44 +0,0 @@
-# Developing Encore
-
-Building an Encore application requires access to both the Encore runtime (the `encore.dev` package) as well as a custom-built
-([Go runtime](https://github.com/encoredev/go)) to implement Encore's request semantics and automated instrumentation.
-
-As a result the Encore Daemon must know where these two things exist on the filesystem in order to properly compile the Encore application.
-
-This must be done in one of two ways: embedding the installation path at compile time (similar to `GOROOT`)
-or by setting an environment variable at runtime.
-
-The environment variables are:
-- `ENCORE_GOROOT` – the path to encore-go on disk
-- `ENCORE_RUNTIME_PATH` – the path to the `encore.dev` runtime implementation.
-
-`ENCORE_RUNTIME_PATH` can be set to location of the `compiler/runtime` package in this repository,
-while `ENCORE_GOROOT` must be pointed to where `encore-go` was built.
-
-For more information on this see [cli/internal/env/env.go](cli/internal/env/env.go).
-
-## Architecture
-
-The code base is divided into several parts:
-
-### cli
-The `encore` command line interface. The encore background daemon
-is located at `cli/daemon` and is responsible for managing processes,
-setting up databases and talking with the Encore servers for operations like
-fetching production logs.
-
-### parser
-The Encore Parser statically analyzes Encore apps to build up a model
-of the application dubbed the Encore Syntax Tree (EST) that lives in
-`parser/est`.
-
-For speed the parser does not perform traditional type-checking; it does
-limited type-checking for enforcing Encore-specific rules but otherwise
-relies on the underlying Go compiler to perform type-checking as part of
-building the application.
-
-### compiler
-The Encore Compiler rewrites the source code based on the parsed
-Encore Syntax Tree to create a fully functioning application.
-It rewrites API calls & API handlers, injects instrumentation
-and secret values, and more.
\ No newline at end of file
diff --git a/README.md b/README.md
index 09ca89b986..98b159f113 100644
--- a/README.md
+++ b/README.md
@@ -1,216 +1,335 @@
-# Encore - The Go backend framework with superpowers
+
+
+Open Source Framework for creating type-safe distributed systems with declarative infrastructure
+
-
+- **Backend Frameworks:** [Encore.ts](https://encore.dev) and [Encore.go](https://encore.dev/go) simplify creating microservices and type-safe APIs, and provide an AI-ready declarative approach to define infrastructure in code.
+- **Local Development:** Encore's CLI automatically manages local infrastructure and provides a development dashboard with tracing, service catalog, and architecture diagrams.
+- **Infrastructure Integration:** Simplified integration with cloud infrastructure using the open source CLI ([learn more](https://encore.dev/docs/ts/self-host/build)), or using the optional [Encore Cloud](https://encore.cloud) platform to automate DevOps and infrastructure provisioning in your cloud on AWS and GCP.
-https://encore.dev
+**⭐ Star this repository** to help spread the word.
-Encore is a Go backend framework for rapidly creating APIs and distributed systems.
+**💿 Install Encore:**
+- **macOS:** `brew install encoredev/tap/encore`
+- **Linux:** `curl -L https://encore.dev/install.sh | bash`
+- **Windows:** `iwr https://encore.dev/install.ps1 | iex`
-It uses static analysis and code generation to reduce the boilerplate you have to write,
-resulting in an extremely productive developer experience.
+**🕹 Create your first app:**
+- **TypeScript:** `encore app create --example=ts/hello-world`
+- **Go:** `encore app create --example=hello-world`
-The key features of Encore are:
+**🤖 Add Encore LLM instructions to your app:**
+- **Encore.ts:** [ts_llm_instructions.txt](./ts_llm_instructions.txt)
+- **Encore.go:** [go_llm_instructions.txt](./go_llm_instructions.txt)
+- **How to use:**
+ - Cursor: Rename the file to `.cursorrules`.
+ - GitHub Copilot: Paste content in `.github/copilot-instructions.md`.
+ - For other tools, place the file in your app root.
+
+**⚡️ Build your first Encore.ts app with AI using Leap.new**
+- [**Leap**](https://leap.new) is an AI developer agent for building full-stack Encore.ts applications. It's the fastest way to build your first app.
-* **No boilerplate**: Encore drastically reduces the boilerplate needed to set up
- a production ready backend application. Define backend services, API endpoints,
- and call APIs with a single line of Go code.
+**🧩 See example apps:** [Example Apps Repo](https://github.com/encoredev/examples/)
-* **Distributed Tracing**: Encore uses a combination of static analysis and code
- generation to automatically instrument your application for excellent observability.
- Automatically captures information about API calls, goroutines, HTTP requests,
- database queries, and more. Automatically works for local development as well
- as in production.
+**🚀 See products being built with Encore:** [Showcase](https://encore.cloud/showcase)
-* **Infrastructure Provisioning**: Encore understands how your application works,
- and uses that understanding to provision and manage your cloud infrastructure.
- Automatically works with all the major cloud providers, as well as for local development.
+**👋 Have questions?** Join the friendly developer community on [Discord](https://encore.dev/discord).
-* **Simple Secrets**: Encore makes it easy to store and securely use secrets and API keys.
- Never worry about how to store and get access to secret values again.
+**📞 Talk to a human:** [Book a 1:1 demo](https://encore.dev/book) with one of our founders.
-* **API Documentation**: Encore parses your source code to understand the request/response
- schemas for all your APIs. Encore can automatically generate high-quality, interactive
- API Documentation for you. It can also automatically generate type-safe, documented
- clients for your frontends.
+## 🍿 Intro video
+[Watch the intro video](https://youtu.be/vvqTGfoXVsw) for a quick introduction to Encore concepts & code examples.
-**Read the complete documentation at [encore.dev/docs](https://encore.dev/docs).**
+
-## Quick Start
+## Introduction to Encore
-### Install
-```bash
-# macOS
-brew install encoredev/tap/encore
-# Linux
-curl -L https://encore.dev/install.sh | bash
-# Windows
-iwr https://encore.dev/install.ps1 | iex
-```
+Building scalable applications with cloud services is powerful but often frustrating. Developers face complex setups and repetitive tasks that slow them down.
-### Create your app
-```bash
-encore app create my-app
-cd my-app
-encore run
-```
+Encore solves this with an all-in-one backend development toolkit, streamlining everything from local testing to cloud integration and DevOps.
-### Deploy
-```bash
-git push encore
-```
+
+
+
-#### Setup Demo
-[](https://asciinema.org/a/406681)
+### How it works
-## Superpowers
+Encore's open source backend frameworks [Encore.ts](https://encore.dev/docs/ts) and [Encore.go](https://encore.dev/docs/primitives/overview) enable you to define resources like services, databases, cron jobs, and Pub/Sub, as type-safe objects in your application code.
-Encore comes with tons of superpowers that radically simplify backend development compared to traditional frameworks:
+With the frameworks you only define **infrastructure semantics** — _the things that matter to your application's behavior_ — not configuration for _specific_ cloud services. Encore parses your application and builds a graph of both its logical architecture and its infrastructure requirements. It then automatically generates boilerplate and helps orchestrate the relevant infrastructure for each environment. This means the same application code can run locally, in preview environments, and in cloud environments on providers such as AWS and GCP.
-- A state of the art developer experience with unmatched productivity
-- Define services, APIs, and make API calls with a single line of Go code
-- Autocomplete and get compile-time checks for API calls
-- Generates beautiful API docs and API clients automatically
-- Instruments your app with Distributed Tracing, logs, and metrics – automatically
-- Runs serverlessly on Encore's cloud, or deploys to your own favorite cloud
-- Sets up dedicated Preview Environments for your pull requests
-- Supports flexible authentication
-- Manages your databases and migrates them automatically
-- Provides an extremely simple yet secure secrets management
-- And lots more...
+This often removes the need for separate infrastructure configuration like Terraform, increases standardization in both your codebase and infrastructure, and makes your application highly portable across cloud providers.
-## Using Encore
+Encore is fully open source: there is **no proprietary code running in your application**.
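+
+As an illustration of infrastructure as type-safe objects, here's a minimal sketch in Encore.go (following the documented `encore.dev/cron` API; the job name and endpoint are hypothetical):
+
+```go
+package report
+
+import (
+	"context"
+
+	"encore.dev/cron"
+)
+
+// Declaring the cron job is all that's needed: Encore parses this
+// declaration and orchestrates the scheduling for each environment.
+var _ = cron.NewJob("daily-report", cron.JobConfig{
+	Title:    "Generate the daily usage report",
+	Every:    24 * cron.Hour,
+	Endpoint: GenerateReport,
+})
+
+//encore:api private
+func GenerateReport(ctx context.Context) error {
+	// Build and store the report...
+	return nil
+}
+```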
-Encore makes it super easy to create backend services and APIs.
+### Example: Hello World
-### Creating a service with an API
+Defining microservices and API endpoints is simple: with fewer than 10 lines of code, you can create a production-ready, deployable service.
-In Encore, a backend service is just a regular Go package with one or more APIs defined.
-The Go package name becomes the service name (which must be unique within your app).
+**Hello World in Encore.ts**
-```go
-package greet
+```typescript
+import { api } from "encore.dev/api";
-import (
- "context"
- "fmt"
-)
+export const get = api(
+  { expose: true, method: "GET", path: "/hello/:name" },
+  async ({ name }: { name: string }): Promise<Response> => {
+    const msg = `Hello ${name}!`;
+    return { message: msg };
+  }
+);
-type Params struct {
- Name string
+interface Response {
+  message: string;
}
+```
-type Response struct {
- Message string
+**Hello World in Encore.go**
+
+```go
+package hello
+
+//encore:api public path=/hello/:name
+func World(ctx context.Context, name string) (*Response, error) {
+ msg := fmt.Sprintf("Hello, %s!", name)
+ return &Response{Message: msg}, nil
}
-//encore:api public
-func Person(ctx context.Context, params *Params) (*Response, error) {
- msg := fmt.Sprintf("Hello, %s!", params.Name)
- return &Response{Message: msg}, nil
+type Response struct {
+ Message string
}
```
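+
+Once the app is running locally with `encore run`, you can call the endpoint directly, e.g. `curl http://localhost:4000/hello/world` (assuming the CLI's default local port of 4000).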
-This creates a backend service named `greet`, with a single API endpoint named `Person`.
+### Example: Using Pub/Sub
-Calling it is easy:
-```bash
-$ encore run # run the app in a separate terminal
-$ curl http://localhost:4060/greet.Person -d '{"Name": "Jane"}'
-# Outputs: {"Message": "Hello, Jane!"}
-```
+If you want a Pub/Sub Topic, you declare it directly in your application code, and Encore integrates the infrastructure and generates the necessary boilerplate code.
+Encore supports the following Pub/Sub infrastructure:
+- **NSQ** for local environments (automatically provisioned by Encore's CLI)
+- **GCP Pub/Sub** for environments on GCP
+- **SNS/SQS** for environments on AWS
-[Learn more in the Encore docs](https://encore.dev/docs/concepts/services-and-apis).
+**Using Pub/Sub in Encore.ts**
-### Calling an API endpoint
-Calling an API endpoint from another endpoint is easy.
+```typescript
+import { Topic } from "encore.dev/pubsub";
-Just import the service (with a regular Go import), and then call the function
-as if it were a regular Go function:
+export interface SignupEvent {
+ userID: string;
+}
+
+export const signups = new Topic<SignupEvent>("signups", {
+ deliveryGuarantee: "at-least-once",
+});
+```
+
+**Using Pub/Sub in Encore.go**
```go
-import "my.app/greet"
-
-func MyAPI(ctx context.Context) error {
- resp, err := greet.Person(ctx, &greet.Params{Name: "John"})
- if err != nil {
- fmt.Println("The greeting message is:", resp.Message)
- }
- return err
-}
+import "encore.dev/pubsub"
+
+type User struct { /* fields... */ }
+
+var Signup = pubsub.NewTopic[*User]("signup", pubsub.TopicConfig{
+ DeliveryGuarantee: pubsub.AtLeastOnce,
+})
+
+// Publish messages by calling a method
+Signup.Publish(ctx, &User{...})
```
-Encore uses its static analysis and code generation to turn this into a proper API call.
+### Learn more in the docs
-[Learn more in the Encore docs](https://encore.dev/docs/concepts/services-and-apis).
+See how to use the backend frameworks in the docs:
-### SQL Databases
+- **Services:** [Go](https://encore.dev/docs/go/primitives/services) / [TypeScript](https://encore.dev/docs/ts/primitives/services)
+- **APIs:** [Go](https://encore.dev/docs/go/primitives/defining-apis) / [TypeScript](https://encore.dev/docs/ts/primitives/defining-apis)
+- **Databases:** [Go](https://encore.dev/docs/go/primitives/databases) / [TypeScript](https://encore.dev/docs/ts/primitives/databases)
+- **Cron Jobs:** [Go](https://encore.dev/docs/go/primitives/cron-jobs) / [TypeScript](https://encore.dev/docs/ts/primitives/cron-jobs)
+- **Pub/Sub:** [Go](https://encore.dev/docs/go/primitives/pubsub) / [TypeScript](https://encore.dev/docs/ts/primitives/pubsub)
+- **Object Storage:** [Go](https://encore.dev/docs/go/primitives/object-storage) / [TypeScript](https://encore.dev/docs/ts/primitives/object-storage)
+- **Caching:** [Go](https://encore.dev/docs/go/primitives/caching) / TypeScript (Coming soon)
-Encore automatically provisions, connects to, and performs schema migrations of SQL databases for you.
-All you have to do is define the SQL migrations:
+## Using Encore: An end-to-end workflow from local to cloud
-```sql
--- greet/migrations/1_create_table.up.sql
-CREATE TABLE person (
- name TEXT PRIMARY KEY,
- count INT NOT NULL
-);
-```
+Encore provides purpose-built tooling for each step in the development process, from local development and testing, to cloud DevOps. Here we'll cover the key features for each part of the process.
-Then import `encore.dev/storage/sqldb` and just start querying:
+### Local Development
-```go
-// genGreeting generates a personalized greeting for the given name.
-func genGreeting(ctx context.Context, name string) (string, error) {
- var count int
- // Insert the row, and increment count if the row is already in the db.
- err := sqldb.QueryRow(ctx, `
- INSERT INTO "person" (name, count)
- VALUES ($1, 1)
- ON CONFLICT (name) DO UPDATE
- SET count = person.count + 1
- RETURNING count
- `, name).Scan(&count)
- if err != nil {
- return "", err
- }
-
- switch count {
- case 1:
- return fmt.Sprintf("Nice to meet you, %s!", name), nil
- case 2:
- return fmt.Sprintf("Hi again, %s!", name), nil
- default:
- return fmt.Sprintf("Good to see you, %s! We've met %d times before.", name, count-1), nil
- }
-}
-```
+
+
+
+
+When you run your app locally using the [Encore CLI](https://encore.dev/docs/install), Encore parses your code and automatically sets up the necessary local infrastructure on the fly. _No more messing around with Docker Compose!_
+
+You also get built-in tools for an efficient workflow when creating distributed systems and event-driven applications:
+
+- **Local environment matches cloud:** Encore automatically handles the semantics of service communication and interfacing with different types of infrastructure services, so that the local environment is a 1:1 representation of your cloud environment.
+- **Cross-service type-safety:** When building microservices applications with Encore, you get type-safety and auto-complete in your IDE when making cross-service API calls (see the sketch after this list).
+- **Type-aware infrastructure:** With Encore, infrastructure like Pub/Sub queues are type-aware objects in your program. This enables full end-to-end type-safety when building event-driven applications.
+- **Secrets management:** Built-in [secrets management](https://encore.dev/docs/ts/primitives/secrets) for all environments.
+- **Tracing:** The [local development dashboard](https://encore.dev/docs/ts/observability/dev-dash) provides local tracing to help understand application behavior and find bugs.
+- **Automatic API docs & clients:** Encore generates [API docs](https://encore.dev/docs/ts/observability/service-catalog) and [API clients](https://encore.dev/docs/ts/cli/client-generation) in Go, TypeScript, and JavaScript, as well as an OpenAPI specification.
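+
+For example, here's a minimal sketch of a cross-service call in Encore.go (assuming a `users` service that exposes a `Get` endpoint; both names are hypothetical):
+
+```go
+package activation
+
+import (
+	"context"
+
+	"encore.app/users" // import the service like any other Go package
+)
+
+//encore:api private
+func Activate(ctx context.Context, userID string) error {
+	// This reads as a plain function call, but Encore turns it into
+	// a traced, type-checked API call between the two services.
+	profile, err := users.Get(ctx, userID)
+	if err != nil {
+		return err
+	}
+	_ = profile // use the profile...
+	return nil
+}
+```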
+
+_Here's a video showing the local development dashboard:_
+
+https://github.com/encoredev/encore/assets/78424526/4d066c76-9e6c-4c0e-b4c7-6b2ba6161dc8
+
+### Testing
+
+
+
+
+
+Encore comes with several built-in tools to help with testing:
+
+- **Built-in service/API mocking:** Encore provides built-in support for [mocking API calls](https://encore.dev/docs/go/develop/testing/mocking), and interfaces for automatically generating mock objects for your services (see the sketch after this list).
+- **Local test infra:** When running tests locally, Encore automatically provides dedicated [test infrastructure](https://encore.dev/docs/go/develop/testing#test-only-infrastructure) to isolate individual tests.
+- **Local test tracing:** The [Local Development Dashboard](https://encore.dev/docs/ts/observability/dev-dash) provides distributed tracing for tests, providing great visibility into what's happening and making it easier to understand why a test failed.
+- **Preview Environments:** When using Encore Cloud (optional), it automatically provisions a temporary cloud-based [Preview Environment](https://encore.dev/docs/platform/deploy/preview-environments) for each Pull Request, an effective tool when doing end-to-end testing.
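+
+As a sketch of what endpoint mocking can look like in a test (using the `encore.dev/et` testing package; the `users` service, `Get` endpoint, and exact mock signature here are illustrative, so see the mocking docs linked above for the authoritative API):
+
+```go
+package activation
+
+import (
+	"context"
+	"testing"
+
+	"encore.app/users"
+	"encore.dev/et"
+)
+
+func TestActivate(t *testing.T) {
+	// Replace the real users.Get endpoint with a mock for this test.
+	et.MockEndpoint(t, users.Get, func(ctx context.Context, id string) (*users.Profile, error) {
+		return &users.Profile{ID: id}, nil
+	})
+
+	if err := Activate(context.Background(), "some-user-id"); err != nil {
+		t.Fatal(err)
+	}
+}
+```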
+
+### Optional: Automate your AWS/GCP with Encore Cloud
+
+
+
+
+
+Encore Cloud is Encore's managed service for teams that want to focus their engineering effort on product development rather than on platformization and DevOps.
+
+Encore Cloud provides **automatic infrastructure provisioning in your cloud on AWS & GCP**. Instead of writing Terraform or YAML, or clicking around in cloud consoles, you [connect your cloud account](https://encore.dev/docs/platform/deploy/own-cloud) and simply deploy your application. Because Encore's open source backend frameworks keep your application code cloud-agnostic, not tied to any specific infrastructure services, Encore Cloud lets you change your infrastructure as your needs evolve, without code changes or manual updates to infrastructure config files.
+
+When you deploy, Encore Cloud automatically provisions [infrastructure](https://encore.dev/docs/platform/infrastructure/infra) using battle-tested cloud services on AWS and GCP, such as:
+- **Compute:** GCP Cloud Run, AWS Fargate, Kubernetes (GKE and EKS)
+- **Databases:** GCP Cloud SQL, AWS RDS
+- **Pub/Sub:** GCP Pub/Sub, AWS SQS/SNS
+- **Caches:** GCP Memorystore, Amazon ElastiCache
+- **Object Storage:** GCS, Amazon S3
+- **Secrets:** GCP Secret Manager, AWS Secrets Manager
+- Etc.
+
+Encore Cloud also includes cloud versions of Encore's built-in development tools:
+
+- [Service Catalog & API Docs](https://encore.dev/docs/ts/observability/service-catalog)
+- [Architecture Diagrams](https://encore.dev/docs/ts/observability/flow)
+- [Tracing](https://encore.dev/docs/ts/observability/tracing)
+
+_Here's a video showing the Encore Cloud dashboard:_
+
+https://github.com/encoredev/encore/assets/78424526/8116b387-d4d4-4e54-8768-3686ba0245f5
+
+## Why use Encore?
+
+- **Faster Development**: Encore streamlines the development process with its backend frameworks, clear abstractions, and built-in local development tools.
+- **Scalability & Performance**: Encore simplifies building large-scale microservices applications that can handle growing user bases and demands, without the normal boilerplate and complexity.
+- **Control & Standardization**: Built-in tools like automated architecture diagrams, infrastructure tracking and approval workflows, make it easy for teams and leaders to get an overview of the entire application.
+- **Security & Compliance**: Encore Cloud helps ensure your application is secure and compliant by enforcing security standards like least privilege IAM, and provisioning infrastructure according to best practices for each cloud provider.
+- **Reduced Costs**: Encore Cloud's automatic infrastructure management minimizes wasteful cloud expenses and reduces DevOps workload, allowing you to work more efficiently.
+
+## Common use cases
+
+Encore is designed to give teams a productive and less complex experience when solving most backend use cases. Many teams use Encore to build things like:
+
+- High-performance B2B Platforms
+- Fintech & Consumer apps
+- Global E-commerce marketplaces
+- Microservices backends for SaaS applications and mobile apps
+- And much more...
+
+## Getting started
+
+- **1. Install Encore:**
+ - **macOS:** `brew install encoredev/tap/encore`
+ - **Linux:** `curl -L https://encore.dev/install.sh | bash`
+ - **Windows:** `iwr https://encore.dev/install.ps1 | iex`
+- **2. Create your first app:**
+ - **TypeScript:** `encore app create --example=ts/introduction`
+ - **Go:** `encore app create --example=hello-world`
+- **3. Star the project** on [GitHub](https://github.com/encoredev/encore) to stay up-to-date
+- **4. Explore the [Documentation](https://encore.dev/docs)** to learn more about Encore's features
+- **5. [Join Discord](https://encore.dev/discord)** to ask questions and meet other Encore developers
+
+## Open Source
+
+Everything needed to develop and deploy Encore applications is Open Source, including the backend frameworks, parser, compiler, runtime, and CLI.
+This includes all code needed for local development and everything that runs in your application when it is deployed.
+
+The Open Source CLI also provides a mechanism to generate Docker images for your application, so you can easily self-host it. [Learn more in the docs](https://encore.dev/docs/ts/self-host/build).
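+For example, `encore build docker my-app:latest` (described in the self-hosting docs linked above) produces a standalone image you can deploy anywhere.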
+
+## Join the most pioneering developer community
+
+Developers building with Encore are forward-thinkers who want to focus on creative programming and building great software to solve meaningful problems. It's a friendly place, great for exchanging ideas and learning new things! **Join the conversation on [Discord](https://encore.dev/discord).**
+
+We rely on your contributions and feedback to improve Encore for everyone who is using it.
+Here's how you can contribute:
+
+- ⭐ **Star and watch this repository to help spread the word and stay up to date.**
+- Meet fellow Encore developers and chat on [Discord](https://encore.dev/discord).
+- Follow Encore on [Twitter](https://twitter.com/encoredotdev).
+- Share feedback or ask questions via [email](mailto:hello@encore.dev).
+- Leave feedback on the [Public Roadmap](https://encore.dev/roadmap).
+- Send a pull request here on GitHub with your contribution.
+
+## Videos
+
+- Intro: Encore concepts & features
+- Demo video: Getting started with Encore.ts
+- Demo: Building and deploying a simple Go service
+- Demo: Building an event-driven system in Go
+
+## Visuals
+
+### Code example (Go)
+
+https://github.com/encoredev/encore/assets/78424526/f511b3fe-751f-4bb8-a1da-6c9e0765ac08
+
+### Local Development Dashboard
+
+https://github.com/encoredev/encore/assets/78424526/4c659fb8-e9ec-4f14-820b-c2b8d35e5359
+
+### Generated Architecture Diagrams & Service Catalog
+
+https://github.com/encoredev/encore/assets/78424526/a880ed2d-e9a6-4add-b5a8-a4b44b97587b
+
+### Auto-Provisioning Infrastructure & Multi-cloud Deployments
+
+https://github.com/encoredev/encore/assets/78424526/8116b387-d4d4-4e54-8768-3686ba0245f5
+
+### Distributed Tracing & Metrics
+
+https://github.com/encoredev/encore/assets/78424526/35189335-e3d7-4046-bab0-1af0f00d2504
+
+## Frequently Asked Questions (FAQ)
+
+### Who's behind Encore?
+
+Encore was founded by long-time backend engineers from Spotify, Google, and Monzo with over 50 years of collective experience. We’ve lived through the challenges of building complex distributed systems with thousands of services, and scaling to hundreds of millions of users.
+
+Encore grew out of these experiences and is a solution to the frustrations that came with them: unnecessary crippling complexity and constant repetition of undifferentiated work that suffocates the developer’s creativity. With Encore, we want to set developers free to achieve their creative potential.
+
+### Who is Encore for?
-#### Database Demo
-[](https://asciinema.org/a/406695)
+**For individual developers** building for the cloud, Encore provides a radically improved experience. With Encore you’re able to stay in the flow state and experience the joy and creativity of building.
-[Learn more in the Encore docs](https://encore.dev/docs/concepts/databases).
+**For startup teams** who need to build a scalable backend to support the growth of their product, Encore lets them get up and running in the cloud within minutes. It lets them focus on solving the needs of their users, instead of spending most of their time re-solving the everyday challenges of building distributed systems in the cloud.
-### API Documentation
+**For individual teams in large organizations** that want to focus on innovating and building new features, Encore lets them stop spending time on operations and onboarding new team members. Using Encore for new feature development is easy: just spin up a new backend service in a few minutes.
-Encore automatically generates API documentation for your app.
+### How is Encore different?
-You can access it by viewing the local development dashboard by opening the API URL
-in your browser when your app is running (normally [localhost:4060](http://localhost:4060)).
+Encore is the only tool that understands what you’re building: it uses static analysis to build a deep model of your application. This enables a unique developer experience that helps you stay in the flow as you’re building. For instance, you don't need to bother with configuring and managing infrastructure, setting up environments and keeping them in sync, or writing documentation and drafting architecture diagrams. Encore does all of this automatically out of the box.
-[](https://encore.dev/docs/concepts/api-docs)
+### Why does Encore provide infrastructure integrations through Encore Cloud?
-### Distributed Tracing
+We've found that to meaningfully improve the developer experience, you have to operate across the full stack. Unless you understand how an application is deployed, there are a large number of things in the development process that you can't simplify. That's why so many other developer tools have such a limited impact. With Encore's more integrated approach, we're able to unlock a radically better experience for developers.
-Encore automatically instruments your app with Distributed Tracing.
+### What if I want to migrate away from Encore?
-For local development you can access it by viewing the local development dashboard by opening the API URL
-in your browser when your app is running (normally [localhost:4060](http://localhost:4060)).
+Encore is designed to let you go outside of the framework when you want to, and easily drop down in abstraction level when you need to, so you never run into any dead-ends.
-Any API calls to your app automatically produces traces.
+Should you want to migrate away, it's straightforward and does not require a big rewrite. 99% of your code is regular Go or TypeScript.
-
+Encore provides tools for [self-hosting](https://encore.dev/docs/ts/self-host/build) your application, by using the Open Source CLI to produce a standalone Docker image that can be deployed anywhere you'd like.
-## Developing Encore and building from source
+## Contributing to Encore and building from source
-See [DEVELOPING.md](DEVELOPING.md).
\ No newline at end of file
+See [CONTRIBUTING.md](CONTRIBUTING.md).
diff --git a/check.bash b/check.bash
new file mode 100755
index 0000000000..d5d5e66557
--- /dev/null
+++ b/check.bash
@@ -0,0 +1,189 @@
+#!/usr/bin/env bash
+#
+# This script will run the same checks as Encore's CI pipeline and report the same static analysis errors
+# as the pipeline by default. It can be used to check for what errors might be reported by the pipeline
+# before you commit and open a PR.
+#
+# Usage:
+# ./check.bash [options]
+#
+# Options:
+# --base <ref>          The merge base to compare against (default: origin/main)
+# --diff                Show the diff against base instead of running the checks
+# --filter-mode <mode>  The filter mode to use for reviewdog; added, file, diff_context, nofilter (default: file)
+# --all                 Alias for `--filter-mode nofilter` (runs checks against all files in the working directory)
+#
+# Examples:
+#
+# # Run the checks against files changed since branching from origin/main
+# # (This is the default behavior and what our CI process does)
+# ./check.bash
+#
+# # Show the diff between the current working directory and origin/main
+# ./check.bash --diff
+#
+# # Run the checks against the entire working directory (regardless of changes made)
+# ./check.bash --all
+
+
+##############################################################################################################################
+# Step 0: Setup the script with basic error handling #
+##############################################################################################################################
+
+ set -euo pipefail
+ # nosemgrep
+ IFS=$'\n\t'
+
+ function errHandler() {
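+ # Print the failing line with three lines of context on either side, marking the offending line with ">>>".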
+ echo "Exiting due to an error line $1" >&2
+ echo "" >&2
+ awk 'NR>L-4 && NR> ":""),$0 }' L="$1" "$0" >&2
+ }
+ trap 'errHandler $LINENO' ERR
+
+
+##############################################################################################################################
+# Step 1: Configure the script with the parameters the user wants                                                           #
+##############################################################################################################################
+
+ # Parameters
+ WORK_DIR=$( dirname "${BASH_SOURCE[0]}" ) # Get the directory this script is in
+ BASE_REF="origin/main" # The merge base to compare against
+ DIFF_ONLY="false" # If true, show the diff instead of running the checks
+ FILTER_MODE="file" # The filter mode to use for reviewdog (added, file, diff_context, nofilter)
+
+ # Parse the command line arguments
+ while [[ $# -gt 0 ]]; do
+ case "$1" in
+ --base)
+ BASE_REF="$2"
+ shift 2
+ ;;
+ --diff)
+ DIFF_ONLY="true"
+ shift 1
+ ;;
+ --filter-mode)
+ FILTER_MODE="$2"
+ shift 2
+ ;;
+ --all)
+ FILTER_MODE="nofilter"
+ shift 1
+ ;;
+ *)
+ echo "Unknown argument: $1"
+ exit 1
+ ;;
+ esac
+ done
+
+
+##############################################################################################################################
+# Step 2: Check for required tools and error out if anything is missing which we can't install for the user #
+##############################################################################################################################
+
+ # Check for tools we can't install using go
+ command -v go >/dev/null 2>&1 || { echo >&2 "go is required but not installed. Aborting."; exit 1; }
+ command -v git >/dev/null 2>&1 || { echo >&2 "git is required but not installed. Aborting."; exit 1; }
+ command -v sed >/dev/null 2>&1 || { echo >&2 "sed is required but not installed. Aborting."; exit 1; }
+ command -v semgrep >/dev/null 2>&1 || { echo >&2 "semgrep is required but not installed. Aborting."; exit 1; }
+
+ # Now install all missing tools
+ command -v reviewdog >/dev/null 2>&1 || go install github.com/reviewdog/reviewdog/cmd/reviewdog@latest || { echo >&2 "Unable to install reviewdog. Aborting."; exit 1; }
+ command -v staticcheck >/dev/null 2>&1 || go install honnef.co/go/tools/cmd/staticcheck@latest || { echo >&2 "Unable to install staticcheck. Aborting."; exit 1; }
+ command -v errcheck >/dev/null 2>&1 || go install github.com/kisielk/errcheck@latest || { echo >&2 "Unable to install errcheck. Aborting."; exit 1; }
+ command -v ineffassign >/dev/null 2>&1 || go install github.com/gordonklaus/ineffassign@latest || { echo >&2 "Unable to install ineffassign. Aborting."; exit 1; }
+
+
+##############################################################################################################################
+# Step 3: Create a diff of the changes in the working directory against the common ancestor of the current branch and main #
+# This will be used to run static analysis checks on only the files that have changed. This diff should mimic the #
+# diff that would be created by GitHub when all current changes are committed and pushed into a PR on GitHub. #
+##############################################################################################################################
+
+ # Don't generate the diff if we don't need it to filter!
+ if [[ "$FILTER_MODE" != "nofilter" ]]; then
+
+ # Create a temp directory to store the common ancestor commit
+ TMP_DIR=$(mktemp -d)
+ if [[ ! "$TMP_DIR" || ! -d "$TMP_DIR" ]]; then
+ echo "Could not create temp dir"
+ exit 1
+ fi
+
+ # Create a temp file to store the diff we need
+ DIFF_FILE=$(mktemp)
+ if [[ ! "$DIFF_FILE" || ! -f "$DIFF_FILE" ]]; then
+ echo "Could not create temp diff file"
+ exit 1
+ fi
+
+ # Create a blank file to use as a comparison when a file is missing because either it's new or been deleted
+ BLANK_FILE=$(mktemp)
+ if [[ ! "$BLANK_FILE" || ! -f "$BLANK_FILE" ]]; then
+ echo "Could not create blank file"
+ exit 1
+ fi
+
+ # Clean up on exit and delete all the temp files we just created
+ function cleanup() {
+ rm -rf "$TMP_DIR"
+ rm -f "$DIFF_FILE"
+ rm -f "$BLANK_FILE"
+ }
+ trap cleanup EXIT
+
+ # Clone the repo into the temp directory
+ git clone -q "$WORK_DIR" "$TMP_DIR"
+
+ # Change our temp directory to be a clean copy of the common ancestor commit
+ pushd "$TMP_DIR" > /dev/null
+ git reset -q --hard HEAD
+ git checkout -q "$(git merge-base "$BASE_REF" HEAD)"
+ TRACKED_FILES_FROM_MAIN=$(git ls-files)
+ popd > /dev/null
+
+ # Create a list of files that we care about
+ MODIFICATIONS_IN_WORKING_DIR=$(git status --short | awk '{print $2}')
+ TRACKED_FILES_IN_WORKING_DIR=$(git ls-files)
+ ALL_FILES=$(echo "$TRACKED_FILES_IN_WORKING_DIR $MODIFICATIONS_IN_WORKING_DIR $TRACKED_FILES_FROM_MAIN" | tr ' ' '\n' | sort -u)
+
+ # Create a diff of the changes in the working directory against the common ancestor of the current branch and main
+ for file in $ALL_FILES; do
+ # If the original file doesn't exist, use a blank file instead
+ # (This means it was a new file that was added in the current version of the code base)
+ ORIGINAL_FILE="$TMP_DIR/$file"
+ if [[ ! -f "$ORIGINAL_FILE" ]]; then
+ ORIGINAL_FILE="$BLANK_FILE"
+ fi
+
+ # If the updated file doesn't exist, use a blank file instead
+ # (This means the file was deleted in the current version of the code base)
+ UPDATED_FILE="$WORK_DIR/$file"
+ if [[ ! -f "$UPDATED_FILE" ]]; then
+ UPDATED_FILE="$BLANK_FILE"
+ fi
+
+ # Run git diff between the original file and the updated file
+ # Replace the file paths in the diff to match the relative path in the working directory
+ # Then write the diff into our diff file
+ git diff "$ORIGINAL_FILE" "$UPDATED_FILE" | sed "s|$ORIGINAL_FILE|/$file|g" | sed "s|$UPDATED_FILE|/$file|g" >> "$DIFF_FILE" || true # Suppress the exit code
+ done
+
+ if [[ "$DIFF_ONLY" == "true" ]]; then
+ cat "$DIFF_FILE"
+ exit 0
+ fi
+ fi
+
+
+##############################################################################################################################
+# Step 4: Run reviewdog using the diff we just created, allowing reviewdog to only show errors from changes we've made      #
+##############################################################################################################################
+
+ if [[ "$FILTER_MODE" == "nofilter" ]]; then
+ reviewdog -filter-mode=nofilter
+ else
+ reviewdog -filter-mode="$FILTER_MODE" -diff="cat $DIFF_FILE"
+ fi
diff --git a/cli/cmd/encore/app.go b/cli/cmd/encore/app.go
deleted file mode 100644
index 3124deec05..0000000000
--- a/cli/cmd/encore/app.go
+++ /dev/null
@@ -1,628 +0,0 @@
-package main
-
-import (
- "archive/tar"
- "bytes"
- "compress/gzip"
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "io/fs"
- "io/ioutil"
- "net/http"
- "net/url"
- "os"
- "os/exec"
- "path"
- "path/filepath"
- "strings"
- "time"
-
- "encr.dev/cli/internal/conf"
- "github.com/AlecAivazis/survey/v2"
- "github.com/briandowns/spinner"
- "github.com/fatih/color"
- "github.com/spf13/cobra"
- "github.com/tailscale/hujson"
-)
-
-func init() {
- appCmd := &cobra.Command{
- Use: "app",
- Short: "Commands to create and link Encore apps",
- }
- rootCmd.AddCommand(appCmd)
-
- var createAppTemplate string
-
- createAppCmd := &cobra.Command{
- Use: "create [name]",
- Short: "Create a new Encore app",
- Args: cobra.MaximumNArgs(1),
- Run: func(cmd *cobra.Command, args []string) {
- name := ""
- if len(args) > 0 {
- name = args[0]
- }
- if err := createApp(context.Background(), name, createAppTemplate); err != nil {
- fatal(err)
- }
- },
- }
- appCmd.AddCommand(createAppCmd)
- createAppCmd.Flags().StringVar(&createAppTemplate, "example", "", "URL to example code to use.")
-
- var forceLink bool
- linkAppCmd := &cobra.Command{
- Use: "link [app-id]",
- Short: "Link an Encore app with the server",
- Args: cobra.MaximumNArgs(1),
- Run: func(cmd *cobra.Command, args []string) {
- var appID string
- if len(args) > 0 {
- appID = args[0]
- }
- linkApp(appID, forceLink)
- },
- }
- appCmd.AddCommand(linkAppCmd)
- linkAppCmd.Flags().BoolVarP(&forceLink, "force", "f", false, "Force link even if the app is already linked.")
-
- cloneAppCmd := &cobra.Command{
- Use: "clone [app-id] [directory]",
- Short: "Clone an Encore app to your computer",
- Args: cobra.MinimumNArgs(1),
- Run: func(c *cobra.Command, args []string) {
- cmdArgs := append([]string{"clone", "encore://" + args[0]}, args[1:]...)
- cmd := exec.Command("git", cmdArgs...)
- cmd.Stdin = os.Stdin
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- if err := cmd.Run(); err != nil {
- os.Exit(1)
- }
- },
- }
- appCmd.AddCommand(cloneAppCmd)
-}
-
-// createApp is the implementation of the "encore app create" command.
-func createApp(ctx context.Context, name, template string) (err error) {
- yellow := color.New(color.FgYellow)
- cyan := color.New(color.FgCyan)
- green := color.New(color.FgGreen)
-
- if _, err := conf.CurrentUser(); errors.Is(err, fs.ErrNotExist) {
- var loginNow bool
- cyan.Fprintln(os.Stderr, "You are not logged in to encore.dev.")
- survey.AskOne(&survey.Confirm{
- Message: cyan.Sprint("Log in now to automatically link your app with encore.dev?"),
- Default: true,
- }, &loginNow)
- if loginNow {
- if err := doLogin(); err != nil {
- fatal(err)
- }
- } else {
- yellow.Fprintln(os.Stderr, "Continuing without logging in. You can manually link your app later using `encore app link`.")
- }
- }
-
- if name == "" {
- survey.AskOne(&survey.Input{
- Message: "App Name (lowercase letters, digits, and dashes)",
- }, &name, survey.WithValidator(func(in interface{}) error { return validateName(in.(string)) }))
- }
-
- if err := validateName(name); err != nil {
- return err
- } else if _, err := os.Stat(name); err == nil {
- return fmt.Errorf("directory %s already exists", name)
- }
-
- if template == "" {
- var idx int
-
- dockerMsg := ""
- if _, err := exec.LookPath("docker"); err != nil {
- dockerMsg = " [requires Docker]"
- }
- prompt := &survey.Select{
- Message: "Select app template:",
- Options: []string{
- "Trello clone" + dockerMsg,
- "Hello World",
- "Empty app",
- },
- }
- survey.AskOne(prompt, &idx)
- switch idx {
- case 0:
- template = "trello-clone"
- case 1:
- template = "hello-world"
- case 2:
- template = ""
- }
- }
-
- // Parse template information, if provided.
- var ex *repoInfo
- if template != "" {
- var err error
- ex, err = parseTemplate(ctx, template)
- if err != nil {
- return err
- }
- }
-
- if err := os.Mkdir(name, 0755); err != nil {
- return err
- }
- defer func() {
- if err != nil {
- // Clean up the directory we just created in case of an error.
- os.RemoveAll(name)
- }
- }()
-
- if ex != nil {
- s := spinner.New(spinner.CharSets[14], 100*time.Millisecond)
- s.Prefix = fmt.Sprintf("Downloading template %s ", ex.Name)
- s.Start()
- err := downloadAndExtractTemplate(ctx, name, *ex)
- s.Stop()
- fmt.Println()
-
- if err != nil {
- return fmt.Errorf("failed to download template %s: %v", ex.Name, err)
- }
- gray := color.New(color.Faint)
- gray.Printf("Downloaded template %s.\n", ex.Name)
- } else {
- // Set up files that we need when we don't have an example
- if err := ioutil.WriteFile(filepath.Join(name, ".gitignore"), []byte("/.encore\n"), 0644); err != nil {
- fatal(err)
- }
- encoreModData := []byte("module encore.app\n")
- if err := ioutil.WriteFile(filepath.Join(name, "go.mod"), encoreModData, 0644); err != nil {
- fatal(err)
- }
- }
-
- // Create the app on the server.
- _, err = conf.CurrentUser()
- loggedIn := err == nil
-
- var app *appConf
- if loggedIn {
- s := spinner.New(spinner.CharSets[14], 100*time.Millisecond)
- s.Prefix = "Creating app on encore.dev "
- s.Start()
- app, err = createAppOnServer(name)
- s.Stop()
- if err != nil {
- return fmt.Errorf("creating app on encore.dev: %v", err)
- }
- }
-
- // Create the encore.app file
- var encoreAppData []byte
- if loggedIn {
- encoreAppData = []byte(`{
- "id": "` + app.Slug + `",
-}
-`)
- } else {
- encoreAppData = []byte(`{
- // The app is not currently linked to the encore.dev platform.
- // Use "encore app link" to link it.
- "id": "",
-}
-`)
- }
- if err := ioutil.WriteFile(filepath.Join(name, "encore.app"), encoreAppData, 0644); err != nil {
- return err
- }
-
- if err := initGitRepo(name, app); err != nil {
- return err
- }
-
- green.Printf("\nSuccessfully created app %s!\n", name)
- cyanf := cyan.SprintfFunc()
- if app != nil {
- fmt.Printf("App ID: %s\n", cyanf(app.Slug))
- fmt.Printf("Web URL: %s%s", cyanf("https://app.encore.dev/"+app.Slug), newline)
- }
-
- fmt.Print("\nUseful commands:\n\n")
-
- cyan.Printf(" encore run\n")
- fmt.Print(" Run your app locally\n\n")
-
- cyan.Printf(" encore test ./...\n")
- fmt.Print(" Run tests\n\n")
-
- if app != nil {
- cyan.Printf(" git push encore\n")
- fmt.Print(" Deploys your app\n\n")
- }
-
- greenBoldF := green.Add(color.Bold).SprintfFunc()
- fmt.Printf("Get started now: %s\n", greenBoldF("cd %s && encore run", name))
-
- return nil
-}
-
-func validateName(name string) error {
- ln := len(name)
- if ln == 0 {
- return fmt.Errorf("name must not be empty")
- } else if ln > 50 {
- return fmt.Errorf("name too long (max 50 chars)")
- }
-
- for i, s := range name {
- // Outside of [a-z], [0-9] and != '-'?
- if !((s >= 'a' && s <= 'z') || (s >= '0' && s <= '9') || s == '-') {
- return fmt.Errorf("name must only contain lowercase letters, digits, or dashes")
- } else if s == '-' {
- if i == 0 {
- return fmt.Errorf("name cannot start with a dash")
- } else if (i + 1) == ln {
- return fmt.Errorf("name cannot end with a dash")
- } else if name[i-1] == '-' {
- return fmt.Errorf("name cannot contain repeated dashes")
- }
- }
- }
- return nil
-}
-
-type appConf struct {
- Slug string `json:"slug"`
- DefaultBranch *string `json:"main_branch"`
-}
-
-func createAppOnServer(name string) (*appConf, error) {
- if _, err := conf.CurrentUser(); err != nil {
- return nil, err
- }
-
- url := "https://api.encore.dev/apps"
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
- reqData, _ := json.Marshal(map[string]string{"Name": name})
- req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(reqData))
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", "application/json")
- var respData struct {
- Data appConf
- }
- err = slurpJSON(req, &respData)
- return &respData.Data, err
-}
-
-func validateAppID(id string) (ok bool, err error) {
- if _, err := conf.CurrentUser(); errors.Is(err, fs.ErrNotExist) {
- fatal("not logged in. Run 'encore auth login' first.")
- } else if err != nil {
- return false, err
- }
-
- url := "https://api.encore.dev/apps/" + url.PathEscape(id)
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
- req, err := http.NewRequestWithContext(ctx, "POST", url, nil)
- if err != nil {
- return false, err
- }
- req.Header.Set("Content-Type", "application/json")
-
- client := conf.AuthClient()
- resp, err := client.Do(req)
- if err != nil {
- return false, err
- }
- defer resp.Body.Close()
- switch resp.StatusCode {
- case 200:
- return true, nil
- case 404:
- return false, nil
- default:
- body, _ := ioutil.ReadAll(resp.Body)
- return false, fmt.Errorf("server responded with HTTP %s: %s", resp.Status, body)
- }
-}
-
-type repoInfo struct {
- Owner string
- Repo string
- Branch string
- Path string // subdirectory to copy ("." for whole project)
- Name string // example name
-}
-
-func parseTemplate(ctx context.Context, tmpl string) (*repoInfo, error) {
- switch {
- case strings.HasPrefix(tmpl, "http"):
- // Already a URL; do nothing
- case strings.HasPrefix(tmpl, "github.com"):
- // Assume a URL without the scheme
- tmpl = "https://" + tmpl
- default:
- // Simple template name
- tmpl = "https://github.com/encoredev/examples/tree/main/" + tmpl
- }
-
- u, err := url.Parse(tmpl)
- if err != nil {
- return nil, fmt.Errorf("invalid template: %v", err)
- }
- if u.Host != "github.com" {
- return nil, fmt.Errorf("template must be hosted on GitHub, not %s", u.Host)
- }
- // Path must be one of:
- // "/owner/repo"
- // "/owner/repo/tree/"
- // "/owner/repo/tree//path"
- parts := strings.SplitN(u.Path, "/", 6)
- switch {
- case len(parts) == 3: // "/owner/repo"
- owner, repo := parts[1], parts[2]
- // Check the default branch
- var resp struct {
- DefaultBranch string `json:"default_branch"`
- }
- url := fmt.Sprintf("https://api.github.com/repos/%s/%s", owner, repo)
- req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
- if err != nil {
- return nil, err
- } else if err := slurpJSON(req, &resp); err != nil {
- return nil, err
- }
- return &repoInfo{
- Owner: owner,
- Repo: repo,
- Branch: resp.DefaultBranch,
- Path: ".",
- Name: repo,
- }, nil
- case len(parts) >= 5: // "/owner/repo/tree/<branch>[/path]"
- owner, repo, t, branch := parts[1], parts[2], parts[3], parts[4]
- p := "."
- name := repo
- if len(parts) == 6 {
- p = parts[5]
- name = path.Base(p)
- }
- if t != "tree" {
- return nil, fmt.Errorf("unsupported template url: %s", tmpl)
- }
- return &repoInfo{
- Owner: owner,
- Repo: repo,
- Branch: branch,
- Path: p,
- Name: name,
- }, nil
- default:
- return nil, fmt.Errorf("unsupported template url: %s", tmpl)
- }
-}
-
-func downloadAndExtractTemplate(ctx context.Context, dst string, info repoInfo) error {
- url := fmt.Sprintf("https://codeload.github.com/%s/%s/tar.gz/%s", info.Owner, info.Repo, info.Branch)
- req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
- if err != nil {
- return err
- }
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
- if resp.StatusCode != 200 {
- return fmt.Errorf("GET %s: got non-200 response: %s", url, resp.Status)
- }
- gz, err := gzip.NewReader(resp.Body)
- if err != nil {
- return fmt.Errorf("could not read gzip response: %v", err)
- }
- defer gz.Close()
- tr := tar.NewReader(gz)
-
- prefix := path.Join(info.Repo+"-"+info.Branch, info.Path)
- prefix += "/"
- files := 0
- for {
- hdr, err := tr.Next()
- if err == io.EOF {
- if files == 0 {
- return fmt.Errorf("could not find template")
- }
- return nil
- } else if err != nil {
- return fmt.Errorf("reading repo data: %v", err)
- }
- if hdr.FileInfo().IsDir() {
- continue
- }
- if p := path.Clean(hdr.Name); strings.HasPrefix(p, prefix) {
- files++
- p = p[len(prefix):]
- filePath := filepath.Join(dst, filepath.FromSlash(p))
- if err := createFile(tr, filePath); err != nil {
- return fmt.Errorf("create %s: %v", p, err)
- }
- }
- }
-}
-
-func createFile(src io.Reader, dst string) error {
- if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
- return err
- }
- f, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0644)
- if err != nil {
- return err
- }
- _, err = io.Copy(f, src)
- if err2 := f.Close(); err == nil {
- err = err2
- }
- return err
-}
-
-func slurpJSON(req *http.Request, respData interface{}) error {
- resp, err := conf.AuthClient().Do(req)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
- if resp.StatusCode != 200 {
- body, _ := ioutil.ReadAll(resp.Body)
- return fmt.Errorf("got non-200 response: %s: %s", resp.Status, body)
- }
- if err := json.NewDecoder(resp.Body).Decode(respData); err != nil {
- return fmt.Errorf("could not decode response: %v", err)
- }
- return nil
-}
-
-// initGitRepo initializes the git repo.
-// If app is not nil, it configures the repo to push to the given app.
-// If git does not exist, it reports an error matching exec.ErrNotFound.
-func initGitRepo(path string, app *appConf) (err error) {
- defer func() {
- if e := recover(); e != nil {
- if ee, ok := e.(error); ok {
- err = ee
- } else {
- panic(e)
- }
- }
- }()
-
- git := func(args ...string) []byte {
- cmd := exec.Command("git", args...)
- cmd.Dir = path
- out, err := cmd.CombinedOutput()
- if err != nil && err != exec.ErrNotFound {
- panic(fmt.Errorf("git %s: %s (%w)", strings.Join(args, " "), out, err))
- }
- return out
- }
-
- // Initialize git repo
- git("init")
- if app != nil && app.DefaultBranch != nil {
- git("checkout", "-b", *app.DefaultBranch)
- }
- git("config", "--local", "push.default", "current")
- git("add", "-A")
-
- cmd := exec.Command("git", "commit", "-m", "Initial commit")
- cmd.Dir = path
- // Configure the committer if the user hasn't done it themselves yet.
- if ok, _ := gitUserConfigured(); !ok {
- cmd.Env = append(os.Environ(),
- "GIT_AUTHOR_NAME=Encore",
- "GIT_AUTHOR_EMAIL=git-bot@encore.dev",
- "GIT_COMMITTER_NAME=Encore",
- "GIT_COMMITTER_EMAIL=git-bot@encore.dev",
- )
- }
- if out, err := cmd.CombinedOutput(); err != nil && err != exec.ErrNotFound {
- return fmt.Errorf("create initial commit repository: %s (%v)", out, err)
- }
-
- if app != nil {
- git("remote", "add", "encore", "encore://"+app.Slug)
- }
-
- return nil
-}
-
-func addEncoreRemote(root, appID string) {
- // Determine if there are any remotes
- cmd := exec.Command("git", "remote")
- cmd.Dir = root
- out, err := cmd.CombinedOutput()
- if err != nil {
- return
- }
- out = bytes.TrimSpace(out)
- if len(out) == 0 {
- cmd = exec.Command("git", "remote", "add", "encore", "encore://"+appID)
- cmd.Dir = root
- if err := cmd.Run(); err == nil {
- fmt.Println("Configured git remote 'encore' to push/pull with Encore.")
- }
- }
-}
-
-func linkApp(appID string, force bool) {
- root, _ := determineAppRoot()
- filePath := filepath.Join(root, "encore.app")
-
- // Parse the app data using a map so we preserve all
- // the keys present when writing it back below.
- var appData map[string]interface{}
- if data, err := ioutil.ReadFile(filePath); err != nil {
- fatal(err)
- os.Exit(1)
- } else if err := hujson.Unmarshal(data, &appData); err != nil {
- fatal("could not parse encore.app: ", err)
- os.Exit(1)
- } else if appData["id"] != nil && appData["id"] != "" {
- fatal("the app is already linked.\n\nNote: to link to a different app, specify the --force flag.")
- }
-
- if appID == "" {
- fmt.Println("Make sure the app is created on app.encore.dev, and then enter its ID to link it.")
- fmt.Print("App ID: ")
- if _, err := fmt.Scanln(&appID); err != nil {
- fatal(err)
- } else if appID == "" {
- fatal("no app id given.")
- }
- }
-
- if linked, err := validateAppID(appID); err != nil {
- fatal(err)
- } else if !linked {
- fmt.Fprintln(os.Stderr, "Error: that app does not exist, or you don't have access to it.")
- os.Exit(1)
- }
-
- appData["id"] = appID
- data, _ := hujson.MarshalIndent(appData, "", " ")
- if err := ioutil.WriteFile(filePath, data, 0644); err != nil {
- fatal(err)
- os.Exit(1)
- }
-
- addEncoreRemote(root, appID)
- fmt.Println("Successfully linked app!")
-}
-
-// gitUserConfigured reports whether the user has configured
-// user.name and user.email in git.
-func gitUserConfigured() (bool, error) {
- for _, s := range []string{"user.name", "user.email"} {
- out, err := exec.Command("git", "config", s).CombinedOutput()
- if err != nil {
- return false, err
- } else if len(bytes.TrimSpace(out)) == 0 {
- return false, nil
- }
- }
- return true, nil
-}
diff --git a/cli/cmd/encore/app/app.go b/cli/cmd/encore/app/app.go
new file mode 100644
index 0000000000..cd4969445b
--- /dev/null
+++ b/cli/cmd/encore/app/app.go
@@ -0,0 +1,23 @@
+package app
+
+import (
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/root"
+)
+
+// These can be overridden using
+// `go build -ldflags "-X encr.dev/cli/cmd/encore/app.defaultGitRemoteName=encore"`.
+var (
+ defaultGitRemoteName = "encore"
+ defaultGitRemoteURL = "encore://"
+)
+
+var appCmd = &cobra.Command{
+ Use: "app",
+ Short: "Commands to create and link Encore apps",
+}
+
+func init() {
+ root.Cmd.AddCommand(appCmd)
+}
diff --git a/cli/cmd/encore/app/clone.go b/cli/cmd/encore/app/clone.go
new file mode 100644
index 0000000000..ceef94b513
--- /dev/null
+++ b/cli/cmd/encore/app/clone.go
@@ -0,0 +1,42 @@
+package app
+
+import (
+ "os"
+ "os/exec"
+
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+)
+
+var cloneAppCmd = &cobra.Command{
+ Use: "clone [app-id] [directory]",
+ Short: "Clone an Encore app to your computer",
+ Args: cobra.MinimumNArgs(1),
+
+ DisableFlagsInUseLine: true,
+ Run: func(c *cobra.Command, args []string) {
+ cmdArgs := append([]string{"clone", "--origin", defaultGitRemoteName, defaultGitRemoteURL + args[0]}, args[1:]...)
+ cmd := exec.Command("git", cmdArgs...)
+ cmd.Stdin = os.Stdin
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ if err := cmd.Run(); err != nil {
+ os.Exit(1)
+ }
+ },
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ switch len(args) {
+ case 0:
+ return cmdutil.AutoCompleteAppSlug(cmd, args, toComplete)
+ case 1:
+ return nil, cobra.ShellCompDirectiveFilterDirs
+ default:
+ return nil, cobra.ShellCompDirectiveDefault
+ }
+ },
+}
+
+func init() {
+ appCmd.AddCommand(cloneAppCmd)
+}
diff --git a/cli/cmd/encore/app/create.go b/cli/cmd/encore/app/create.go
new file mode 100644
index 0000000000..a38a31aba7
--- /dev/null
+++ b/cli/cmd/encore/app/create.go
@@ -0,0 +1,696 @@
+package app
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io/fs"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/briandowns/spinner"
+ "github.com/cockroachdb/errors"
+ "github.com/fatih/color"
+ "github.com/spf13/cobra"
+ "github.com/tailscale/hujson"
+
+ "encr.dev/cli/cmd/encore/auth"
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/cli/internal/platform"
+ "encr.dev/cli/internal/telemetry"
+ "encr.dev/internal/conf"
+ "encr.dev/internal/env"
+ "encr.dev/internal/version"
+ "encr.dev/pkg/github"
+ "encr.dev/pkg/xos"
+ daemonpb "encr.dev/proto/encore/daemon"
+)
+
+var (
+ createAppTemplate string
+ createAppOnPlatform bool
+)
+
+var createAppCmd = &cobra.Command{
+ Use: "create [name]",
+ Short: "Create a new Encore app",
+ Args: cobra.MaximumNArgs(1),
+
+ DisableFlagsInUseLine: true,
+ Run: func(cmd *cobra.Command, args []string) {
+ name := ""
+ if len(args) > 0 {
+ name = args[0]
+ }
+ if err := createApp(context.Background(), name, createAppTemplate); err != nil {
+ cmdutil.Fatal(err)
+ }
+ },
+}
+
+func init() {
+ appCmd.AddCommand(createAppCmd)
+ createAppCmd.Flags().BoolVar(&createAppOnPlatform, "platform", true, "whether to create the app with the Encore Platform")
+ createAppCmd.Flags().StringVar(&createAppTemplate, "example", "", "URL to example code to use.")
+}
+
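+// promptAccountCreation asks a user who is not logged in whether to log in or
+// sign up before the app is created; answering no continues without an account.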
+func promptAccountCreation() {
+ cyan := color.New(color.FgCyan)
+ red := color.New(color.FgRed)
+ // Prompt the user for creating an account if they're not logged in.
+ if _, err := conf.CurrentUser(); errors.Is(err, fs.ErrNotExist) && createAppOnPlatform {
+ PromptLoop:
+ for {
+ _, _ = cyan.Fprint(os.Stderr, "Log in / Sign up for a free Encore Cloud account to enable automated cloud deployments? (Y/n): ")
+ var input string
+ _, _ = fmt.Scanln(&input)
+ input = strings.TrimSpace(input)
+ switch input {
+ case "Y", "y", "yes", "":
+ telemetry.Send("app.create.account", map[string]any{"response": true})
+ if err := auth.DoLogin(auth.AutoFlow); err != nil {
+ cmdutil.Fatal(err)
+ }
+ case "N", "n", "no":
+ telemetry.Send("app.create.account", map[string]any{"response": false})
+ // Continue without creating an account.
+ case "q", "quit", "exit":
+ os.Exit(1)
+ default:
+ // Try again.
+ _, _ = red.Fprintln(os.Stderr, "Unexpected answer, please enter 'y' or 'n'.")
+ continue PromptLoop
+ }
+ break
+ }
+ }
+}
+
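+// promptRunApp asks whether to start the newly created app right away and
+// reports the user's answer.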
+func promptRunApp() bool {
+ cyan := color.New(color.FgCyan)
+ red := color.New(color.FgRed)
+ for {
+ _, _ = cyan.Fprint(os.Stderr, "Run your app now? (Y/n): ")
+ var input string
+ _, _ = fmt.Scanln(&input)
+ input = strings.TrimSpace(input)
+ switch input {
+ case "Y", "y", "yes", "":
+ telemetry.Send("app.create.run", map[string]any{"response": true})
+ return true
+ case "N", "n", "no":
+ telemetry.Send("app.create.run", map[string]any{"response": false})
+ return false
+ case "q", "quit", "exit":
+ telemetry.Send("app.create.run", map[string]any{"response": false})
+ return false
+ default:
+ // Try again.
+ _, _ = red.Fprintln(os.Stderr, "Unexpected answer, please enter 'y' or 'n'.")
+ }
+ }
+}
+
+// createApp is the implementation of the "encore app create" command.
+func createApp(ctx context.Context, name, template string) (err error) {
+ var lang language
+ defer func() {
+ // We need to send the telemetry synchronously to ensure it's sent before the command exits.
+ telemetry.SendSync("app.create", map[string]any{
+ "template": template,
+ "lang": lang,
+ "error": err != nil,
+ })
+ }()
+ cyan := color.New(color.FgCyan)
+ green := color.New(color.FgGreen)
+
+ promptAccountCreation()
+
+ if name == "" || template == "" {
+ name, template, lang = selectTemplate(name, template, false)
+ }
+ // Treat the special name "empty" as the empty app template
+ // (the rest of the code assumes that's the empty string).
+ if template == "empty" {
+ template = ""
+ }
+
+ if err := validateName(name); err != nil {
+ return err
+ } else if _, err := os.Stat(name); err == nil {
+ return fmt.Errorf("directory %s already exists", name)
+ }
+
+ // Parse template information, if provided.
+ var ex *github.Tree
+ if template != "" {
+ var err error
+ ex, err = parseTemplate(ctx, template)
+ if err != nil {
+ return err
+ }
+ }
+
+ if err := os.Mkdir(name, 0755); err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ // Clean up the directory we just created in case of an error.
+ _ = os.RemoveAll(name)
+ }
+ }()
+
+ if ex != nil {
+ s := spinner.New(spinner.CharSets[14], 100*time.Millisecond)
+ s.Prefix = fmt.Sprintf("Downloading template %s ", ex.Name())
+ s.Start()
+ err := github.ExtractTree(ctx, ex, name)
+ s.Stop()
+ fmt.Println()
+
+ if err != nil {
+ return fmt.Errorf("failed to download template %s: %v", ex.Name(), err)
+ }
+ gray := color.New(color.Faint)
+ _, _ = gray.Printf("Downloaded template %s.\n", ex.Name())
+ } else {
+ // Set up files that we need when we don't have an example
+ if err := xos.WriteFile(filepath.Join(name, ".gitignore"), []byte("/.encore\n"), 0644); err != nil {
+ cmdutil.Fatal(err)
+ }
+ encoreModData := []byte("module encore.app\n")
+ if err := xos.WriteFile(filepath.Join(name, "go.mod"), encoreModData, 0644); err != nil {
+ cmdutil.Fatal(err)
+ }
+ }
+
+ _, err = conf.CurrentUser()
+ loggedIn := err == nil
+
+ exCfg, err := parseExampleConfig(name)
+ if err != nil {
+ return fmt.Errorf("failed to parse example config: %v", err)
+ }
+
+ // Delete the example config file.
+ _ = os.Remove(exampleJSONPath(name))
+
+ var app *platform.App
+ if loggedIn && createAppOnPlatform {
+ s := spinner.New(spinner.CharSets[14], 100*time.Millisecond)
+ s.Prefix = "Creating app on encore.dev "
+ s.Start()
+ app, err = createAppOnServer(name, exCfg)
+ s.Stop()
+ if err != nil {
+ return fmt.Errorf("creating app on encore.dev: %v", err)
+ }
+ }
+
+ appRootRelpath := filepath.FromSlash(exCfg.EncoreAppPath)
+ encoreAppPath := filepath.Join(name, appRootRelpath, "encore.app")
+ appData, err := os.ReadFile(encoreAppPath)
+ if err != nil {
+ appData, err = []byte("{}"), nil
+ }
+
+ if app != nil {
+ appData, err = setEncoreAppID(appData, app.Slug, []string{})
+ } else {
+ appData, err = setEncoreAppID(appData, "", []string{
+ "The app is not currently linked to the encore.dev platform.",
+ `Use "encore app link" to link it.`,
+ })
+ }
+ if err != nil {
+ return errors.Wrap(err, "write encore.app file")
+ }
+ if err := xos.WriteFile(encoreAppPath, appData, 0644); err != nil {
+ return errors.Wrap(err, "write encore.app file")
+ }
+
+ // Update to latest encore.dev release
+ if _, err := os.Stat(filepath.Join(name, appRootRelpath, "go.mod")); err == nil {
+ s := spinner.New(spinner.CharSets[14], 100*time.Millisecond)
+ s.Prefix = "Running go get encore.dev@latest"
+ s.Start()
+ if err := gogetEncore(filepath.Join(name, appRootRelpath)); err != nil {
+ s.FinalMSG = fmt.Sprintf("failed, skipping: %v", err.Error())
+ }
+ s.Stop()
+ } else if _, err := os.Stat(filepath.Join(name, appRootRelpath, "package.json")); err == nil {
+ s := spinner.New(spinner.CharSets[14], 100*time.Millisecond)
+ s.Prefix = "Running npm install encore.dev@latest"
+ s.Start()
+ if err := npmInstallEncore(filepath.Join(name, appRootRelpath)); err != nil {
+ s.FinalMSG = fmt.Sprintf("failed, skipping: %v", err.Error())
+ }
+ s.Stop()
+ }
+
+ // Rewrite any existence of ENCORE_APP_ID to the allocated app id.
+ if app != nil {
+ if err := rewritePlaceholders(name, app); err != nil {
+ red := color.New(color.FgRed)
+ _, _ = red.Printf("Failed rewriting source code placeholders, skipping: %v\n", err)
+ }
+ }
+
+ if err := initGitRepo(name, app); err != nil {
+ return err
+ }
+
+ // Try to generate wrappers. Don't error out if it fails for some reason,
+ // it's a nice-to-have to avoid IDEs thinking there are compile errors before 'encore run' runs.
+ _ = generateWrappers(filepath.Join(name, appRootRelpath))
+
+ // Create the app on the daemon.
+ appRoot, err := filepath.Abs(filepath.Join(name, appRootRelpath))
+ if err != nil {
+ cmdutil.Fatalf("failed to get absolute path: %v", err)
+ }
+ daemon := cmdutil.ConnectDaemon(ctx)
+ _, err = daemon.CreateApp(ctx, &daemonpb.CreateAppRequest{
+ AppRoot: appRoot,
+ Tutorial: exCfg.Tutorial,
+ Template: template,
+ })
+ if err != nil {
+ color.Red("Failed to create app on daemon: %s\n", err)
+ }
+ cmdutil.ClearTerminalExceptFirstNLines(0)
+ _, _ = green.Printf("Successfully created app %s!\n", name)
+ if app != nil {
+ cyanf := cyan.SprintfFunc()
+ fmt.Println()
+ fmt.Printf("App ID: %s\n", cyanf(app.Slug))
+ fmt.Printf("Web URL: %s%s", cyanf("https://app.encore.cloud/"+app.Slug), cmdutil.Newline)
+ fmt.Printf("App Root: %s\n", cyanf(appRoot))
+ fmt.Println()
+ }
+ greenBoldF := green.Add(color.Bold).SprintfFunc()
+ fmt.Printf("Run your app with: %s\n", greenBoldF("cd %s && encore run", filepath.Join(name, appRootRelpath)))
+ fmt.Println()
+ if promptRunApp() {
+ cmdutil.ClearTerminalExceptFirstNLines(0)
+ stream, err := daemon.Run(ctx, &daemonpb.RunRequest{
+ AppRoot: appRoot,
+ Watch: true,
+ WorkingDir: ".",
+ Environ: os.Environ(),
+ ListenAddr: "127.0.0.1:4000",
+ Browser: daemonpb.RunRequest_BROWSER_ALWAYS,
+ })
+ if err != nil {
+ cmdutil.Fatalf("failed to run app: %v", err)
+ }
+ converter := cmdutil.ConvertJSONLogs(cmdutil.Colorize(true))
+ _ = cmdutil.StreamCommandOutput(stream, converter)
+ return nil
+ }
+ cmdutil.ClearTerminalExceptFirstNLines(0)
+ fmt.Print("Useful commands:\n\n")
+
+ _, _ = cyan.Printf(" encore run\n")
+ fmt.Print(" Run your app locally\n\n")
+
+ if detectLang(name) == languageGo {
+ _, _ = cyan.Printf(" encore test ./...\n")
+ } else {
+ _, _ = cyan.Printf(" encore test\n")
+ }
+ fmt.Print(" Run tests\n\n")
+
+ if app != nil {
+ _, _ = cyan.Printf(" git push encore\n")
+ fmt.Print(" Deploys your app\n\n")
+ }
+
+ fmt.Printf("Get started now: %s\n", greenBoldF("cd %s && encore run", filepath.Join(name, appRootRelpath)))
+ return nil
+}
+
+// detectLang attempts to detect the application language for an Encore application
+// situated at appRoot.
+func detectLang(appRoot string) language {
+ if _, err := os.Stat(filepath.Join(appRoot, "go.mod")); err == nil {
+ return languageGo
+ } else if _, err := os.Stat(filepath.Join(appRoot, "package.json")); err == nil {
+ return languageTS
+ }
+ return languageGo
+}
+
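+// validateName reports an error unless name is 1-50 characters long and
+// consists of lowercase letters, digits, and dashes, with no leading,
+// trailing, or repeated dashes.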
+func validateName(name string) error {
+ ln := len(name)
+ if ln == 0 {
+ return fmt.Errorf("name must not be empty")
+ } else if ln > 50 {
+ return fmt.Errorf("name too long (max 50 chars)")
+ }
+
+ for i, s := range name {
+ // Outside of [a-z], [0-9] and != '-'?
+ if !((s >= 'a' && s <= 'z') || (s >= '0' && s <= '9') || s == '-') {
+ return fmt.Errorf("name must only contain lowercase letters, digits, or dashes")
+ } else if s == '-' {
+ if i == 0 {
+ return fmt.Errorf("name cannot start with a dash")
+ } else if (i + 1) == ln {
+ return fmt.Errorf("name cannot end with a dash")
+ } else if name[i-1] == '-' {
+ return fmt.Errorf("name cannot contain repeated dashes")
+ }
+ }
+ }
+ return nil
+}
+
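+// gogetEncore runs 'go get encore.dev@latest' in dir, preferring the Go
+// binary bundled with the Encore GOROOT when one is available.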
+func gogetEncore(dir string) error {
+ var goBinPath string
+
+ // Prefer the 'go' binary from the Encore GOROOT if available.
+ if goroot, ok := env.OptEncoreGoRoot().Get(); ok {
+ goBinPath = filepath.Join(goroot, "bin", "go")
+ } else {
+ // Otherwise fall back to just "go", so that exec.Command
+ // does a path lookup.
+ goBinPath = "go"
+ }
+
+ // Use the 'go' binary from the Encore GOROOT in case the user
+ // does not have Go installed separately from Encore.
+ // nosemgrep go.lang.security.audit.dangerous-exec-command.dangerous-exec-command
+ cmd := exec.Command(goBinPath, "get", "encore.dev@latest")
+ cmd.Dir = dir
+ if out, err := cmd.CombinedOutput(); err != nil {
+ return errors.Newf("go get failed: %v: %s", err, out)
+ }
+ return nil
+}
+
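+// npmInstallEncore installs the 'encore.dev' npm package into dir (the local
+// runtime copy for dev builds, the matching release otherwise) and then runs
+// 'npm install' for the remaining dependencies.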
+func npmInstallEncore(dir string) error {
+ args := []string{"install"}
+ if version.Channel == version.DevBuild {
+ args = append(args, filepath.Join(env.EncoreRuntimesPath(), "js", "encore.dev"))
+ } else {
+ args = append(args, fmt.Sprintf("encore.dev@%s", strings.TrimPrefix(version.Version, "v")))
+ }
+
+ // First install the 'encore.dev' package.
+ cmd := exec.Command("npm", args...)
+ cmd.Dir = dir
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ err = fmt.Errorf("installing encore.dev package failed: %v: %s", err, out)
+ }
+
+ // Then run 'npm install'.
+ cmd = exec.Command("npm", "install")
+ cmd.Dir = dir
+ if out2, err2 := cmd.CombinedOutput(); err2 != nil && err == nil {
+ err = fmt.Errorf("'npm install' failed: %v: %s", err2, out2)
+ }
+
+ return err
+}
+
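+// createAppOnServer creates the app on the Encore platform, passing along any
+// initial secrets from the example configuration. It times out after 30 seconds.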
+func createAppOnServer(name string, cfg exampleConfig) (*platform.App, error) {
+ if _, err := conf.CurrentUser(); err != nil {
+ return nil, err
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ params := &platform.CreateAppParams{
+ Name: name,
+ InitialSecrets: cfg.InitialSecrets,
+ AppRootDir: cfg.EncoreAppPath,
+ }
+ return platform.CreateApp(ctx, params)
+}
+
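+// parseTemplate resolves a template reference to a GitHub tree. Bare names
+// are looked up in the encoredev/examples repository.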
+func parseTemplate(ctx context.Context, tmpl string) (*github.Tree, error) {
+ // If the template does not contain a colon or a dot, it's definitely
+ // not a github.com URL. Assume it's a simple template name.
+ if !strings.Contains(tmpl, ":") && !strings.Contains(tmpl, ".") {
+ tmpl = "https://github.com/encoredev/examples/tree/main/" + tmpl
+ }
+ return github.ParseTree(ctx, tmpl)
+}
+
+// initGitRepo initializes the git repo.
+// If app is not nil, it configures the repo to push to the given app.
+// If git does not exist, it reports an error matching exec.ErrNotFound.
+func initGitRepo(path string, app *platform.App) (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if ee, ok := e.(error); ok {
+ err = ee
+ } else {
+ panic(e)
+ }
+ }
+ }()
+
+ git := func(args ...string) []byte {
+ cmd := exec.Command("git", args...)
+ cmd.Dir = path
+ out, err := cmd.CombinedOutput()
+ if err != nil && !errors.Is(err, exec.ErrNotFound) {
+ panic(fmt.Errorf("git %s: %s (%w)", strings.Join(args, " "), out, err))
+ }
+ return out
+ }
+
+ // Initialize git repo
+ git("init")
+ if app != nil && app.MainBranch != nil {
+ git("checkout", "-b", *app.MainBranch)
+ }
+ git("config", "--local", "push.default", "current")
+ git("add", "-A")
+
+ cmd := exec.Command("git", "commit", "-m", "Initial commit")
+ cmd.Dir = path
+ // Configure the committer if the user hasn't done it themselves yet.
+ if ok, _ := gitUserConfigured(); !ok {
+ cmd.Env = append(os.Environ(),
+ "GIT_AUTHOR_NAME=Encore",
+ "GIT_AUTHOR_EMAIL=git-bot@encore.dev",
+ "GIT_COMMITTER_NAME=Encore",
+ "GIT_COMMITTER_EMAIL=git-bot@encore.dev",
+ )
+ }
+ if out, err := cmd.CombinedOutput(); err != nil && !errors.Is(err, exec.ErrNotFound) {
+ return fmt.Errorf("create initial commit repository: %s (%v)", out, err)
+ }
+
+ if app != nil {
+ git("remote", "add", defaultGitRemoteName, defaultGitRemoteURL+app.Slug)
+ }
+
+ return nil
+}
+
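+// addEncoreRemote adds the 'encore' git remote pointing at appID, but only if
+// the repository has no remotes configured yet.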
+func addEncoreRemote(root, appID string) {
+ // Determine if there are any remotes
+ cmd := exec.Command("git", "remote")
+ cmd.Dir = root
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ return
+ }
+ out = bytes.TrimSpace(out)
+ if len(out) == 0 {
+ cmd = exec.Command("git", "remote", "add", defaultGitRemoteName, defaultGitRemoteURL+appID)
+ cmd.Dir = root
+ if err := cmd.Run(); err == nil {
+ fmt.Println("Configured git remote 'encore' to push/pull with Encore.")
+ }
+ }
+}
+
+// gitUserConfigured reports whether the user has configured
+// user.name and user.email in git.
+func gitUserConfigured() (bool, error) {
+ for _, s := range []string{"user.name", "user.email"} {
+ out, err := exec.Command("git", "config", s).CombinedOutput()
+ if err != nil {
+ return false, err
+ } else if len(bytes.TrimSpace(out)) == 0 {
+ return false, nil
+ }
+ }
+ return true, nil
+}
+
+// rewritePlaceholders recursively rewrites all files within basePath
+// to replace placeholders with the actual values for this particular app.
+func rewritePlaceholders(basePath string, app *platform.App) error {
+ var first error
+ err := filepath.WalkDir(basePath, func(path string, info fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+ if info.IsDir() {
+ return nil
+ }
+ if err := rewritePlaceholder(path, info, app); err != nil {
+ if first == nil {
+ first = err
+ }
+ }
+ return nil
+ })
+ if err == nil {
+ err = first
+ }
+ return err
+}
+
+// rewritePlaceholder rewrites a file to replace placeholders with the
+// actual values for this particular app. If the file contains none of
+// the placeholders, this is a no-op.
+func rewritePlaceholder(path string, info fs.DirEntry, app *platform.App) error {
+ data, err := os.ReadFile(path)
+ if err != nil {
+ return err
+ }
+ placeholders := []string{
+ "{{ENCORE_APP_ID}}", app.Slug,
+ }
+
+ var replaced bool
+ for i := 0; i < len(placeholders); i += 2 {
+ placeholder := []byte(placeholders[i])
+ target := []byte(placeholders[i+1])
+ if bytes.Contains(data, placeholder) {
+ data = bytes.ReplaceAll(data, placeholder, target)
+ replaced = true
+ }
+ }
+
+ if replaced {
+ return xos.WriteFile(path, data, info.Type().Perm())
+ }
+ return nil
+}
+
+// exampleConfig is the optional configuration file for example apps.
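+// It is read from example-initial-setup.json in the repository root.
+// A minimal file might look like this (values are illustrative):
+//
+//	{
+//	    "encore_app_path": ".",
+//	    "initial_secrets": {"MY_SECRET": "hunter2"},
+//	    "tutorial": true
+//	}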
+type exampleConfig struct {
+ // Relative path to the directory where the `encore.app` should be located.
+ // Defaults to ".".
+ EncoreAppPath string `json:"encore_app_path"`
+
+ InitialSecrets map[string]string `json:"initial_secrets"`
+ Tutorial bool `json:"tutorial"`
+}
+
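+// parseExampleConfig reads the example configuration from repoPath, returning
+// defaults when the file does not exist.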
+func parseExampleConfig(repoPath string) (cfg exampleConfig, err error) {
+ baseConfig := exampleConfig{
+ EncoreAppPath: ".",
+ }
+ data, err := os.ReadFile(exampleJSONPath(repoPath))
+ if errors.Is(err, fs.ErrNotExist) {
+ return baseConfig, nil
+ } else if err != nil {
+ return baseConfig, err
+ }
+
+ data, err = hujson.Standardize(data)
+ if err != nil {
+ return baseConfig, err
+ } else if err := json.Unmarshal(data, &cfg); err != nil {
+ return baseConfig, err
+ }
+
+ if cfg.EncoreAppPath == "" {
+ cfg.EncoreAppPath = "."
+ }
+ if !filepath.IsLocal(cfg.EncoreAppPath) {
+ return baseConfig, errors.New("encore_app_path must be a local path")
+ }
+ return cfg, nil
+}
+
+func exampleJSONPath(repoPath string) string {
+ return filepath.Join(repoPath, "example-initial-setup.json")
+}
+
+// setEncoreAppID rewrites the encore.app file to replace the app id, preserving comments.
+// It optionally adds comment lines before the "id" field if commentLines is not nil.
+func setEncoreAppID(data []byte, id string, commentLines []string) ([]byte, error) {
+ if len(data) == 0 {
+ data = []byte("{}")
+ }
+
+ root, err := hujson.Parse(data)
+ if err != nil {
+ return data, errors.Wrap(err, "parse encore.app")
+ }
+ obj, ok := root.Value.(*hujson.Object)
+ if !ok {
+ return data, errors.New("invalid encore.app format: not a json object")
+ }
+
+ var buf bytes.Buffer
+ for i, ln := range commentLines {
+ if i == 0 {
+ fmt.Fprintf(&buf, "\n")
+ }
+ fmt.Fprintf(&buf, "\t// %s\n", strings.TrimSpace(ln))
+ }
+ extra := hujson.Extra(buf.Bytes())
+ jsonValue, _ := json.Marshal(id)
+ value := hujson.Value{
+ Value: hujson.Literal(jsonValue),
+ }
+
+ found := false
+ for i := range obj.Members {
+ m := &obj.Members[i]
+ if lit, ok := m.Name.Value.(hujson.Literal); ok && lit.String() == "id" {
+ if commentLines != nil {
+ m.Name.BeforeExtra = extra
+ }
+ m.Value = value
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ obj.Members = append([]hujson.ObjectMember{{
+ Name: hujson.Value{
+ BeforeExtra: extra,
+ Value: hujson.Literal(`"id"`),
+ },
+ Value: value,
+ }}, obj.Members...)
+ }
+
+ root.Format()
+ return root.Pack(), nil
+}
+
+// generateWrappers runs 'encore gen wrappers' in the given directory.
+func generateWrappers(dir string) error {
+ // Use this executable if we can.
+ exe, err := os.Executable()
+ if err != nil {
+ exe = "encore"
+ }
+ // nosemgrep go.lang.security.audit.dangerous-exec-command.dangerous-exec-command
+ cmd := exec.Command(exe, "gen", "wrappers")
+ cmd.Dir = dir
+ if out, err := cmd.CombinedOutput(); err != nil {
+ return fmt.Errorf("encore gen wrappers failed: %v: %s", err, out)
+ }
+ return nil
+}
diff --git a/cli/cmd/encore/app/create_form.go b/cli/cmd/encore/app/create_form.go
new file mode 100644
index 0000000000..2cc5667acd
--- /dev/null
+++ b/cli/cmd/encore/app/create_form.go
@@ -0,0 +1,725 @@
+package app
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/charmbracelet/bubbles/list"
+ "github.com/charmbracelet/bubbles/spinner"
+ "github.com/charmbracelet/bubbles/textinput"
+ tea "github.com/charmbracelet/bubbletea"
+ "github.com/charmbracelet/lipgloss"
+ "github.com/tailscale/hujson"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+)
+
+const (
+ codeBlue = "#6D89FF"
+ codePurple = "#A36C8C"
+ codeGreen = "#B3D77E"
+ validationFail = "#CB1010"
+)
+
+var (
+ inputStyle = lipgloss.NewStyle().Foreground(lipgloss.AdaptiveColor{Dark: codeBlue, Light: codeBlue})
+ descStyle = lipgloss.NewStyle().Foreground(lipgloss.AdaptiveColor{Dark: codeGreen, Light: codePurple})
+ docStyle = lipgloss.NewStyle().Padding(0, 2, 0, 2)
+ errorStyle = lipgloss.NewStyle().Foreground(lipgloss.Color(validationFail))
+ successStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#00C200"))
+)
+
+type templateItem struct {
+ ItemTitle string `json:"title"`
+ Desc string `json:"desc"`
+ Template string `json:"template"`
+ Lang language `json:"lang"`
+}
+
+func (i templateItem) Title() string { return i.ItemTitle }
+func (i templateItem) Description() string { return i.Desc }
+func (i templateItem) FilterValue() string { return i.ItemTitle }
+
+type createFormModel struct {
+ step int // 0, 1, 2, 3
+
+ lang languageSelectModel
+ templates templateListModel
+ appName appNameModel
+
+ skipShowingTemplate bool
+
+ aborted bool
+}
+
+func (m createFormModel) Init() tea.Cmd {
+ return tea.Batch(
+ m.appName.Init(),
+ m.templates.Init(),
+ )
+}
+
+type languageSelectDone struct {
+ lang language
+}
+
+type languageSelectModel struct {
+ list list.Model
+}
+
+func (m languageSelectModel) Selected() language {
+ sel := m.list.SelectedItem()
+ if sel == nil {
+ return ""
+ }
+ return sel.(langItem).lang
+}
+
+func (m languageSelectModel) Update(msg tea.Msg) (languageSelectModel, tea.Cmd) {
+ var c tea.Cmd
+
+ switch msg := msg.(type) {
+ case tea.KeyMsg:
+ switch msg.Type {
+ case tea.KeyEnter:
+ // Have we selected a language?
+ if idx := m.list.Index(); idx >= 0 {
+ return m, func() tea.Msg {
+ return languageSelectDone{
+ lang: m.Selected(),
+ }
+ }
+ }
+ }
+ }
+
+ m.list, c = m.list.Update(msg)
+ return m, c
+}
+
+func (m *languageSelectModel) SetSize(width, height int) {
+ m.list.SetWidth(width)
+ m.list.SetHeight(max(height-1, 0))
+}
+
+const checkmark = "✔"
+
+func (m languageSelectModel) View() string {
+ var b strings.Builder
+ b.WriteString(inputStyle.Render("Select language for your application"))
+ b.WriteString(descStyle.Render(" [Use arrows to move]"))
+ b.WriteString("\n")
+ b.WriteString(m.list.View())
+
+ return b.String()
+}
+
+type appNameDone struct{}
+
+type appNameModel struct {
+ predefined string
+ text textinput.Model
+ dirExists bool
+}
+
+func (m appNameModel) Init() tea.Cmd {
+ return tea.Batch(
+ textinput.Blink,
+ )
+}
+
+func (m appNameModel) Selected() string {
+ if m.predefined != "" {
+ return m.predefined
+ }
+ return m.text.Value()
+}
+
+func (m appNameModel) Update(msg tea.Msg) (appNameModel, tea.Cmd) {
+ var cmds []tea.Cmd
+ var c tea.Cmd
+ m.text, c = m.text.Update(msg)
+ cmds = append(cmds, c)
+
+ if val := m.text.Value(); val != "" {
+ _, err := os.Stat(val)
+ m.dirExists = err == nil
+ }
+
+ switch msg := msg.(type) {
+ case tea.KeyMsg:
+ switch msg.Type {
+ case tea.KeyEnter:
+ if m.text.Value() != "" && !m.dirExists {
+ cmds = append(cmds, func() tea.Msg {
+ return appNameDone{}
+ })
+ }
+ }
+ }
+
+ return m, tea.Batch(cmds...)
+}
+
+func (m appNameModel) View() string {
+ var b strings.Builder
+ if m.text.Focused() {
+ b.WriteString(inputStyle.Render("App Name"))
+ b.WriteString(descStyle.Render(" [Use only lowercase letters, digits, and dashes]"))
+ b.WriteByte('\n')
+ b.WriteString(m.text.View())
+ if m.dirExists {
+ b.WriteString(errorStyle.Render(" error: dir already exists"))
+ }
+ } else {
+ fmt.Fprintf(&b, "%s App Name: %s", checkmark, m.text.Value())
+ }
+ b.WriteByte('\n')
+ return b.String()
+}
+
+type templateListModel struct {
+ predefined string
+ filter language
+
+ all []templateItem
+ list list.Model
+ loading spinner.Model
+}
+
+func (m templateListModel) Init() tea.Cmd {
+ return tea.Batch(
+ loadTemplates,
+ m.loading.Tick,
+ )
+}
+
+func (m *templateListModel) SetSize(width, height int) {
+ m.list.SetWidth(width)
+ m.list.SetHeight(max(height-1, 0))
+}
+
+type templateSelectDone struct{}
+
+func (m templateListModel) Update(msg tea.Msg) (templateListModel, tea.Cmd) {
+ var cmds []tea.Cmd
+ switch msg := msg.(type) {
+ case tea.KeyMsg:
+ switch msg.Type {
+ case tea.KeyEnter:
+			// Have we selected a template?
+ if idx := m.list.Index(); idx >= 0 {
+ return m, func() tea.Msg { return templateSelectDone{} }
+ }
+ }
+
+ case spinner.TickMsg:
+ m.loading, _ = m.loading.Update(msg)
+
+	case loadedTemplates:
+		m.all = msg
+		m.refreshFilter()
+ }
+
+ newList, c := m.list.Update(msg)
+ m.list = newList
+ cmds = append(cmds, c)
+
+ return m, tea.Batch(cmds...)
+}
+
+func (m *templateListModel) UpdateFilter(lang language) {
+ m.filter = lang
+ m.refreshFilter()
+}
+
+func (m *templateListModel) refreshFilter() {
+ var listItems []list.Item
+ for _, it := range m.all {
+ if it.Lang == m.filter {
+ listItems = append(listItems, it)
+ }
+ }
+ m.list.SetItems(listItems)
+}
+
+func (m templateListModel) View() string {
+ var b strings.Builder
+ b.WriteString(inputStyle.Render("Template"))
+ b.WriteString(descStyle.Render(" [Use arrows to move]"))
+ b.WriteByte('\n')
+ b.WriteString(m.list.View())
+
+ return b.String()
+}
+
+func (m templateListModel) Selected() string {
+ if m.predefined != "" {
+ return m.predefined
+ }
+ idx := m.list.Index()
+ if idx < 0 {
+ return ""
+ }
+ return m.list.Items()[idx].FilterValue()
+}
+
+func (m createFormModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
+ var (
+ cmds []tea.Cmd
+ c tea.Cmd
+ )
+
+ switch msg := msg.(type) {
+ case tea.KeyMsg:
+ switch msg.Type {
+		// Note: printable keys such as 'q' arrive as tea.KeyRunes rather than
+		// dedicated key types, so only control keys are matched here.
+		case tea.KeyCtrlC, tea.KeyEsc:
+ m.aborted = true
+ return m, tea.Quit
+ }
+
+ switch m.step {
+ case 0:
+ m.lang, c = m.lang.Update(msg)
+ cmds = append(cmds, c)
+ case 1:
+ m.templates, c = m.templates.Update(msg)
+ cmds = append(cmds, c)
+ case 2:
+ m.appName, c = m.appName.Update(msg)
+ cmds = append(cmds, c)
+ }
+ return m, tea.Batch(cmds...)
+
+ case languageSelectDone:
+ m.step = 1
+ if m.skipShowingTemplate {
+ m.step = 2
+ }
+ m.templates.UpdateFilter(msg.lang)
+
+ case templateSelectDone:
+ if m.appName.predefined != "" {
+ // We're done.
+ m.step = 3
+ cmds = append(cmds, tea.Quit)
+ } else {
+ m.step = 2
+ }
+
+ case appNameDone:
+ cmds = append(cmds, tea.Quit)
+ m.step = 3
+
+ case tea.WindowSizeMsg:
+ m.SetSize(msg.Width, msg.Height)
+ return m, nil
+ }
+
+ // Update all submodels for other messages.
+ m.lang, c = m.lang.Update(msg)
+ cmds = append(cmds, c)
+ m.templates, c = m.templates.Update(msg)
+ cmds = append(cmds, c)
+ m.appName, c = m.appName.Update(msg)
+ cmds = append(cmds, c)
+
+ return m, tea.Batch(cmds...)
+}
+
+func (m *createFormModel) SetSize(width, height int) {
+	// Language select (step 0)
+ doneHeight := lipgloss.Height(m.doneView())
+ {
+ availHeight := height - doneHeight
+ m.lang.SetSize(width, availHeight)
+ }
+
+	// Template list (step 1)
+ {
+ availHeight := height - doneHeight
+ m.templates.SetSize(width, availHeight)
+ }
+}
+
+func (m createFormModel) doneView() string {
+ var b strings.Builder
+
+ renderDone := func(title, value string) {
+ b.WriteString(successStyle.Render(fmt.Sprintf("%s %s: ", checkmark, title)))
+ b.WriteString(value)
+ b.WriteByte('\n')
+ }
+
+ renderLangDone := func() {
+ renderDone("Language", m.lang.Selected().Display())
+ }
+
+ renderNameDone := func() {
+ renderDone("App Name", m.appName.Selected())
+ }
+
+ renderTemplateDone := func() {
+ renderDone("Template", m.templates.Selected())
+ }
+
+ if m.appName.predefined != "" {
+ renderNameDone()
+ }
+ if m.templates.predefined == "" && m.step > 0 {
+ renderLangDone()
+ }
+ if !m.skipShowingTemplate {
+ if m.templates.predefined != "" || m.step > 1 {
+ renderTemplateDone()
+ }
+ }
+ if m.appName.predefined == "" && m.step > 2 {
+ renderNameDone()
+ }
+
+ return b.String()
+}
+
+func (m createFormModel) View() string {
+ var b strings.Builder
+
+ doneView := m.doneView()
+
+ b.WriteString(doneView)
+ if doneView != "" {
+ b.WriteByte('\n')
+ }
+
+ if m.step == 0 {
+ b.WriteString(m.lang.View())
+ }
+
+ if m.step == 1 {
+ b.WriteString(m.templates.View())
+ }
+
+ if m.step == 2 {
+ b.WriteString(m.appName.View())
+ }
+
+ return docStyle.Render(b.String())
+}
+
+func (m templateListModel) templatesLoading() bool {
+ return len(m.list.Items()) == 0
+}
+
+func (m templateListModel) SelectedItem() (templateItem, bool) {
+ if m.predefined != "" {
+ return templateItem{}, false
+ }
+ idx := m.list.Index()
+ items := m.list.Items()
+ if idx >= 0 && len(items) > idx {
+ return items[idx].(templateItem), true
+ }
+ return templateItem{}, false
+}
+
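+// selectTemplate runs the interactive creation form and returns the chosen
+// app name, template, and language, skipping any step whose value was
+// already provided.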
+func selectTemplate(inputName, inputTemplate string, skipShowingTemplate bool) (appName, template string, selectedLang language) {
+ // If we have both name and template already, return them.
+ if inputName != "" && inputTemplate != "" {
+ return inputName, inputTemplate, ""
+ }
+
+ var lang languageSelectModel
+ {
+ ls := list.NewDefaultItemStyles()
+ ls.SelectedTitle = ls.SelectedTitle.Foreground(lipgloss.Color(codeBlue)).BorderForeground(lipgloss.Color(codeBlue))
+ ls.SelectedDesc = ls.SelectedDesc.Foreground(lipgloss.Color(codeBlue)).BorderForeground(lipgloss.Color(codeBlue))
+ del := list.NewDefaultDelegate()
+ del.Styles = ls
+ del.ShowDescription = false
+ del.SetSpacing(0)
+
+ items := []list.Item{
+ langItem{
+ lang: languageGo,
+ desc: "Build performant and scalable backends with Go",
+ },
+ langItem{
+ lang: languageTS,
+ desc: "Build backend and full-stack applications with TypeScript",
+ },
+ }
+
+ ll := list.New(items, del, 0, 0)
+ ll.SetShowTitle(false)
+ ll.SetShowHelp(false)
+ ll.SetShowPagination(true)
+ ll.SetShowFilter(false)
+ ll.SetFilteringEnabled(false)
+ ll.SetShowStatusBar(false)
+ lang = languageSelectModel{
+ list: ll,
+ }
+ lang.SetSize(0, 20)
+ }
+
+ var templates templateListModel
+ {
+ ls := list.NewDefaultItemStyles()
+ ls.SelectedTitle = ls.SelectedTitle.Foreground(lipgloss.Color(codeBlue)).BorderForeground(lipgloss.Color(codeBlue))
+ ls.SelectedDesc = ls.SelectedDesc.Foreground(lipgloss.Color(codeBlue)).BorderForeground(lipgloss.Color(codeBlue))
+ del := list.NewDefaultDelegate()
+ del.Styles = ls
+
+ ll := list.New(nil, del, 0, 20)
+ ll.SetShowTitle(false)
+ ll.SetShowHelp(false)
+ ll.SetShowPagination(true)
+ ll.SetShowFilter(false)
+ ll.SetFilteringEnabled(false)
+ ll.SetShowStatusBar(false)
+
+ sp := spinner.New()
+ sp.Spinner = spinner.Dot
+ sp.Style = inputStyle.Copy().Inline(true)
+ templates = templateListModel{
+ predefined: inputTemplate,
+ list: ll,
+ loading: sp,
+ }
+ }
+
+ var nameModel appNameModel
+ {
+ text := textinput.New()
+ text.Focus()
+ text.CharLimit = 20
+ text.Width = 30
+ text.Validate = incrementalValidateNameInput
+
+ nameModel = appNameModel{predefined: inputName, text: text}
+ }
+
+ m := createFormModel{
+ step: 0,
+ lang: lang,
+ templates: templates,
+ appName: nameModel,
+ skipShowingTemplate: skipShowingTemplate,
+ }
+
+ // If we have a name, start the list without any selection.
+ if m.appName.predefined != "" {
+ m.templates.list.Select(-1)
+ }
+ if m.templates.predefined != "" {
+ m.step = 2 // skip to app name selection
+ }
+
+ p := tea.NewProgram(m)
+
+ result, err := p.Run()
+ if err != nil {
+ cmdutil.Fatal(err)
+ }
+
+ // Validate the result.
+ res := result.(createFormModel)
+ if res.aborted {
+ os.Exit(1)
+ }
+
+ appName, template = inputName, inputTemplate
+
+ if appName == "" {
+ appName = res.appName.text.Value()
+ }
+
+ if template == "" {
+ sel, ok := res.templates.SelectedItem()
+ if !ok {
+ cmdutil.Fatal("no template selected")
+ }
+ template = sel.Template
+ }
+
+ return appName, template, res.lang.Selected()
+}
+
+type langItem struct {
+ lang language
+ desc string
+}
+
+func (i langItem) FilterValue() string {
+ return i.lang.Display()
+}
+func (i langItem) Title() string {
+ return i.FilterValue()
+}
+func (i langItem) Description() string { return i.desc }
+
+type language string
+
+const (
+ languageGo language = "go"
+ languageTS language = "ts"
+)
+
+func (lang language) Display() string {
+ switch lang {
+ case languageGo:
+ return "Go"
+ case languageTS:
+ return "TypeScript"
+ default:
+ return string(lang)
+ }
+}
+
+type loadedTemplates []templateItem
+
+var defaultTutorials = []templateItem{
+ {
+ ItemTitle: "Intro to Encore.ts",
+ Desc: "An interactive tutorial",
+ Template: "ts/introduction",
+ Lang: "ts",
+ },
+}
+
+var defaultTemplates = []templateItem{
+ {
+ ItemTitle: "Hello World",
+ Desc: "A simple REST API",
+ Template: "hello-world",
+ Lang: "go",
+ },
+ {
+ ItemTitle: "Hello World",
+ Desc: "A simple REST API",
+ Template: "ts/hello-world",
+ Lang: "ts",
+ },
+ {
+ ItemTitle: "Uptime Monitor",
+ Desc: "Microservices, SQL Databases, Pub/Sub, Cron Jobs",
+ Template: "uptime",
+ Lang: "go",
+ },
+ {
+ ItemTitle: "Uptime Monitor",
+ Desc: "Microservices, SQL Databases, Pub/Sub, Cron Jobs",
+ Template: "ts/uptime",
+ Lang: "ts",
+ },
+ {
+ ItemTitle: "GraphQL",
+ Desc: "GraphQL API, Microservices, SQL Database",
+ Template: "graphql",
+ Lang: "go",
+ },
+ {
+ ItemTitle: "URL Shortener",
+ Desc: "REST API, SQL Database",
+ Template: "url-shortener",
+ Lang: "go",
+ },
+ {
+ ItemTitle: "URL Shortener",
+ Desc: "REST API, SQL Database",
+ Template: "ts/url-shortener",
+ Lang: "ts",
+ },
+ {
+ ItemTitle: "SaaS Starter",
+ Desc: "Complete app with Clerk auth, Stripe billing, etc. (advanced)",
+ Template: "ts/saas-starter",
+ Lang: "ts",
+ },
+ {
+ ItemTitle: "Empty app",
+ Desc: "Start from scratch (experienced users only)",
+ Template: "",
+ Lang: "go",
+ },
+ {
+ ItemTitle: "Empty app",
+ Desc: "Start from scratch (experienced users only)",
+ Template: "ts/empty",
+ Lang: "ts",
+ },
+}
+
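+// fetchTemplates downloads a template list from url with a 5-second timeout,
+// falling back to defaults on any network or decoding error.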
+func fetchTemplates(url string, defaults []templateItem) []templateItem {
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ if req, err := http.NewRequestWithContext(ctx, "GET", url, nil); err == nil {
+	if req, err := http.NewRequestWithContext(ctx, "GET", url, nil); err == nil {
+		if resp, err := http.DefaultClient.Do(req); err == nil {
+			defer resp.Body.Close()
+			if data, err := io.ReadAll(resp.Body); err == nil {
+ data, err = hujson.Standardize(data)
+ if err == nil {
+ var items []templateItem
+ if err := json.Unmarshal(data, &items); err == nil && len(items) > 0 {
+ return items
+ }
+ }
+ }
+ }
+ }
+ return defaults
+}
+
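+// loadTemplates fetches the template and tutorial lists concurrently and
+// merges them into a single loadedTemplates message, tutorials first.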
+func loadTemplates() tea.Msg {
+ var wg sync.WaitGroup
+ var templates, tutorials []templateItem
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ templates = fetchTemplates("https://raw.githubusercontent.com/encoredev/examples/main/cli-templates.json", defaultTemplates)
+ }()
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ tutorials = fetchTemplates("https://raw.githubusercontent.com/encoredev/examples/main/cli-tutorials.json", defaultTutorials)
+ }()
+ wg.Wait()
+ return loadedTemplates(append(tutorials, templates...))
+}
+
+// incrementalValidateNameInput is like validateName but only
+// checks for valid/invalid characters. It can't check for
+// whether the last character is a dash, since if we treat that
+// as an error the user won't be able to enter dashes at all.
+func incrementalValidateNameInput(name string) error {
+ ln := len(name)
+ if ln == 0 {
+ return fmt.Errorf("name must not be empty")
+ } else if ln > 50 {
+ return fmt.Errorf("name too long (max 50 chars)")
+ }
+
+ for i, s := range name {
+ // Outside of [a-z], [0-9] and != '-'?
+ if !((s >= 'a' && s <= 'z') || (s >= '0' && s <= '9') || s == '-') {
+ return fmt.Errorf("name must only contain lowercase letters, digits, or dashes")
+ } else if s == '-' {
+ if i == 0 {
+ return fmt.Errorf("name cannot start with a dash")
+ } else if name[i-1] == '-' {
+ return fmt.Errorf("name cannot contain repeated dashes")
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/cli/cmd/encore/app/create_test.go b/cli/cmd/encore/app/create_test.go
new file mode 100644
index 0000000000..f260070627
--- /dev/null
+++ b/cli/cmd/encore/app/create_test.go
@@ -0,0 +1,79 @@
+package app
+
+import (
+ "fmt"
+ "testing"
+)
+
+func Test_setEncoreAppID(t *testing.T) {
+ tests := []struct {
+ data []byte
+ id string
+ commentLines []string
+ want string
+ }{
+ {
+ data: []byte(`{}`),
+ id: "foo",
+ commentLines: []string{"bar"},
+ want: `{
+ // bar
+ "id": "foo",
+}
+`,
+ },
+ {
+ data: []byte(``),
+ id: "foo",
+ commentLines: []string{"bar"},
+ want: `{
+ // bar
+ "id": "foo",
+}
+`,
+ },
+ {
+ data: []byte(`{
+ // foo
+ "id": "test",
+}`),
+ id: "foo",
+ commentLines: []string{"bar", "baz"},
+ want: `{
+ // bar
+ // baz
+ "id": "foo",
+}
+`,
+ },
+ {
+ data: []byte(`{
+ "some_other_field": true,
+ // foo
+ "id": "test",
+}`),
+ id: "foo",
+ commentLines: []string{"bar", "baz"},
+ want: `{
+ "some_other_field": true,
+ // bar
+ // baz
+ "id": "foo",
+}
+`,
+ },
+ }
+ for i, tt := range tests {
+ t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+ got, err := setEncoreAppID(tt.data, tt.id, tt.commentLines)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ gotStr := string(got)
+ if gotStr != tt.want {
+ t.Errorf("setEncoreAppID() = %q, want %q", gotStr, tt.want)
+ }
+ })
+ }
+}
diff --git a/cli/cmd/encore/app/initialize.go b/cli/cmd/encore/app/initialize.go
new file mode 100644
index 0000000000..7d9607ce9d
--- /dev/null
+++ b/cli/cmd/encore/app/initialize.go
@@ -0,0 +1,132 @@
+package app
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/briandowns/spinner"
+ "github.com/fatih/color"
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/internal/conf"
+ "encr.dev/pkg/xos"
+)
+
+const (
+ tsEncoreAppData = `{%s
+ "id": "%s",
+ "lang": "typescript",
+}
+`
+ goEncoreAppData = `{%s
+ "id": "%s",
+}
+`
+)
+
+// Create a new app from scratch: `encore app create`
+// Link an existing app to an existing repo: `encore app link <app-id>`
+// Link an existing repo to a new app: `encore app init <app-name>`
+
+func init() {
+ initAppCmd := &cobra.Command{
+ Use: "init [name]",
+ Short: "Create a new Encore app from an existing repository",
+ Args: cobra.MaximumNArgs(1),
+
+ DisableFlagsInUseLine: true,
+ Run: func(cmd *cobra.Command, args []string) {
+ var name string
+ if len(args) > 0 {
+ name = args[0]
+ }
+ if err := initializeApp(name); err != nil {
+ cmdutil.Fatal(err)
+ }
+ },
+ }
+
+ appCmd.AddCommand(initAppCmd)
+}
+
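+// initializeApp creates an encore.app file in the current directory for an
+// existing repository, registering the app with the platform when the user
+// is logged in.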
+func initializeApp(name string) error {
+ // Check if encore.app file exists
+ _, _, err := cmdutil.MaybeAppRoot()
+ if errors.Is(err, cmdutil.ErrNoEncoreApp) {
+ // expected
+ } else if err != nil {
+ cmdutil.Fatal(err)
+	} else {
+ // There is already an app here or in a parent directory.
+ cmdutil.Fatal("an encore.app file already exists (here or in a parent directory)")
+ }
+
+ cyan := color.New(color.FgCyan)
+ promptAccountCreation()
+
+ name, _, lang := selectTemplate(name, "", true)
+
+ if err := validateName(name); err != nil {
+ return err
+ }
+
+ appSlug := ""
+ appSlugComments := ""
+ // Create the app on the server.
+ if _, err := conf.CurrentUser(); err == nil {
+ s := spinner.New(spinner.CharSets[14], 100*time.Millisecond)
+ s.Prefix = "Creating app on encore.dev "
+ s.Start()
+
+ app, err := createAppOnServer(name, exampleConfig{})
+ s.Stop()
+ if err != nil {
+ return fmt.Errorf("creating app on encore.dev: %v", err)
+ }
+ appSlug = app.Slug
+ }
+
+ // Create the encore.app file
+ var encoreAppTemplate = goEncoreAppData
+ if lang == "ts" {
+ encoreAppTemplate = tsEncoreAppData
+ }
+ if appSlug == "" {
+ appSlugComments = strings.Join([]string{
+ "",
+ "The app is not currently linked to the encore.dev platform.",
+ `Use "encore app link" to link it.`,
+ }, "\n\t//")
+ }
+ encoreAppData := []byte(fmt.Sprintf(encoreAppTemplate, appSlugComments, appSlug))
+ if err := xos.WriteFile("encore.app", encoreAppData, 0644); err != nil {
+ return err
+ }
+
+ // Update to latest encore.dev release if this looks to be a go module.
+ if _, err := os.Stat("go.mod"); err == nil {
+ s := spinner.New(spinner.CharSets[14], 100*time.Millisecond)
+ s.Prefix = "Running go get encore.dev@latest"
+ s.Start()
+ if err := gogetEncore("."); err != nil {
+ s.FinalMSG = fmt.Sprintf("failed, skipping: %v", err.Error())
+ }
+ s.Stop()
+ }
+
+ green := color.New(color.FgGreen)
+	_, _ = green.Fprint(os.Stdout, "Successfully initialized Encore application!\n")
+ if appSlug == "" {
+ _, _ = fmt.Fprintf(os.Stdout, "The app is not currently linked to the encore.dev platform.\n")
+ _, _ = fmt.Fprintf(os.Stdout, "Use \"encore app link\" to link it.\n")
+ return nil
+ }
+ _, _ = fmt.Fprintf(os.Stdout, "- App ID: %s\n", cyan.Sprint(appSlug))
+ _, _ = fmt.Fprintf(os.Stdout, "- Cloud Dashboard: %s\n\n", cyan.Sprintf("https://app.encore.cloud/%s", appSlug))
+
+ return nil
+}
diff --git a/cli/cmd/encore/app/link.go b/cli/cmd/encore/app/link.go
new file mode 100644
index 0000000000..0573e78d7d
--- /dev/null
+++ b/cli/cmd/encore/app/link.go
@@ -0,0 +1,148 @@
+package app
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/spf13/cobra"
+ "github.com/tailscale/hujson"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/cli/internal/platform"
+ "encr.dev/internal/conf"
+ "encr.dev/pkg/xos"
+)
+
+var forceLink bool
+var linkAppCmd = &cobra.Command{
+ Use: "link [app-id]",
+ Short: "Link an Encore app with the server",
+ Args: cobra.MaximumNArgs(1),
+
+ DisableFlagsInUseLine: true,
+ Run: func(cmd *cobra.Command, args []string) {
+ var appID string
+ if len(args) > 0 {
+ appID = args[0]
+ }
+ linkApp(appID, forceLink)
+ },
+ ValidArgsFunction: cmdutil.AutoCompleteAppSlug,
+}
+
+func init() {
+ appCmd.AddCommand(linkAppCmd)
+ linkAppCmd.Flags().BoolVarP(&forceLink, "force", "f", false, "Force link even if the app is already linked.")
+}
+
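+// linkApp links the app in the current (or enclosing) directory to the
+// platform app identified by appID, preserving any comments in encore.app.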
+func linkApp(appID string, force bool) {
+ // Determine the app root.
+ root, _, err := cmdutil.MaybeAppRoot()
+ if errors.Is(err, cmdutil.ErrNoEncoreApp) {
+ root, err = os.Getwd()
+ }
+ if err != nil {
+ cmdutil.Fatal(err)
+ }
+
+ filePath := filepath.Join(root, "encore.app")
+ data, err := os.ReadFile(filePath)
+ if err != nil && !errors.Is(err, fs.ErrNotExist) {
+ cmdutil.Fatal(err)
+ }
+ if len(bytes.TrimSpace(data)) == 0 {
+ // Treat missing and empty files as an empty object.
+ data = []byte("{}")
+ }
+
+ val, err := hujson.Parse(data)
+ if err != nil {
+ cmdutil.Fatal("could not parse encore.app: ", err)
+ }
+
+ appData, ok := val.Value.(*hujson.Object)
+ if !ok {
+ cmdutil.Fatal("could not parse encore.app: expected JSON object")
+ }
+
+ // Find the "id" value, if any.
+ var idValue *hujson.Value
+ for i := 0; i < len(appData.Members); i++ {
+ kv := &appData.Members[i]
+ lit, ok := kv.Name.Value.(hujson.Literal)
+ if !ok || lit.String() != "id" {
+ continue
+ }
+ idValue = &kv.Value
+ }
+
+ if idValue != nil {
+ val, ok := idValue.Value.(hujson.Literal)
+ if ok && val.String() != "" && val.String() != appID && !force {
+ cmdutil.Fatal("the app is already linked.\n\nNote: to link to a different app, specify the --force flag.")
+ }
+ }
+
+ if appID == "" {
+ // The app is not linked. Prompt the user for an app ID.
+ fmt.Println("Make sure the app is created on app.encore.cloud, and then enter its ID to link it.")
+ fmt.Print("App ID: ")
+ if _, err := fmt.Scanln(&appID); err != nil {
+ cmdutil.Fatal(err)
+ } else if appID == "" {
+ cmdutil.Fatal("no app id given.")
+ }
+ }
+
+ if linked, err := validateAppSlug(appID); err != nil {
+ cmdutil.Fatal(err)
+ } else if !linked {
+ fmt.Fprintln(os.Stderr, "Error: that app does not exist, or you don't have access to it.")
+ os.Exit(1)
+ }
+
+ // Write it back to our data structure.
+ if idValue != nil {
+ idValue.Value = hujson.String(appID)
+ } else {
+ appData.Members = append(appData.Members, hujson.ObjectMember{
+ Name: hujson.Value{Value: hujson.String("id")},
+ Value: hujson.Value{Value: hujson.String(appID)},
+ })
+ }
+
+ val.Format()
+ if err := xos.WriteFile(filePath, val.Pack(), 0644); err != nil {
+ cmdutil.Fatal(err)
+ }
+
+ addEncoreRemote(root, appID)
+ fmt.Println("Successfully linked app!")
+}
+
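+// validateAppSlug reports whether the given app slug exists on the platform
+// and is accessible to the current user; it exits if the user is not logged in.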
+func validateAppSlug(slug string) (ok bool, err error) {
+ if _, err := conf.CurrentUser(); errors.Is(err, fs.ErrNotExist) {
+ cmdutil.Fatal("not logged in. Run 'encore auth login' first.")
+ } else if err != nil {
+ return false, err
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ if _, err := platform.GetApp(ctx, slug); err != nil {
+ var e platform.Error
+ if errors.As(err, &e) && e.HTTPCode == 404 {
+ return false, nil
+ }
+ return false, err
+ }
+ return true, nil
+}
diff --git a/cli/cmd/encore/auth.go b/cli/cmd/encore/auth.go
deleted file mode 100644
index 7b5484bc8e..0000000000
--- a/cli/cmd/encore/auth.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package main
-
-import (
- "errors"
- "fmt"
- "os"
- "runtime"
- "time"
-
- "encr.dev/cli/internal/browser"
- "encr.dev/cli/internal/conf"
- "encr.dev/cli/internal/login"
- "github.com/briandowns/spinner"
- "github.com/spf13/cobra"
-)
-
-func init() {
- authCmd := &cobra.Command{
- Use: "auth",
- Short: "Commands to authenticate with Encore",
- }
-
- signupCmd := &cobra.Command{
- Use: "signup",
- Short: "Create a new Encore account",
- Run: func(cmd *cobra.Command, args []string) {
- doLogin()
- },
- }
-
- loginCmd := &cobra.Command{
- Use: "login",
- Short: "Log in to Encore",
- Run: func(cmd *cobra.Command, args []string) {
- if err := doLogin(); err != nil {
- fatal(err)
- }
- },
- }
-
- logoutCmd := &cobra.Command{
- Use: "logout",
- Short: "Logs out the currently logged in user",
- Run: func(cmd *cobra.Command, args []string) {
- doLogout()
- },
- }
-
- whoamiCmd := &cobra.Command{
- Use: "whoami",
- Short: "Show the current logged in user",
- Run: func(cmd *cobra.Command, args []string) {
- whoami()
- },
- }
-
- authCmd.AddCommand(signupCmd)
- authCmd.AddCommand(loginCmd)
- authCmd.AddCommand(logoutCmd)
- authCmd.AddCommand(whoamiCmd)
- rootCmd.AddCommand(authCmd)
-}
-
-func doLogin() error {
- flow, err := login.Begin()
- if err != nil {
- return err
- }
-
- browser.Open(flow.URL)
-
- // On Windows we need a proper \r\n newline to ensure the URL detection doesn't extend to the next line.
- // fmt.Fprintln and family prints just a simple \n, so don't use that.
- fmt.Fprint(os.Stdout, "Log in to Encore using your browser here: ", flow.URL, newline)
-
- s := spinner.New(spinner.CharSets[14], 100*time.Millisecond)
- s.Prefix = "Waiting for login to complete "
- s.Start()
- defer s.Stop()
-
- select {
- case cfg := <-flow.LoginCh:
- if err := conf.Write(cfg); err != nil {
- return fmt.Errorf("write credentials: %v", err)
- }
- fmt.Fprintln(os.Stdout, "Successfully logged in!")
- return nil
- case <-time.After(10 * time.Minute):
- flow.Close()
- return fmt.Errorf("timed out")
- }
-}
-
-func doLogout() {
- if err := conf.Logout(); err != nil {
- fmt.Fprintln(os.Stderr, "could not logout:", err)
- os.Exit(1)
- }
- fmt.Fprintln(os.Stdout, "encore: logged out.")
-}
-
-func whoami() {
- cfg, err := conf.CurrentUser()
- if errors.Is(err, os.ErrNotExist) {
- fmt.Fprint(os.Stdout, "not logged in.", newline)
- } else if err != nil {
- fatal(err)
- } else {
- fmt.Fprintf(os.Stdout, "logged in as %s%s", cfg.Email, newline)
- }
-}
-
-var newline string
-
-func init() {
- switch runtime.GOOS {
- case "windows":
- newline = "\r\n"
- default:
- newline = "\n"
- }
-}
diff --git a/cli/cmd/encore/auth/auth.go b/cli/cmd/encore/auth/auth.go
new file mode 100644
index 0000000000..f4d06fd38c
--- /dev/null
+++ b/cli/cmd/encore/auth/auth.go
@@ -0,0 +1,150 @@
+package auth
+
+import (
+ "errors"
+ "fmt"
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/cli/cmd/encore/root"
+ "encr.dev/cli/internal/login"
+ "encr.dev/internal/conf"
+)
+
+var authKey string
+
+func init() {
+ authCmd := &cobra.Command{
+ Use: "auth",
+ Short: "Commands to authenticate with Encore",
+ }
+
+ signupCmd := &cobra.Command{
+ Use: "signup",
+ Short: "Create a new Encore account",
+
+ DisableFlagsInUseLine: true,
+ Run: func(cmd *cobra.Command, args []string) {
+ if err := DoLogin(DeviceAuth); err != nil {
+ cmdutil.Fatal(err)
+ }
+ },
+ }
+
+ loginCmd := &cobra.Command{
+		Use:   "login [--auth-key=<key>]",
+ Short: "Log in to Encore",
+
+ Run: func(cmd *cobra.Command, args []string) {
+ if authKey != "" {
+ if err := DoLoginWithAuthKey(); err != nil {
+ cmdutil.Fatal(err)
+ }
+ } else {
+ if err := DoLogin(DeviceAuth); err != nil {
+ cmdutil.Fatal(err)
+ }
+ }
+ },
+ }
+
+ logoutCmd := &cobra.Command{
+ Use: "logout",
+ Short: "Logs out the currently logged in user",
+
+ DisableFlagsInUseLine: true,
+ Run: func(cmd *cobra.Command, args []string) {
+ DoLogout()
+ },
+ }
+
+ whoamiCmd := &cobra.Command{
+ Use: "whoami",
+ Short: "Show the current logged in user",
+
+ DisableFlagsInUseLine: true,
+ Run: func(cmd *cobra.Command, args []string) {
+ Whoami()
+ },
+ }
+
+ authCmd.AddCommand(signupCmd)
+
+ authCmd.AddCommand(loginCmd)
+ loginCmd.Flags().StringVarP(&authKey, "auth-key", "k", "", "Auth Key to use for login")
+
+ authCmd.AddCommand(logoutCmd)
+ authCmd.AddCommand(whoamiCmd)
+ root.Cmd.AddCommand(authCmd)
+}
+
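+// Flow selects how login is performed: AutoFlow lets the login package
+// decide, Interactive forces the interactive flow, and DeviceAuth uses the
+// device-auth flow.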
+type Flow int
+
+const (
+ AutoFlow Flow = iota
+ Interactive
+ DeviceAuth
+)
+
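+// DoLogin runs the given login flow and writes the resulting credentials to
+// the local configuration.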
+func DoLogin(flow Flow) (err error) {
+ var fn func() (*conf.Config, error)
+ switch flow {
+ case Interactive:
+ fn = login.Interactive
+ case DeviceAuth:
+ fn = login.DeviceAuth
+ default:
+ fn = login.DecideFlow
+ }
+ cfg, err := fn()
+ if err != nil {
+ return err
+ }
+
+ if err := conf.Write(cfg); err != nil {
+ return fmt.Errorf("write credentials: %v", err)
+ }
+ fmt.Fprintln(os.Stdout, "Successfully logged in!")
+ return nil
+}
+
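+// DoLogout removes the stored credentials and stops the daemon so that no
+// cached credentials remain.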
+func DoLogout() {
+ if err := conf.Logout(); err != nil {
+ fmt.Fprintln(os.Stderr, "could not logout:", err)
+ os.Exit(1)
+ }
+ // Stop running daemon to clear any cached credentials
+ cmdutil.StopDaemon()
+ fmt.Fprintln(os.Stdout, "encore: logged out.")
+}
+
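+// DoLoginWithAuthKey logs in non-interactively using the auth key given via
+// the --auth-key flag.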
+func DoLoginWithAuthKey() error {
+ cfg, err := login.WithAuthKey(authKey)
+ if err != nil {
+ return err
+ }
+ if err := conf.Write(cfg); err != nil {
+ return fmt.Errorf("write credentials: %v", err)
+ }
+ fmt.Fprintln(os.Stdout, "Successfully logged in!")
+ return nil
+}
+
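+// Whoami prints the email of the logged in user, or the app slug when logged
+// in with an app auth key.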
+func Whoami() {
+ cfg, err := conf.CurrentUser()
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ fmt.Fprint(os.Stdout, "not logged in.", cmdutil.Newline)
+ return
+ }
+ cmdutil.Fatal(err)
+ }
+
+ if cfg.AppSlug != "" {
+ fmt.Fprintf(os.Stdout, "logged in as app %s%s", cfg.AppSlug, cmdutil.Newline)
+ } else {
+ fmt.Fprintf(os.Stdout, "logged in as %s%s", cfg.Email, cmdutil.Newline)
+ }
+}
diff --git a/cli/cmd/encore/bits/add.go b/cli/cmd/encore/bits/add.go
new file mode 100644
index 0000000000..dbad38efbf
--- /dev/null
+++ b/cli/cmd/encore/bits/add.go
@@ -0,0 +1,61 @@
+package bits
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/cockroachdb/errors"
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/pkg/bits"
+)
+
+var addCmd = &cobra.Command{
+	Use:   "add <slug> [<dir>]",
+ Short: "Add an Encore Bit to your application",
+ Args: cobra.MinimumNArgs(1),
+
+ DisableFlagsInUseLine: true,
+ Run: func(c *cobra.Command, args []string) {
+ slug := args[0]
+ ctx := context.Background()
+ bit, err := bits.Get(ctx, slug)
+ if errors.Is(err, errBitNotFound) {
+ cmdutil.Fatalf("encore bit not found: %s", slug)
+ } else if err != nil {
+ cmdutil.Fatalf("could not lookup encore bit: %v", err)
+ }
+
+ workdir, err := os.MkdirTemp("", "encore-bit")
+ if err != nil {
+ cmdutil.Fatal(err)
+ }
+ defer os.RemoveAll(workdir)
+
+ //prefix := args[0]
+ //if len(args) > 1 {
+ // prefix = args[1]
+ //}
+
+ fmt.Fprintf(os.Stderr, "Downloading Encore Bit: %s\n", bit.Title)
+ if err := bits.Extract(ctx, bit, workdir); err != nil {
+ cmdutil.Fatalf("download failed: %v", err)
+ }
+
+ meta, err := bits.Describe(ctx, workdir)
+ if err != nil {
+ cmdutil.Fatalf("could not parse bit metadata: %v", err)
+ }
+
+ fmt.Fprintf(os.Stderr, "successfully got bit: %+v\n", meta)
+
+ //fmt.Fprintf(os.Stderr, "\n\nSuccessfully added Encore Bit: %s!\n", bit.Title)
+ //fmt.Fprintf(os.Stderr, "You can find the new bit under the %s/ directory.\n", prefix)
+ },
+}
+
+func init() {
+ bitsCmd.AddCommand(addCmd)
+}
diff --git a/cli/cmd/encore/bits/api.go b/cli/cmd/encore/bits/api.go
new file mode 100644
index 0000000000..977230bf4d
--- /dev/null
+++ b/cli/cmd/encore/bits/api.go
@@ -0,0 +1,64 @@
+package bits
+
+import (
+ "context"
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/cockroachdb/errors"
+)
+
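+// Bit describes a single Encore Bit as returned by the bits API.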
+type Bit struct {
+ ID int64
+ Slug string
+ Title string
+ Description string
+ GitRepo string
+ GitBranch string
+}
+
+type ListResponse struct {
+ Bits []*Bit
+}
+
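+// List fetches the list of available Encore Bits.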
+func List(ctx context.Context) ([]*Bit, error) {
+ resp, err := http.Get("https://automativity.encore.dev/bits")
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != 200 {
+ slurp, _ := io.ReadAll(resp.Body)
+ return nil, errors.Newf("got status %d: %s", resp.StatusCode, slurp)
+ }
+ var data ListResponse
+ if err := json.NewDecoder(resp.Body).Decode(&data); err != nil {
+ return nil, errors.Wrap(err, "decode json response")
+ }
+ return data.Bits, nil
+}
+
+var errBitNotFound = errors.New("bit not found")
+
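+// Get fetches the Encore Bit with the given slug, returning errBitNotFound
+// if it does not exist.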
+func Get(ctx context.Context, slug string) (*Bit, error) {
+ resp, err := http.Get("https://automativity.encore.dev/bits/" + url.PathEscape(slug))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode == 404 {
+ return nil, errBitNotFound
+ } else if resp.StatusCode != 200 {
+ slurp, _ := io.ReadAll(resp.Body)
+ return nil, errors.Newf("got status %d: %s", resp.StatusCode, slurp)
+ }
+ var bit Bit
+ if err := json.NewDecoder(resp.Body).Decode(&bit); err != nil {
+ return nil, errors.Wrap(err, "decode json response")
+ }
+ return &bit, nil
+}
diff --git a/cli/cmd/encore/bits/bits.go b/cli/cmd/encore/bits/bits.go
new file mode 100644
index 0000000000..50995c351a
--- /dev/null
+++ b/cli/cmd/encore/bits/bits.go
@@ -0,0 +1,16 @@
+package bits
+
+import (
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/root"
+)
+
+var bitsCmd = &cobra.Command{
+ Use: "bits",
+ Short: "Commands to manage encore bits, reusable functionality for Encore applications",
+}
+
+func init() {
+ root.Cmd.AddCommand(bitsCmd)
+}
diff --git a/cli/cmd/encore/bits/list.go b/cli/cmd/encore/bits/list.go
new file mode 100644
index 0000000000..c5877fe13e
--- /dev/null
+++ b/cli/cmd/encore/bits/list.go
@@ -0,0 +1,37 @@
+package bits
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "text/tabwriter"
+
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/pkg/bits"
+)
+
+var listCmd = &cobra.Command{
+ Use: "list",
+ Short: "Lists available Encore Bits to add to your application",
+ Args: cobra.ExactArgs(0),
+ Run: func(c *cobra.Command, args []string) {
+ bits, err := bits.List(context.Background())
+ if err != nil {
+ cmdutil.Fatalf("could not list encore bits: %v", err)
+ }
+
+ tw := tabwriter.NewWriter(os.Stdout, 0, 8, 0, '\t', 0)
+ fmt.Fprintln(tw, "ID\tTitle\tDescription")
+ for _, bit := range bits {
+ fmt.Fprintf(tw, "%s\t%s\t%s\n", bit.Slug, bit.Title, bit.Description)
+ }
+ tw.Flush()
+ },
+}
+
+func init() {
+ bitsCmd.AddCommand(listCmd)
+}
diff --git a/cli/cmd/encore/build.go b/cli/cmd/encore/build.go
new file mode 100644
index 0000000000..954ee4a14e
--- /dev/null
+++ b/cli/cmd/encore/build.go
@@ -0,0 +1,157 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/signal"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/pkg/appfile"
+ daemonpb "encr.dev/proto/encore/daemon"
+)
+
+var (
+ targetOS = cmdutil.Oneof{
+ Value: "linux",
+ Allowed: []string{"linux"},
+ Flag: "os",
+ Desc: "the target operating system",
+ }
+ targetArch = cmdutil.Oneof{
+ Value: "amd64",
+ Allowed: []string{"amd64", "arm64"},
+ Flag: "arch",
+ Desc: "the target architecture",
+ }
+)
+
+func init() {
+ buildCmd := &cobra.Command{
+ Use: "build",
+ Aliases: []string{"eject"},
+ Short: "build provides ways to build your application for deployment",
+ }
+
+ p := buildParams{
+ CgoEnabled: os.Getenv("CGO_ENABLED") == "1",
+ }
+ dockerBuildCmd := &cobra.Command{
+ Use: "docker IMAGE_TAG",
+ Short: "docker builds a portable docker image of your Encore application",
+ Args: cobra.ExactArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ p.Goarch = targetArch.Value
+ p.Goos = targetOS.Value
+ p.AppRoot, _ = determineAppRoot()
+ p.WorkspaceRoot = determineWorkspaceRoot(p.AppRoot)
+ file, err := appfile.ParseFile(filepath.Join(p.AppRoot, appfile.Name))
+ if err == nil {
+ if !cmd.Flag("base").Changed && file.Lang == appfile.LangTS {
+ p.BaseImg = "node:slim"
+ }
+ if !cmd.Flag("cgo").Changed {
+ p.CgoEnabled = file.Build.CgoEnabled
+ }
+ }
+ p.ImageTag = args[0]
+ dockerBuild(p)
+ },
+ }
+
+ dockerBuildCmd.Flags().BoolVarP(&p.Push, "push", "p", false, "push image to remote repository")
+ dockerBuildCmd.Flags().StringVar(&p.BaseImg, "base", "scratch", "base image to build from")
+ dockerBuildCmd.Flags().BoolVar(&p.CgoEnabled, "cgo", false, "enable cgo")
+ dockerBuildCmd.Flags().BoolVar(&p.SkipInfraConf, "skip-config", false, "do not read or generate a infra configuration file")
+ dockerBuildCmd.Flags().StringVar(&p.InfraConfPath, "config", "", "infra configuration file path")
+ p.Services = dockerBuildCmd.Flags().StringSlice("services", nil, "services to include in the image")
+ p.Gateways = dockerBuildCmd.Flags().StringSlice("gateways", nil, "gateways to include in the image")
+ targetOS.AddFlag(dockerBuildCmd)
+ targetArch.AddFlag(dockerBuildCmd)
+ rootCmd.AddCommand(buildCmd)
+ buildCmd.AddCommand(dockerBuildCmd)
+}
+
+type buildParams struct {
+ AppRoot string
+ WorkspaceRoot string
+ ImageTag string
+ Push bool
+ BaseImg string
+ Goos string
+ Goarch string
+ CgoEnabled bool
+ SkipInfraConf bool
+ InfraConfPath string
+ Services *[]string
+ Gateways *[]string
+}
+
+func dockerBuild(p buildParams) {
+ interrupt := make(chan os.Signal, 1)
+ signal.Notify(interrupt, os.Interrupt)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ <-interrupt
+ cancel()
+ }()
+
+ daemon := setupDaemon(ctx)
+ params := &daemonpb.DockerExportParams{
+ BaseImageTag: p.BaseImg,
+ }
+ if p.Push {
+ params.PushDestinationTag = p.ImageTag
+ } else {
+ params.LocalDaemonTag = p.ImageTag
+ }
+
+ var services, gateways []string
+ if p.Services != nil {
+ services = *p.Services
+ }
+ if p.Gateways != nil {
+ gateways = *p.Gateways
+ }
+ var err error
+ cfgPath := ""
+ if p.InfraConfPath != "" {
+ cfgPath, err = filepath.Abs(p.InfraConfPath)
+ if err != nil {
+ cmdutil.Fatalf("failed to resolve absolute path for %s: %v", p.InfraConfPath, err)
+ }
+ }
+ stream, err := daemon.Export(ctx, &daemonpb.ExportRequest{
+ AppRoot: p.AppRoot,
+ WorkspaceRoot: p.WorkspaceRoot,
+ CgoEnabled: p.CgoEnabled,
+ Goos: p.Goos,
+ Goarch: p.Goarch,
+ Environ: os.Environ(),
+ Format: &daemonpb.ExportRequest_Docker{
+ Docker: params,
+ },
+ InfraConfPath: cfgPath,
+ Services: services,
+ Gateways: gateways,
+ SkipInfraConf: p.SkipInfraConf,
+ })
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "fatal: ", err)
+ os.Exit(1)
+ }
+ if code := cmdutil.StreamCommandOutput(stream, cmdutil.ConvertJSONLogs()); code != 0 {
+ os.Exit(code)
+ }
+}
+
+func or(a, b string) string {
+ if a != "" {
+ return a
+ }
+ return b
+}
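
For reference, the resulting command might be invoked like this (the image tags and registry are illustrative):

    encore build docker myapp:latest
    encore build docker --push --os linux --arch arm64 registry.example.com/myapp:v1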
diff --git a/cli/cmd/encore/check.go b/cli/cmd/encore/check.go
index a813e4b064..5c31ad2844 100644
--- a/cli/cmd/encore/check.go
+++ b/cli/cmd/encore/check.go
@@ -6,13 +6,22 @@ import (
"os"
"os/signal"
- daemonpb "encr.dev/proto/encore/daemon"
"github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ daemonpb "encr.dev/proto/encore/daemon"
+)
+
+var (
+ codegenDebug bool
+ checkParseTests bool
)
var checkCmd = &cobra.Command{
Use: "check",
- Short: "Checks your application for errors",
+ Short: "Checks your application for compile-time errors using Encore's compiler.",
+
+ DisableFlagsInUseLine: true,
Run: func(cmd *cobra.Command, args []string) {
appRoot, relPath := determineAppRoot()
runChecks(appRoot, relPath)
@@ -21,6 +30,8 @@ var checkCmd = &cobra.Command{
func init() {
rootCmd.AddCommand(checkCmd)
+ checkCmd.Flags().BoolVar(&codegenDebug, "codegen-debug", false, "Dump generated code (for debugging Encore's code generation)")
+ checkCmd.Flags().BoolVar(&checkParseTests, "tests", false, "Parse tests as well")
}
func runChecks(appRoot, relPath string) {
@@ -34,10 +45,16 @@ func runChecks(appRoot, relPath string) {
}()
daemon := setupDaemon(ctx)
- stream, err := daemon.Check(ctx, &daemonpb.CheckRequest{AppRoot: appRoot, WorkingDir: relPath})
+ stream, err := daemon.Check(ctx, &daemonpb.CheckRequest{
+ AppRoot: appRoot,
+ WorkingDir: relPath,
+ CodegenDebug: codegenDebug,
+ ParseTests: checkParseTests,
+ Environ: os.Environ(),
+ })
if err != nil {
fmt.Fprintln(os.Stderr, "fatal: ", err)
os.Exit(1)
}
- streamCommandOutput(stream)
+ os.Exit(cmdutil.StreamCommandOutput(stream, nil))
}
diff --git a/cli/cmd/encore/cmdutil/autocompletes.go b/cli/cmd/encore/cmdutil/autocompletes.go
new file mode 100644
index 0000000000..d484c75848
--- /dev/null
+++ b/cli/cmd/encore/cmdutil/autocompletes.go
@@ -0,0 +1,87 @@
+package cmdutil
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/internal/platform"
+ "encr.dev/internal/conf"
+)
+
+func AutoCompleteFromStaticList(args ...string) func(cmd *cobra.Command, _ []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return func(cmd *cobra.Command, _ []string, toComplete string) (rtn []string, dir cobra.ShellCompDirective) {
+ toComplete = strings.ToLower(toComplete)
+
+ for _, option := range args {
+ before, _, _ := strings.Cut(option, "\t")
+
+ if strings.HasPrefix(before, toComplete) {
+ rtn = append(rtn, option)
+ }
+ }
+
+ return rtn, cobra.ShellCompDirectiveNoFileComp
+ }
+}
+
+func AutoCompleteAppSlug(cmd *cobra.Command, _ []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ // In case we're not logged in, or on any error, we offer no auto completion.
+ _, err := conf.CurrentUser()
+ if err != nil {
+ return nil, cobra.ShellCompDirectiveError
+ }
+
+ apps, err := platform.ListApps(cmd.Context())
+ if err != nil {
+ return nil, cobra.ShellCompDirectiveError
+ }
+
+ toComplete = strings.ToLower(toComplete)
+
+ rtn := make([]string, 0, len(apps))
+ for _, app := range apps {
+ if strings.HasPrefix(strings.ToLower(app.Slug), toComplete) {
+ desc := app.Description
+ if desc == "" {
+ desc = app.Name
+ }
+
+ rtn = append(rtn, fmt.Sprintf("%s\t%s", app.Slug, desc))
+ }
+ }
+
+ return rtn, cobra.ShellCompDirectiveNoFileComp
+}
+
+func AutoCompleteEnvSlug(cmd *cobra.Command, args []string, toComplete string) (rtn []string, dir cobra.ShellCompDirective) {
+ toComplete = strings.ToLower(toComplete)
+
+ // Support the local environment
+ if strings.HasPrefix("local", toComplete) {
+ rtn = append(rtn, "local\tThis local development environment")
+ }
+
+ _, err := conf.CurrentUser()
+ if err != nil {
+ return rtn, cobra.ShellCompDirectiveError
+ }
+
+ // Assume the app slug is the preceding argument (the last one given so far).
+ appSlug := args[len(args)-1]
+
+ // Get the environments for the app and filter by what the user has already entered
+ envs, err := platform.ListEnvs(cmd.Context(), appSlug)
+ if err != nil {
+ return rtn, cobra.ShellCompDirectiveError
+ }
+
+ for _, env := range envs {
+ if strings.HasPrefix(strings.ToLower(env.Slug), toComplete) {
+ rtn = append(rtn, fmt.Sprintf("%s\tA %s environment running on %s", env.Slug, env.Type, env.Cloud))
+ }
+ }
+
+ return rtn, cobra.ShellCompDirectiveNoFileComp
+}
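
A sketch of how these completion helpers might be attached to a command; the command and flag below are illustrative, not part of this change:

var cloneCmd = &cobra.Command{
	Use:               "clone APP_SLUG",
	Args:              cobra.ExactArgs(1),
	ValidArgsFunction: cmdutil.AutoCompleteAppSlug,
}

func init() {
	cloneCmd.Flags().String("output", "text", "output format")
	// Static completions carry an optional "\t"-separated description.
	_ = cloneCmd.RegisterFlagCompletionFunc("output",
		cmdutil.AutoCompleteFromStaticList("json\tJSON output", "text\tPlain text"))
}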
diff --git a/cli/cmd/encore/cmdutil/cmdutil.go b/cli/cmd/encore/cmdutil/cmdutil.go
new file mode 100644
index 0000000000..c1c8883b25
--- /dev/null
+++ b/cli/cmd/encore/cmdutil/cmdutil.go
@@ -0,0 +1,187 @@
+package cmdutil
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "runtime"
+
+ "github.com/fatih/color"
+ "golang.org/x/crypto/ssh/terminal"
+ "google.golang.org/grpc/status"
+
+ "encr.dev/cli/internal/manifest"
+ "encr.dev/pkg/appfile"
+ "encr.dev/pkg/errinsrc"
+ "encr.dev/pkg/errlist"
+)
+
+var (
+ ErrNoEncoreApp = errors.New("no encore.app found in directory (or any of the parent directories)")
+ ErrEncoreAppIsDir = errors.New("encore.app is a directory, not a file")
+)
+
+// MaybeAppRoot determines the app root by looking for the "encore.app" file,
+// initially in the current directory and then recursively in parent directories
+// up to the filesystem root.
+//
+// It reports the absolute path to the app root, and the
+// relative path from the app root to the working directory.
+func MaybeAppRoot() (appRoot, relPath string, err error) {
+ dir, err := os.Getwd()
+ if err != nil {
+ return "", "", err
+ }
+ return FindAppRootFromDir(dir)
+}
+
+func FindAppRootFromDir(dir string) (appRoot, relPath string, err error) {
+ rel := "."
+ for {
+ path := filepath.Join(dir, "encore.app")
+ fi, err := os.Stat(path)
+ if errors.Is(err, fs.ErrNotExist) {
+ dir2 := filepath.Dir(dir)
+ if dir2 == dir {
+ return "", "", ErrNoEncoreApp
+ }
+ rel = filepath.Join(filepath.Base(dir), rel)
+ dir = dir2
+ continue
+ } else if err != nil {
+ return "", "", err
+ } else if fi.IsDir() {
+ return "", "", ErrEncoreAppIsDir
+ } else {
+ return dir, rel, nil
+ }
+ }
+}
+
+// AppRoot is like MaybeAppRoot but instead of returning an error
+// it prints it to stderr and exits.
+func AppRoot() (appRoot, relPath string) {
+ appRoot, relPath, err := MaybeAppRoot()
+ if err != nil {
+ Fatal(err)
+ }
+ return appRoot, relPath
+}
+
+// WorkspaceRoot determines the workspace root by looking for the .git folder in app root or parents to it.
+// It reports the absolute path to the workspace root.
+func WorkspaceRoot(appRoot string) string {
+ dir := appRoot
+ for {
+ path := filepath.Join(dir, ".git")
+ fi, err := os.Stat(path)
+ if errors.Is(err, fs.ErrNotExist) {
+ dir2 := filepath.Dir(dir)
+ if dir2 == dir {
+ return appRoot
+ }
+ dir = dir2
+ continue
+ } else if err != nil {
+ Fatal(err)
+ } else if !fi.IsDir() {
+ // .git is a file (a worktree or submodule pointer); treat it as the
+ // root too, since continuing without moving up would loop forever.
+ return dir
+ } else {
+ return dir
+ }
+ }
+}
+
+func AppSlugOrLocalID() string {
+ appRoot, _ := AppRoot()
+ appID, _ := appfile.Slug(appRoot)
+ if appID == "" {
+ mf, err := manifest.ReadOrCreate(appRoot)
+ if err != nil {
+ Fatalf("failed to read app manifest: %v", err)
+ }
+ appID = mf.LocalID
+ }
+ return appID
+}
+
+// AppSlug reports the current app's app slug.
+// It throws a fatal error if the app is not connected with the Encore Platform.
+func AppSlug() string {
+ appRoot, _ := AppRoot()
+ appSlug, err := appfile.Slug(appRoot)
+ if err != nil {
+ Fatal(err)
+ } else if appSlug == "" {
+ Fatal("app is not linked with the Encore Platform (see 'encore app link')")
+ }
+ return appSlug
+}
+
+func Fatal(args ...any) {
+ // Prettify gRPC errors
+ for i, arg := range args {
+ if err, ok := arg.(error); ok {
+ if s, ok := status.FromError(err); ok {
+ args[i] = s.Message()
+ }
+ }
+ }
+
+ red := color.New(color.FgRed)
+ _, _ = red.Fprint(os.Stderr, "error: ")
+ _, _ = red.Fprintln(os.Stderr, args...)
+ os.Exit(1)
+}
+
+func Fatalf(format string, args ...any) {
+ // Prettify gRPC errors
+ for i, arg := range args {
+ if err, ok := arg.(error); ok {
+ if s, ok := status.FromError(err); ok {
+ args[i] = s.Message()
+ }
+ }
+ }
+
+ Fatal(fmt.Sprintf(format, args...))
+}
+
+func DisplayError(out *os.File, err []byte) {
+ if len(err) == 0 {
+ return
+ }
+
+ // Get the width of the terminal we're rendering in
+ // if we can so we render using the most space possible.
+ width, _, sizeErr := terminal.GetSize(int(out.Fd()))
+ if sizeErr == nil {
+ errinsrc.TerminalWidth = width
+ }
+
+ // Unmarshal the error into a structured errlist
+ errList := errlist.New(nil)
+ if err := json.Unmarshal(err, &errList); err != nil {
+ Fatalf("unable to parse error: %v", err)
+ }
+
+ if errList.Len() == 0 {
+ return
+ }
+
+ _, _ = out.Write([]byte(errList.Error()))
+}
+
+var Newline string
+
+func init() {
+ switch runtime.GOOS {
+ case "windows":
+ Newline = "\r\n"
+ default:
+ Newline = "\n"
+ }
+}
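
A minimal sketch of a caller (the surrounding snippet is illustrative):

// Commands that can work outside an app directory probe first:
appRoot, relPath, err := cmdutil.MaybeAppRoot()
if errors.Is(err, cmdutil.ErrNoEncoreApp) {
	fmt.Println("not inside an Encore app")
	return
}
fmt.Println("app root:", appRoot, "relative path:", relPath)

// Commands that require a linked app can use the fatal variants:
appSlug := cmdutil.AppSlug() // exits with an error if the app is unlinked
_ = appSlug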
diff --git a/cli/cmd/encore/cmdutil/daemon.go b/cli/cmd/encore/cmdutil/daemon.go
new file mode 100644
index 0000000000..728532cd96
--- /dev/null
+++ b/cli/cmd/encore/cmdutil/daemon.go
@@ -0,0 +1,188 @@
+package cmdutil
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "time"
+
+ "github.com/golang/protobuf/ptypes/empty"
+ "google.golang.org/genproto/googleapis/rpc/errdetails"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
+ "encr.dev/internal/version"
+ "encr.dev/pkg/xos"
+ daemonpb "encr.dev/proto/encore/daemon"
+)
+
+func IsDaemonRunning(ctx context.Context) bool {
+ socketPath, err := daemonSockPath()
+ if err != nil {
+ return false
+ }
+ if _, err := xos.SocketStat(socketPath); err == nil {
+ // The socket exists; check that it is responsive.
+ if cc, err := dialDaemon(ctx, socketPath); err == nil {
+ _ = cc.Close()
+ return true
+ }
+ // socket is not responding, remove it
+ _ = os.Remove(socketPath)
+ }
+ return false
+}
+
+// ConnectDaemon returns a client connection to the Encore daemon.
+// By default, it will start the daemon if it is not already running.
+func ConnectDaemon(ctx context.Context) daemonpb.DaemonClient {
+ socketPath, err := daemonSockPath()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "fatal: ", err)
+ os.Exit(1)
+ }
+
+ if _, err := xos.SocketStat(socketPath); err == nil {
+ // The socket exists; check that it is responsive.
+ if cc, err := dialDaemon(ctx, socketPath); err == nil {
+ // Make sure the daemon is running an up-to-date version;
+ // restart it otherwise.
+ cl := daemonpb.NewDaemonClient(cc)
+ if resp, err := cl.Version(ctx, &empty.Empty{}); err == nil {
+ diff := version.Compare(resp.Version)
+ switch {
+ case diff < 0:
+ // Daemon is running a newer version
+ return cl
+ case diff == 0:
+ if configHash, err := version.ConfigHash(); err != nil {
+ Fatal("unable to get config path: ", err)
+ } else if configHash == resp.ConfigHash {
+ return cl
+ }
+
+ // If we're running a development release, and so is the daemon, don't restart.
+ // This is to avoid spurious restarts during development.
+ if version.Channel == version.DevBuild && version.ChannelFor(resp.Version) == version.DevBuild {
+ return cl
+ }
+
+ // Daemon is running the same version but different config
+ fmt.Fprintf(os.Stderr, "encore: restarting daemon due to configuration change.\n")
+ case diff > 0:
+ fmt.Fprintf(os.Stderr, "encore: daemon is running an outdated version (%s), restarting.\n", resp.Version)
+ }
+ }
+ }
+ // Remove the socket file which triggers the daemon to exit.
+ _ = os.Remove(socketPath)
+ }
+
+ // Start the daemon.
+ if err := StartDaemonInBackground(ctx); err != nil {
+ Fatal("starting daemon: ", err)
+ }
+ cc, err := dialDaemon(ctx, socketPath)
+ if err != nil {
+ Fatal("dialing daemon: ", err)
+ }
+ return daemonpb.NewDaemonClient(cc)
+}
+
+func StopDaemon() {
+ socketPath, err := daemonSockPath()
+ if err != nil {
+ Fatal("stopping daemon: ", err)
+ }
+ if _, err := xos.SocketStat(socketPath); err == nil {
+ _ = os.Remove(socketPath)
+ }
+}
+
+// daemonSockPath reports the path to the Encore daemon unix socket.
+func daemonSockPath() (string, error) {
+ cacheDir, err := os.UserCacheDir()
+ if err != nil {
+ return "", fmt.Errorf("could not determine cache dir: %v", err)
+ }
+ return filepath.Join(cacheDir, "encore", "encored.sock"), nil
+}
+
+// StartDaemonInBackground starts the Encore daemon in the background.
+func StartDaemonInBackground(ctx context.Context) error {
+ socketPath, err := daemonSockPath()
+ if err != nil {
+ return err
+ }
+
+ // nosemgrep
+ exe, err := os.Executable()
+ if err != nil {
+ exe, err = exec.LookPath("encore")
+ }
+ if err != nil {
+ return fmt.Errorf("could not determine location of encore executable: %v", err)
+ }
+ // nosemgrep
+ cmd := exec.Command(exe, "daemon", "-f")
+ cmd.SysProcAttr = xos.CreateNewProcessGroup()
+ if err := cmd.Start(); err != nil {
+ return fmt.Errorf("could not start encore daemon: %v", err)
+ }
+
+ // Wait for it to come up
+ for i := 0; i < 50; i++ {
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ time.Sleep(100 * time.Millisecond)
+ if _, err := xos.SocketStat(socketPath); err == nil {
+ return nil
+ }
+ }
+ return fmt.Errorf("timed out waiting for daemon to start")
+}
+
+func dialDaemon(ctx context.Context, socketPath string) (*grpc.ClientConn, error) {
+ ctx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
+ defer cancel()
+
+ dialer := func(ctx context.Context, addr string) (net.Conn, error) {
+ return (&net.Dialer{}).DialContext(ctx, "unix", socketPath)
+ }
+ // Set max message size to 16mb (up from default 4mb) for json formatted debug metadata for large applications.
+ return grpc.DialContext(ctx, "",
+ grpc.WithInsecure(),
+ grpc.WithBlock(),
+ grpc.WithUnaryInterceptor(errInterceptor),
+ grpc.WithContextDialer(dialer),
+ grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(16*1024*1024)),
+ )
+}
+
+func errInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ err := invoker(ctx, method, req, reply, cc, opts...)
+ if err != nil {
+ if st, ok := status.FromError(err); ok {
+ if st.Code() == codes.Unauthenticated {
+ Fatal("not logged in: run 'encore auth login' first")
+ }
+ for _, detail := range st.Details() {
+ switch t := detail.(type) {
+ case *errdetails.PreconditionFailure:
+ for _, violation := range t.Violations {
+ if violation.Type == "INVALID_REFRESH_TOKEN" {
+ Fatal("OAuth refresh token was invalid. Please run `encore auth login` again.")
+ }
+ }
+ }
+ }
+ }
+ }
+ return err
+}
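
A minimal sketch of a caller, reusing the imports above (the snippet itself is illustrative):

ctx := context.Background()
client := cmdutil.ConnectDaemon(ctx) // starts or restarts the daemon as needed
resp, err := client.Version(ctx, &empty.Empty{})
if err != nil {
	cmdutil.Fatal("could not get daemon version: ", err)
}
fmt.Println("daemon version:", resp.Version)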
diff --git a/cli/cmd/encore/cmdutil/output.go b/cli/cmd/encore/cmdutil/output.go
new file mode 100644
index 0000000000..be78f7802e
--- /dev/null
+++ b/cli/cmd/encore/cmdutil/output.go
@@ -0,0 +1,111 @@
+package cmdutil
+
+import (
+ "errors"
+ "slices"
+ "strconv"
+ "strings"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+)
+
+type Oneof struct {
+ Value string
+ Allowed []string
+ Flag string // defaults to "output" if empty
+ FlagShort string // defaults to "o" if both Flag and FlagShort are empty
+ Desc string // usage desc
+ TypeDesc string // type description, defaults to the name of the flag
+ NoOptDefVal string // default value when no option is provided
+}
+
+func (o *Oneof) AddFlag(cmd *cobra.Command) {
+ name, short := o.FlagName()
+ cmd.Flags().AddFlag(
+ &pflag.Flag{
+ Name: name,
+ NoOptDefVal: o.NoOptDefVal,
+ Shorthand: short,
+ Usage: o.Usage(),
+ Value: o,
+ DefValue: o.String(),
+ })
+ _ = cmd.RegisterFlagCompletionFunc(name, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return o.Allowed, cobra.ShellCompDirectiveNoFileComp
+ })
+}
+
+func (o *Oneof) FlagName() (name, short string) {
+ name, short = o.Flag, o.FlagShort
+ if name == "" {
+ name, short = "output", "o"
+ }
+ return name, short
+}
+
+func (o *Oneof) String() string {
+ return o.Value
+}
+
+func (o *Oneof) Type() string {
+ if o.TypeDesc != "" {
+ return o.TypeDesc
+ }
+ name, _ := o.FlagName()
+ return name
+}
+
+func (o *Oneof) Set(v string) error {
+ if slices.Contains(o.Allowed, v) {
+ o.Value = v
+ return nil
+ }
+
+ var b strings.Builder
+ b.WriteString("must be one of ")
+ o.oneOf(&b)
+ return errors.New(b.String())
+}
+
+func (o *Oneof) Usage() string {
+ var b strings.Builder
+ desc := o.Desc
+ if desc == "" {
+ desc = "Output format"
+ }
+ b.WriteString(desc + ". One of (")
+ o.oneOf(&b)
+ b.WriteString(").")
+ return b.String()
+}
+
+// Alternatives lists the alternatives in the format "a|b|c".
+func (o *Oneof) Alternatives() string {
+ var b strings.Builder
+ for i, s := range o.Allowed {
+ if i > 0 {
+ b.WriteByte('|')
+ }
+ b.WriteString(s)
+ }
+ return b.String()
+}
+
+func (o *Oneof) oneOf(b *strings.Builder) {
+ n := len(o.Allowed)
+ for i, s := range o.Allowed {
+ if i > 0 {
+ switch {
+ case n == 2:
+ b.WriteString(" or ")
+ case i == n-1:
+ b.WriteString(", or ")
+ default:
+ b.WriteString(", ")
+ }
+ }
+
+ b.WriteString(strconv.Quote(s))
+ }
+}
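
The build command above wires this up for --os and --arch; a default output-format flag is even shorter (sketch; cmd is some *cobra.Command):

format := cmdutil.Oneof{
	Value:   "text",
	Allowed: []string{"text", "json"},
	// Flag and FlagShort left empty, so the flag becomes --output / -o.
}
format.AddFlag(cmd)
// After parsing, format.Value holds the validated choice; an invalid
// value fails with: must be one of "text" or "json".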
diff --git a/cli/cmd/encore/cmdutil/stream.go b/cli/cmd/encore/cmdutil/stream.go
new file mode 100644
index 0000000000..e62790cf83
--- /dev/null
+++ b/cli/cmd/encore/cmdutil/stream.go
@@ -0,0 +1,203 @@
+package cmdutil
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "sync"
+
+ "github.com/logrusorgru/aurora/v3"
+ "github.com/rs/zerolog"
+ "github.com/rs/zerolog/log"
+ "golang.org/x/crypto/ssh/terminal"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
+ "encr.dev/pkg/ansi"
+ "encr.dev/proto/encore/daemon"
+)
+
+// CommandOutputStream is the interface for gRPC streams that
+// stream the output of a command.
+type CommandOutputStream interface {
+ Recv() (*daemon.CommandMessage, error)
+}
+
+type OutputConverter func(line []byte) []byte
+
+// StreamCommandOutput streams the output from the given command stream,
+// and reports the command's exit code.
+// If converter is non-nil, every line of output is passed through it
+// before being written (see ConvertJSONLogs).
+func StreamCommandOutput(stream CommandOutputStream, converter OutputConverter) int {
+ var outWrite io.Writer = os.Stdout
+ var errWrite io.Writer = os.Stderr
+
+ var writesDone sync.WaitGroup
+ defer writesDone.Wait()
+
+ if converter != nil {
+ // Create a pipe that we read from line-by-line so we can detect JSON lines.
+ outRead, outw := io.Pipe()
+ errRead, errw := io.Pipe()
+ outWrite = outw
+ errWrite = errw
+ defer func() { _ = outw.Close() }()
+ defer func() { _ = errw.Close() }()
+
+ for i, read := range []io.Reader{outRead, errRead} {
+ read := read
+ stdout := i == 0
+ writesDone.Add(1)
+ go func() {
+ defer writesDone.Done()
+
+ for {
+ scanner := bufio.NewScanner(read)
+ for scanner.Scan() {
+ line := append(scanner.Bytes(), '\n')
+ line = converter(line)
+ if stdout {
+ _, _ = os.Stdout.Write(line)
+ } else {
+ _, _ = os.Stderr.Write(line)
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ // The scanner failed, likely due to a too-long line. Log an error
+ // and loop around, creating a fresh scanner since the old one
+ // is in an unrecoverable state.
+ fmt.Fprintln(os.Stderr, "failed to read output:", err)
+ continue
+ }
+ break
+ }
+ }()
+ }
+ }
+
+ for {
+ msg, err := stream.Recv()
+ if err != nil {
+ st := status.Convert(err)
+ switch {
+ case st.Code() == codes.FailedPrecondition:
+ _, _ = fmt.Fprintln(os.Stderr, st.Message())
+ return 1
+ case err == io.EOF || st.Code() == codes.Canceled || strings.HasSuffix(err.Error(), "error reading from server: EOF"):
+ return 0
+ default:
+ log.Fatal().Err(err).Msg("connection failure")
+ }
+ }
+
+ switch m := msg.Msg.(type) {
+ case *daemon.CommandMessage_Output:
+ if m.Output.Stdout != nil {
+ _, _ = outWrite.Write(m.Output.Stdout)
+ }
+ if m.Output.Stderr != nil {
+ _, _ = errWrite.Write(m.Output.Stderr)
+ }
+ case *daemon.CommandMessage_Errors:
+ DisplayError(os.Stderr, m.Errors.Errinsrc)
+
+ case *daemon.CommandMessage_Exit:
+ return int(m.Exit.Code)
+ }
+ }
+}
+
+type ConvertLogOptions struct {
+ Color bool
+}
+
+type ConvertLogOption func(*ConvertLogOptions)
+
+func Colorize(enable bool) ConvertLogOption {
+ return func(clo *ConvertLogOptions) {
+ clo.Color = enable
+ }
+}
+
+func ConvertJSONLogs(opts ...ConvertLogOption) OutputConverter {
+ // Default to colorized output.
+ options := ConvertLogOptions{Color: true}
+
+ for _, opt := range opts {
+ opt(&options)
+ }
+
+ var logMutex sync.Mutex
+ logLineBuffer := bytes.NewBuffer(make([]byte, 0, 1024))
+ cout := zerolog.NewConsoleWriter(func(w *zerolog.ConsoleWriter) {
+ w.Out = logLineBuffer
+ w.FieldsExclude = []string{"stack"}
+ w.FormatExtra = func(vals map[string]any, buf *bytes.Buffer) error {
+ if stack, ok := vals["stack"]; ok {
+ return FormatStack(stack, buf)
+ }
+ return nil
+ }
+ })
+ if !options.Color {
+ cout.NoColor = true
+ }
+
+ return func(line []byte) []byte {
+ // If this isn't a JSON log line, just return it as-is
+ if len(line) == 0 || line[0] != '{' {
+ return line
+ }
+
+ // Otherwise grab the converter buffer and reset it
+ logMutex.Lock()
+ defer logMutex.Unlock()
+ logLineBuffer.Reset()
+
+ // Then convert the JSON log line to pretty formatted text
+ _, err := cout.Write(line)
+ if err != nil {
+ return line
+ }
+ out := make([]byte, len(logLineBuffer.Bytes()))
+ copy(out, logLineBuffer.Bytes())
+ return out
+ }
+}
+
+func FormatStack(val any, buf *bytes.Buffer) error {
+ var frames []struct {
+ File string
+ Line int
+ Func string
+ }
+
+ if jsonRepr, err := json.Marshal(val); err != nil {
+ return err
+ } else if err := json.Unmarshal(jsonRepr, &frames); err != nil {
+ return err
+ }
+ for _, f := range frames {
+ fmt.Fprintf(buf, "\n %s\n %s",
+ f.Func,
+ aurora.Gray(12, fmt.Sprintf("%s:%d", f.File, f.Line)))
+ }
+ return nil
+}
+
+func ClearTerminalExceptFirstNLines(n int) {
+ // Clear the screen except for the first n lines.
+ if _, height, err := terminal.GetSize(int(os.Stdout.Fd())); err == nil {
+ count := height - (1 + n)
+ if count > 0 {
+ _, _ = os.Stdout.Write(bytes.Repeat([]byte{'\n'}, count))
+ }
+ _, _ = fmt.Fprint(os.Stdout, ansi.SetCursorPosition(n+1, 1)+ansi.ClearScreen(ansi.CursorToBottom))
+ }
+}
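
A short sketch of the converter in isolation (the log fields are illustrative):

convert := cmdutil.ConvertJSONLogs(cmdutil.Colorize(false))

// JSON lines are rendered through zerolog's console writer...
_, _ = os.Stdout.Write(convert([]byte(`{"level":"info","message":"listening","port":4000}` + "\n")))
// ...while lines that don't start with '{' pass through untouched.
_, _ = os.Stdout.Write(convert([]byte("plain output\n")))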
diff --git a/cli/cmd/encore/config/config.go b/cli/cmd/encore/config/config.go
new file mode 100644
index 0000000000..0723ba80fd
--- /dev/null
+++ b/cli/cmd/encore/config/config.go
@@ -0,0 +1,127 @@
+package config
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/cli/cmd/encore/root"
+ "encr.dev/internal/userconfig"
+ "github.com/spf13/cobra"
+)
+
+var (
+ forceApp, forceGlobal bool
+ viewAllSettings bool
+)
+
+var autoCompleteConfigKeys = cmdutil.AutoCompleteFromStaticList(userconfig.Keys()...)
+
+var longDocs = `Gets or sets configuration values for customizing the behavior of the Encore CLI.
+
+Configuration options can be set both for individual Encore applications
+and globally for the local user.
+
+Configuration options can be set using ` + bt("encore config <key> <value>") + `,
+and options can similarly be read using ` + bt("encore config <key>") + `.
+
+When running ` + bt("encore config") + ` within an Encore application,
+it automatically sets and gets configuration for that application.
+
+To set or get global configuration, use the ` + bt("--global") + ` flag.
+
+Available configuration settings are:
+
+` + userconfig.CLIDocs()
+
+var configCmd = &cobra.Command{
+ Use: "config []",
+ Short: "Get or set a configuration value",
+ Long: longDocs,
+ Args: cobra.RangeArgs(0, 2),
+
+ Run: func(cmd *cobra.Command, args []string) {
+ appRoot, _, _ := cmdutil.MaybeAppRoot()
+
+ appScope := appRoot != ""
+ if forceApp {
+ appScope = true
+ } else if forceGlobal {
+ appScope = false
+ }
+
+ if appScope && appRoot == "" {
+ // If the user specified --app, error if there is no app.
+ cmdutil.Fatal(cmdutil.ErrNoEncoreApp)
+ }
+
+ if len(args) == 2 {
+ var err error
+ if appScope {
+ err = userconfig.SetForApp(appRoot, args[0], args[1])
+ } else {
+ err = userconfig.SetGlobal(args[0], args[1])
+ }
+ if err != nil {
+ cmdutil.Fatal(err)
+ }
+ } else {
+ var (
+ cfg *userconfig.Config
+ err error
+ )
+ if appScope {
+ appRoot, _ := cmdutil.AppRoot()
+ cfg, err = userconfig.ForApp(appRoot).Get()
+ } else {
+ cfg, err = userconfig.Global().Get()
+ }
+ if err != nil {
+ cmdutil.Fatal(err)
+ }
+
+ if viewAllSettings {
+ if len(args) > 0 {
+ cmdutil.Fatalf("cannot specify a settings key when using --all")
+ }
+ s := strings.TrimSuffix(cfg.Render(), "\n")
+ fmt.Println(s)
+ return
+ }
+
+ if len(args) == 0 {
+ // Without --all, a key argument is required.
+ _ = cmd.Usage()
+ os.Exit(1)
+ }
+
+ val, ok := cfg.GetByKey(args[0])
+ if !ok {
+ cmdutil.Fatalf("unknown key %q", args[0])
+ }
+ fmt.Printf("%v\n", val)
+ }
+ },
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) == 0 {
+ // Completing the first argument, the config key
+ return autoCompleteConfigKeys(cmd, args, toComplete)
+ }
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ },
+}
+
+func init() {
+ configCmd.Flags().BoolVar(&viewAllSettings, "all", false, "view all settings")
+ configCmd.Flags().BoolVar(&forceApp, "app", false, "set the value for the current app")
+ configCmd.Flags().BoolVar(&forceGlobal, "global", false, "set the value at the global level")
+ configCmd.MarkFlagsMutuallyExclusive("app", "global")
+
+ root.Cmd.AddCommand(configCmd)
+}
+
+// bt renders a backtick-enclosed string.
+func bt(val string) string {
+ return fmt.Sprintf("`%s`", val)
+}
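
For reference, the command might be used like this (the key name is illustrative):

    encore config some.setting true      # set for the current app
    encore config --global some.setting  # read the global value
    encore config --all                  # print every setting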
diff --git a/cli/cmd/encore/daemon.go b/cli/cmd/encore/daemon.go
index f8897f2857..c2d11933cf 100644
--- a/cli/cmd/encore/daemon.go
+++ b/cli/cmd/encore/daemon.go
@@ -3,19 +3,14 @@ package main
import (
"context"
"fmt"
- "net"
"os"
- "os/exec"
- "path/filepath"
- "time"
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
daemonpkg "encr.dev/cli/cmd/encore/daemon"
- "encr.dev/cli/internal/xos"
+ "encr.dev/internal/env"
daemonpb "encr.dev/proto/encore/daemon"
- "github.com/golang/protobuf/ptypes/empty"
- "github.com/spf13/cobra"
- "golang.org/x/mod/semver"
- "google.golang.org/grpc"
)
var daemonizeForeground bool
@@ -25,9 +20,9 @@ var daemonCmd = &cobra.Command{
Short: "Starts the encore daemon",
Run: func(cc *cobra.Command, args []string) {
if daemonizeForeground {
- daemonpkg.Main(Version)
+ daemonpkg.Main()
} else {
- if err := daemonize(context.Background()); err != nil {
+ if err := cmdutil.StartDaemonInBackground(context.Background()); err != nil {
fatal(err)
}
fmt.Fprintln(os.Stdout, "encore daemon is now running")
@@ -38,96 +33,20 @@ var daemonCmd = &cobra.Command{
func init() {
rootCmd.AddCommand(daemonCmd)
daemonCmd.Flags().BoolVarP(&daemonizeForeground, "foreground", "f", false, "Start the daemon in the foreground")
+ daemonCmd.AddCommand(daemonEnvCmd)
}
-// daemonize starts the Encore daemon in the background.
-func daemonize(ctx context.Context) error {
- socketPath, err := daemonSockPath()
- if err != nil {
- return err
- }
-
- exe, err := os.Executable()
- if err != nil {
- exe, err = exec.LookPath("encore")
- }
- if err != nil {
- return fmt.Errorf("could not determine location of encore executable: %v", err)
- }
- cmd := exec.Command(exe, "daemon", "-f")
- cmd.SysProcAttr = xos.CreateNewProcessGroup()
- if err := cmd.Start(); err != nil {
- return fmt.Errorf("could not start encore daemon: %v", err)
- }
-
- // Wait for it to come up
- for i := 0; i < 50; i++ {
- if err := ctx.Err(); err != nil {
- return err
- }
- time.Sleep(100 * time.Millisecond)
- if _, err := xos.SocketStat(socketPath); err == nil {
- return nil
- }
- }
- return fmt.Errorf("timed out waiting for daemon to start")
-}
-
-// daemonSockPath reports the path to the Encore daemon unix socket.
-func daemonSockPath() (string, error) {
- cacheDir, err := os.UserCacheDir()
- if err != nil {
- return "", fmt.Errorf("could not determine cache dir: %v", err)
- }
- return filepath.Join(cacheDir, "encore", "encored.sock"), nil
-}
-
-// setupDaemon sets up the Encore daemon if it isn't already running
-// and returns a client connected to it.
func setupDaemon(ctx context.Context) daemonpb.DaemonClient {
- socketPath, err := daemonSockPath()
- if err != nil {
- fmt.Fprintln(os.Stderr, "fatal: ", err)
- os.Exit(1)
- }
-
- if _, err := xos.SocketStat(socketPath); err == nil {
- // The socket exists; check that it is responsive.
- if cc, err := dialDaemon(ctx, socketPath); err == nil {
- // Make sure the daemon is running an up-to-date version;
- // restart it otherwise.
- cl := daemonpb.NewDaemonClient(cc)
- if resp, err := cl.Version(ctx, &empty.Empty{}); err == nil {
- if semver.Compare(Version, resp.Version) >= 0 {
- return cl
- }
- fmt.Fprintf(os.Stderr, "encore: daemon is running an outdated version (%s), restarting.\n", resp.Version)
- }
- }
- // Remove the socket file which triggers the daemon to exit.
- os.Remove(socketPath)
- }
-
- // Start the daemon.
- if err := daemonize(ctx); err != nil {
- fatal("starting daemon: ", err)
- }
- cc, err := dialDaemon(ctx, socketPath)
- if err != nil {
- fatal("dialing daemon: ", err)
- }
- return daemonpb.NewDaemonClient(cc)
+ return cmdutil.ConnectDaemon(ctx)
}
-func dialDaemon(ctx context.Context, socketPath string) (*grpc.ClientConn, error) {
- ctx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
- defer cancel()
-
- dialer := func(ctx context.Context, addr string) (net.Conn, error) {
- return (&net.Dialer{}).DialContext(ctx, "unix", socketPath)
- }
- return grpc.DialContext(ctx, "",
- grpc.WithInsecure(),
- grpc.WithBlock(),
- grpc.WithContextDialer(dialer))
+var daemonEnvCmd = &cobra.Command{
+ Use: "env",
+ Short: "Prints Encore environment information",
+ Run: func(cc *cobra.Command, args []string) {
+ envs := env.List()
+ for _, e := range envs {
+ fmt.Println(e)
+ }
+ },
}
diff --git a/cli/cmd/encore/daemon/daemon.go b/cli/cmd/encore/daemon/daemon.go
index 07bd91254f..2ce07e859e 100644
--- a/cli/cmd/encore/daemon/daemon.go
+++ b/cli/cmd/encore/daemon/daemon.go
@@ -1,72 +1,120 @@
package daemon
import (
+ "context"
+ "database/sql"
+ "embed"
+ _ "embed" // for go:embed
"fmt"
"io"
+ "io/fs"
"net"
"net/http"
+ "net/http/pprof"
+ "net/netip"
"os"
+ "os/signal"
"path/filepath"
+ "strconv"
+ "strings"
+ "syscall"
"time"
+ "github.com/cenkalti/backoff/v4"
+ "github.com/cockroachdb/errors"
+ "github.com/golang-migrate/migrate/v4"
+ "github.com/golang-migrate/migrate/v4/database"
+ "github.com/golang-migrate/migrate/v4/database/sqlite3"
+ "github.com/golang-migrate/migrate/v4/source/iofs"
+ _ "github.com/mattn/go-sqlite3" // for "sqlite3" driver
+ "github.com/rs/zerolog"
+ "github.com/rs/zerolog/log"
+ "google.golang.org/genproto/googleapis/rpc/errdetails"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
"encr.dev/cli/daemon"
+ "encr.dev/cli/daemon/apps"
"encr.dev/cli/daemon/dash"
+ "encr.dev/cli/daemon/engine"
+ "encr.dev/cli/daemon/engine/trace2"
+ "encr.dev/cli/daemon/engine/trace2/sqlite"
+ "encr.dev/cli/daemon/mcp"
+ "encr.dev/cli/daemon/namespace"
+ "encr.dev/cli/daemon/objects"
"encr.dev/cli/daemon/run"
- "encr.dev/cli/daemon/runtime"
- "encr.dev/cli/daemon/runtime/trace"
"encr.dev/cli/daemon/secret"
"encr.dev/cli/daemon/sqldb"
- "encr.dev/cli/internal/conf"
- "encr.dev/cli/internal/xos"
+ "encr.dev/cli/daemon/sqldb/docker"
+ "encr.dev/cli/daemon/sqldb/external"
+ "encr.dev/internal/conf"
+ "encr.dev/internal/env"
+ "encr.dev/pkg/eerror"
+ "encr.dev/pkg/option"
+ "encr.dev/pkg/watcher"
+ "encr.dev/pkg/xos"
daemonpb "encr.dev/proto/encore/daemon"
- "encr.dev/proto/encore/server/remote"
- "github.com/rs/zerolog"
- "github.com/rs/zerolog/log"
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/credentials/oauth"
- "google.golang.org/grpc/keepalive"
)
// Main runs the daemon.
-func Main(version string) {
- if err := runMain(version); err != nil {
+func Main() {
+ watcher.BumpRLimitSoftToHardLimit()
+
+ if err := redirectLogOutput(); err != nil {
+ log.Error().Err(err).Msg("could not setup daemon log file, skipping")
+ }
+ if err := runMain(); err != nil {
log.Fatal().Err(err).Msg("daemon failed")
}
}
-func runMain(version string) (err error) {
- // xit receives signals from the different subsystems
+func runMain() (err error) {
+ ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT)
+ defer cancel()
+
+ // exit receives signals from the different subsystems
// that something went wrong and it's time to exit.
// Sending nil indicates it's time to gracefully exit.
exit := make(chan error)
- d := &Daemon{exit: exit, Version: version}
+ d := &Daemon{dev: conf.DevDaemon, exit: exit}
defer handleBailout(&err)
defer d.closeAll()
- d.init()
+ d.init(ctx)
d.serve()
- return <-exit
+ select {
+ case err := <-exit:
+ return err
+ case <-ctx.Done():
+ return nil
+ }
}
// Daemon orchestrates setting up the different daemon subsystems.
type Daemon struct {
- Log zerolog.Logger
- Daemon *net.UnixListener
- Runtime *net.TCPListener
- DBProxy *net.TCPListener
- Dash *net.TCPListener
- Version string
-
- Remote remote.RemoteClient
- Secret *secret.Manager
- RunMgr *run.Manager
- ClusterMgr *sqldb.ClusterManager
- Trace *trace.Store
- DashSrv *dash.Server
- Server *daemon.Server
+ Daemon *net.UnixListener
+ Runtime *retryingTCPListener
+ DBProxy *retryingTCPListener
+ Dash *retryingTCPListener
+ Debug *retryingTCPListener
+ ObjectStorage *retryingTCPListener
+ MCP *retryingTCPListener
+ EncoreDB *sql.DB
+
+ Apps *apps.Manager
+ Secret *secret.Manager
+ RunMgr *run.Manager
+ NS *namespace.Manager
+ ClusterMgr *sqldb.ClusterManager
+ ObjectsMgr *objects.ClusterManager
+ MCPMgr *mcp.Manager
+ PublicBuckets *objects.PublicBucketServer
+ Trace trace2.Store
+ Server *daemon.Server
+ dev bool // whether we're in development mode
// exit is a channel that shuts down the daemon when sent on.
// A nil error indicates graceful exit.
@@ -76,24 +124,66 @@ type Daemon struct {
close []io.Closer
}
-func (d *Daemon) init() {
+func (d *Daemon) init(ctx context.Context) {
d.Daemon = d.listenDaemonSocket()
- d.Runtime = d.listenTCP()
- d.DBProxy = d.listenTCP()
- d.Dash = d.listenTCP()
-
- d.Trace = trace.NewStore()
- d.ClusterMgr = sqldb.NewClusterManager()
- d.Remote = d.setupRemoteClient()
- d.Secret = secret.New(d.Remote)
+ d.Dash = d.listenTCPRetry("dashboard", env.EncoreDevDashListenAddr(), 9400)
+ d.DBProxy = d.listenTCPRetry("dbproxy", option.None[string](), 9500)
+ d.Runtime = d.listenTCPRetry("runtime", option.None[string](), 9600)
+ d.Debug = d.listenTCPRetry("debug", option.None[string](), 9700)
+ d.ObjectStorage = d.listenTCPRetry("objectstorage", env.EncoreObjectStorageListAddr(), 9800)
+ d.MCP = d.listenTCPRetry("mcp", env.EncoreMCPSSEListenAddr(), 9900)
+ d.EncoreDB = d.openDB()
+
+ d.Apps = apps.NewManager(d.EncoreDB)
+ d.close = append(d.close, d.Apps)
+
+ // If ENCORE_SQLDB_HOST is set, use the external cluster instead of
+ // creating our own docker container cluster.
+ var sqldbDriver sqldb.Driver = &docker.Driver{}
+ if host := os.Getenv("ENCORE_SQLDB_HOST"); host != "" {
+ sqldbDriver = &external.Driver{
+ Host: host,
+ Database: os.Getenv("ENCORE_SQLDB_DATABASE"),
+ SuperuserUsername: os.Getenv("ENCORE_SQLDB_USER"),
+ SuperuserPassword: os.Getenv("ENCORE_SQLDB_PASSWORD"),
+ }
+ log.Info().Msgf("using external postgres cluster: %s", host)
+ }
+
+ d.NS = namespace.NewManager(d.EncoreDB)
+ d.Secret = secret.New()
+ d.ClusterMgr = sqldb.NewClusterManager(sqldbDriver, d.Apps, d.NS, d.Secret)
+ d.ObjectsMgr = objects.NewClusterManager(d.NS)
+ d.PublicBuckets = objects.NewPublicBucketServer("http://"+d.ObjectStorage.ClientAddr(), d.ObjectsMgr.PersistentStoreFallback)
+
+ traceStore := sqlite.New(d.EncoreDB)
+ go traceStore.CleanEvery(ctx, 1*time.Minute, 500, 100, 10000)
+ d.Trace = traceStore
+
d.RunMgr = &run.Manager{
- RuntimePort: tcpPort(d.Runtime),
- DBProxyPort: tcpPort(d.DBProxy),
- DashPort: tcpPort(d.Dash),
- Secret: d.Secret,
+ RuntimePort: d.Runtime.Port(),
+ DBProxyPort: d.DBProxy.Port(),
+ DashBaseURL: fmt.Sprintf("http://%s", d.Dash.ClientAddr()),
+ Secret: d.Secret,
+ ClusterMgr: d.ClusterMgr,
+ ObjectsMgr: d.ObjectsMgr,
+ PublicBuckets: d.PublicBuckets,
}
- d.DashSrv = dash.NewServer(d.RunMgr, d.Trace)
- d.Server = daemon.New(d.Version, d.RunMgr, d.ClusterMgr, d.Secret, d.Remote)
+ d.MCPMgr = mcp.NewManager(
+ d.Apps,
+ d.ClusterMgr,
+ d.NS,
+ d.Trace,
+ d.RunMgr,
+ fmt.Sprintf("http://%s", d.MCP.ClientAddr()),
+ )
+
+ // Register namespace deletion handlers.
+ d.NS.RegisterDeletionHandler(d.ClusterMgr)
+ d.NS.RegisterDeletionHandler(d.RunMgr)
+ d.NS.RegisterDeletionHandler(d.ObjectsMgr)
+
+ d.Server = daemon.New(d.Apps, d.RunMgr, d.ClusterMgr, d.Secret, d.NS, d.MCPMgr)
}
func (d *Daemon) serve() {
@@ -101,6 +191,9 @@ func (d *Daemon) serve() {
go d.serveRuntime()
go d.serveDBProxy()
go d.serveDash()
+ go d.serveDebug()
+ go d.serveObjects()
+ go d.serveMCP()
}
// listenDaemonSocket listens on the encored.sock UNIX socket
@@ -117,7 +210,7 @@ func (d *Daemon) listenDaemonSocket() *net.UnixListener {
// If the daemon socket already exists, remove it so we can take over listening.
if _, err := xos.SocketStat(socketPath); err == nil {
- os.Remove(socketPath)
+ _ = os.Remove(socketPath)
}
ln, err := net.ListenUnix("unix", &net.UnixAddr{Name: socketPath, Net: "unix"})
if err != nil {
@@ -132,34 +225,44 @@ func (d *Daemon) listenDaemonSocket() *net.UnixListener {
return ln
}
-// setupRemoteClient sets up a grpc client to Encore's backend service.
-func (d *Daemon) setupRemoteClient() remote.RemoteClient {
- ts := &conf.TokenSource{}
- dialOpts := []grpc.DialOption{
- grpc.WithTransportCredentials(credentials.NewTLS(nil)),
- grpc.WithPerRPCCredentials(oauth.TokenSource{TokenSource: ts}),
- grpc.WithKeepaliveParams(keepalive.ClientParameters{
- Time: 20 * time.Second,
- }),
- }
- conn, err := grpc.Dial("remote.encoreapis.com:443", dialOpts...)
+func failedPreconditionError(msg, typ, desc string) error {
+ st, err := status.New(codes.FailedPrecondition, msg).WithDetails(
+ &errdetails.PreconditionFailure{
+ Violations: []*errdetails.PreconditionFailure_Violation{
+ {
+ Type: typ,
+ Description: desc,
+ },
+ },
+ },
+ )
if err != nil {
- fatalf("failed to dial encore server: %v", err)
+ panic(err)
}
- d.closeOnExit(conn)
- return remote.NewRemoteClient(conn)
+ return st.Err()
+}
+
+func ErrInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
+ resp, err = handler(ctx, req)
+ if errors.Is(err, conf.ErrInvalidRefreshToken) {
+ return nil, failedPreconditionError("invalid refresh token", "INVALID_REFRESH_TOKEN", "invalid refresh token")
+ } else if errors.Is(err, conf.ErrNotLoggedIn) {
+ return nil, status.Error(codes.Unauthenticated, "not logged in")
+ }
+ return resp, err
}
func (d *Daemon) serveDaemon() {
log.Info().Stringer("addr", d.Daemon.Addr()).Msg("serving daemon")
- srv := grpc.NewServer()
+ srv := grpc.NewServer(grpc.UnaryInterceptor(ErrInterceptor))
daemonpb.RegisterDaemonServer(srv, d.Server)
d.exit <- srv.Serve(d.Daemon)
}
func (d *Daemon) serveRuntime() {
log.Info().Stringer("addr", d.Runtime.Addr()).Msg("serving runtime")
- srv := runtime.NewServer(d.RunMgr, d.Trace, d.Remote)
+ rec := trace2.NewRecorder(d.Trace)
+ srv := engine.NewServer(d.RunMgr, rec)
d.exit <- http.Serve(d.Runtime, srv)
}
@@ -168,24 +271,139 @@ func (d *Daemon) serveDBProxy() {
d.exit <- d.ClusterMgr.ServeProxy(d.DBProxy)
}
+func (d *Daemon) serveMCP() {
+ log.Info().Stringer("addr", d.MCP.Addr()).Msg("serving mcp")
+ d.exit <- d.MCPMgr.Serve(d.MCP)
+}
+
+func (d *Daemon) serveObjects() {
+ log.Info().Stringer("addr", d.ObjectStorage.Addr()).Msg("serving object storage")
+ d.exit <- d.PublicBuckets.Serve(d.ObjectStorage)
+}
+
func (d *Daemon) serveDash() {
log.Info().Stringer("addr", d.Dash.Addr()).Msg("serving dash")
- srv := dash.NewServer(d.RunMgr, d.Trace)
+ srv := dash.NewServer(d.Apps, d.RunMgr, d.NS, d.Trace, d.Dash.Port())
d.exit <- http.Serve(d.Dash, srv)
}
-// listenTCP listens for TCP connections on a random port on localhost.
-func (d *Daemon) listenTCP() *net.TCPListener {
- ln, err := net.Listen("tcp", "127.0.0.1:0")
+func (d *Daemon) serveDebug() {
+ log.Info().Stringer("addr", d.Debug.Addr()).Msg("serving debug")
+
+ mux := http.NewServeMux()
+ mux.HandleFunc("/debug/pprof/", pprof.Index)
+ mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
+ mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
+ mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
+ mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
+ d.exit <- http.Serve(d.Debug, mux)
+}
+
+// listenTCPRetry listens for TCP connections on the given port, retrying
+// in the background if it's already in use.
+func (d *Daemon) listenTCPRetry(component string, addrOverride option.Option[string], defaultPort uint16) *retryingTCPListener {
+ addr, err := parseInterface(addrOverride.GetOrElse("127.0.0.1:0"))
if err != nil {
- fatal(err)
+ log.Fatal().Str("component", component).Err(err).Msg("failed to parse interface")
+ }
+ if addr.Port() == 0 {
+ addr = netip.AddrPortFrom(addr.Addr(), defaultPort)
}
+ ln := listenLocalhostTCP(component, addr)
d.closeOnExit(ln)
- return ln.(*net.TCPListener)
+ return ln
+}
+
+func (d *Daemon) openDB() *sql.DB {
+ dir, err := conf.Dir()
+ if err != nil {
+ fatal(err)
+ } else if err := os.MkdirAll(dir, 0755); err != nil {
+ fatal(err)
+ }
+
+ dbPath := filepath.Join(dir, "encore.db")
+
+ // Create the database file if it doesn't exist, as we've observed
+ // occasional failures to open the database when the file is missing.
+ if _, err := os.Stat(dbPath); os.IsNotExist(err) {
+ if f, err := os.OpenFile(dbPath, os.O_CREATE|os.O_WRONLY, 0600); err == nil {
+ _ = f.Close()
+ }
+ }
+
+ db, err := sql.Open("sqlite3", fmt.Sprintf("file:%s?cache=shared&_journal=wal&_txlock=immediate", dbPath))
+ if err != nil {
+ fatal(err)
+ }
+
+ // Initialize db schema
+ if err := d.runDBMigrations(db); err != nil {
+ fatalf("unable to migrate management database: %v", err)
+ }
+ d.closeOnExit(db)
+
+ return db
}
-func tcpPort(ln net.Listener) int {
- return ln.Addr().(*net.TCPAddr).Port
+//go:embed migrations
+var dbMigrations embed.FS
+
+func (d *Daemon) runDBMigrations(db *sql.DB) error {
+ {
+ // Convert old-style schema definition to golang-migrate, if necessary.
+ var isLegacy bool
+ err := db.QueryRow(`
+ SELECT COUNT(*) > 0 FROM pragma_table_info('schema_migrations') WHERE name = 'dummy'
+ `).Scan(&isLegacy)
+ if err != nil {
+ return err
+ } else if isLegacy {
+ _, _ = db.Exec("DROP TABLE schema_migrations;")
+ }
+ }
+
+ src, err := iofs.New(dbMigrations, "migrations")
+ if err != nil {
+ return fmt.Errorf("read db migrations: %v", err)
+ }
+ instance, err := sqlite3.WithInstance(db, &sqlite3.Config{})
+ if err != nil {
+ return fmt.Errorf("initialize migration instance: %v", err)
+ }
+ m, err := migrate.NewWithInstance("iofs", src, "encore", instance)
+ if err != nil {
+ return fmt.Errorf("setup migrate instance: %v", err)
+ }
+
+ err = m.Up()
+ if errors.Is(err, migrate.ErrNoChange) {
+ return nil
+ }
+
+ // If we have a dirty migration, reset the dirty flag and try again.
+ // This is safe since all migrations run inside transactions.
+ var dirty migrate.ErrDirty
+ if errors.As(err, &dirty) {
+ // Find the version that preceded the dirty version so
+ // we can force the migration to that version and then
+ // re-apply the migration.
+ var prevVer uint
+ prevVer, err = src.Prev(uint(dirty.Version))
+ targetVer := int(prevVer)
+ if errors.Is(err, fs.ErrNotExist) {
+ // No previous migration exists
+ targetVer = database.NilVersion
+ } else if err != nil {
+ return errors.Wrap(err, "failed to find previous version")
+ }
+
+ if err = m.Force(targetVer); err == nil {
+ err = m.Up()
+ }
+ }
+
+ return err
}
// detectSocketClose polls for the unix socket at socketPath to be removed
@@ -205,7 +423,7 @@ func detectSocketClose(ln *net.UnixListener, socketPath string) error {
for {
time.Sleep(200 * time.Millisecond)
fi, err := xos.SocketStat(socketPath)
- if os.IsNotExist(err) {
+ if errors.Is(err, fs.ErrNotExist) {
// Socket was removed; don't remove it again
return nil
} else if err != nil {
@@ -228,7 +446,7 @@ func (d *Daemon) closeOnExit(c io.Closer) {
func (d *Daemon) closeAll() {
for _, c := range d.close {
- c.Close()
+ _ = c.Close()
}
}
@@ -253,3 +471,199 @@ func handleBailout(err *error) {
}
}
}
+
+// redirectLogOutput redirects the global logger to also write to a file.
+func redirectLogOutput() error {
+ logPath := env.EncoreDaemonLogPath()
+ if err := os.MkdirAll(filepath.Dir(logPath), 0755); err != nil {
+ return err
+ }
+ f, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
+ if err != nil {
+ return err
+ }
+ log.Info().Msgf("writing output to %s", logPath)
+
+ zerolog.TimeFieldFormat = time.RFC3339Nano
+ consoleWriter := zerolog.ConsoleWriter{
+ Out: os.Stderr,
+ FieldsExclude: []string{zerolog.ErrorStackFieldName},
+ }
+ consoleWriter.FormatExtra = eerror.ZeroLogConsoleExtraFormatter
+ consoleWriter.TimeFormat = time.TimeOnly
+ zerolog.ErrorStackMarshaler = eerror.ZeroLogStackMarshaller
+ log.Logger = log.With().Caller().Stack().Logger().Output(io.MultiWriter(consoleWriter, f))
+ return nil
+}
+
+// retryingTCPListener is a TCP listener that attempts multiple times
+// to listen on a given port. It is designed to handle race conditions
+// between multiple daemon processes handing off to each other
+// and the port still being in use momentarily.
+type retryingTCPListener struct {
+ component string
+ addr netip.AddrPort
+ ctx context.Context
+ cancel func() // call to cancel ctx
+
+ // doneListening is closed when the underlying listener is open,
+ // or it gave up due to an error.
+ doneListening chan struct{}
+ underlying net.Listener
+ listenErr error
+}
+
+func listenLocalhostTCP(component string, addr netip.AddrPort) *retryingTCPListener {
+ ctx, cancel := context.WithCancel(context.Background())
+ ln := &retryingTCPListener{
+ component: component,
+ addr: addr,
+ ctx: ctx,
+ cancel: cancel,
+ doneListening: make(chan struct{}),
+ }
+ go ln.listen()
+ return ln
+}
+
+func (ln *retryingTCPListener) Accept() (net.Conn, error) {
+ select {
+ case <-ln.ctx.Done():
+ return nil, net.ErrClosed
+ case <-ln.doneListening:
+ if ln.listenErr != nil {
+ return nil, ln.listenErr
+ }
+ return ln.underlying.Accept()
+ }
+}
+
+func (ln *retryingTCPListener) Close() error {
+ ln.cancel()
+ select {
+ case <-ln.doneListening:
+ if ln.listenErr == nil {
+ return ln.underlying.Close()
+ }
+ default:
+ }
+ return nil
+}
+
+func (ln *retryingTCPListener) Addr() net.Addr {
+ return &net.TCPAddr{IP: net.IP(ln.addr.Addr().AsSlice()), Port: int(ln.addr.Port())}
+}
+
+func (ln *retryingTCPListener) ClientAddr() string {
+ // If our addr is 0.0.0.0 or the ipv6 equivalent, return 127.0.0.1 instead
+ // so that clients can connect to us.
+ if ln.addr.Addr().IsUnspecified() {
+ if ln.addr.Addr().Is6() {
+ return fmt.Sprintf("[::1]:%d", ln.addr.Port())
+ }
+ return fmt.Sprintf("127.0.0.1:%d", ln.addr.Port())
+ }
+ return ln.addr.String()
+}
+
+func (ln *retryingTCPListener) Port() int {
+ return int(ln.addr.Port())
+}
+
+func (ln *retryingTCPListener) listen() {
+ defer close(ln.doneListening)
+
+ logger := log.With().Str("component", ln.component).Int("port", ln.Port()).Logger()
+ addr := ln.addr.String()
+
+ b := backoff.NewExponentialBackOff()
+ b.InitialInterval = 50 * time.Millisecond
+ b.MaxInterval = 500 * time.Millisecond
+ b.MaxElapsedTime = 5 * time.Second
+
+ ln.listenErr = backoff.Retry(func() (err error) {
+ if err := ln.ctx.Err(); err != nil {
+ return backoff.Permanent(err)
+ }
+ ln.underlying, err = net.Listen("tcp", addr)
+ if err != nil {
+ logger.Error().Err(ln.listenErr).Msg("unable to listen, retrying")
+ }
+ return err
+ }, b)
+
+ if ln.listenErr != nil {
+ logger.Error().Err(ln.listenErr).Msg("unable to listen, giving up")
+ } else {
+ logger.Info().Msg("listening on port")
+ }
+}
+
+func parseInterface(s string) (netip.AddrPort, error) {
+ addr, portStr, _, err := splitAddrPort(s)
+ if err != nil {
+ return netip.AddrPort{}, err
+ }
+
+ port, err := strconv.ParseUint(portStr, 10, 16)
+ if err != nil {
+ return netip.AddrPort{}, err
+ }
+
+ // Is addr a valid ip? If so we're done.
+ if ip, err := netip.ParseAddr(addr); err == nil {
+ return netip.AddrPortFrom(ip, uint16(port)), nil
+ }
+
+ // Otherwise perform name resolution.
+ ips, err := net.LookupIP(addr)
+ if err != nil {
+ return netip.AddrPort{}, err
+ }
+ if len(ips) == 0 {
+ return netip.AddrPort{}, fmt.Errorf("no IP addresses found for %s", addr)
+ }
+
+ // Prefer IPv4 addresses.
+ for _, ip := range ips {
+ if ip.To4() != nil {
+ if addr, err := netip.ParseAddr(ip.String()); err == nil {
+ return netip.AddrPortFrom(addr, uint16(port)), nil
+ }
+ }
+ }
+
+ if addr, err := netip.ParseAddr(ips[0].String()); err == nil {
+ return netip.AddrPortFrom(addr, uint16(port)), nil
+ }
+ return netip.AddrPort{}, fmt.Errorf("unable to parse IP address %s", addr)
+}
+
+// splitAddrPort splits s into an IP address string and a port
+// string. It splits strings shaped like "foo:bar" or "[foo]:bar",
+// without further validating the substrings. v6 indicates whether the
+// ip string should parse as an IPv6 address or an IPv4 address, in
+// order for s to be a valid ip:port string.
+func splitAddrPort(s string) (ip, port string, v6 bool, err error) {
+ i := strings.LastIndexByte(s, ':')
+ if i == -1 {
+ return "", "", false, errors.New("not an ip:port")
+ }
+
+ ip, port = s[:i], s[i+1:]
+ if len(ip) == 0 {
+ return "", "", false, errors.New("no IP")
+ }
+ if len(port) == 0 {
+ return "", "", false, errors.New("no port")
+ }
+ if ip[0] == '[' {
+ if len(ip) < 2 || ip[len(ip)-1] != ']' {
+ return "", "", false, errors.New("missing ]")
+ }
+ ip = ip[1 : len(ip)-1]
+ v6 = true
+ }
+
+ return ip, port, v6, nil
+}
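
To illustrate the address handling above (the values are illustrative; these helpers are package-private):

// An unspecified address listens on all interfaces, while ClientAddr()
// reports a loopback address clients can actually dial.
ap, err := parseInterface("0.0.0.0:9400") // ap.Addr().IsUnspecified() == true

// Hostnames go through net.LookupIP with IPv4 preferred; a zero port is
// later replaced by the component's default in listenTCPRetry.
ap, err = parseInterface("localhost:0")
_, _ = ap, err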
diff --git a/cli/cmd/encore/daemon/migrations/1_initial_schema.up.sql b/cli/cmd/encore/daemon/migrations/1_initial_schema.up.sql
new file mode 100644
index 0000000000..d28954ba14
--- /dev/null
+++ b/cli/cmd/encore/daemon/migrations/1_initial_schema.up.sql
@@ -0,0 +1,40 @@
+CREATE TABLE IF NOT EXISTS app (
+ root TEXT PRIMARY KEY,
+ local_id TEXT NOT NULL,
+ platform_id TEXT NULL, -- NULL if not linked
+ updated_at TEXT NOT NULL
+);
+
+CREATE TABLE IF NOT EXISTS trace_event (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ app_id TEXT NOT NULL, -- platform_id or local_id
+ trace_id TEXT NOT NULL,
+ span_id TEXT NOT NULL,
+ event_data TEXT NOT NULL -- json
+);
+
+CREATE INDEX IF NOT EXISTS trace_event_span_key ON trace_event (trace_id, span_id);
+
+CREATE TABLE IF NOT EXISTS trace_span_index (
+ trace_id TEXT NOT NULL,
+ span_id TEXT NOT NULL,
+ app_id TEXT NOT NULL, -- platform_id or local_id
+ span_type INTEGER NOT NULL, -- enum
+
+ -- request fields
+ started_at INTEGER NULL, -- unix nanosecond
+ is_root BOOLEAN NULL,
+ service_name TEXT NULL,
+ endpoint_name TEXT NULL,
+ topic_name TEXT NULL,
+ subscription_name TEXT NULL,
+ message_id TEXT NULL,
+ external_request_id TEXT NULL,
+
+ -- response fields
+ has_response BOOLEAN NOT NULL,
+ is_error BOOLEAN NULL,
+ duration_nanos INTEGER NULL,
+ user_id TEXT NULL,
+ PRIMARY KEY (trace_id, span_id)
+);
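
As a sketch of how this index is meant to be queried (db, ctx, and appID are assumed from the daemon code above):

rows, err := db.QueryContext(ctx, `
	SELECT trace_id, span_id, started_at, duration_nanos
	FROM trace_span_index
	WHERE app_id = ? AND is_root = true AND has_response = true
	ORDER BY started_at DESC
	LIMIT 20`, appID)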
diff --git a/cli/cmd/encore/daemon/migrations/2_infra_namespaces.up.sql b/cli/cmd/encore/daemon/migrations/2_infra_namespaces.up.sql
new file mode 100644
index 0000000000..dcdbb05359
--- /dev/null
+++ b/cli/cmd/encore/daemon/migrations/2_infra_namespaces.up.sql
@@ -0,0 +1,12 @@
+CREATE TABLE IF NOT EXISTS namespace (
+ id TEXT PRIMARY KEY, -- uuid
+ app_id TEXT NOT NULL, -- platform_id or local_id
+ name TEXT NOT NULL,
+ active BOOL NOT NULL DEFAULT FALSE,
+ created_at TIMESTAMP NOT NULL,
+ last_active_at TIMESTAMP NULL,
+ UNIQUE (app_id, name)
+);
+
+-- Ensure there's a single active namespace per app.
+CREATE UNIQUE INDEX active_namespace ON namespace (app_id) WHERE active = true;
diff --git a/cli/cmd/encore/daemon/migrations/3_test_tracing.up.sql b/cli/cmd/encore/daemon/migrations/3_test_tracing.up.sql
new file mode 100644
index 0000000000..0ec669f547
--- /dev/null
+++ b/cli/cmd/encore/daemon/migrations/3_test_tracing.up.sql
@@ -0,0 +1,3 @@
+ALTER TABLE trace_span_index ADD COLUMN test_skipped BOOLEAN NOT NULL DEFAULT FALSE;
+ALTER TABLE trace_span_index ADD COLUMN src_file TEXT NULL;
+ALTER TABLE trace_span_index ADD COLUMN src_line INTEGER NULL;
diff --git a/cli/cmd/encore/db.go b/cli/cmd/encore/db.go
index 893fbc5e1f..bcb9b8d137 100644
--- a/cli/cmd/encore/db.go
+++ b/cli/cmd/encore/db.go
@@ -10,9 +10,14 @@ import (
"runtime"
"strings"
- daemonpb "encr.dev/proto/encore/daemon"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/cli/daemon/sqldb/docker"
+ daemonpb "encr.dev/proto/encore/daemon"
)
var dbCmd = &cobra.Command{
@@ -20,53 +25,88 @@ var dbCmd = &cobra.Command{
Short: "Database management commands",
}
-var resetAll bool
+var (
+ resetAll bool
+ testDB bool
+ shadowDB bool
+ write bool
+ admin bool
+ superuser bool
+ nsName string
+)
+
+func getDBRole() daemonpb.DBRole {
+ switch {
+ case superuser:
+ return daemonpb.DBRole_DB_ROLE_SUPERUSER
+ case admin:
+ return daemonpb.DBRole_DB_ROLE_ADMIN
+ case write:
+ return daemonpb.DBRole_DB_ROLE_WRITE
+ default:
+ return daemonpb.DBRole_DB_ROLE_READ
+ }
+}
var dbResetCmd = &cobra.Command{
- Use: "reset [servicenames...]",
- Short: "Resets the databases for the given services, or the current directory if unspecified",
+ Use: "reset <database-names...>",
+ Short: "Resets the databases with the given names. Use --all to reset all databases.",
Run: func(command *cobra.Command, args []string) {
- appRoot, relPath := determineAppRoot()
- svcNames := args
- if resetAll && len(svcNames) > 0 {
- fatal("cannot specify both --all and service names")
- }
- if !resetAll && len(svcNames) == 0 {
- pkgs, err := resolvePackages(filepath.Join(appRoot, relPath), ".")
- if err != nil {
- log.Fatal().Err(err).Msg("could not resolve packages")
+ appRoot, _ := determineAppRoot()
+ dbNames := args
+ if resetAll {
+ if len(dbNames) > 0 {
+ fatal("cannot specify both --all and database names")
+ }
+ dbNames = nil
+ } else {
+ if len(dbNames) == 0 {
+ fatal("no database names given")
}
- svcNames = []string{filepath.Base(pkgs[0])}
}
ctx := context.Background()
daemon := setupDaemon(ctx)
stream, err := daemon.DBReset(ctx, &daemonpb.DBResetRequest{
- AppRoot: appRoot,
- Services: svcNames,
+ AppRoot: appRoot,
+ DatabaseNames: dbNames,
+ ClusterType: dbClusterType(),
+ Namespace: nonZeroPtr(nsName),
})
if err != nil {
fatal("reset databases: ", err)
}
- streamCommandOutput(stream)
+ os.Exit(cmdutil.StreamCommandOutput(stream, nil))
},
}
var dbEnv string
var dbShellCmd = &cobra.Command{
- Use: "shell [service-name]",
+ Use: "shell DATABASE_NAME [--env=<env>] [--test|--shadow]",
Short: "Connects to the database via psql shell",
- Args: cobra.MaximumNArgs(1),
+ Long: `Defaults to connecting to your local environment.
+Specify --env to connect to another environment.
+
+Use --test to connect to databases used for integration testing.
+Use --shadow to connect to the shadow database, used for database drift detection
+when using tools like Prisma.
+--test and --shadow imply --env=local.
+`,
+ Args: cobra.MaximumNArgs(1),
+
+ DisableFlagsInUseLine: true,
Run: func(command *cobra.Command, args []string) {
appRoot, relPath := determineAppRoot()
ctx := context.Background()
daemon := setupDaemon(ctx)
- svcName := ""
+ dbName := ""
if len(args) > 0 {
- svcName = args[0]
+ dbName = args[0]
+ // Ignore the trailing slash to support auto-completion of directory names
+ dbName = strings.TrimSuffix(dbName, "/")
} else {
// Find the enclosing service by looking for the "migrations" folder
SvcNameLoop:
@@ -75,24 +115,31 @@ var dbShellCmd = &cobra.Command{
if _, err := os.Stat(filepath.Join(absPath, "migrations")); err == nil {
pkgs, err := resolvePackages(absPath, ".")
if err == nil && len(pkgs) > 0 {
- svcName = filepath.Base(pkgs[0])
+ dbName = filepath.Base(pkgs[0])
break SvcNameLoop
}
}
}
- if svcName == "" {
+ if dbName == "" {
fatal("could not find an Encore service with a database in this directory (or any of the parent directories).\n\n" +
- "Note: You can specify a service name to connect to it directly using the command 'encore db shell <service-name>'.")
+ "Note: You can specify a database name to connect to it directly using the command 'encore db shell <database-name>'.")
}
}
+ if testDB || shadowDB {
+ dbEnv = "local"
+ }
+
resp, err := daemon.DBConnect(ctx, &daemonpb.DBConnectRequest{
- AppRoot: appRoot,
- SvcName: svcName,
- EnvName: dbEnv,
+ AppRoot: appRoot,
+ DbName: dbName,
+ EnvName: dbEnv,
+ ClusterType: dbClusterType(),
+ Namespace: nonZeroPtr(nsName),
+ Role: getDBRole(),
})
if err != nil {
- fatalf("could not connect to the database for service %s: %v", svcName, err)
+ fatalf("could not connect to database %s: %v", dbName, err)
}
// If we have the psql binary, use that.
@@ -104,15 +151,15 @@ var dbShellCmd = &cobra.Command{
fmt.Fprintln(os.Stderr, "encore: no 'psql' executable found in $PATH; using docker to run 'psql' instead.\n\nNote: install psql to hide this message.")
dsn := resp.Dsn
- if runtime.GOOS == "darwin" {
- // Docker for Mac's networking setup requires
+ if runtime.GOOS == "darwin" || runtime.GOOS == "windows" {
+ // Docker's networking setup on macOS and Windows requires
// using "host.docker.internal" instead of "localhost"
for _, rep := range []string{"localhost", "127.0.0.1"} {
dsn = strings.Replace(dsn, rep, "host.docker.internal", -1)
}
}
- cmd = exec.Command("docker", "run", "-it", "--rm", "--network=host", "postgres", "psql", dsn)
+ cmd = exec.Command("docker", "run", "-it", "--rm", "--network=host", docker.Image, "psql", dsn)
}
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
@@ -126,13 +173,27 @@ var dbShellCmd = &cobra.Command{
log.Fatal().Err(err).Msg("psql failed")
}
},
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) > 0 {
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ }
+ return nil, cobra.ShellCompDirectiveFilterDirs
+ },
}
var dbProxyPort int32
var dbProxyCmd = &cobra.Command{
- Use: "proxy [--env=<env>]",
+ Use: "proxy [--env=<env>] [--test|--shadow]",
Short: "Sets up a proxy tunnel to the database",
+ Long: `Set up a proxy tunnel to a database for use with other tools.
+
+Use --test to connect to databases used for integration testing.
+Use --shadow to connect to the shadow database, used for database drift detection
+when using tools like Prisma.
+
+--test and --shadow imply --env=local.
+`,
Run: func(command *cobra.Command, args []string) {
appRoot, _ := determineAppRoot()
@@ -145,76 +206,142 @@ var dbProxyCmd = &cobra.Command{
cancel()
}()
+ if testDB || shadowDB {
+ dbEnv = "local"
+ }
+
daemon := setupDaemon(ctx)
stream, err := daemon.DBProxy(ctx, &daemonpb.DBProxyRequest{
- AppRoot: appRoot,
- EnvName: dbEnv,
- Port: dbProxyPort,
+ AppRoot: appRoot,
+ EnvName: dbEnv,
+ Port: dbProxyPort,
+ ClusterType: dbClusterType(),
+ Namespace: nonZeroPtr(nsName),
+ Role: getDBRole(),
})
if err != nil {
log.Fatal().Err(err).Msg("could not setup db proxy")
}
- streamCommandOutput(stream)
+ os.Exit(cmdutil.StreamCommandOutput(stream, nil))
},
}
var dbConnURICmd = &cobra.Command{
- Use: "conn-uri [servicename]",
+ Use: "conn-uri [<database-name>] [--test|--shadow]",
Short: "Outputs the database connection string",
- Args: cobra.MaximumNArgs(1),
+ Long: `Retrieve a stable connection URI for connecting to a database.
+
+Use --test to connect to databases used for integration testing.
+Use --shadow to connect to the shadow database, used for database drift detection
+when using tools like Prisma.
+
+--test and --shadow imply --env=local.
+`,
+ Args: cobra.MaximumNArgs(1),
Run: func(command *cobra.Command, args []string) {
appRoot, relPath := determineAppRoot()
ctx := context.Background()
daemon := setupDaemon(ctx)
- svcName := ""
+ dbName := ""
if len(args) > 0 {
- svcName = args[0]
+ dbName = args[0]
} else {
// Find the enclosing service by looking for the "migrations" folder
- SvcNameLoop:
+ DBNameLoop:
for p := relPath; p != "."; p = filepath.Dir(p) {
absPath := filepath.Join(appRoot, p)
if _, err := os.Stat(filepath.Join(absPath, "migrations")); err == nil {
pkgs, err := resolvePackages(absPath, ".")
if err == nil && len(pkgs) > 0 {
- svcName = filepath.Base(pkgs[0])
- break SvcNameLoop
+ dbName = filepath.Base(pkgs[0])
+ break DBNameLoop
}
}
}
- if svcName == "" {
+ if dbName == "" {
fatal("could not find Encore service with a database in this directory (or any parent directory).\n\n" +
"Note: You can specify a service name to connect to it directly using the command 'encore db conn-uri '.")
}
}
+ if testDB || shadowDB {
+ dbEnv = "local"
+ }
+
resp, err := daemon.DBConnect(ctx, &daemonpb.DBConnectRequest{
- AppRoot: appRoot,
- SvcName: svcName,
- EnvName: dbEnv,
+ AppRoot: appRoot,
+ DbName: dbName,
+ EnvName: dbEnv,
+ ClusterType: dbClusterType(),
+ Namespace: nonZeroPtr(nsName),
+ Role: getDBRole(),
})
if err != nil {
- fatalf("could not connect to the database for service %s: %v", svcName, err)
+ st, ok := status.FromError(err)
+ if ok {
+ if st.Code() == codes.NotFound {
+ fatalf("no such database found: %s", dbName)
+ }
+ }
+ fatalf("could not connect to database %s: %v", dbName, err)
}
- fmt.Fprintln(os.Stdout, resp.Dsn)
+ _, _ = fmt.Fprintln(os.Stdout, resp.Dsn)
},
}
func init() {
rootCmd.AddCommand(dbCmd)
+ dbResetCmd.Flags().StringVarP(&nsName, "namespace", "n", "", "Namespace to use (defaults to active namespace)")
dbResetCmd.Flags().BoolVar(&resetAll, "all", false, "Reset all databases in the application")
+ dbResetCmd.Flags().BoolVarP(&testDB, "test", "t", false, "Reset databases in the test cluster instead")
+ dbResetCmd.Flags().BoolVar(&shadowDB, "shadow", false, "Reset databases in the shadow cluster instead")
dbCmd.AddCommand(dbResetCmd)
+ dbShellCmd.Flags().StringVarP(&nsName, "namespace", "n", "", "Namespace to use (defaults to active namespace)")
dbShellCmd.Flags().StringVarP(&dbEnv, "env", "e", "local", "Environment name to connect to (such as \"prod\")")
+ dbShellCmd.Flags().BoolVarP(&testDB, "test", "t", false, "Connect to the integration test database (implies --env=local)")
+ dbShellCmd.Flags().BoolVar(&shadowDB, "shadow", false, "Connect to the shadow database (implies --env=local)")
+ dbShellCmd.Flags().BoolVar(&write, "write", false, "Connect with write privileges")
+ dbShellCmd.Flags().BoolVar(&admin, "admin", false, "Connect with admin privileges")
+ dbShellCmd.Flags().BoolVar(&superuser, "superuser", false, "Connect as a superuser")
+ dbShellCmd.MarkFlagsMutuallyExclusive("write", "admin", "superuser")
dbCmd.AddCommand(dbShellCmd)
+ dbProxyCmd.Flags().StringVarP(&nsName, "namespace", "n", "", "Namespace to use (defaults to active namespace)")
dbProxyCmd.Flags().StringVarP(&dbEnv, "env", "e", "local", "Environment name to connect to (such as \"prod\")")
dbProxyCmd.Flags().Int32VarP(&dbProxyPort, "port", "p", 0, "Port to listen on (defaults to a random port)")
+ dbProxyCmd.Flags().BoolVarP(&testDB, "test", "t", false, "Connect to the integration test database (implies --env=local)")
+ dbProxyCmd.Flags().BoolVar(&shadowDB, "shadow", false, "Connect to the shadow database (implies --env=local)")
+ dbProxyCmd.Flags().BoolVar(&write, "write", false, "Connect with write privileges")
+ dbProxyCmd.Flags().BoolVar(&admin, "admin", false, "Connect with admin privileges")
+ dbProxyCmd.Flags().BoolVar(&superuser, "superuser", false, "Connect as a superuser")
+ dbProxyCmd.MarkFlagsMutuallyExclusive("write", "admin", "superuser")
dbCmd.AddCommand(dbProxyCmd)
+ dbConnURICmd.Flags().StringVarP(&nsName, "namespace", "n", "", "Namespace to use (defaults to active namespace)")
dbConnURICmd.Flags().StringVarP(&dbEnv, "env", "e", "local", "Environment name to connect to (such as \"prod\")")
+ dbConnURICmd.Flags().BoolVarP(&testDB, "test", "t", false, "Connect to the integration test database (implies --env=local)")
+ dbConnURICmd.Flags().BoolVar(&shadowDB, "shadow", false, "Connect to the shadow database (implies --env=local)")
+ dbConnURICmd.Flags().BoolVar(&write, "write", false, "Connect with write privileges")
+ dbConnURICmd.Flags().BoolVar(&admin, "admin", false, "Connect with admin privileges")
+ dbConnURICmd.Flags().BoolVar(&superuser, "superuser", false, "Connect as a superuser")
+ dbConnURICmd.MarkFlagsMutuallyExclusive("write", "admin", "superuser")
dbCmd.AddCommand(dbConnURICmd)
}
+
+func dbClusterType() daemonpb.DBClusterType {
+ if testDB && shadowDB {
+ fatal("cannot specify both --test and --shadow")
+ }
+ switch {
+ case testDB:
+ return daemonpb.DBClusterType_DB_CLUSTER_TYPE_TEST
+ case shadowDB:
+ return daemonpb.DBClusterType_DB_CLUSTER_TYPE_SHADOW
+ default:
+ return daemonpb.DBClusterType_DB_CLUSTER_TYPE_RUN
+ }
+}
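+
+// Illustrative invocations of the commands above (database, namespace and
+// environment names are example values):
+//
+//	encore db shell mydb --env=prod        # read-only psql shell against prod
+//	encore db shell mydb --test --write    # write access in the test cluster
+//	encore db conn-uri mydb --shadow       # DSN for the shadow database
+//	encore db reset --all -n mynamespace   # reset all databases in a namespace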
diff --git a/cli/cmd/encore/debug.go b/cli/cmd/encore/debug.go
new file mode 100644
index 0000000000..5393e043d8
--- /dev/null
+++ b/cli/cmd/encore/debug.go
@@ -0,0 +1,83 @@
+package main
+
+import (
+ "context"
+ "os"
+ "os/signal"
+
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ daemonpb "encr.dev/proto/encore/daemon"
+)
+
+func init() {
+ debugCmd := &cobra.Command{
+ Use: "debug",
+ Short: "debug is a collection of debug commands",
+ Hidden: true,
+ }
+
+ format := cmdutil.Oneof{
+ Value: "proto",
+ Allowed: []string{"proto", "json"},
+ Flag: "format",
+ FlagShort: "f",
+ Desc: "Output format",
+ }
+
+ toFormat := func() daemonpb.DumpMetaRequest_Format {
+ switch format.Value {
+ case "proto":
+ return daemonpb.DumpMetaRequest_FORMAT_PROTO
+ case "json":
+ return daemonpb.DumpMetaRequest_FORMAT_JSON
+ default:
+ return daemonpb.DumpMetaRequest_FORMAT_UNSPECIFIED
+ }
+ }
+
+ var p dumpMetaParams
+ dumpMetaCmd := &cobra.Command{
+ Use: "meta",
+ Short: "Outputs the parsed metadata",
+ Args: cobra.NoArgs,
+ Run: func(cmd *cobra.Command, args []string) {
+ p.AppRoot, p.WorkingDir = determineAppRoot()
+ p.Environ = os.Environ()
+ p.Format = toFormat()
+ dumpMeta(p)
+ },
+ }
+
+ format.AddFlag(dumpMetaCmd)
+ dumpMetaCmd.Flags().BoolVar(&p.ParseTests, "tests", false, "Parse tests as well")
+ rootCmd.AddCommand(debugCmd)
+ debugCmd.AddCommand(dumpMetaCmd)
+}
+
+type dumpMetaParams struct {
+ AppRoot string
+ WorkingDir string
+ ParseTests bool
+ Format daemonpb.DumpMetaRequest_Format
+ Environ []string
+}
+
+func dumpMeta(p dumpMetaParams) {
+ ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
+ defer cancel()
+
+ daemon := setupDaemon(ctx)
+ resp, err := daemon.DumpMeta(ctx, &daemonpb.DumpMetaRequest{
+ AppRoot: p.AppRoot,
+ WorkingDir: p.WorkingDir,
+ ParseTests: p.ParseTests,
+ Environ: p.Environ,
+ Format: p.Format,
+ })
+ if err != nil {
+ fatal(err)
+ }
+ _, _ = os.Stdout.Write(resp.Meta)
+}
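+
+// Illustrative usage of the hidden debug command (file names are examples):
+//
+//	encore debug meta --format=json > meta.json
+//	encore debug meta -f proto --tests > meta.pb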
diff --git a/cli/cmd/encore/deploy.go b/cli/cmd/encore/deploy.go
new file mode 100644
index 0000000000..02db03158a
--- /dev/null
+++ b/cli/cmd/encore/deploy.go
@@ -0,0 +1,104 @@
+package main
+
+import (
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ "github.com/cockroachdb/errors"
+ "github.com/logrusorgru/aurora/v3"
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/cli/internal/platform"
+ "encr.dev/pkg/appfile"
+)
+
+var (
+ appSlug string
+ envName string
+ commit string
+ branch string
+ format = cmdutil.Oneof{
+ Value: "text",
+ Allowed: []string{"text", "json"},
+ Flag: "format",
+ FlagShort: "f",
+ Desc: "Output format",
+ }
+)
+
+var deployAppCmd = &cobra.Command{
+ Use: "deploy --commit COMMIT_SHA | --branch BRANCH_NAME",
+ Short: "Deploy an Encore app to a cloud environment",
+ DisableFlagsInUseLine: true,
+ Run: func(c *cobra.Command, args []string) {
+ if commit != "" {
+ hb, err := hex.DecodeString(commit)
+ if err != nil || len(hb) != 20 {
+ cmdutil.Fatalf("invalid commit: %s", commit)
+ }
+ }
+ if appSlug == "" {
+ appRoot, _, err := cmdutil.MaybeAppRoot()
+ if err != nil {
+ cmdutil.Fatalf("no app found. Run deploy inside an encore app directory or specify the app with --app")
+ }
+ appSlug, err = appfile.Slug(appRoot)
+ if err != nil {
+ cmdutil.Fatalf("no app found. Run deploy inside an encore app directory or specify the app with --app")
+ }
+ }
+ rollout, err := platform.Deploy(c.Context(), appSlug, envName, commit, branch)
+ var pErr platform.Error
+ if ok := errors.As(err, &pErr); ok {
+ switch pErr.Code {
+ case "app_not_found":
+ cmdutil.Fatalf("app not found: %s", appSlug)
+ case "validation":
+ var details platform.ValidationDetails
+ err := json.Unmarshal(pErr.Detail, &details)
+ if err != nil {
+ cmdutil.Fatalf("failed to deploy: %v", err)
+ }
+ switch details.Field {
+ case "commit":
+ cmdutil.Fatalf("could not find commit: %s. Is it pushed to the remote repository?", commit)
+ case "branch":
+ cmdutil.Fatalf("could not find branch: %s. Is it pushed to the remote repository?", branch)
+ case "env":
+ cmdutil.Fatalf("could not find environment: %s/%s", appSlug, envName)
+ }
+ }
+ }
+ if err != nil {
+ cmdutil.Fatalf("failed to deploy: %v", err)
+ }
+ url := fmt.Sprintf("https://app.encore.cloud/%s/deploys/%s/%s", appSlug, rollout.EnvName, strings.TrimPrefix(rollout.ID, "roll_"))
+ switch format.Value {
+ case "text":
+ fmt.Println(aurora.Sprintf("\n%s %s\n", aurora.Bold("Started Deploy:"), url))
+ case "json":
+ output, _ := json.Marshal(map[string]string{
+ "id": strings.TrimPrefix(rollout.ID, "roll_"),
+ "env": rollout.EnvName,
+ "app": appSlug,
+ "url": url,
+ })
+ fmt.Println(string(output))
+ }
+ },
+}
+
+func init() {
+ alphaCmd.AddCommand(deployAppCmd)
+ deployAppCmd.Flags().StringVar(&appSlug, "app", "", "app slug to deploy to (default current app)")
+ deployAppCmd.Flags().StringVarP(&envName, "env", "e", "", "environment to deploy to (default primary env)")
+ deployAppCmd.Flags().StringVar(&commit, "commit", "", "commit to deploy")
+ deployAppCmd.Flags().StringVar(&branch, "branch", "", "branch to deploy")
+ format.AddFlag(deployAppCmd)
+ _ = deployAppCmd.MarkFlagRequired("env")
+ deployAppCmd.MarkFlagsMutuallyExclusive("commit", "branch")
+ deployAppCmd.MarkFlagsOneRequired("commit", "branch")
+}
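+
+// Illustrative invocations (app slug, environment and branch are examples;
+// --commit requires a full 40-character hex SHA):
+//
+//	encore alpha deploy --env=prod --branch=main
+//	encore alpha deploy --app=my-app --env=staging --commit=<full-sha> -f json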
diff --git a/cli/cmd/encore/exec.go b/cli/cmd/encore/exec.go
new file mode 100644
index 0000000000..6ecaca198c
--- /dev/null
+++ b/cli/cmd/encore/exec.go
@@ -0,0 +1,82 @@
+package main
+
+import (
+ "context"
+ "os"
+ "os/signal"
+
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/cli/cmd/encore/root"
+ daemonpb "encr.dev/proto/encore/daemon"
+)
+
+var execCmd = &cobra.Command{
+ Use: "exec path/to/script [args...]",
+ Short: "Runs executable scripts against the local Encore app",
+ Run: func(cmd *cobra.Command, args []string) {
+ if len(args) == 0 {
+ args = []string{"."} // current directory
+ }
+ appRoot, wd := determineAppRoot()
+ execScript(appRoot, wd, args)
+ },
+}
+var execCmdAlpha = &cobra.Command{
+ Use: "exec path/to/script [args...]",
+ Short: "Runs executable scripts against the local Encore app",
+ Hidden: true,
+ Deprecated: "use \"encore exec\" instead",
+ Run: func(cmd *cobra.Command, args []string) {
+ if len(args) == 0 {
+ args = []string{"."} // current directory
+ }
+ appRoot, wd := determineAppRoot()
+ execScript(appRoot, wd, args)
+ },
+}
+
+func execScript(appRoot, relWD string, args []string) {
+ interrupt := make(chan os.Signal, 1)
+ signal.Notify(interrupt, os.Interrupt)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ <-interrupt
+ cancel()
+ }()
+
+ daemon := setupDaemon(ctx)
+ stream, err := daemon.ExecScript(ctx, &daemonpb.ExecScriptRequest{
+ AppRoot: appRoot,
+ WorkingDir: relWD,
+ ScriptArgs: args,
+ Environ: os.Environ(),
+ TraceFile: root.TraceFile,
+ Namespace: nonZeroPtr(nsName),
+ })
+ if err != nil {
+ fatal(err)
+ }
+
+ cmdutil.ClearTerminalExceptFirstNLines(1)
+ code := cmdutil.StreamCommandOutput(stream, cmdutil.ConvertJSONLogs())
+ os.Exit(code)
+}
+
+var alphaCmd = &cobra.Command{
+ Use: "alpha",
+ Short: "Pre-release functionality in alpha stage",
+ Hidden: true,
+}
+
+func init() {
+ rootCmd.AddCommand(alphaCmd)
+}
+
+func init() {
+ execCmd.Flags().StringVarP(&nsName, "namespace", "n", "", "Namespace to use (defaults to active namespace)")
+ alphaCmd.AddCommand(execCmdAlpha)
+ rootCmd.AddCommand(execCmd)
+}
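+
+// Illustrative usage (script path and namespace are examples):
+//
+//	encore exec ./scripts/seed              # run against the active namespace
+//	encore exec ./scripts/seed -n staging   # run against the "staging" namespace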
diff --git a/cli/cmd/encore/gen.go b/cli/cmd/encore/gen.go
index b18cac212b..7f9718c213 100644
--- a/cli/cmd/encore/gen.go
+++ b/cli/cmd/encore/gen.go
@@ -2,14 +2,18 @@ package main
import (
"context"
- "io/ioutil"
+ "errors"
+ "fmt"
"os"
- "path/filepath"
- "strings"
"time"
- daemonpb "encr.dev/proto/encore/daemon"
"github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/cli/internal/manifest"
+ "encr.dev/pkg/appfile"
+ "encr.dev/pkg/clientgen"
+ daemonpb "encr.dev/proto/encore/daemon"
)
func init() {
@@ -20,64 +24,187 @@ func init() {
rootCmd.AddCommand(genCmd)
var (
- output string
- lang string
- envName string
+ output string
+ lang string
+ envName string
+ genServiceNames []string
+ excludedServices []string
+ endpointTags []string
+ excludedEndpointTags []string
+ openAPIExcludePrivateEndpoints bool
+ tsSharedTypes bool
+ target string
+ tsDefaultClient string
)
genClientCmd := &cobra.Command{
- Use: "client <app-id>",
+ Use: "client [<app-id>] [--env=<env>] [--services=foo,bar] [--excluded-services=baz,qux] [--tags=cache,mobile] [--excluded-tags=internal] [--openapi-exclude-private-endpoints]",
Short: "Generates an API client for your app",
- Args: cobra.ExactArgs(1),
+ Long: `Generates an API client for your app.
+
+By default generates the API based on your local environment.
+Use '--env=<env>' to generate it based on your cloud environments.
+
+Supported language codes are:
+ typescript: A TypeScript client using the Fetch API
+ javascript: A JavaScript client using the Fetch API
+ go: A Go client using net/http
+ openapi: An OpenAPI specification (EXPERIMENTAL)
+
+By default all services with a non-private API endpoint are included.
+To further narrow down the services to generate, use the '--services' flag.
+`,
+ Args: cobra.MaximumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
+ if target == "leap" {
+ lang = "typescript"
+ tsDefaultClient = "import.meta.env.VITE_CLIENT_TARGET"
+ if output == "" {
+ output = "../frontend/client.ts"
+ }
+ excludedServices = append(excludedServices, "frontend")
+ tsSharedTypes = true
+ }
+
if output == "" && lang == "" {
fatal("specify at least one of --output or --lang.")
}
- appID := args[0]
+
+ // Determine the app id, either from the argument or from the current directory.
+ var appID string
+ if len(args) == 0 {
+ // First check the encore.app file.
+ appRoot, _, err := cmdutil.MaybeAppRoot()
+ if err != nil && !errors.Is(err, cmdutil.ErrNoEncoreApp) {
+ fatal(err)
+ } else if appRoot != "" {
+ if slug, err := appfile.Slug(appRoot); err == nil {
+ appID = slug
+ }
+ }
+
+ // If we still don't have an app id, read it from the manifest.
+ if appID == "" {
+ mf, err := manifest.ReadOrCreate(appRoot)
+ if err != nil {
+ fatal(err)
+ }
+ appID = mf.AppID
+ if appID == "" {
+ appID = mf.LocalID
+ }
+ }
+ } else {
+ appID = args[0]
+ }
if lang == "" {
var ok bool
- lang, ok = detectLang(output)
+ l, ok := clientgen.Detect(output)
if !ok {
fatal("could not detect language from output.\n\nNote: you can specify the language explicitly with --lang.")
}
+ lang = string(l)
+ } else {
+ // Validate the user input for the language
+ l, err := clientgen.GetLang(lang)
+ if err != nil {
+ fatal(fmt.Sprintf("%s: supported languages are `typescript`, `javascript`, `go` and `openapi`", err))
+ }
+ lang = string(l)
}
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
daemon := setupDaemon(ctx)
+
+ if genServiceNames == nil {
+ genServiceNames = []string{"*"}
+ }
+
resp, err := daemon.GenClient(ctx, &daemonpb.GenClientRequest{
- AppId: appID,
- EnvName: envName,
- Lang: lang,
+ AppId: appID,
+ EnvName: envName,
+ Lang: lang,
+ Services: genServiceNames,
+ ExcludedServices: excludedServices,
+ EndpointTags: endpointTags,
+ ExcludedEndpointTags: excludedEndpointTags,
+ OpenapiExcludePrivateEndpoints: &openAPIExcludePrivateEndpoints,
+ TsSharedTypes: &tsSharedTypes,
+ TsClientTarget: &tsDefaultClient,
})
if err != nil {
fatal(err)
}
if output == "" {
- os.Stdout.Write(resp.Code)
+ _, _ = os.Stdout.Write(resp.Code)
} else {
- if err := ioutil.WriteFile(output, resp.Code, 0755); err != nil {
+ if err := os.WriteFile(output, resp.Code, 0755); err != nil {
fatal(err)
}
}
},
+
+ ValidArgsFunction: cmdutil.AutoCompleteAppSlug,
+ }
+
+ genWrappersCmd := &cobra.Command{
+ Use: "wrappers",
+ Short: "Generates user-facing wrapper code",
+ Long: `Manually regenerates user-facing wrapper code.
+
+This is typically not something you ever need to call during regular development,
+as Encore automatically regenerates the wrappers whenever the code-base changes.
+
+Its core use case is for CI/CD workflows where you want to run custom linters,
+which may require the user-facing wrapper code to be manually generated.`,
+ Args: cobra.ExactArgs(0),
+ Run: func(cmd *cobra.Command, args []string) {
+ appRoot, _ := determineAppRoot()
+ ctx := context.Background()
+ daemon := setupDaemon(ctx)
+ _, err := daemon.GenWrappers(ctx, &daemonpb.GenWrappersRequest{
+ AppRoot: appRoot,
+ })
+ if err != nil {
+ fatal(err)
+ } else {
+ fmt.Println("successfully generated encore wrappers.")
+ }
+ },
}
genCmd.AddCommand(genClientCmd)
+ genCmd.AddCommand(genWrappersCmd)
+
+ genClientCmd.Flags().StringVarP(&lang, "lang", "l", "", "The language to generate code for (\"typescript\", \"javascript\", \"go\", and \"openapi\" are supported)")
+ _ = genClientCmd.RegisterFlagCompletionFunc("lang", cmdutil.AutoCompleteFromStaticList(
+ "typescript\tA TypeScript client using the in-browser Fetch API",
+ "javascript\tA JavaScript client using the in-browser Fetch API",
+ "go\tA Go client using net/http",
+ "openapi\tAn OpenAPI specification",
+ ))
+
genClientCmd.Flags().StringVarP(&output, "output", "o", "", "The filename to write the generated client code to")
- genClientCmd.Flags().StringVarP(&lang, "lang", "l", "", "The language to generate code for (only \"ts\" is supported for now)")
- genClientCmd.Flags().StringVarP(&envName, "env", "e", "", "The environment to fetch the API for (defaults to the primary environment)")
-}
+ _ = genClientCmd.MarkFlagFilename("output", "go", "ts", "tsx", "js", "jsx")
-func detectLang(path string) (string, bool) {
- suffix := strings.ToLower(filepath.Ext(path))
- switch suffix {
- case ".ts":
- return "typescript", true
- default:
- return "", false
- }
+ genClientCmd.Flags().StringVarP(&envName, "env", "e", "local", "The environment to fetch the API for (defaults to the local environment)")
+ _ = genClientCmd.RegisterFlagCompletionFunc("env", cmdutil.AutoCompleteEnvSlug)
+
+ genClientCmd.Flags().StringSliceVarP(&genServiceNames, "services", "s", nil, "The names of the services to include in the output")
+ genClientCmd.Flags().StringSliceVarP(&excludedServices, "excluded-services", "x", nil, "The names of the services to exclude in the output")
+ genClientCmd.Flags().StringSliceVarP(&endpointTags, "tags", "t", nil, "The names of endpoint tags to include in the output")
+ genClientCmd.Flags().
+ StringSliceVar(&excludedEndpointTags, "excluded-tags", nil, "The names of endpoint tags to exclude in the output")
+ genClientCmd.Flags().
+ BoolVar(&openAPIExcludePrivateEndpoints, "openapi-exclude-private-endpoints", false, "Exclude private endpoints from the OpenAPI spec")
+ genClientCmd.Flags().
+ BoolVar(&tsSharedTypes, "ts:shared-types", false, "Import types from ~backend instead of re-generating them")
+ genClientCmd.Flags().StringVar(&target, "target", "", "An optional target for the client (\"leap\")")
+ _ = genClientCmd.RegisterFlagCompletionFunc("target", cmdutil.AutoCompleteFromStaticList(
+ "leap\tA TypeScript client for apps created with Leap (https://leap.new) ",
+ ))
}
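+
+// Illustrative invocations (app id, file and service names are examples):
+//
+//	encore gen client --lang=typescript --output=frontend/client.ts
+//	encore gen client my-app-id --lang=openapi --env=prod -o openapi.json
+//	encore gen client --services=users,orders --excluded-tags=internal -o client.go
+//	encore gen wrappers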
diff --git a/cli/cmd/encore/init_windows.go b/cli/cmd/encore/init_windows.go
new file mode 100644
index 0000000000..2578b7a25f
--- /dev/null
+++ b/cli/cmd/encore/init_windows.go
@@ -0,0 +1,23 @@
+//go:build windows
+// +build windows
+
+package main
+
+import (
+ "golang.org/x/sys/windows"
+)
+
+// init activates the virtual terminal feature on Windows, which enables
+// colored terminal output.
+func init() {
+ setConsoleMode(windows.Stdout, windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
+ setConsoleMode(windows.Stderr, windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
+}
+
+// setConsoleMode adds the given flag to the console mode of the given handle (stdout and stderr above).
+func setConsoleMode(handle windows.Handle, flag uint32) {
+ var mode uint32
+ if err := windows.GetConsoleMode(handle, &mode); err == nil {
+ windows.SetConsoleMode(handle, mode|flag)
+ }
+}
diff --git a/cli/cmd/encore/k8s/auth.go b/cli/cmd/encore/k8s/auth.go
new file mode 100644
index 0000000000..cb8ac804c6
--- /dev/null
+++ b/cli/cmd/encore/k8s/auth.go
@@ -0,0 +1,56 @@
+package k8s
+
+import (
+ "encoding/json"
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/cli/cmd/encore/k8s/types"
+ "encr.dev/internal/conf"
+)
+
+var genAuthCmd = &cobra.Command{
+ Use: "exec-credentials",
+ Short: "Used by kubectl to get an authentication token for the Encore Kubernetes Proxy",
+ Args: cobra.NoArgs,
+ Hidden: true,
+ DisableFlagsInUseLine: true,
+ Run: func(cmd *cobra.Command, args []string) { generateExecCredentials() },
+}
+
+func init() {
+ kubernetesCmd.AddCommand(genAuthCmd)
+}
+
+// generateExecCredentials generates the Kubernetes exec credentials and writes them to stdout.
+//
+// If an error occurs, it is written to stderr and the program exits with a non-zero exit code.
+func generateExecCredentials() {
+ // Get the OAuth token from the Encore API
+ token, err := conf.DefaultTokenSource.Token()
+ if err != nil {
+ cmdutil.Fatalf("error getting token: %v", err)
+ }
+
+ // Generate the Kubernetes exec credential data structures
+ expiryTime := types.NewTime(token.Expiry)
+ execCredentials := &types.ExecCredential{
+ TypeMeta: types.TypeMeta{
+ APIVersion: "client.authentication.k8s.io/v1",
+ Kind: "ExecCredential",
+ },
+ Status: &types.ExecCredentialStatus{
+ Token: token.AccessToken,
+ ExpirationTimestamp: &expiryTime,
+ },
+ }
+
+ // Marshal the exec credentials to JSON and write to stdout
+ output, err := json.MarshalIndent(execCredentials, "", " ")
+ if err != nil {
+ cmdutil.Fatalf("error marshalling exec credentials: %v", err)
+ }
+ _, _ = os.Stdout.Write(output)
+}
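+
+// The document written to stdout has the ExecCredential shape, for example
+// (illustrative token and timestamp):
+//
+//	{
+//	  "apiVersion": "client.authentication.k8s.io/v1",
+//	  "kind": "ExecCredential",
+//	  "spec": { "interactive": false },
+//	  "status": {
+//	    "token": "<access-token>",
+//	    "expirationTimestamp": "2030-01-01T00:00:00Z"
+//	  }
+//	}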
diff --git a/cli/cmd/encore/k8s/config.go b/cli/cmd/encore/k8s/config.go
new file mode 100644
index 0000000000..a5b0834134
--- /dev/null
+++ b/cli/cmd/encore/k8s/config.go
@@ -0,0 +1,279 @@
+package k8s
+
+import (
+ "context"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "slices"
+ "strings"
+ "text/tabwriter"
+ "time"
+
+ "github.com/cockroachdb/errors"
+ "github.com/fatih/color"
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/cli/cmd/encore/k8s/types"
+ "encr.dev/cli/internal/platform"
+ "encr.dev/internal/conf"
+ "encr.dev/pkg/xos"
+
+ "sigs.k8s.io/yaml"
+)
+
+var configCmd = &cobra.Command{
+ Use: "configure --env=ENV_NAME",
+ Short: "Updates your kubectl config to point to the Kubernetes cluster(s) for the specified environment",
+ Run: func(cmd *cobra.Command, args []string) {
+ appSlug := cmdutil.AppSlug()
+ ctx, cancel := context.WithTimeout(cmd.Context(), 5*time.Second)
+ defer cancel()
+
+ if k8sEnvName == "" {
+ _ = cmd.Help()
+ cmdutil.Fatal("must specify environment name with --env")
+ }
+
+ err := configureForAppEnv(ctx, appSlug, k8sEnvName)
+ if err != nil {
+ cmdutil.Fatalf("error configuring kubectl: %v", err)
+ }
+ },
+}
+
+var (
+ k8sEnvName string
+)
+
+func init() {
+ configCmd.Flags().StringVarP(&k8sEnvName, "env", "e", "", "Environment name")
+ _ = configCmd.MarkFlagRequired("env")
+ kubernetesCmd.AddCommand(configCmd)
+}
+
+func configureForAppEnv(ctx context.Context, appID string, envName string) error {
+ appSlug, envName, clusters, err := platform.KubernetesClusters(ctx, appID, envName)
+ if err != nil {
+ return errors.Wrap(err, "unable to get Kubernetes clusters for environment")
+ }
+ if len(clusters) == 0 {
+ return errors.New("no Kubernetes clusters found for environment")
+ }
+
+ // Read the existing kubeconfig file
+ configFilePath := filepath.Join(types.HomeDir(), ".kube", "config")
+ cfg, err := readKubeConfig(configFilePath)
+ if err != nil {
+ return err
+ }
+
+ // Add the clusters
+ contextPrefix := fmt.Sprintf("encore_%s_%s", appSlug, envName)
+ authName := "encore-proxy-auth"
+ contextNames := make([]string, len(clusters))
+ for i, cluster := range clusters {
+ // Create a context name for the cluster.
+ // By default we use the app slug and env name separated by underscores (e.g. encore_myapp_prod);
+ // however, if the environment has multiple clusters we also include the cluster name (e.g. encore_myapp_prod_cluster1).
+ contextName := contextPrefix
+ if len(clusters) > 1 {
+ contextName += "_" + cluster.Name
+ }
+ contextNames[i] = contextName
+
+ // Add the cluster using the cluster name as the context name
+ cfg.clusters = appendOrUpdate(cfg.clusters, map[string]any{
+ "name": contextName,
+ "cluster": map[string]any{
+ "server": fmt.Sprintf("%s/k8s-api-proxy/%s/%s/", conf.APIBaseURL, cluster.EnvID, cluster.ResID),
+ },
+ })
+
+ k8sContext := map[string]any{
+ "cluster": contextName,
+ "user": authName,
+ }
+ if cluster.DefaultNamespace != "" {
+ k8sContext["namespace"] = cluster.DefaultNamespace
+ }
+
+ cfg.contexts = appendOrUpdate(cfg.contexts, map[string]any{
+ "name": contextName,
+ "context": k8sContext,
+ })
+ }
+
+ // Remove any old contexts or clusters
+ // We do this by iterating over the existing contexts and clusters and removing any that are not in the new list
+ for i := len(cfg.contexts) - 1; i >= 0; i-- {
+ if foundContext, ok := cfg.contexts[i].(map[string]any); ok {
+ if contextName, ok := foundContext["name"].(string); ok {
+ if strings.HasPrefix(contextName, contextPrefix) && !slices.Contains(contextNames, contextName) {
+ cfg.contexts = append(cfg.contexts[:i], cfg.contexts[i+1:]...)
+ }
+ }
+ }
+ }
+ for i := len(cfg.clusters) - 1; i >= 0; i-- {
+ if foundCluster, ok := cfg.clusters[i].(map[string]any); ok {
+ if clusterName, ok := foundCluster["name"].(string); ok {
+ if strings.HasPrefix(clusterName, contextPrefix) && !slices.Contains(contextNames, clusterName) {
+ cfg.clusters = append(cfg.clusters[:i], cfg.clusters[i+1:]...)
+ }
+ }
+ }
+ }
+
+ // Ensure the shared "encore-proxy-auth" user exists and is up to date.
+ cfg.users = appendOrUpdate(cfg.users, map[string]any{
+ "name": authName,
+ "user": map[string]any{
+ "exec": map[string]any{
+ "apiVersion": "client.authentication.k8s.io/v1",
+ "args": []string{"kubernetes", "exec-credentials"},
+ "command": "encore",
+ "env": nil,
+ "installHint": "Install encore for use with kubectl, see https://encore.dev",
+ "interactiveMode": "Never",
+ "provideClusterInfo": false,
+ },
+ },
+ })
+
+ // Update the current context to the first cluster for the environment
+ cfg.raw["current-context"] = contextNames[0]
+
+ if err := writeKubeConfig(configFilePath, cfg); err != nil {
+ return err
+ }
+
+ if len(clusters) == 1 {
+ _, _ = fmt.Fprintf(os.Stdout, "kubectl configured for cluster %s under context %s.\n", color.CyanString(clusters[0].Name), color.CyanString(contextNames[0]))
+ } else {
+ _, _ = fmt.Fprintf(os.Stdout, "kubectl configured for %d clusters:\n\n", len(clusters))
+
+ w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', tabwriter.StripEscape)
+ _, _ = fmt.Fprint(w, "CLUSTER\tCONTEXT\tACTIVE\n")
+ for i, cluster := range clusters {
+ active := ""
+ if i == 0 {
+ active = "yes"
+ }
+ _, _ = fmt.Fprintf(w, "%s\t%s\t%s\n", cluster.Name, contextNames[i], active)
+ }
+ _ = w.Flush()
+ }
+
+ return nil
+}
+
+// readKubeConfig reads the existing kubeconfig file and returns a Cfg struct.
+// It is kept as untyped as possible so that we can marshal it back without losing any data.
+func readKubeConfig(file string) (*Cfg, error) {
+ b, err := os.ReadFile(file)
+ if err != nil && !errors.Is(err, fs.ErrNotExist) {
+ return nil, errors.Wrap(err, "unable to read kubeconfig file")
+ }
+
+ // Read the existing kubeconfig file
+ var kubeConfig map[string]any
+ if len(b) > 0 {
+ if err = yaml.Unmarshal(b, &kubeConfig); err != nil {
+ return nil, errors.Wrap(err, "unable to parse kubeconfig file")
+ }
+ }
+
+ // Ensure the kubeConfig struct is valid
+ if kubeConfig == nil {
+ kubeConfig = map[string]any{
+ "apiVersion": "v1",
+ "kind": "Config",
+ }
+ } else if kubeConfig["apiVersion"] != "v1" || kubeConfig["kind"] != "Config" {
+ return nil, errors.New("invalid existing kubeconfig file")
+ }
+ cfg := &Cfg{
+ raw: kubeConfig,
+ }
+
+ if clusters, ok := kubeConfig["clusters"]; ok {
+ if clusters, ok := clusters.([]any); ok {
+ cfg.clusters = clusters
+ } else {
+ return nil, errors.Newf("clusters is not an array got %T", clusters)
+ }
+ }
+
+ if users, ok := kubeConfig["users"]; ok {
+ if users, ok := users.([]any); ok {
+ cfg.users = users
+ } else {
+ return nil, errors.Newf("users is not an array got %T", users)
+ }
+ }
+
+ if contexts, ok := kubeConfig["contexts"]; ok {
+ if contexts, ok := contexts.([]any); ok {
+ cfg.contexts = contexts
+ } else {
+ return nil, errors.Newf("contexts is not an array got %T", contexts)
+ }
+ }
+
+ return cfg, nil
+}
+
+// writeKubeConfig writes the kubeconfig back to the file.
+func writeKubeConfig(file string, cfg *Cfg) error {
+ // Update the raw kubeconfig struct
+ cfg.raw["clusters"] = cfg.clusters
+ cfg.raw["users"] = cfg.users
+ cfg.raw["contexts"] = cfg.contexts
+
+ b, err := yaml.Marshal(cfg.raw)
+ if err != nil {
+ return errors.Wrap(err, "unable to marshal kubeconfig back into yaml")
+ }
+
+ // Ensure the directory exists
+ if err := os.MkdirAll(filepath.Dir(file), 0755); err != nil {
+ return errors.Wrap(err, "unable to create kubeconfig directory")
+ }
+
+ // Then write the file
+ err = xos.WriteFile(file, b, 0600)
+ if err != nil {
+ return errors.Wrap(err, "unable to write kubeconfig file")
+ }
+ return nil
+}
+
+type Cfg struct {
+ raw map[string]any
+ clusters []any
+ users []any
+ contexts []any
+}
+
+// appendOrUpdate looks in the array for a map entry whose "name" key matches the name in val;
+// if found, it updates that entry with val, otherwise it appends val to the array.
+func appendOrUpdate(dst []any, val map[string]any) []any {
+ idx := slices.IndexFunc(dst, func(entry any) bool {
+ if entry, ok := entry.(map[string]any); ok {
+ if entry["name"] == val["name"] {
+ return true
+ }
+ }
+ return false
+ })
+
+ if idx == -1 {
+ return append(dst, val)
+ } else {
+ dst[idx] = val
+ return dst
+ }
+}
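+
+// For example (illustrative values):
+//
+//	dst := []any{map[string]any{"name": "a"}, map[string]any{"name": "b"}}
+//	dst = appendOrUpdate(dst, map[string]any{"name": "b", "cluster": "x"}) // updates the "b" entry in place
+//	dst = appendOrUpdate(dst, map[string]any{"name": "c"})                 // appends a new entry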
diff --git a/cli/cmd/encore/k8s/kubernetes.go b/cli/cmd/encore/k8s/kubernetes.go
new file mode 100644
index 0000000000..86a21bd4e8
--- /dev/null
+++ b/cli/cmd/encore/k8s/kubernetes.go
@@ -0,0 +1,17 @@
+package k8s
+
+import (
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/root"
+)
+
+var kubernetesCmd = &cobra.Command{
+ Use: "kubernetes",
+ Short: "Kubernetes management commands",
+ Aliases: []string{"k8s"},
+}
+
+func init() {
+ root.Cmd.AddCommand(kubernetesCmd)
+}
diff --git a/cli/cmd/encore/k8s/types/KUBERNETES_LICENSE.txt b/cli/cmd/encore/k8s/types/KUBERNETES_LICENSE.txt
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/cli/cmd/encore/k8s/types/KUBERNETES_LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/cli/cmd/encore/k8s/types/README.md b/cli/cmd/encore/k8s/types/README.md
new file mode 100644
index 0000000000..038a9fcdbe
--- /dev/null
+++ b/cli/cmd/encore/k8s/types/README.md
@@ -0,0 +1,4 @@
+# Kubernetes Types
+
+This package contains types copied directly from the [Kubernetes](https://github.com/kubernetes/kubernetes) project.
+They are vendored here so that the Encore CLI does not need to depend on the Kubernetes project for just these types.
diff --git a/cli/cmd/encore/k8s/types/clientauthentication_types.go b/cli/cmd/encore/k8s/types/clientauthentication_types.go
new file mode 100644
index 0000000000..29e31d3aa4
--- /dev/null
+++ b/cli/cmd/encore/k8s/types/clientauthentication_types.go
@@ -0,0 +1,120 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+// ExecCredential is used by exec-based plugins to communicate credentials to
+// HTTP transports.
+type ExecCredential struct {
+ TypeMeta `json:",inline"`
+
+ // Spec holds information passed to the plugin by the transport.
+ Spec ExecCredentialSpec `json:"spec,omitempty"`
+
+ // Status is filled in by the plugin and holds the credentials that the transport
+ // should use to contact the API.
+ // +optional
+ Status *ExecCredentialStatus `json:"status,omitempty"`
+}
+
+// ExecCredentialSpec holds request and runtime specific information provided by
+// the transport.
+type ExecCredentialSpec struct {
+ // Cluster contains information to allow an exec plugin to communicate with the
+ // kubernetes cluster being authenticated to. Note that Cluster is non-nil only
+ // when provideClusterInfo is set to true in the exec provider config (i.e.,
+ // ExecConfig.ProvideClusterInfo).
+ // +optional
+ Cluster *Cluster `json:"cluster,omitempty"`
+
+ // Interactive declares whether stdin has been passed to this exec plugin.
+ Interactive bool `json:"interactive"`
+}
+
+// ExecCredentialStatus holds credentials for the transport to use.
+//
+// Token and ClientKeyData are sensitive fields. This data should only be
+// transmitted in-memory between client and exec plugin process. Exec plugin
+// itself should at least be protected via file permissions.
+type ExecCredentialStatus struct {
+ // ExpirationTimestamp indicates a time when the provided credentials expire.
+ // +optional
+ ExpirationTimestamp *Time `json:"expirationTimestamp,omitempty"`
+ // Token is a bearer token used by the client for request authentication.
+ Token string `json:"token,omitempty" datapolicy:"token"`
+ // PEM-encoded client TLS certificates (including intermediates, if any).
+ ClientCertificateData string `json:"clientCertificateData,omitempty"`
+ // PEM-encoded private key for the above certificate.
+ ClientKeyData string `json:"clientKeyData,omitempty" datapolicy:"security-key"`
+}
+
+// Cluster contains information to allow an exec plugin to communicate
+// with the kubernetes cluster being authenticated to.
+//
+// To ensure that this struct contains everything someone would need to communicate
+// with a kubernetes cluster (just like they would via a kubeconfig), the fields
+// should shadow "k8s.io/client-go/tools/clientcmd/api/v1".Cluster, with the exception
+// of CertificateAuthority, since CA data will always be passed to the plugin as bytes.
+type Cluster struct {
+ // Server is the address of the kubernetes cluster (https://hostname:port).
+ Server string `json:"server"`
+ // TLSServerName is passed to the server for SNI and is used in the client to
+ // check server certificates against. If ServerName is empty, the hostname
+ // used to contact the server is used.
+ // +optional
+ TLSServerName string `json:"tls-server-name,omitempty"`
+ // InsecureSkipTLSVerify skips the validity check for the server's certificate.
+ // This will make your HTTPS connections insecure.
+ // +optional
+ InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
+ // CAData contains PEM-encoded certificate authority certificates.
+ // If empty, system roots should be used.
+ // +listType=atomic
+ // +optional
+ CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
+ // ProxyURL is the URL to the proxy to be used for all requests to this
+ // cluster.
+ // +optional
+ ProxyURL string `json:"proxy-url,omitempty"`
+ // DisableCompression allows client to opt-out of response compression for all requests to the server. This is useful
+ // to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on
+ // compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296.
+ // +optional
+ DisableCompression bool `json:"disable-compression,omitempty"`
+ // Config holds additional config data that is specific to the exec
+ // plugin with regards to the cluster being authenticated to.
+ //
+ // This data is sourced from the clientcmd Cluster object's
+ // extensions[client.authentication.k8s.io/exec] field:
+ //
+ // clusters:
+ // - name: my-cluster
+ // cluster:
+ // ...
+ // extensions:
+ // - name: client.authentication.k8s.io/exec # reserved extension name for per cluster exec config
+ // extension:
+ // audience: 06e3fbd18de8 # arbitrary config
+ //
+ // In some environments, the user config may be exactly the same across many clusters
+ // (i.e. call this exec plugin) minus some details that are specific to each cluster
+ // such as the audience. This field allows the per cluster config to be directly
+ // specified with the cluster info. Using this field to store secret data is not
+ // recommended as one of the prime benefits of exec plugins is that no secrets need
+ // to be stored directly in the kubeconfig.
+ // +optional
+ Config RawExtension `json:"config,omitempty"`
+}
diff --git a/cli/cmd/encore/k8s/types/homedir.go b/cli/cmd/encore/k8s/types/homedir.go
new file mode 100644
index 0000000000..c041f40c71
--- /dev/null
+++ b/cli/cmd/encore/k8s/types/homedir.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+)
+
+// HomeDir returns the home directory for the current user.
+// On Windows:
+// 1. the first of %HOME%, %HOMEDRIVE%%HOMEPATH%, %USERPROFILE% containing a `.kube\config` file is returned.
+// 2. if none of those locations contain a `.kube\config` file, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists and is writeable is returned.
+// 3. if none of those locations are writeable, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists is returned.
+// 4. if none of those locations exists, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that is set is returned.
+func HomeDir() string {
+ if runtime.GOOS == "windows" {
+ home := os.Getenv("HOME")
+ homeDriveHomePath := ""
+ if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 {
+ homeDriveHomePath = homeDrive + homePath
+ }
+ userProfile := os.Getenv("USERPROFILE")
+
+ // Return first of %HOME%, %HOMEDRIVE%/%HOMEPATH%, %USERPROFILE% that contains a `.kube\config` file.
+ // %HOMEDRIVE%/%HOMEPATH% is preferred over %USERPROFILE% for backwards-compatibility.
+ for _, p := range []string{home, homeDriveHomePath, userProfile} {
+ if len(p) == 0 {
+ continue
+ }
+ if _, err := os.Stat(filepath.Join(p, ".kube", "config")); err != nil {
+ continue
+ }
+ return p
+ }
+
+ firstSetPath := ""
+ firstExistingPath := ""
+
+ // Prefer %USERPROFILE% over %HOMEDRIVE%/%HOMEPATH% for compatibility with other auth-writing tools
+ for _, p := range []string{home, userProfile, homeDriveHomePath} {
+ if len(p) == 0 {
+ continue
+ }
+ if len(firstSetPath) == 0 {
+ // remember the first path that is set
+ firstSetPath = p
+ }
+ info, err := os.Stat(p)
+ if err != nil {
+ continue
+ }
+ if len(firstExistingPath) == 0 {
+ // remember the first path that exists
+ firstExistingPath = p
+ }
+ if info.IsDir() && info.Mode().Perm()&(1<<(uint(7))) != 0 {
+ // return first path that is writeable
+ return p
+ }
+ }
+
+ // If none are writeable, return first location that exists
+ if len(firstExistingPath) > 0 {
+ return firstExistingPath
+ }
+
+ // If none exist, return first location that is set
+ if len(firstSetPath) > 0 {
+ return firstSetPath
+ }
+
+ // We've got nothing
+ return ""
+ }
+ return os.Getenv("HOME")
+}
diff --git a/cli/cmd/encore/k8s/types/meta_types.go b/cli/cmd/encore/k8s/types/meta_types.go
new file mode 100644
index 0000000000..b9dadd8cfe
--- /dev/null
+++ b/cli/cmd/encore/k8s/types/meta_types.go
@@ -0,0 +1,96 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+import (
+ "encoding/json"
+ "time"
+)
+
+// TypeMeta describes an individual object in an API response or request
+// with strings representing the type of the object and its API schema version.
+// Structures that are versioned or persisted should inline TypeMeta.
+//
+// +k8s:deepcopy-gen=false
+type TypeMeta struct {
+ // Kind is a string value representing the REST resource this object represents.
+ // Servers may infer this from the endpoint the client submits requests to.
+ // Cannot be updated.
+ // In CamelCase.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
+
+ // APIVersion defines the versioned schema of this representation of an object.
+ // Servers should convert recognized schemas to the latest internal value, and
+ // may reject unrecognized values.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ // +optional
+ APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"`
+}
+
+// Time is a wrapper around time.Time which supports correct
+// marshaling to YAML and JSON. Wrappers are provided for many
+// of the factory methods that the time package offers.
+//
+// +protobuf.options.marshal=false
+// +protobuf.as=Timestamp
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type Time struct {
+ time.Time `protobuf:"-"`
+}
+
+// NewTime returns a wrapped instance of the provided time
+func NewTime(time time.Time) Time {
+ return Time{time}
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+ if len(b) == 4 && string(b) == "null" {
+ t.Time = time.Time{}
+ return nil
+ }
+
+ var str string
+ err := json.Unmarshal(b, &str)
+ if err != nil {
+ return err
+ }
+
+ pt, err := time.Parse(time.RFC3339, str)
+ if err != nil {
+ return err
+ }
+
+ t.Time = pt.Local()
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+ if t.IsZero() {
+ // Encode unset/nil objects as JSON's "null".
+ return []byte("null"), nil
+ }
+ buf := make([]byte, 0, len(time.RFC3339)+2)
+ buf = append(buf, '"')
+ // time cannot contain non-escapable JSON characters
+ buf = t.UTC().AppendFormat(buf, time.RFC3339)
+ buf = append(buf, '"')
+ return buf, nil
+}
diff --git a/cli/cmd/encore/k8s/types/runtime_types.go b/cli/cmd/encore/k8s/types/runtime_types.go
new file mode 100644
index 0000000000..23cf5a5160
--- /dev/null
+++ b/cli/cmd/encore/k8s/types/runtime_types.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package types
+
+// RawExtension is used to hold extensions in external versions.
+//
+// To use this, make a field which has RawExtension as its type in your external, versioned
+// struct, and Object in your internal struct. You also need to register your
+// various plugin types.
+//
+// // Internal package:
+//
+// type MyAPIObject struct {
+// runtime.TypeMeta `json:",inline"`
+// MyPlugin runtime.Object `json:"myPlugin"`
+// }
+//
+// type PluginA struct {
+// AOption string `json:"aOption"`
+// }
+//
+// // External package:
+//
+// type MyAPIObject struct {
+// runtime.TypeMeta `json:",inline"`
+// MyPlugin runtime.RawExtension `json:"myPlugin"`
+// }
+//
+// type PluginA struct {
+// AOption string `json:"aOption"`
+// }
+//
+// // On the wire, the JSON will look something like this:
+//
+// {
+// "kind":"MyAPIObject",
+// "apiVersion":"v1",
+// "myPlugin": {
+// "kind":"PluginA",
+// "aOption":"foo",
+// },
+// }
+//
+// So what happens? Decode first uses json or yaml to unmarshal the serialized data into
+// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.
+// The next step is to copy (using pkg/conversion) into the internal struct. The runtime
+// package's DefaultScheme has conversion functions installed which will unpack the
+// JSON stored in RawExtension, turning it into the correct object type, and storing it
+// in the Object. (TODO: In the case where the object is of an unknown type, a
+// runtime.Unknown object will be created and stored.)
+//
+// +k8s:deepcopy-gen=true
+// +protobuf=true
+// +k8s:openapi-gen=true
+type RawExtension struct {
+ // Raw is the underlying serialization of this object.
+ //
+ // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data.
+ Raw []byte `json:"-" protobuf:"bytes,1,opt,name=raw"`
+ // Object can hold a representation of this extension - useful for working with versioned
+ // structs.
+ Object any `json:"-"`
+}
diff --git a/cli/cmd/encore/logs.go b/cli/cmd/encore/logs.go
index d9bb39e1a8..307d7e17c3 100644
--- a/cli/cmd/encore/logs.go
+++ b/cli/cmd/encore/logs.go
@@ -3,26 +3,33 @@ package main
import (
"bytes"
"context"
+ "encoding/json"
+ "errors"
"fmt"
- "io"
"os"
"os/signal"
+ "time"
- daemonpb "encr.dev/proto/encore/daemon"
+ "github.com/gorilla/websocket"
+ "github.com/logrusorgru/aurora/v3"
"github.com/rs/zerolog"
"github.com/spf13/cobra"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
+
+ "encr.dev/cli/internal/platform"
+ "encr.dev/pkg/appfile"
)
var (
- logsEnv string
- logsJSON bool
+ logsEnv string
+ logsJSON bool
+ logsQuiet bool
)
var logsCmd = &cobra.Command{
- Use: "logs",
+ Use: "logs [--env=prod] [--json]",
Short: "Streams logs from your application",
+
+ DisableFlagsInUseLine: true,
Run: func(cmd *cobra.Command, args []string) {
appRoot, _ := determineAppRoot()
streamLogs(appRoot, logsEnv)
@@ -30,51 +37,104 @@ var logsCmd = &cobra.Command{
}
func streamLogs(appRoot, envName string) {
- interrupt := make(chan os.Signal, 1)
- signal.Notify(interrupt, os.Interrupt)
+ ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
+ defer cancel()
+ appSlug, err := appfile.Slug(appRoot)
+ if err != nil {
+ fatal(err)
+ } else if appSlug == "" {
+ fatal("app is not linked with Encore Cloud")
+ }
+
+ if envName == "" {
+ envName = "@primary"
+ }
+ logs, err := platform.EnvLogs(ctx, appSlug, envName)
+ if err != nil {
+ var e platform.Error
+ if errors.As(err, &e) {
+ switch e.Code {
+ case "env_not_found":
+ fatalf("environment %q not found", envName)
+ }
+ }
+ fatal(err)
+ }
- ctx, cancel := context.WithCancel(context.Background())
go func() {
- <-interrupt
- cancel()
+ <-ctx.Done()
+ logs.Close()
}()
- daemon := setupDaemon(ctx)
- stream, err := daemon.Logs(ctx, &daemonpb.LogsRequest{
- AppRoot: appRoot,
- EnvName: envName,
- })
- if err != nil {
- fatal("could not stream logs: ", err)
+ // Use the same configuration as the runtime
+ zerolog.TimeFieldFormat = time.RFC3339Nano
+
+ if !logsQuiet {
+ fmt.Println(aurora.Gray(12, "Connected, waiting for logs..."))
}
cw := zerolog.NewConsoleWriter()
for {
- msg, err := stream.Recv()
- if err == io.EOF || status.Code(err) == codes.Canceled {
+ _, message, err := logs.ReadMessage()
+ if err != nil {
+ if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
+ fatal("the server closed the connection unexpectedly.")
+ }
return
- } else if err != nil {
- fatal(err)
}
- for _, line := range msg.Lines {
+
+ lines := bytes.Split(message, []byte("\n"))
+ for _, line := range lines {
// Pretty-print logs if requested and it looks like a JSON log line
if !logsJSON && bytes.HasPrefix(line, []byte{'{'}) {
- if _, err := cw.Write(line); err != nil {
+ if _, err := cw.Write(mapCloudFieldNamesToExpected(line)); err != nil {
// Fall back to regular stdout in case of error
os.Stdout.Write(line)
+ os.Stdout.Write([]byte("\n"))
}
} else {
os.Stdout.Write(line)
+ os.Stdout.Write([]byte("\n"))
}
}
- if msg.DropNotice {
- fmt.Fprintln(os.Stderr, "--- NOTICE: log lines were not sent due to high volume or slow reader ---")
- }
}
}
+// mapCloudFieldNamesToExpected detects whether a JSON log line uses GCP-style
+// field names ("severity" and "timestamp") and, if so, renames them to the
+// level and time field names that zerolog expects
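+// For example, {"severity":"INFO","timestamp":"..."} becomes
+// {"level":"INFO","time":"..."} under zerolog's default field names.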
+func mapCloudFieldNamesToExpected(jsonBytes []byte) []byte {
+ unmarshaled := map[string]any{}
+ err := json.Unmarshal(jsonBytes, &unmarshaled)
+ if err != nil {
+ return jsonBytes
+ }
+
+ _, hasSeverity := unmarshaled["severity"]
+ _, hasExpectedLevelField := unmarshaled[zerolog.LevelFieldName]
+ _, hasTimestamp := unmarshaled["timestamp"]
+ _, hasExpectedTimeField := unmarshaled[zerolog.TimestampFieldName]
+
+ // GCP-style logs carry "severity" and "timestamp" fields instead of zerolog's default level and timestamp field names
+ if hasSeverity && !hasExpectedLevelField && hasTimestamp && !hasExpectedTimeField {
+ unmarshaled[zerolog.LevelFieldName] = unmarshaled["severity"]
+ delete(unmarshaled, "severity")
+ unmarshaled[zerolog.TimestampFieldName] = unmarshaled["timestamp"]
+ delete(unmarshaled, "timestamp")
+ } else {
+ // No changes, return the original bytes unmodified
+ return jsonBytes
+ }
+
+ newBytes, err := json.Marshal(unmarshaled)
+ if err != nil {
+ return jsonBytes
+ }
+ return newBytes
+}
+
func init() {
rootCmd.AddCommand(logsCmd)
- logsCmd.Flags().StringVarP(&logsEnv, "env", "e", "", "Environment name to stream logs from (defaults to the production environment)")
+ logsCmd.Flags().StringVarP(&logsEnv, "env", "e", "", "Environment name to stream logs from (defaults to the primary environment)")
logsCmd.Flags().BoolVar(&logsJSON, "json", false, "Whether to print logs in raw JSON format")
+ logsCmd.Flags().BoolVarP(&logsQuiet, "quiet", "q", false, "Whether to print initial message when the command is waiting for logs")
}
diff --git a/cli/cmd/encore/main.go b/cli/cmd/encore/main.go
index 4b5f845888..ffb4dc4a77 100644
--- a/cli/cmd/encore/main.go
+++ b/cli/cmd/encore/main.go
@@ -2,28 +2,29 @@ package main
import (
"fmt"
- "io"
"os"
- "path/filepath"
- daemonpb "encr.dev/proto/encore/daemon"
- "github.com/fatih/color"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
- "github.com/spf13/cobra"
"golang.org/x/tools/go/packages"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/cli/cmd/encore/root"
+
+ // Register commands
+ _ "encr.dev/cli/cmd/encore/app"
+ _ "encr.dev/cli/cmd/encore/config"
+ _ "encr.dev/cli/cmd/encore/k8s"
+ _ "encr.dev/cli/cmd/encore/namespace"
+ _ "encr.dev/cli/cmd/encore/secrets"
)
-var rootCmd = &cobra.Command{
- Use: "encore",
- Short: "encore is the fastest way of developing backend applications",
-}
+// for backwards compatibility, for now
+var rootCmd = root.Cmd
func main() {
log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})
- if err := rootCmd.Execute(); err != nil {
+ if err := root.Cmd.Execute(); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
@@ -36,30 +37,11 @@ func main() {
// relative path from the app root to the working directory.
// On errors it prints an error message and exits.
func determineAppRoot() (appRoot, relPath string) {
- dir, err := os.Getwd()
- if err != nil {
- fatal(err)
- }
- rel := "."
- for {
- path := filepath.Join(dir, "encore.app")
- fi, err := os.Stat(path)
- if os.IsNotExist(err) {
- dir2 := filepath.Dir(dir)
- if dir2 == dir {
- fatal("no encore.app found in directory (or any of the parent directories).")
- }
- rel = filepath.Join(filepath.Base(dir), rel)
- dir = dir2
- continue
- } else if err != nil {
- fatal(err)
- } else if fi.IsDir() {
- fatal("encore.app is a directory, not a file")
- } else {
- return dir, rel
- }
- }
+ return cmdutil.AppRoot()
+}
+
+func determineWorkspaceRoot(appRoot string) string {
+ return cmdutil.WorkspaceRoot(appRoot)
}
func resolvePackages(dir string, patterns ...string) ([]string, error) {
@@ -78,69 +60,22 @@ func resolvePackages(dir string, patterns ...string) ([]string, error) {
return paths, nil
}
-// commandOutputStream is the interface for gRPC streams that
-// stream the output of a command.
-type commandOutputStream interface {
- Recv() (*daemonpb.CommandMessage, error)
-}
-
-// streamCommandOutput streams the output from the given command stream,
-// and exits with the same exit code as the command.
-func streamCommandOutput(stream commandOutputStream) {
- for {
- msg, err := stream.Recv()
- if err != nil {
- st := status.Convert(err)
- switch {
- case st.Code() == codes.FailedPrecondition:
- fmt.Fprintln(os.Stderr, st.Message())
- os.Exit(1)
- case err == io.EOF || st.Code() == codes.Canceled:
- return
- default:
- log.Fatal().Err(err).Msg("connection failure")
- }
- }
-
- switch m := msg.Msg.(type) {
- case *daemonpb.CommandMessage_Output:
- if m.Output.Stdout != nil {
- os.Stdout.Write(m.Output.Stdout)
- }
- if m.Output.Stderr != nil {
- os.Stderr.Write(m.Output.Stderr)
- }
- case *daemonpb.CommandMessage_Exit:
- os.Exit(int(m.Exit.Code))
- }
- }
+func displayError(out *os.File, err []byte) {
+ cmdutil.DisplayError(out, err)
}
func fatal(args ...interface{}) {
- // Prettify gRPC errors
- for i, arg := range args {
- if err, ok := arg.(error); ok {
- if s, ok := status.FromError(err); ok {
- args[i] = s.Message()
- }
- }
- }
-
- red := color.New(color.FgRed)
- red.Fprint(os.Stderr, "error: ")
- red.Fprintln(os.Stderr, args...)
- os.Exit(1)
+ cmdutil.Fatal(args...)
}
func fatalf(format string, args ...interface{}) {
- // Prettify gRPC errors
- for i, arg := range args {
- if err, ok := arg.(error); ok {
- if s, ok := status.FromError(err); ok {
- args[i] = s.Message()
- }
- }
- }
+ cmdutil.Fatalf(format, args...)
+}
- fatal(fmt.Sprintf(format, args...))
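+// nonZeroPtr returns a pointer to v, or nil if v is the zero value
+// for its type.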
+func nonZeroPtr[T comparable](v T) *T {
+ var zero T
+ if v == zero {
+ return nil
+ }
+ return &v
}
diff --git a/cli/cmd/encore/mcp.go b/cli/cmd/encore/mcp.go
new file mode 100644
index 0000000000..9ce2b5cf7f
--- /dev/null
+++ b/cli/cmd/encore/mcp.go
@@ -0,0 +1,412 @@
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "maps"
+ "net/http"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/logrusorgru/aurora/v3"
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/cli/cmd/encore/root"
+ "encr.dev/cli/internal/jsonrpc2"
+)
+
+var mcpCmd = &cobra.Command{
+ Use: "mcp",
+ Short: "MCP (Message Context Provider) commands",
+}
+
+var (
+ appID string
+ mcpPort int = 9900
+)
+
+var startCmd = &cobra.Command{
+ Use: "start",
+ Short: "Starts an SSE based MCP session and prints the SSE URL",
+ Run: func(cmd *cobra.Command, args []string) {
+ ctx := cmd.Context()
+ if appID == "" {
+ appID = cmdutil.AppSlugOrLocalID()
+ }
+ setupDaemon(ctx)
+
+ _, _ = fmt.Fprintf(os.Stderr, " MCP Service is running!\n\n")
+ _, _ = fmt.Fprintf(os.Stderr, " MCP SSE URL: %s\n", aurora.Cyan(fmt.Sprintf(
+ "http://localhost:%d/sse?app=%s", mcpPort, appID)))
+ _, _ = fmt.Fprintf(os.Stderr, " MCP stdio Command: %s\n", aurora.Cyan(fmt.Sprintf(
+ "encore mcp run --app=%s", appID)))
+ },
+}
+
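+// sseConnection is a Server-Sent Events connection to the local MCP
+// service. It tracks the IDs of outstanding JSON-RPC requests so they
+// can be answered with an error response if the connection drops.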
+type sseConnection struct {
+ read func() (typ, data string, err error)
+ close func() error
+
+ appID string
+ connected bool
+ path string
+ client *http.Client
+
+ // Track outstanding request IDs
+ mu sync.Mutex
+ requestIDs map[jsonrpc2.ID]struct{}
+}
+
+func (c *sseConnection) Read() (typ, data string, err error) {
+ typ, data, err = c.read()
+ if err != nil {
+ c.connected = false
+ return "", "", err
+ }
+ return typ, data, nil
+}
+
+func (c *sseConnection) Close() error {
+ if c.close != nil {
+ c.connected = false
+ return c.close()
+ }
+ return nil
+}
+
+func (c *sseConnection) reconnect(ctx context.Context) error {
+ // Close the existing connection if there is one
+ if c.close != nil {
+ _ = c.close()
+ }
+ c.connected = false
+
+ // Initial backoff duration
+ backoff := 1000 * time.Millisecond
+ maxBackoff := 10 * time.Second
+
+ for {
+ // Check if context is canceled
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ if root.Verbosity > 0 {
+ fmt.Fprintf(os.Stderr, "Reconnecting to MCP: %v\n", backoff)
+ }
+
+ // Try to connect
+ err := c.connect(ctx)
+ if err == nil {
+ c.connected = true
+ return nil
+ }
+
+ // If connection failed, wait and retry with exponential backoff
+ if root.Verbosity > 0 {
+ fmt.Fprintf(os.Stderr, "Failed to connect to MCP: %v, retrying in %v\n", err, backoff)
+ }
+
+ select {
+ case <-time.After(backoff):
+ // Double the backoff for next attempt, but cap at maxBackoff
+ backoff *= 2
+ if backoff > maxBackoff {
+ backoff = maxBackoff
+ }
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+}
+
+func (c *sseConnection) connect(ctx context.Context) error {
+ setupDaemon(ctx)
+ if c.client == nil {
+ c.client = &http.Client{}
+ }
+
+ // Initialize the request IDs map
+ c.mu.Lock()
+ c.requestIDs = make(map[jsonrpc2.ID]struct{})
+ c.mu.Unlock()
+
+ resp, err := c.client.Get(fmt.Sprintf("http://localhost:%d/sse?app=%s", mcpPort, c.appID))
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != 200 {
+ resp.Body.Close()
+ return fmt.Errorf("error getting session ID: %v", resp.Status)
+ }
+ c.read = eventReader(startLineReader(ctx, bufio.NewReader(resp.Body).ReadString))
+ c.close = resp.Body.Close
+ c.connected = true
+
+ // Read the endpoint path
+ event, path, err := c.Read()
+ if err != nil {
+ return fmt.Errorf("error reading endpoint path: %v", err)
+ }
+ if event != "endpoint" {
+ return fmt.Errorf("expected endpoint event, got %q", event)
+ }
+ c.path = path
+
+ return nil
+}
+
+func (c *sseConnection) SendMessage(data []byte) error {
+ if !c.connected {
+ return fmt.Errorf("not connected to MCP")
+ }
+
+ if c.client == nil {
+ c.client = &http.Client{}
+ }
+
+ // Track the request ID if it's a Call
+ msg, err := jsonrpc2.DecodeMessage(data)
+ if err == nil {
+ if call, ok := msg.(*jsonrpc2.Call); ok {
+ c.mu.Lock()
+ c.requestIDs[call.ID()] = struct{}{}
+ c.mu.Unlock()
+ }
+ }
+
+ resp, err := c.client.Post(fmt.Sprintf("http://localhost:%d%s", mcpPort, c.path), "application/json", bytes.NewReader(data))
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != 202 {
+ return fmt.Errorf("error forwarding request: %v", resp.Status)
+ }
+
+ return nil
+}
+
+// CreateErrorResponse creates a JSON-RPC error response with the correct ID if available
+func (c *sseConnection) CreateErrorResponse(id *jsonrpc2.ID, code int, message string) string {
+ // Build the error response
+ response := map[string]interface{}{
+ "jsonrpc": "2.0",
+ "error": map[string]interface{}{
+ "code": code,
+ "message": message,
+ },
+ }
+
+ // Include ID if available
+ if id != nil {
+ response["id"] = id
+
+ // Remove from tracking as we're responding to it
+ c.mu.Lock()
+ delete(c.requestIDs, *id)
+ c.mu.Unlock()
+ } else {
+ response["id"] = nil
+ }
+
+ // Marshal to JSON
+ jsonData, err := json.Marshal(response)
+ if err != nil {
+ // Fallback if marshaling fails
+ return fmt.Sprintf(`{"jsonrpc":"2.0","id":null,"error":{"code":%d,"message":"%s"}}`, code, message)
+ }
+
+ return string(jsonData)
+}
+
+// RemoveRequestID removes a request ID from tracking once a response is received
+func (c *sseConnection) RemoveRequestID(id jsonrpc2.ID) {
+ c.mu.Lock()
+ delete(c.requestIDs, id)
+ c.mu.Unlock()
+}
+
+var runCmd = &cobra.Command{
+ Use: "run",
+ Short: "Runs an stdio-based MCP session",
+ Run: func(cmd *cobra.Command, args []string) {
+
+ ctx := cmd.Context()
+
+ if appID == "" {
+ appID = cmdutil.AppSlugOrLocalID()
+ }
+
+ if root.Verbosity > 0 {
+ _, _ = fmt.Fprintf(os.Stderr, "Starting an MCP session for app %s\n", appID)
+ }
+
+ conn := &sseConnection{appID: appID}
+ if err := conn.connect(ctx); err != nil {
+ fmt.Fprintf(os.Stderr, "Error connecting to MCP: %v\n", err)
+ os.Exit(1)
+ }
+ defer conn.Close()
+
+ go func() {
+ for {
+ event, data, err := conn.Read()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error reading event: %v\n", err)
+
+ conn.mu.Lock()
+ requestIDs := maps.Clone(conn.requestIDs)
+ conn.mu.Unlock()
+ for id := range requestIDs {
+ fmt.Println(conn.CreateErrorResponse(&id, -32700, "error"))
+ }
+ if err := conn.reconnect(ctx); err != nil {
+ fmt.Fprintf(os.Stderr, "Error reconnecting to MCP: %v\n", err)
+ os.Exit(1)
+ }
+ continue
+ }
+ if root.Verbosity > 0 {
+ fmt.Fprintf(os.Stderr, "Received event: %s: %s\n", event, data)
+ }
+ if event == "message" {
+ // If it's a response message, remove the ID from tracking
+ responseMsg := struct {
+ JSONRPC string `json:"jsonrpc"`
+ ID *jsonrpc2.ID `json:"id"`
+ Result interface{} `json:"result,omitempty"`
+ Error interface{} `json:"error,omitempty"`
+ }{}
+
+ if err := json.Unmarshal([]byte(data), &responseMsg); err == nil && responseMsg.ID != nil {
+ conn.RemoveRequestID(*responseMsg.ID)
+ }
+
+ fmt.Println(data)
+ }
+ }
+ }()
+
+ stdinReader := startLineReader(ctx, bufio.NewReader(os.Stdin).ReadBytes)
+ if root.Verbosity > 0 {
+ _, _ = fmt.Fprintf(os.Stderr, "Listening on stdin for MCP requests\n\n")
+ }
+
+ for {
+ line, err := stdinReader()
+ if err != nil {
+ if err == io.EOF || err == context.Canceled {
+ break
+ }
+ fmt.Fprintf(os.Stderr, "Error reading input: %v\n", err)
+ os.Exit(1)
+ }
+ if strings.TrimSpace(string(line)) == "" {
+ continue
+ }
+
+ msg, err := jsonrpc2.DecodeMessage(line)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error decoding request: %v\n", err)
+ fmt.Println(conn.CreateErrorResponse(nil, -32700, "parse error"))
+ continue
+ }
+
+ if root.Verbosity > 0 {
+ fmt.Fprintf(os.Stderr, "Sending request: %s\n", line)
+ }
+
+ err = conn.SendMessage(line)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error sending message: %v\n", err)
+
+ // Create error response with the request ID if available
+ var requestID *jsonrpc2.ID
+ if call, ok := msg.(*jsonrpc2.Call); ok {
+ id := call.ID()
+ requestID = &id
+ }
+
+ fmt.Println(conn.CreateErrorResponse(requestID, -32700, "error sending message"))
+ continue
+ }
+ }
+ },
+}
+
+type lineResult[T any] struct {
+ res T
+ err error
+}
+
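+// startLineReader runs the blocking line-reader rd in its own goroutine
+// and returns a read function that can be canceled via ctx, letting
+// callers select between the next line and context cancellation.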
+func startLineReader[T any](ctx context.Context, rd func(byte) (T, error)) func() (T, error) {
+ channel := make(chan lineResult[T])
+ go func() {
+ for {
+ line, err := rd('\n') // read until the next newline
+ if err != nil {
+ channel <- lineResult[T]{err: err}
+ return
+ }
+ channel <- lineResult[T]{res: line}
+ }
+ }()
+ return func() (T, error) {
+ var t T
+ select {
+ case <-ctx.Done():
+ return t, ctx.Err()
+ case result := <-channel:
+ if result.err != nil {
+ return t, result.err
+ }
+ return result.res, nil
+ }
+ }
+}
+
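+// eventReader parses a minimal subset of the SSE wire format: it skips
+// ahead to the next "event:" line and expects the line that follows to
+// carry the corresponding "data:" payload.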
+func eventReader(reader func() (string, error)) func() (typ, data string, err error) {
+ return func() (typ, data string, err error) {
+ var line string
+ for {
+ line, err = reader()
+ if err != nil {
+ return "", "", err
+ }
+ if strings.HasPrefix(line, "event:") {
+ break
+ }
+ }
+ typ = strings.TrimSpace(strings.TrimPrefix(line, "event:"))
+ line, err = reader()
+ if err != nil {
+ return "", "", err
+ }
+ if !strings.HasPrefix(line, "data:") {
+ return "", "", fmt.Errorf("expected data: prefix, got %q", line)
+ }
+ data = strings.TrimSpace(strings.TrimPrefix(line, "data:"))
+ return typ, data, nil
+ }
+}
+
+func init() {
+ mcpCmd.AddCommand(runCmd)
+ runCmd.Flags().StringVar(&appID, "app", "", "The app ID to use for the MCP session")
+
+ mcpCmd.AddCommand(startCmd)
+ startCmd.Flags().StringVar(&appID, "app", "", "The app ID to use for the MCP session")
+
+ root.Cmd.AddCommand(mcpCmd)
+}
diff --git a/cli/cmd/encore/namespace/namespace.go b/cli/cmd/encore/namespace/namespace.go
new file mode 100644
index 0000000000..61af5a7f46
--- /dev/null
+++ b/cli/cmd/encore/namespace/namespace.go
@@ -0,0 +1,210 @@
+package namespace
+
+import (
+ "bytes"
+ "cmp"
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "slices"
+ "text/tabwriter"
+ "time"
+
+ "github.com/spf13/cobra"
+ "google.golang.org/protobuf/encoding/protojson"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/cli/cmd/encore/root"
+ daemonpb "encr.dev/proto/encore/daemon"
+)
+
+var nsCmd = &cobra.Command{
+ Use: "namespace",
+ Short: "Manage infrastructure namespaces",
+ Aliases: []string{"ns"},
+}
+
+func init() {
+ output := cmdutil.Oneof{Value: "columns", Allowed: []string{"columns", "json"}}
+ listCmd := &cobra.Command{
+ Use: "list",
+ Short: "List infrastructure namespaces",
+ Aliases: []string{"ls"},
+ Args: cobra.NoArgs,
+ Run: func(cmd *cobra.Command, args []string) {
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ appRoot, _ := cmdutil.AppRoot()
+ daemon := cmdutil.ConnectDaemon(ctx)
+ resp, err := daemon.ListNamespaces(ctx, &daemonpb.ListNamespacesRequest{AppRoot: appRoot})
+ if err != nil {
+ cmdutil.Fatal(err)
+ }
+ nss := resp.Namespaces
+
+ // Sort by active first, then name second.
+ slices.SortFunc(nss, func(a, b *daemonpb.Namespace) int {
+ if a.Active != b.Active {
+ if a.Active {
+ return -1
+ } else {
+ return 1
+ }
+ }
+ return cmp.Compare(a.Name, b.Name)
+ })
+
+ if output.Value == "json" {
+ var buf bytes.Buffer
+ buf.WriteByte('[')
+ for i, ns := range nss {
+ data, err := protojson.MarshalOptions{
+ UseProtoNames: true,
+ EmitUnpopulated: true,
+ }.Marshal(ns)
+ if err != nil {
+ cmdutil.Fatal(err)
+ }
+ if i > 0 {
+ buf.WriteByte(',')
+ }
+ buf.Write(data)
+ }
+ buf.WriteByte(']')
+
+ var dst bytes.Buffer
+ if err := json.Indent(&dst, buf.Bytes(), "", " "); err != nil {
+ cmdutil.Fatal(err)
+ }
+ _, _ = fmt.Fprintln(os.Stdout, dst.String())
+ return
+ }
+
+ w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', tabwriter.StripEscape)
+
+ _, _ = fmt.Fprint(w, "NAME\tID\tACTIVE\n")
+
+ for _, ns := range nss {
+ active := ""
+ if ns.Active {
+ active = "yes"
+ }
+ _, _ = fmt.Fprintf(w, "%s\t%s\t%s\n", ns.Name, ns.Id, active)
+ }
+ _ = w.Flush()
+ },
+ }
+ output.AddFlag(listCmd)
+
+ nsCmd.AddCommand(listCmd)
+}
+
+var createCmd = &cobra.Command{
+ Use: "create NAME",
+ Short: "Create a new infrastructure namespace",
+
+ Args: cobra.ExactArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ appRoot, _ := cmdutil.AppRoot()
+ daemon := cmdutil.ConnectDaemon(ctx)
+ ns, err := daemon.CreateNamespace(ctx, &daemonpb.CreateNamespaceRequest{
+ AppRoot: appRoot,
+ Name: args[0],
+ })
+ if err != nil {
+ cmdutil.Fatal(err)
+ }
+ _, _ = fmt.Fprintf(os.Stdout, "created namespace %s\n", ns.Name)
+ },
+}
+
+var deleteCmd = &cobra.Command{
+ Use: "delete NAME",
+ Short: "Delete an infrastructure namespace",
+ Aliases: []string{"del"},
+
+ Args: cobra.ExactArgs(1),
+ ValidArgsFunction: namespaceListCompletion,
+ Run: func(cmd *cobra.Command, args []string) {
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ appRoot, _ := cmdutil.AppRoot()
+ daemon := cmdutil.ConnectDaemon(ctx)
+ name := args[0]
+ _, err := daemon.DeleteNamespace(ctx, &daemonpb.DeleteNamespaceRequest{
+ AppRoot: appRoot,
+ Name: name,
+ })
+ if err != nil {
+ cmdutil.Fatal(err)
+ }
+ _, _ = fmt.Fprintf(os.Stdout, "deleted namespace %s\n", name)
+ },
+}
+
+func init() {
+ var create bool
+ switchCmd := &cobra.Command{
+ Use: "switch [--create] NAME",
+ Short: "Switch to a different infrastructure namespace",
+ Long: `Switch to a specified infrastructure namespace. Subsequent commands will use the given namespace by default.
+
+If -c is specified, the namespace will first be created before switching to it.
+
+You can use '-' as the namespace name to switch back to the previously active namespace.
+`,
+
+ Args: cobra.ExactArgs(1),
+ ValidArgsFunction: namespaceListCompletion,
+ Run: func(cmd *cobra.Command, args []string) {
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ appRoot, _ := cmdutil.AppRoot()
+ daemon := cmdutil.ConnectDaemon(ctx)
+ ns, err := daemon.SwitchNamespace(ctx, &daemonpb.SwitchNamespaceRequest{
+ AppRoot: appRoot,
+ Name: args[0],
+ Create: create,
+ })
+ if err != nil {
+ cmdutil.Fatal(err)
+ }
+ _, _ = fmt.Fprintf(os.Stdout, "switched to namespace %s\n", ns.Name)
+ },
+ }
+
+ switchCmd.Flags().BoolVarP(&create, "create", "c", false, "create the namespace before switching")
+ nsCmd.AddCommand(switchCmd)
+}
+
+func init() {
+ nsCmd.AddCommand(createCmd)
+ nsCmd.AddCommand(deleteCmd)
+ root.Cmd.AddCommand(nsCmd)
+}
+
+func namespaceListCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ // List namespaces from the daemon for completion.
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+ defer cancel()
+
+ appRoot, _ := cmdutil.AppRoot()
+ daemon := cmdutil.ConnectDaemon(ctx)
+ resp, err := daemon.ListNamespaces(ctx, &daemonpb.ListNamespacesRequest{AppRoot: appRoot})
+ if err != nil {
+ return nil, cobra.ShellCompDirectiveError
+ }
+
+ namespaces := make([]string, len(resp.Namespaces))
+ for i, ns := range resp.Namespaces {
+ namespaces[i] = ns.Name
+ }
+ return namespaces, cobra.ShellCompDirectiveNoFileComp
+}
diff --git a/cli/cmd/encore/rand.go b/cli/cmd/encore/rand.go
new file mode 100644
index 0000000000..ce8dd29b93
--- /dev/null
+++ b/cli/cmd/encore/rand.go
@@ -0,0 +1,186 @@
+package main
+
+import (
+ cryptorand "crypto/rand"
+ "encoding/base32"
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/gofrs/uuid"
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/pkg/words"
+)
+
+var randCmd = &cobra.Command{
+ Use: "rand",
+ Short: "Utilities for generating cryptographically secure random data",
+}
+
+func init() {
+ rootCmd.AddCommand(randCmd)
+}
+
+// UUID command
+func init() {
+ var v1, v4, v6, v7 bool
+ uuidCmd := &cobra.Command{
+ Use: "uuid [-1|-4|-6|-7]",
+ Short: "Generates a random UUID (defaults to version 4)",
+ Args: cobra.NoArgs,
+ Run: func(cmd *cobra.Command, args []string) {
+ // Check the explicit version flags first so that the v4 default
+ // (which remains true even when another version is requested)
+ // cannot shadow an explicitly chosen version.
+ fn := uuid.NewV4
+ switch {
+ case v1:
+ fn = uuid.NewV1
+ case v6:
+ fn = uuid.NewV6
+ case v7:
+ fn = uuid.NewV7
+ }
+ u, err := fn()
+ if err != nil {
+ fatalf("failed to generate UUID: %v", err)
+ }
+ _, _ = fmt.Println(u.String())
+ },
+ }
+ uuidCmd.Flags().BoolVarP(&v1, "v1", "1", false, "Generate a version 1 UUID")
+ uuidCmd.Flags().BoolVarP(&v4, "v4", "4", true, "Generate a version 4 UUID")
+ uuidCmd.Flags().BoolVarP(&v6, "v6", "6", false, "Generate a version 6 UUID")
+ uuidCmd.Flags().BoolVarP(&v7, "v7", "7", false, "Generate a version 7 UUID")
+ uuidCmd.MarkFlagsMutuallyExclusive("v1", "v4", "v6", "v7")
+
+ randCmd.AddCommand(uuidCmd)
+}
+
+// Bytes command
+func init() {
+ format := cmdutil.Oneof{
+ Value: "hex",
+ Allowed: []string{"hex", "base32", "base32hex", "base32crockford", "base64", "base64url", "raw"},
+ Flag: "format",
+ FlagShort: "f",
+ Desc: "Output format",
+ }
+
+ noPadding := false
+ doFormat := func(data []byte) string {
+ switch format.Value {
+ case "hex":
+ return hex.EncodeToString(data)
+ case "base32":
+ enc := base32.StdEncoding
+ if noPadding {
+ enc = enc.WithPadding(base32.NoPadding)
+ }
+ return enc.EncodeToString(data)
+ case "base32hex":
+ enc := base32.HexEncoding
+ if noPadding {
+ enc = enc.WithPadding(base32.NoPadding)
+ }
+ return enc.EncodeToString(data)
+ case "base32crockford":
+ enc := base32.NewEncoding("0123456789ABCDEFGHJKMNPQRSTVWXYZ")
+ if noPadding {
+ enc = enc.WithPadding(base32.NoPadding)
+ }
+ return enc.EncodeToString(data)
+ case "base64":
+ enc := base64.StdEncoding
+ if noPadding {
+ enc = enc.WithPadding(base64.NoPadding)
+ }
+ return enc.EncodeToString(data)
+ case "base64url":
+ enc := base64.URLEncoding
+ if noPadding {
+ enc = enc.WithPadding(base64.NoPadding)
+ }
+ return enc.EncodeToString(data)
+ default:
+ fatalf("unsupported output format: %s", format.Value)
+ panic("unreachable")
+ }
+ }
+
+ bytesCmd := &cobra.Command{
+ Use: "bytes BYTES [-f " + format.Alternatives() + "]",
+ Short: "Generates random bytes and outputs them in the specified format",
+ Args: cobra.ExactArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ num, err := strconv.ParseInt(args[0], 10, 64)
+ if err != nil {
+ fatalf("invalid number of bytes: %v", err)
+ } else if num < 1 {
+ fatalf("number of bytes must be positive")
+ } else if num > 1024*1024 {
+ fatalf("too many bytes requested")
+ }
+
+ data := make([]byte, num)
+ _, err = cryptorand.Read(data)
+ if err != nil {
+ fatalf("failed to generate random bytes: %v", err)
+ }
+
+ if format.Value == "raw" {
+ _, err = os.Stdout.Write(data)
+ if err != nil {
+ fatalf("failed to write: %v", err)
+ }
+ } else {
+ formatted := doFormat(data)
+ if _, err := os.Stdout.WriteString(formatted); err != nil {
+ fatalf("failed to write: %v", err)
+ }
+ _, _ = os.Stdout.Write([]byte{'\n'})
+ }
+ },
+ }
+
+ format.AddFlag(bytesCmd)
+ bytesCmd.Flags().BoolVar(&noPadding, "no-padding", false, "omit padding characters from base32/base64 output")
+ randCmd.AddCommand(bytesCmd)
+}
+
+// Words command
+func init() {
+ var sep string
+ wordsCmd := &cobra.Command{
+ Use: "words [--sep=SEPARATOR] NUM",
+ Short: "Generates random 4-5 letter words for memorable passphrases",
+ Args: cobra.ExactArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ num, err := strconv.ParseInt(args[0], 10, 64)
+ if err != nil {
+ fatalf("invalid number of words: %v", err)
+ } else if num < 1 {
+ fatalf("number of words must be positive")
+ } else if num > 1024 {
+ fatalf("too many words requested")
+ }
+
+ selected, err := words.Select(int(num))
+ if err != nil {
+ fatalf("failed to select words: %v", err)
+ }
+
+ formatted := strings.Join(selected, sep)
+ if _, err := os.Stdout.WriteString(formatted); err != nil {
+ fatalf("failed to write: %v", err)
+ }
+ _, _ = os.Stdout.Write([]byte{'\n'})
+ },
+ }
+
+ wordsCmd.Flags().StringVarP(&sep, "sep", "s", " ", "separator between words")
+ randCmd.AddCommand(wordsCmd)
+}
diff --git a/cli/cmd/encore/root/rootcmd.go b/cli/cmd/encore/root/rootcmd.go
new file mode 100644
index 0000000000..fc1fd99af7
--- /dev/null
+++ b/cli/cmd/encore/root/rootcmd.go
@@ -0,0 +1,60 @@
+package root
+
+import (
+ "github.com/rs/zerolog"
+ "github.com/rs/zerolog/log"
+ "github.com/spf13/cobra"
+
+ "encr.dev/pkg/errlist"
+)
+
+var (
+ Verbosity int
+ traceFile string
+
+ // TraceFile is the file to write trace logs to.
+ // If nil (the default), trace logs are not written.
+ TraceFile *string
+)
+
+var preRuns []func(cmd *cobra.Command, args []string)
+
+// AddPreRun adds a function to be executed before the command runs.
+func AddPreRun(f func(cmd *cobra.Command, args []string)) {
+ preRuns = append(preRuns, f)
+}
+
+var Cmd = &cobra.Command{
+ Use: "encore",
+ Short: "encore is the fastest way of developing backend applications",
+ SilenceErrors: true, // We'll handle displaying an error in our main func
+ CompletionOptions: cobra.CompletionOptions{
+ HiddenDefaultCmd: true, // Hide the "completion" command from help (used for generating auto-completions for the shell)
+ },
+ PersistentPreRun: func(cmd *cobra.Command, args []string) {
+ if traceFile != "" {
+ TraceFile = &traceFile
+ }
+
+ level := zerolog.InfoLevel
+ if Verbosity == 1 {
+ level = zerolog.DebugLevel
+ } else if Verbosity >= 2 {
+ level = zerolog.TraceLevel
+ }
+
+ if Verbosity >= 1 {
+ errlist.Verbose = true
+ }
+ log.Logger = log.Logger.Level(level)
+
+ for _, f := range preRuns {
+ f(cmd, args)
+ }
+ },
+}
+
+func init() {
+ Cmd.PersistentFlags().CountVarP(&Verbosity, "verbose", "v", "verbose output")
+ Cmd.PersistentFlags().StringVar(&traceFile, "trace", "", "file to write execution trace data to")
+}
diff --git a/cli/cmd/encore/run.go b/cli/cmd/encore/run.go
index 8c808fcb91..57656a21e6 100644
--- a/cli/cmd/encore/run.go
+++ b/cli/cmd/encore/run.go
@@ -3,88 +3,150 @@ package main
import (
"context"
"fmt"
- "io"
+ "net"
"os"
"os/signal"
+ "strconv"
- daemonpb "encr.dev/proto/encore/daemon"
+ "github.com/logrusorgru/aurora/v3"
"github.com/spf13/cobra"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
+ "golang.org/x/term"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/cli/cmd/encore/root"
+ "encr.dev/cli/internal/onboarding"
+ daemonpb "encr.dev/proto/encore/daemon"
)
var (
- tunnel bool
- debug bool
- watch bool
+ color bool
+ noColor bool // for "--no-color" compatibility
+ debug = cmdutil.Oneof{
+ Value: "",
+ NoOptDefVal: "enabled",
+ Allowed: []string{"enabled", "break"},
+ Flag: "debug",
+ FlagShort: "", // no short flag
+ Desc: "Compile for debugging (disables some optimizations)",
+ TypeDesc: "string",
+ }
+ watch bool
+ listen string
+ port uint
+ jsonLogs bool
+ browser = cmdutil.Oneof{
+ Value: "auto",
+ Allowed: []string{"auto", "never", "always"},
+ Flag: "browser",
+ FlagShort: "", // no short flag
+ Desc: "Whether to open the local development dashboard in the browser on startup",
+ TypeDesc: "string",
+ }
)
-var runCmd = &cobra.Command{
- Use: "run",
- Short: "Runs your application",
- Run: func(cmd *cobra.Command, args []string) {
- appRoot, wd := determineAppRoot()
- runApp(appRoot, wd, tunnel, watch)
- },
+func init() {
+ runCmd := &cobra.Command{
+ Use: "run [--debug] [--watch=true] [--port=4000] [--listen=]",
+ Short: "Runs your application",
+ Args: cobra.NoArgs,
+ Run: func(cmd *cobra.Command, args []string) {
+ appRoot, wd := determineAppRoot()
+ // If the user didn't explicitly set --watch and we're in debug mode, disable watching
+ // as we typically don't want to swap the process when the user is debugging.
+ if !cmd.Flag("watch").Changed && debug.Value != "" {
+ watch = false
+ }
+ runApp(appRoot, wd)
+ },
+ }
+
+ isTerm := term.IsTerminal(int(os.Stdout.Fd()))
+
+ rootCmd.AddCommand(runCmd)
+ runCmd.Flags().BoolVarP(&watch, "watch", "w", true, "Watch for changes and live-reload")
+ runCmd.Flags().StringVar(&listen, "listen", "", "Address to listen on (for example \"0.0.0.0:4000\")")
+ runCmd.Flags().UintVarP(&port, "port", "p", 4000, "Port to listen on")
+ runCmd.Flags().BoolVar(&jsonLogs, "json", false, "Display logs in JSON format")
+ runCmd.Flags().StringVarP(&nsName, "namespace", "n", "", "Namespace to use (defaults to active namespace)")
+ runCmd.Flags().BoolVar(&color, "color", isTerm, "Whether to display colorized output")
+ runCmd.Flags().BoolVar(&noColor, "no-color", false, "Equivalent to --color=false")
+ runCmd.Flags().MarkHidden("no-color")
+ debug.AddFlag(runCmd)
+ browser.AddFlag(runCmd)
}
// runApp runs the app.
-func runApp(appRoot, wd string, tunnel, watch bool) {
- interrupt := make(chan os.Signal, 1)
- signal.Notify(interrupt, os.Interrupt)
+func runApp(appRoot, wd string) {
+ ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
+ defer cancel()
+
+ // Determine listen addr.
+ var listenAddr string
- ctx, cancel := context.WithCancel(context.Background())
- go func() {
- <-interrupt
- cancel()
- }()
+ if listen == "" {
+ // If we have no listen address at all, listen on localhost.
+ // (we do this so macOS's firewall doesn't ask for permission for the daemon to listen on all interfaces)
+ listenAddr = fmt.Sprintf("127.0.0.1:%d", port)
+ } else if _, _, err := net.SplitHostPort(listen); err == nil {
+ // If --listen is given with a port, use that directly and ignore --port.
+ listenAddr = listen
+ } else {
+ // Otherwise use --listen as the host and --port as the port.
+ listenAddr = net.JoinHostPort(listen, strconv.Itoa(int(port)))
+ }
+
+ browserMode := daemonpb.RunRequest_BROWSER_AUTO
+ switch browser.Value {
+ case "auto":
+ browserMode = daemonpb.RunRequest_BROWSER_AUTO
+ case "never":
+ browserMode = daemonpb.RunRequest_BROWSER_NEVER
+ case "always":
+ browserMode = daemonpb.RunRequest_BROWSER_ALWAYS
+ }
+
+ debugMode := daemonpb.RunRequest_DEBUG_DISABLED
+ switch debug.Value {
+ case "enabled":
+ debugMode = daemonpb.RunRequest_DEBUG_ENABLED
+ case "break":
+ debugMode = daemonpb.RunRequest_DEBUG_BREAK
+ }
daemon := setupDaemon(ctx)
stream, err := daemon.Run(ctx, &daemonpb.RunRequest{
AppRoot: appRoot,
- Tunnel: tunnel,
- Debug: debug,
+ DebugMode: debugMode,
Watch: watch,
WorkingDir: wd,
+ ListenAddr: listenAddr,
+ Environ: os.Environ(),
+ TraceFile: root.TraceFile,
+ Namespace: nonZeroPtr(nsName),
+ Browser: browserMode,
})
if err != nil {
fatal(err)
}
- for {
- msg, err := stream.Recv()
- if err == io.EOF || status.Code(err) == codes.Canceled {
- return
- } else if err != nil {
- fatal(err)
- }
- switch resp := msg.Msg.(type) {
- case *daemonpb.RunMessage_Started:
- fmt.Fprintf(os.Stderr, "Running on http://localhost:%d\n", resp.Started.Port)
- if debug && resp.Started.Pid > 0 {
- fmt.Fprintf(os.Stderr, "Process ID (for debugging): %d\n", resp.Started.Pid)
- }
- if url := resp.Started.TunnelUrl; url != "" {
- fmt.Fprintf(os.Stderr, "Tunnel active on %s\n", url)
- }
+ cmdutil.ClearTerminalExceptFirstNLines(1)
- case *daemonpb.RunMessage_Output:
- if out := resp.Output.Stdout; len(out) > 0 {
- os.Stdout.Write(out)
- }
- if out := resp.Output.Stderr; len(out) > 0 {
- os.Stderr.Write(out)
+ var converter cmdutil.OutputConverter
+ if !jsonLogs {
+ converter = cmdutil.ConvertJSONLogs(cmdutil.Colorize(color && !noColor))
+ }
+ code := cmdutil.StreamCommandOutput(stream, converter)
+ if code == 0 {
+ if state, err := onboarding.Load(); err == nil {
+ if state.DeployHint.Set() {
+ if err := state.Write(); err == nil {
+ _, _ = fmt.Println(aurora.Sprintf("\nHint: deploy your app to the cloud by running: %s", aurora.Cyan("git push encore")))
+ }
}
-
- case *daemonpb.RunMessage_Exit:
- os.Exit(int(resp.Exit.Code))
}
}
+ os.Exit(code)
}
func init() {
- rootCmd.AddCommand(runCmd)
- runCmd.Flags().BoolVar(&tunnel, "tunnel", false, "Create a tunnel to your machine for others to test against")
- runCmd.Flags().BoolVar(&debug, "debug", false, "Compile for debugging (disables some optimizations)")
- runCmd.Flags().BoolVarP(&watch, "watch", "w", true, "Watch for changes and live-reload")
}
diff --git a/cli/cmd/encore/secret.go b/cli/cmd/encore/secret.go
deleted file mode 100644
index 8f4fdfe095..0000000000
--- a/cli/cmd/encore/secret.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package main
-
-import (
- "bytes"
- "context"
- "fmt"
- "io/ioutil"
- "os"
- "syscall"
- "time"
-
- daemonpb "encr.dev/proto/encore/daemon"
- "github.com/spf13/cobra"
- "golang.org/x/crypto/ssh/terminal"
-)
-
-var secretCmd = &cobra.Command{
- Use: "secret",
- Short: "Secret management commands",
-}
-
-var (
- secretDevFlag bool
- secretProdFlag bool
-)
-
-var setSecretCmd = &cobra.Command{
- Use: "set --dev|prod ",
- Short: "Sets a secret value",
- Example: `
-Entering a secret directly in terminal:
-
- $ encore secret set --dev MySecret
- Enter secret value: ...
- Successfully created development secret MySecret.
-
-Piping a secret from a file:
-
- $ encore secret set --dev MySecret < my-secret.txt
- Successfully created development secret MySecret.
-
-Note that this strips trailing newlines from the secret value.
- `,
- Args: cobra.ExactArgs(1),
- Run: func(cmd *cobra.Command, args []string) {
- if !secretDevFlag && !secretProdFlag {
- fatal("must specify either --dev or --prod.")
- } else if secretDevFlag && secretProdFlag {
- fatal("cannot specify both --dev and --prod.")
- }
-
- appRoot, _ := determineAppRoot()
-
- key := args[0]
- var value string
- fd := syscall.Stdin
- if terminal.IsTerminal(int(fd)) {
- fmt.Fprint(os.Stderr, "Enter secret value: ")
- data, err := terminal.ReadPassword(int(fd))
- if err != nil {
- fatal(err)
- }
- value = string(data)
- fmt.Fprintln(os.Stderr)
- } else {
- data, err := ioutil.ReadAll(os.Stdin)
- if err != nil {
- fatal(err)
- }
- value = string(bytes.TrimRight(data, "\r\n"))
- }
-
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
-
- daemon := setupDaemon(ctx)
- typName := "development"
- typ := daemonpb.SetSecretRequest_DEVELOPMENT
- if secretProdFlag {
- typName = "production"
- typ = daemonpb.SetSecretRequest_PRODUCTION
- }
-
- resp, err := daemon.SetSecret(ctx, &daemonpb.SetSecretRequest{
- AppRoot: appRoot,
- Key: key,
- Value: value,
- Type: typ,
- })
- if err != nil {
- fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
- }
- if resp.Created {
- fmt.Fprintf(os.Stderr, "Successfully created %s secret %s!\n", typName, key)
- } else {
- fmt.Fprintf(os.Stderr, "Successfully updated %s secret %s.\n", typName, key)
- }
- },
-}
-
-func init() {
- rootCmd.AddCommand(secretCmd)
- secretCmd.AddCommand(setSecretCmd)
- setSecretCmd.Flags().BoolVarP(&secretDevFlag, "dev", "d", false, "To set the secret for development use")
- setSecretCmd.Flags().BoolVarP(&secretProdFlag, "prod", "p", false, "To set the secret for production use")
-}
diff --git a/cli/cmd/encore/secrets/archive.go b/cli/cmd/encore/secrets/archive.go
new file mode 100644
index 0000000000..13ceb5730b
--- /dev/null
+++ b/cli/cmd/encore/secrets/archive.go
@@ -0,0 +1,60 @@
+package secrets
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/cli/internal/platform"
+)
+
+var archiveSecretCmd = &cobra.Command{
+ Use: "archive ",
+ Short: "Archives a secret value",
+ DisableFlagsInUseLine: true,
+ Args: cobra.ExactArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ doArchiveOrUnarchive(args[0], true)
+ },
+}
+
+var unarchiveSecretCmd = &cobra.Command{
+ Use: "unarchive ",
+ Short: "Unarchives a secret value",
+ DisableFlagsInUseLine: true,
+ Args: cobra.ExactArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ doArchiveOrUnarchive(args[0], false)
+ },
+}
+
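+// doArchiveOrUnarchive sets the archived state of the secret group with
+// the given id and reports the result to stdout.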
+func doArchiveOrUnarchive(groupID string, archive bool) {
+ if !strings.HasPrefix(groupID, "secgrp") {
+ cmdutil.Fatal("the id must begin with 'secgrp_'. Valid ids can be found with 'encore secret list '.")
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ err := platform.UpdateSecretGroup(ctx, platform.UpdateSecretGroupParams{
+ ID: groupID,
+ Archived: &archive,
+ })
+ if err != nil {
+ cmdutil.Fatal(err)
+ }
+ if archive {
+ fmt.Printf("Successfully archived secret group %s.\n", groupID)
+ } else {
+ fmt.Printf("Successfully unarchived secret group %s.\n", groupID)
+ }
+}
+
+func init() {
+ secretCmd.AddCommand(archiveSecretCmd)
+ secretCmd.AddCommand(unarchiveSecretCmd)
+}
diff --git a/cli/cmd/encore/secrets/check.go b/cli/cmd/encore/secrets/check.go
new file mode 100644
index 0000000000..77e395b442
--- /dev/null
+++ b/cli/cmd/encore/secrets/check.go
@@ -0,0 +1,221 @@
+package secrets
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "os"
+ "slices"
+ "strings"
+ "text/tabwriter"
+ "time"
+
+ "github.com/fatih/color"
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/cli/internal/platform"
+ "encr.dev/cli/internal/platform/gql"
+)
+
+var checkSecretCmd = &cobra.Command{
+ Use: "check [envs...]",
+ Short: "Check if secrets are properly set across environments",
+ Long: `Check if secrets are properly set across specified environments.
+This command validates that all secrets required by your application
+are configured in the specified environments.
+
+Example usage:
+ encore secret check prod dev
+ encore secret check production development`,
+ DisableFlagsInUseLine: true,
+ Run: func(cmd *cobra.Command, args []string) {
+ if len(args) == 0 {
+ cmdutil.Fatal("at least one environment must be specified")
+ }
+
+ appSlug := cmdutil.AppSlug()
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ // Get all secrets for the app
+ secrets, err := platform.ListSecretGroups(ctx, appSlug, nil)
+ if err != nil {
+ cmdutil.Fatal(err)
+ }
+
+ // Normalize environment names
+ envNames := normalizeEnvNames(args)
+
+ // Check secrets across environments
+ result := CheckSecretsAcrossEnvs(secrets, envNames)
+
+ // Display results
+ DisplayCheckResults(result, envNames)
+
+ // Exit with error if any secrets are missing
+ if result.HasMissing {
+ os.Exit(1)
+ }
+ },
+}
+
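+// CheckResult holds the outcome of checking every secret across the
+// requested environments.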
+type CheckResult struct {
+ SecretResults []SecretCheckResult
+ HasMissing bool
+}
+
+type SecretCheckResult struct {
+ Key string
+ EnvStatus map[string]bool // env name -> has secret
+}
+
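+// normalizeEnvNames maps common environment aliases (such as "prod" and
+// "dev") to their canonical names, passing anything else through as a
+// specific environment name.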
+func normalizeEnvNames(args []string) []string {
+ var normalized []string
+ for _, arg := range args {
+ switch strings.ToLower(arg) {
+ case "prod", "production":
+ normalized = append(normalized, "production")
+ case "dev", "development":
+ normalized = append(normalized, "development")
+ case "local":
+ normalized = append(normalized, "local")
+ case "preview":
+ normalized = append(normalized, "preview")
+ default:
+ // Assume it's a specific environment name
+ normalized = append(normalized, arg)
+ }
+ }
+ return normalized
+}
+
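+// CheckSecretsAcrossEnvs reports, for each secret, which of the given
+// environments are covered by a non-archived secret group.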
+func CheckSecretsAcrossEnvs(secrets []*gql.Secret, envNames []string) CheckResult {
+ var result CheckResult
+
+ for _, secret := range secrets {
+ secretResult := SecretCheckResult{
+ Key: secret.Key,
+ EnvStatus: make(map[string]bool),
+ }
+
+ // Initialize all environments as missing
+ for _, envName := range envNames {
+ secretResult.EnvStatus[envName] = false
+ }
+
+ // Check which environments have this secret
+ for _, group := range secret.Groups {
+ if group.ArchivedAt != nil {
+ continue // Skip archived secrets
+ }
+
+ for _, selector := range group.Selector {
+ switch sel := selector.(type) {
+ case *gql.SecretSelectorEnvType:
+ if slices.Contains(envNames, sel.Kind) {
+ secretResult.EnvStatus[sel.Kind] = true
+ }
+ case *gql.SecretSelectorSpecificEnv:
+ if slices.Contains(envNames, sel.Env.Name) {
+ secretResult.EnvStatus[sel.Env.Name] = true
+ }
+ }
+ }
+ }
+
+ // Check if any environment is missing this secret
+ for _, hasSecret := range secretResult.EnvStatus {
+ if !hasSecret {
+ result.HasMissing = true
+ break
+ }
+ }
+
+ result.SecretResults = append(result.SecretResults, secretResult)
+ }
+
+ return result
+}
+
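+// DisplayCheckResults renders the check result as a table with one
+// column per environment, followed by a summary of how many secrets
+// are missing a value.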
+func DisplayCheckResults(result CheckResult, envNames []string) {
+ if len(result.SecretResults) == 0 {
+ fmt.Println("No secrets found.")
+ return
+ }
+
+ var buf bytes.Buffer
+ w := tabwriter.NewWriter(&buf, 0, 0, 3, ' ', tabwriter.StripEscape)
+
+ // Header
+ header := "Secret Key"
+ for _, envName := range envNames {
+ header += fmt.Sprintf("\t%s", strings.Title(envName))
+ }
+ header += "\t\n"
+ fmt.Fprint(w, header)
+
+ const (
+ checkYes = "\u2713"
+ checkNo = "\u2717"
+ )
+
+ missingCount := 0
+
+ // Sort secrets by key for consistent output
+ slices.SortFunc(result.SecretResults, func(a, b SecretCheckResult) int {
+ return strings.Compare(a.Key, b.Key)
+ })
+
+ for _, secretResult := range result.SecretResults {
+ line := secretResult.Key
+ secretHasMissing := false
+
+ for _, envName := range envNames {
+ hasSecret := secretResult.EnvStatus[envName]
+ if hasSecret {
+ line += fmt.Sprintf("\t%s", checkYes)
+ } else {
+ line += fmt.Sprintf("\t%s", checkNo)
+ secretHasMissing = true
+ }
+ }
+ line += "\t\n"
+
+ if secretHasMissing {
+ missingCount++
+ }
+
+ fmt.Fprint(w, line)
+ }
+
+ _ = w.Flush()
+
+ // Add color to the checkmarks now that the table is laid out
+ r := strings.NewReplacer(checkYes, color.GreenString(checkYes), checkNo, color.RedString(checkNo))
+ _, _ = r.WriteString(os.Stdout, buf.String())
+
+ // Print summary
+ if result.HasMissing {
+ if missingCount == 1 {
+ fmt.Printf("\nError: There is 1 secret missing.\n")
+ } else {
+ fmt.Printf("\nError: There are %d secrets missing.\n", missingCount)
+ }
+ } else {
+ fmt.Printf("\nAll secrets are properly configured across specified environments.\n")
+ }
+}
+
+func init() {
+ secretCmd.AddCommand(checkSecretCmd)
+}
\ No newline at end of file
diff --git a/cli/cmd/encore/secrets/list.go b/cli/cmd/encore/secrets/list.go
new file mode 100644
index 0000000000..d481b95ec0
--- /dev/null
+++ b/cli/cmd/encore/secrets/list.go
@@ -0,0 +1,169 @@
+package secrets
+
+import (
+ "bytes"
+ "cmp"
+ "context"
+ "fmt"
+ "os"
+ "slices"
+ "strings"
+ "text/tabwriter"
+ "time"
+
+ "github.com/fatih/color"
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/cli/internal/platform"
+ "encr.dev/cli/internal/platform/gql"
+)
+
+var listSecretCmd = &cobra.Command{
+ Use: "list [keys...]",
+ Short: "Lists secrets, optionally for a specific key",
+ DisableFlagsInUseLine: true,
+ Run: func(cmd *cobra.Command, args []string) {
+ appSlug := cmdutil.AppSlug()
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ var keys []string
+ if len(args) > 0 {
+ keys = args
+ }
+ secrets, err := platform.ListSecretGroups(ctx, appSlug, keys)
+ if err != nil {
+ cmdutil.Fatal(err)
+ }
+
+ if keys == nil {
+ // Print secrets overview
+ var buf bytes.Buffer
+ w := tabwriter.NewWriter(&buf, 0, 0, 3, ' ', tabwriter.StripEscape)
+
+ _, _ = fmt.Fprint(w, "Secret Key\tProduction\tDevelopment\tLocal\tPreview\tSpecific Envs\t\n")
+ const (
+ checkYes = "\u2713"
+ checkNo = "\u2717"
+ )
+ for _, s := range secrets {
+ render := func(b bool) string {
+ if b {
+ return checkYes
+ } else {
+ return checkNo
+ }
+ }
+ d := getSecretEnvDesc(s.Groups)
+ if !d.hasAny {
+ continue
+ }
+ _, _ = fmt.Fprintf(w, "%s\t%v\t%v\t%v\t%v\t", s.Key,
+ render(d.prod), render(d.dev), render(d.local), render(d.preview))
+ // Render specific envs, if any
+ for i, env := range d.specific {
+ if i > 0 {
+ _, _ = fmt.Fprintf(w, ",")
+ }
+ _, _ = fmt.Fprintf(w, "%s", env.Name)
+ }
+
+ _, _ = fmt.Fprint(w, "\t\n")
+ }
+ _ = w.Flush()
+
+ // Add color to the checkmarks now that the table is correctly laid out.
+ // We can't do it before since the tabwriter will get the alignment wrong
+ // if we include a bunch of ANSI escape codes that it doesn't understand.
+ r := strings.NewReplacer(checkYes, color.GreenString(checkYes), checkNo, color.RedString(checkNo))
+ _, _ = r.WriteString(os.Stdout, buf.String())
+ } else {
+ // Specific secrets
+ w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
+ _, _ = fmt.Fprint(w, "ID\tSecret Key\tEnvironment(s)\t\n")
+
+ slices.SortFunc(secrets, func(a, b *gql.Secret) int {
+ return cmp.Compare(a.Key, b.Key)
+ })
+ for _, s := range secrets {
+ // Sort the archived groups to the end
+ slices.SortFunc(s.Groups, func(a, b *gql.SecretGroup) int {
+ aa, ab := a.ArchivedAt != nil, b.ArchivedAt != nil
+ if aa != ab {
+ if aa {
+ return 1
+ } else {
+ return -1
+ }
+ } else if aa {
+ return a.ArchivedAt.Compare(*b.ArchivedAt)
+ } else {
+ return cmp.Compare(a.ID, b.ID)
+ }
+ })
+
+ for _, g := range s.Groups {
+ var sel []string
+ for _, s := range g.Selector {
+ switch s := s.(type) {
+ case *gql.SecretSelectorSpecificEnv:
+ // If we have a specific environment, render the name
+ // instead of the id (which is the default when using s.String()).
+ sel = append(sel, "env:"+s.Env.Name)
+ default:
+ sel = append(sel, s.String())
+ }
+ }
+
+ line := fmt.Sprintf("%s\t%s\t%s\t", g.ID, s.Key, strings.Join(sel, ", "))
+ if g.ArchivedAt != nil {
+ line += "(archived)\t"
+ _, _ = color.New(color.Concealed).Fprintln(w, line)
+ } else {
+ _, _ = fmt.Fprintln(w, line)
+ }
+ }
+ }
+ _ = w.Flush()
+ }
+ },
+}
+
+func init() {
+ secretCmd.AddCommand(listSecretCmd)
+}
+
+type secretEnvDesc struct {
+ hasAny bool // if there are any non-archived groups at all
+ prod, dev, local, preview bool
+ specific []*gql.Env
+}
+
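+// getSecretEnvDesc summarizes which environment types the secret's
+// non-archived groups apply to, collecting specific environments separately.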
+func getSecretEnvDesc(groups []*gql.SecretGroup) secretEnvDesc {
+ var desc secretEnvDesc
+ for _, g := range groups {
+ if g.ArchivedAt != nil {
+ continue
+ }
+ desc.hasAny = true
+ for _, sel := range g.Selector {
+ switch sel := sel.(type) {
+ case *gql.SecretSelectorEnvType:
+ switch sel.Kind {
+ case "production":
+ desc.prod = true
+ case "development":
+ desc.dev = true
+ case "local":
+ desc.local = true
+ case "preview":
+ desc.preview = true
+ }
+ case *gql.SecretSelectorSpecificEnv:
+ desc.specific = append(desc.specific, sel.Env)
+ }
+ }
+ }
+ return desc
+}
diff --git a/cli/cmd/encore/secrets/secrets.go b/cli/cmd/encore/secrets/secrets.go
new file mode 100644
index 0000000000..6a057bb61e
--- /dev/null
+++ b/cli/cmd/encore/secrets/secrets.go
@@ -0,0 +1,17 @@
+package secrets
+
+import (
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/root"
+)
+
+var secretCmd = &cobra.Command{
+ Use: "secret",
+ Short: "Secret management commands",
+ Aliases: []string{"secrets"},
+}
+
+func init() {
+ root.Cmd.AddCommand(secretCmd)
+}
diff --git a/cli/cmd/encore/secrets/set.go b/cli/cmd/encore/secrets/set.go
new file mode 100644
index 0000000000..d07fd3369d
--- /dev/null
+++ b/cli/cmd/encore/secrets/set.go
@@ -0,0 +1,272 @@
+package secrets
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "slices"
+ "sort"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/cockroachdb/errors"
+ "github.com/spf13/cobra"
+ "golang.org/x/crypto/ssh/terminal"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/cli/internal/platform"
+ "encr.dev/cli/internal/platform/gql"
+ daemonpb "encr.dev/proto/encore/daemon"
+)
+
+var setSecretCmd = &cobra.Command{
+ Use: "set --type ",
+ Short: "Sets a secret value",
+ Long: `
+Sets a secret value for one or more environment types.
+
+The valid environment types are 'prod', 'dev', 'pr' and 'local'.
+`,
+
+ Example: `
+Entering a secret directly in terminal:
+
+ $ encore secret set --type dev,local MySecret
+ Enter secret value: ...
+ Successfully created secret value for MySecret.
+
+Piping a secret from a file:
+
+ $ encore secret set --type dev,local,pr MySecret < my-secret.txt
+ Successfully created secret value for MySecret.
+
+Note that this strips trailing newlines from the secret value.`,
+ Args: cobra.ExactArgs(1),
+ DisableFlagsInUseLine: true,
+ Run: func(cmd *cobra.Command, args []string) {
+ setSecret(args[0])
+ },
+}
+
+var secretEnvs secretEnvSelector
+
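+// secretEnvSelector captures the environment selection flags
+// (--dev/--prod or --type/--env) for the set command.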
+type secretEnvSelector struct {
+ devFlag bool
+ prodFlag bool
+ envTypes []string
+ envNames []string
+}
+
+func init() {
+ secretCmd.AddCommand(setSecretCmd)
+ setSecretCmd.Flags().BoolVarP(&secretEnvs.devFlag, "dev", "d", false, "To set the secret for development use")
+ setSecretCmd.Flags().BoolVarP(&secretEnvs.prodFlag, "prod", "p", false, "To set the secret for production use")
+ setSecretCmd.Flags().StringSliceVarP(&secretEnvs.envTypes, "type", "t", nil, "environment type(s) to set for (comma-separated list)")
+ setSecretCmd.Flags().StringSliceVarP(&secretEnvs.envNames, "env", "e", nil, "environment name(s) to set for (comma-separated list)")
+ _ = setSecretCmd.Flags().MarkHidden("dev")
+ _ = setSecretCmd.Flags().MarkHidden("prod")
+}
+
+func setSecret(key string) {
+ plaintextValue := readSecretValue()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ appRoot, _ := cmdutil.AppRoot()
+ appSlug := cmdutil.AppSlug()
+ sel := secretEnvs.ParseSelector(ctx, appSlug)
+
+ app, err := platform.GetApp(ctx, appSlug)
+ if err != nil {
+ cmdutil.Fatalf("unable to lookup app %s: %v", appSlug, err)
+ }
+
+ // Does a matching secret group already exist?
+ secrets, err := platform.ListSecretGroups(ctx, app.Slug, []string{key})
+ if err != nil {
+ cmdutil.Fatalf("unable to list secrets: %v", err)
+ }
+
+ if matching := findMatchingSecretGroup(secrets, key, sel); matching != nil {
+ // We found a matching secret group. Update it.
+ err := platform.CreateSecretVersion(ctx, platform.CreateSecretVersionParams{
+ GroupID: matching.ID,
+ PlaintextValue: plaintextValue,
+ Etag: matching.Etag,
+ })
+ if err != nil {
+ cmdutil.Fatalf("unable to update secret: %v", err)
+ }
+ fmt.Printf("Successfully updated secret value for %s.\n", key)
+ return
+ }
+
+ // Otherwise create a new secret group.
+ err = platform.CreateSecretGroup(ctx, platform.CreateSecretGroupParams{
+ AppID: app.ID,
+ Key: key,
+ PlaintextValue: plaintextValue,
+ Selector: sel,
+ Description: "", // not yet supported from CLI
+ })
+ if err != nil {
+ if ce, ok := getConflictError(err); ok {
+ var errMsg strings.Builder
+ fmt.Fprintln(&errMsg, "the environment selection conflicts with other secret values:")
+ for _, c := range ce.Conflicts {
+ fmt.Fprintf(&errMsg, "\t%s %s\n", c.GroupID, strings.Join(c.Conflicts, ", "))
+ }
+ cmdutil.Fatal(errMsg.String())
+ }
+ cmdutil.Fatalf("unable to create secret: %v", err)
+ }
+
+ daemon := cmdutil.ConnectDaemon(ctx)
+ if _, err := daemon.SecretsRefresh(ctx, &daemonpb.SecretsRefreshRequest{AppRoot: appRoot}); err != nil {
+ fmt.Fprintln(os.Stderr, "warning: failed to refresh secret secret, skipping:", err)
+ }
+
+ fmt.Printf("Successfully created secret value for %s.\n", key)
+}
+
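+// ParseSelector validates the flag combination and resolves it into a list
+// of secret selectors, exiting with a fatal error on invalid input.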
+func (s secretEnvSelector) ParseSelector(ctx context.Context, appSlug string) []gql.SecretSelector {
+ if s.devFlag && s.prodFlag {
+ cmdutil.Fatal("cannot specify both --dev and --prod")
+ } else if s.devFlag && (len(s.envTypes) > 0 || len(s.envNames) > 0) {
+ cmdutil.Fatal("cannot combine --dev with --type/--env")
+ } else if s.prodFlag && (len(s.envTypes) > 0 || len(s.envNames) > 0) {
+ cmdutil.Fatal("cannot combine --prod with --type/--env")
+ }
+
+ // Look up the environments
+ envMap := make(map[string]string) // name -> id
+ envs, err := platform.ListEnvs(ctx, appSlug)
+ if err != nil {
+ cmdutil.Fatalf("unable to list environments: %v", err)
+ }
+ for _, env := range envs {
+ envMap[env.Slug] = env.ID
+ }
+
+ var sel []gql.SecretSelector
+ if s.devFlag {
+ sel = append(sel,
+ &gql.SecretSelectorEnvType{Kind: "development"},
+ &gql.SecretSelectorEnvType{Kind: "preview"},
+ &gql.SecretSelectorEnvType{Kind: "local"},
+ )
+ } else if s.prodFlag {
+ sel = append(sel, &gql.SecretSelectorEnvType{Kind: "production"})
+ } else {
+ // Parse env types and env names
+ seenTypes := make(map[string]bool)
+ validTypes := map[string]string{
+ // Actual names
+ "development": "development",
+ "production": "production",
+ "preview": "preview",
+ "local": "local",
+
+ // Aliases
+ "dev": "development",
+ "prod": "production",
+ "pr": "preview",
+ "ephemeral": "preview",
+ }
+
+ for _, t := range s.envTypes {
+ val, ok := validTypes[t]
+ if !ok {
+ cmdutil.Fatalf("invalid environment type %q", t)
+ }
+ if !seenTypes[val] {
+ seenTypes[val] = true
+ sel = append(sel, &gql.SecretSelectorEnvType{Kind: val})
+ }
+ }
+ for _, n := range s.envNames {
+ envID, ok := envMap[n]
+ if !ok {
+ cmdutil.Fatalf("environment %q not found", n)
+ }
+ sel = append(sel, &gql.SecretSelectorSpecificEnv{Env: &gql.Env{ID: envID}})
+ }
+ }
+
+ if len(sel) == 0 {
+ cmdutil.Fatal("must specify at least one environment with --type/--env (or --dev/--prod)")
+ }
+ return sel
+}
+
+// readSecretValue reads the secret value from the user.
+// If it's a terminal it becomes an interactive prompt,
+// otherwise it reads from stdin.
+func readSecretValue() string {
+ var value string
+ fd := syscall.Stdin
+ if terminal.IsTerminal(int(fd)) {
+ fmt.Fprint(os.Stderr, "Enter secret value: ")
+ data, err := terminal.ReadPassword(int(fd))
+ if err != nil {
+ cmdutil.Fatal(err)
+ }
+ value = string(data)
+ fmt.Fprintln(os.Stderr)
+ } else {
+ data, err := io.ReadAll(os.Stdin)
+ if err != nil {
+ cmdutil.Fatal(err)
+ }
+ value = string(bytes.TrimRight(data, "\r\n"))
+ }
+ return value
+}
+
+// findMatchingSecretGroup finds whether a matching secret group already exists
+// for the given secret key and selector. It returns nil if no such group exists.
+func findMatchingSecretGroup(secrets []*gql.Secret, key string, selector []gql.SecretSelector) *gql.SecretGroup {
+ // canonicalize returns the secret selectors in canonical form
+ canonicalize := func(sels []gql.SecretSelector) []string {
+ var strs []string
+ for _, s := range sels {
+ strs = append(strs, s.String())
+ }
+ sort.Strings(strs)
+ return strs
+ }
+
+ want := canonicalize(selector)
+ for _, s := range secrets {
+ if s.Key == key {
+ for _, g := range s.Groups {
+ got := canonicalize(g.Selector)
+ if slices.Equal(got, want) {
+ return g
+ }
+ }
+ }
+ }
+ return nil
+}
+
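+// getConflictError reports whether err carries a GraphQL conflict error,
+// unmarshaling it from the error's extensions if so.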
+func getConflictError(err error) (*gql.ConflictError, bool) {
+ var gqlErr gql.ErrorList
+ if !errors.As(err, &gqlErr) {
+ return nil, false
+ }
+ for _, e := range gqlErr {
+ if conflict := e.Extensions["conflict"]; len(conflict) > 0 {
+ var cerr gql.ConflictError
+ if err := json.Unmarshal(conflict, &cerr); err == nil {
+ return &cerr, true
+ }
+ }
+ }
+ return nil, false
+}
diff --git a/cli/cmd/encore/sqlc.go b/cli/cmd/encore/sqlc.go
new file mode 100644
index 0000000000..86de21206e
--- /dev/null
+++ b/cli/cmd/encore/sqlc.go
@@ -0,0 +1,178 @@
+package main
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/spf13/cobra"
+ "github.com/sqlc-dev/sqlc/pkg/cli"
+ "google.golang.org/protobuf/encoding/protojson"
+
+ "encr.dev/proto/encore/daemon"
+)
+
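+// The types below mirror the subset of the sqlc.json configuration format
+// that this command generates on the fly.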
+type sqlcSQL struct {
+ Schema string `json:"schema"`
+ Queries string `json:"queries"`
+ Engine string `json:"engine"`
+ Codegen []sqlcCodegen `json:"codegen"`
+}
+
+type sqlcCodegen struct {
+ Out string `json:"out"`
+ Plugin string `json:"plugin"`
+}
+
+type sqlcPlugin struct {
+ Name string `json:"name"`
+ Process sqlcProcess `json:"process"`
+}
+
+type sqlcProcess struct {
+ Cmd string `json:"cmd"`
+}
+
+type sqlcConfig struct {
+ Version string `json:"version"`
+ SQL []sqlcSQL `json:"sql"`
+ Plugins []sqlcPlugin `json:"plugins"`
+}
+
+func init() {
+ var useProto bool
+ genCmd := &cobra.Command{
+ Use: "generate-sql-schema ",
+ Short: "Plugin for SQLC: stores the parsed sqlc model in a protobuf file",
+ Hidden: true,
+ Args: cobra.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ schemaPath, err := filepath.Abs(args[0])
+ if err != nil {
+ return err
+ }
+ tmpDir, err := os.MkdirTemp("", "encore-sqlc")
+ if err != nil {
+ return err
+ }
+ defer func() {
+ _ = os.RemoveAll(tmpDir)
+ }()
+
+ sqlcPath := filepath.Join(tmpDir, "sqlc.json")
+ queryPath := filepath.Join(tmpDir, "query.sql")
+ outPath := filepath.Join(tmpDir, "gen")
+ // SQLC requires the schema path to be relative to the sqlc.json file
+ schemaPath, err = filepath.Rel(tmpDir, schemaPath)
+ if err != nil {
+ return err
+ }
+ cfg := sqlcConfig{
+ Version: "2",
+ SQL: []sqlcSQL{
+ {
+ Schema: schemaPath,
+ Queries: "query.sql",
+ Engine: "postgresql",
+ Codegen: []sqlcCodegen{
+ {
+ Out: "gen",
+ Plugin: "encore",
+ },
+ },
+ },
+ },
+ Plugins: []sqlcPlugin{
+ {
+ Name: "encore",
+ Process: sqlcProcess{
+ Cmd: os.Args[0],
+ },
+ },
+ },
+ }
+ cfgData, err := json.Marshal(cfg)
+ if err != nil {
+ return err
+ }
+ err = os.WriteFile(sqlcPath, cfgData, 0644)
+ if err != nil {
+ return err
+ }
+
+ // SQLC requires at least one query to be present in the query file
+ err = os.WriteFile(queryPath, []byte("-- name: Dummy :one\nSELECT 'dummy';"), 0644)
+ if err != nil {
+ return err
+ }
+
+ res := cli.Run([]string{"generate", "-f", sqlcPath})
+ if res != 0 {
+ return fmt.Errorf("sqlc exited with code %d", res)
+ }
+ reqBlob, err := os.ReadFile(filepath.Join(outPath, "output.pb"))
+ if err != nil {
+ return err
+ }
+ if !useProto {
+ req := &daemon.SQLCPlugin_GenerateRequest{}
+ if err := proto.Unmarshal(reqBlob, req); err != nil {
+ return err
+ }
+ reqBlob, err = protojson.MarshalOptions{
+ EmitUnpopulated: true,
+ Indent: " ",
+ UseProtoNames: true,
+ }.Marshal(req)
+ if err != nil {
+ return err
+ }
+ }
+
+ w := bufio.NewWriter(os.Stdout)
+ if _, err := w.Write(reqBlob); err != nil {
+ return err
+ }
+ if err := w.Flush(); err != nil {
+ return err
+ }
+ return nil
+ },
+ }
+ genCmd.Flags().BoolVar(&useProto, "proto", false, "Output the parsed schema as protobuf")
+ pluginCmd := &cobra.Command{
+ Use: "/plugin.CodegenService/Generate",
+ Short: "Plugin for SQLC: stores the parsed sqlc model in a protobuf file",
+ Hidden: true,
+ Args: cobra.NoArgs,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ reqBlob, err := io.ReadAll(os.Stdin)
+ if err != nil {
+ return err
+ }
+ resp := &daemon.SQLCPlugin_GenerateResponse{
+ Files: []*daemon.SQLCPlugin_File{
+ {
+ Name: "output.pb",
+ Contents: reqBlob,
+ },
+ },
+ }
+ respBlob, err := proto.Marshal(resp)
+ if err != nil {
+ return err
+ }
+ w := bufio.NewWriter(os.Stdout)
+ if _, err := w.Write(respBlob); err != nil {
+ return err
+ }
+ if err := w.Flush(); err != nil {
+ return err
+ }
+ return nil
+ },
+ }
+ rootCmd.AddCommand(genCmd)
+ rootCmd.AddCommand(pluginCmd)
+}
diff --git a/cli/cmd/encore/telemetry.go b/cli/cmd/encore/telemetry.go
new file mode 100644
index 0000000000..f5ae08146a
--- /dev/null
+++ b/cli/cmd/encore/telemetry.go
@@ -0,0 +1,129 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "slices"
+ "strings"
+
+ "github.com/logrusorgru/aurora/v3"
+ "github.com/rs/zerolog/log"
+ "github.com/spf13/cobra"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/cli/cmd/encore/root"
+ "encr.dev/cli/internal/telemetry"
+ "encr.dev/pkg/fns"
+ daemonpb "encr.dev/proto/encore/daemon"
+)
+
+var TelemetryDisabledByEnvVar = os.Getenv("DISABLE_ENCORE_TELEMETRY") == "1"
+var TelemetryDebugByEnvVar = os.Getenv("ENCORE_TELEMETRY_DEBUG") == "1"
+
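+// printTelemetryStatus prints the current telemetry status (and, with
+// increased verbosity, the anonymous install ID) to stdout.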
+func printTelemetryStatus() {
+ status := aurora.Green("Enabled").String()
+ if !telemetry.IsEnabled() {
+ status = aurora.Red("Disabled").String()
+ }
+ fmt.Println(aurora.Sprintf("%s\n", aurora.Bold("Encore Telemetry")))
+ items := [][2]string{
+ {"Status", status},
+ }
+ if root.Verbosity > 0 {
+ items = append(items, [2]string{"Install ID", telemetry.GetAnonID()})
+ }
+ if telemetry.IsDebug() {
+ items = append(items, [2]string{"Debug", aurora.Green("Enabled").String()})
+ }
+ maxKeyLen := fns.Max(items, func(entry [2]string) int { return len(entry[0]) })
+ for _, item := range items {
+ spacing := strings.Repeat(" ", maxKeyLen-len(item[0]))
+ fmt.Printf("%s: %s%s\n", item[0], spacing, item[1])
+ }
+ fmt.Println(aurora.Sprintf("\nLearn more: %s", aurora.Underline("https://encore.dev/docs/telemetry")))
+}
+
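+// updateTelemetry pushes the current telemetry configuration to the daemon
+// (if it is running) and persists it to disk.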
+func updateTelemetry(ctx context.Context) {
+ // Update the telemetry config on the daemon if it is running
+ if cmdutil.IsDaemonRunning(ctx) {
+ daemon := cmdutil.ConnectDaemon(ctx)
+ _, err := daemon.Telemetry(ctx, &daemonpb.TelemetryConfig{
+ AnonId: telemetry.GetAnonID(),
+ Enabled: telemetry.IsEnabled(),
+ Debug: telemetry.IsDebug(),
+ })
+ if err != nil {
+ log.Debug().Err(err).Msgf("could not update daemon telemetry: %s", err)
+ }
+ }
+ if err := telemetry.SaveConfig(); err != nil {
+ log.Debug().Err(err).Msgf("could not save telemetry: %s", err)
+ }
+}
+
+var telemetryCommand = &cobra.Command{
+ Use: "telemetry",
+ Short: "Reports the current telemetry status",
+
+ Run: func(cmd *cobra.Command, args []string) {
+ printTelemetryStatus()
+ },
+}
+
+var telemetryEnableCommand = &cobra.Command{
+ Use: "enable",
+ Short: "Enables telemetry reporting",
+ Run: func(cmd *cobra.Command, args []string) {
+ if telemetry.SetEnabled(true) {
+ updateTelemetry(cmd.Context())
+ }
+ printTelemetryStatus()
+ },
+}
+
+var telemetryDisableCommand = &cobra.Command{
+ Use: "disable",
+ Short: "Disables telemetry reporting",
+ Run: func(cmd *cobra.Command, args []string) {
+ if telemetry.SetEnabled(false) {
+ updateTelemetry(cmd.Context())
+ }
+ printTelemetryStatus()
+ },
+}
+
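+// isCommand reports whether cmd or any of its ancestors matches one of the given names.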
+func isCommand(cmd *cobra.Command, name ...string) bool {
+ for cmd != nil {
+ if slices.Contains(name, cmd.Name()) {
+ return true
+ }
+ cmd = cmd.Parent()
+ }
+ return false
+}
+
+func init() {
+ telemetryCommand.AddCommand(telemetryEnableCommand, telemetryDisableCommand)
+ rootCmd.AddCommand(telemetryCommand)
+ root.AddPreRun(func(cmd *cobra.Command, args []string) {
+ update := false
+ if TelemetryDisabledByEnvVar {
+ update = telemetry.SetEnabled(false)
+ }
+ if cmd.Use == "daemon" {
+ return
+ }
+ update = update || telemetry.SetDebug(TelemetryDebugByEnvVar)
+ if update {
+ go updateTelemetry(cmd.Context())
+ }
+ if telemetry.ShouldShowWarning() && !isCommand(cmd, "version", "completion") {
+ fmt.Println()
+ fmt.Println(aurora.Sprintf("%s: This CLI tool collects usage data to help us improve Encore.", aurora.Bold("Note")))
+ fmt.Println(aurora.Sprintf(" You can disable this by running '%s'.\n", aurora.Yellow("encore telemetry disable")))
+ telemetry.SetShownWarning()
+ }
+ })
+}
diff --git a/cli/cmd/encore/test.go b/cli/cmd/encore/test.go
index f05362fcf3..01a8ea1fb7 100644
--- a/cli/cmd/encore/test.go
+++ b/cli/cmd/encore/test.go
@@ -2,31 +2,82 @@ package main
import (
"context"
+ "encoding/json"
+ "errors"
+ "fmt"
"os"
+ "os/exec"
"os/signal"
+ "path/filepath"
+ "slices"
+ "strings"
+ "time"
- daemonpb "encr.dev/proto/encore/daemon"
"github.com/spf13/cobra"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ daemonpb "encr.dev/proto/encore/daemon"
)
var testCmd = &cobra.Command{
- Use: "test",
+ Use: "test [go test flags]",
Short: "Tests your application",
+ Long: "Takes all the same flags as `go test`.",
+
+ DisableFlagsInUseLine: true,
Run: func(cmd *cobra.Command, args []string) {
- // Support --help but otherwise let all args be passed on to "go test"
- for _, arg := range args {
+ var (
+ traceFile string
+ codegenDebug bool
+ prepareOnly bool
+ noColor bool
+ )
+ // Support specific args but otherwise let all args be passed on to "go test"
+ for i := 0; i < len(args); i++ {
+ arg := args[i]
if arg == "-h" || arg == "--help" {
- cmd.Help()
+ _ = cmd.Help()
return
+ } else if arg == "--trace" || strings.HasPrefix(arg, "--trace=") {
+ // Drop this argument always.
+ args = slices.Delete(args, i, i+1)
+ i--
+
+ // We either have '--trace=file' or '--trace file'.
+ // Handle both.
+ if _, value, ok := strings.Cut(arg, "="); ok {
+ traceFile = value
+ } else {
+ // Make sure there is a next argument.
+ if i < len(args) {
+ traceFile = args[i]
+ args = slices.Delete(args, i, i+1)
+ i--
+ }
+ }
+ } else if arg == "--codegen-debug" {
+ codegenDebug = true
+ args = slices.Delete(args, i, i+1)
+ i--
+ } else if arg == "--prepare" {
+ prepareOnly = true
+ args = slices.Delete(args, i, i+1)
+ i--
+ } else if arg == "--no-color" {
+ noColor = true
+ args = slices.Delete(args, i, i+1)
+ i--
}
}
appRoot, relPath := determineAppRoot()
- runTests(appRoot, relPath, args)
+ runTests(appRoot, relPath, args, traceFile, codegenDebug, prepareOnly, noColor)
},
}
-func runTests(appRoot, testDir string, args []string) {
+func runTests(appRoot, testDir string, args []string, traceFile string, codegenDebug, prepareOnly, noColor bool) {
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
@@ -36,19 +87,121 @@ func runTests(appRoot, testDir string, args []string) {
cancel()
}()
+ converter := cmdutil.ConvertJSONLogs(cmdutil.Colorize(!noColor))
+ if slices.Contains(args, "-json") {
+ converter = convertTestEventOutputOnly(converter)
+ }
+
daemon := setupDaemon(ctx)
+
+ // Is this a node package?
+ packageJsonPath := filepath.Join(appRoot, "package.json")
+ if _, err := os.Stat(packageJsonPath); err == nil || prepareOnly {
+ spec, err := daemon.TestSpec(ctx, &daemonpb.TestSpecRequest{
+ AppRoot: appRoot,
+ WorkingDir: testDir,
+ Args: args,
+ Environ: os.Environ(),
+ })
+ if status.Code(err) == codes.NotFound {
+ fatal("application does not define any tests.\nNote: Add a 'test' script command to package.json to run tests.")
+ } else if err != nil {
+ fatal(err)
+ }
+
+ if prepareOnly {
+ for _, ln := range spec.Environ {
+ fmt.Println(ln)
+ }
+ return
+ }
+
+ cmd := exec.Command(spec.Command, spec.Args...)
+ cmd.Env = spec.Environ
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ cmd.Stdin = os.Stdin
+
+ if err := cmd.Run(); err != nil {
+ var exitErr *exec.ExitError
+ if errors.As(err, &exitErr) {
+ os.Exit(exitErr.ExitCode())
+ } else {
+ fatal(err)
+ }
+ }
+ return
+ }
+
stream, err := daemon.Test(ctx, &daemonpb.TestRequest{
- AppRoot: appRoot,
- WorkingDir: testDir,
- Args: args,
+ AppRoot: appRoot,
+ WorkingDir: testDir,
+ Args: args,
+ Environ: os.Environ(),
+ TraceFile: nonZeroPtr(traceFile),
+ CodegenDebug: codegenDebug,
})
if err != nil {
fatal(err)
}
- streamCommandOutput(stream)
+ os.Exit(cmdutil.StreamCommandOutput(stream, converter))
}
func init() {
testCmd.DisableFlagParsing = true
rootCmd.AddCommand(testCmd)
+
+ // Even though we've disabled flag parsing, we still need to define the flags
+ // so that the help text is correct.
+ testCmd.Flags().Bool("codegen-debug", false, "Dump generated code (for debugging Encore's code generation)")
+ testCmd.Flags().Bool("prepare", false, "Prepare for running tests (without running them)")
+ testCmd.Flags().String("trace", "", "Specifies a trace file to write trace information about the parse and compilation process to.")
+ testCmd.Flags().Bool("no-color", false, "Disable colorized output")
+}
+
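+// convertTestEventOutputOnly wraps converter so it is applied only to the
+// Output payload of 'go test -json' events, passing all other lines through unchanged.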
+func convertTestEventOutputOnly(converter cmdutil.OutputConverter) cmdutil.OutputConverter {
+ return func(line []byte) []byte {
+ // If this isn't a JSON log line, just return it as-is
+ if len(line) == 0 || line[0] != '{' {
+ return line
+ }
+
+ testEvent := &testJSONEvent{}
+ if err := json.Unmarshal(line, testEvent); err == nil && testEvent.Action == "output" {
+ if testEvent.Output != nil && len(*testEvent.Output) > 0 && (*testEvent.Output)[0] == '{' {
+ convertedLogs := textBytes(converter(*testEvent.Output))
+ testEvent.Output = &convertedLogs
+
+ newLine, err := json.Marshal(testEvent)
+ if err == nil {
+ return append(newLine, '\n')
+ }
+ }
+ }
+
+ return line
+ }
+}
+
+// testJSONEvent and textBytes are taken from the Go source code.
+type testJSONEvent struct {
+ Time *time.Time `json:",omitempty"`
+ Action string
+ Package string `json:",omitempty"`
+ Test string `json:",omitempty"`
+ Elapsed *float64 `json:",omitempty"`
+ Output *textBytes `json:",omitempty"`
+}
+
+// textBytes is a hack to get JSON to emit a []byte as a string
+// without actually copying it to a string.
+// It implements encoding.TextMarshaler, which returns its text form as a []byte,
+// and then json encodes that text form as a string (which was our goal).
+type textBytes []byte
+
+func (b *textBytes) MarshalText() ([]byte, error) { return *b, nil }
+func (b *textBytes) UnmarshalText(in []byte) error {
+ *b = in
+ return nil
}
diff --git a/cli/cmd/encore/version.go b/cli/cmd/encore/version.go
index f7d7274382..d7cbf1d412 100644
--- a/cli/cmd/encore/version.go
+++ b/cli/cmd/encore/version.go
@@ -1,24 +1,95 @@
package main
import (
+ "context"
"fmt"
"os"
+ "strings"
+ "time"
+ "github.com/logrusorgru/aurora/v3"
"github.com/spf13/cobra"
-)
-// Version is the version of the encore binary.
-// It is set using `go build -ldflags "-X main.Version=v1.2.3"`.
-var Version string
+ "encr.dev/cli/internal/update"
+ "encr.dev/internal/version"
+)
var versionCmd = &cobra.Command{
Use: "version",
Short: "Reports the current version of the encore application",
+
+ DisableFlagsInUseLine: true,
+ Run: func(cmd *cobra.Command, args []string) {
+ var (
+ ver *update.LatestVersion
+ err error
+ )
+ if version.Version != "" {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ ver, err = update.Check(ctx)
+ }
+
+ // NOTE: This output format is relied on by the Encore IntelliJ plugin.
+ // Don't change this without considering its impact on that plugin.
+ fmt.Fprintln(os.Stdout, "encore version", version.Version)
+
+ if err != nil {
+ fatalf("could not check for update: %v", err)
+ } else if ver != nil && ver.IsNewer(version.Version) {
+ if ver.ForceUpgrade {
+ fmt.Println(aurora.Red("An urgent security update for Encore is available."))
+ if ver.SecurityNotes != "" {
+ fmt.Println(aurora.Sprintf(aurora.Yellow("%s"), ver.SecurityNotes))
+ }
+
+ versionUpdateCmd.Run(cmd, args)
+ } else {
+ if ver.SecurityUpdate {
+ fmt.Println(aurora.Sprintf(aurora.Red("A security update is update available: %s -> %s\nUpdate with: encore version update"), version.Version, ver.Version()))
+
+ if ver.SecurityNotes != "" {
+ fmt.Println(aurora.Sprintf(aurora.Yellow("%s"), ver.SecurityNotes))
+ }
+ } else {
+ fmt.Println(aurora.Sprintf(aurora.Yellow("Update available: %s -> %s\nUpdate with: encore version update"), version.Version, ver.Version()))
+ }
+ }
+ }
+ },
+}
+
+var versionUpdateCmd = &cobra.Command{
+ Use: "update",
+ Short: "Checks for an update of encore and, if one is available, runs the appropriate command to update it.",
+
+ DisableFlagsInUseLine: true,
Run: func(cmd *cobra.Command, args []string) {
- fmt.Fprintln(os.Stdout, "encore version", Version)
+ if version.Version == "" || strings.HasPrefix(version.Version, "devel") {
+ fatal("cannot update development build, first install Encore from https://encore.dev/docs/install")
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ ver, err := update.Check(ctx)
+ if err != nil {
+ fatalf("could not check for update: %v", err)
+ }
+ if ver.IsNewer(version.Version) {
+ fmt.Printf("Upgrading Encore to %v...\n", ver.Version())
+
+ if err := ver.DoUpgrade(os.Stdout, os.Stderr); err != nil {
+ fatalf("could not update: %v", err)
+ os.Exit(1)
+ }
+ os.Exit(0)
+ } else {
+ fmt.Println("Encore already up to date.")
+ }
},
}
func init() {
+ versionCmd.AddCommand(versionUpdateCmd)
rootCmd.AddCommand(versionCmd)
}
diff --git a/cli/cmd/encore/vpn.go b/cli/cmd/encore/vpn.go
deleted file mode 100644
index 80060d9747..0000000000
--- a/cli/cmd/encore/vpn.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package main
-
-import (
- "context"
- "errors"
- "fmt"
- "log"
- "os"
- "time"
-
- "encr.dev/cli/internal/conf"
- "encr.dev/cli/internal/wgtunnel"
- "encr.dev/cli/internal/xos"
- "github.com/spf13/cobra"
- "golang.zx2c4.com/wireguard/wgctrl/wgtypes"
-)
-
-var vpnCmd = &cobra.Command{
- Use: "vpn",
- Short: "VPN management commands",
-}
-
-func init() {
- rootCmd.AddCommand(vpnCmd)
-
- startCmd := &cobra.Command{
- Use: "start",
- Short: "Sets up a secure connection to private environments",
- Run: func(cmd *cobra.Command, args []string) {
- if admin, err := xos.IsAdminUser(); err == nil && !admin {
- log.Fatalf("fatal: must start VPN as root user (use 'sudo'?)")
- }
-
- cfg, err := conf.OriginalUser("")
- if errors.Is(err, os.ErrNotExist) {
- log.Fatalf("fatal: not logged in. run 'encore auth login' first")
- } else if err != nil {
- log.Fatalf("fatal: could not read encore config (did you run 'encore auth login'?): %v", err)
- } else if cfg.WireGuard.PrivateKey == "" || cfg.WireGuard.PublicKey == "" {
- log.Println("encore: generating WireGuard key...")
- pub, priv, err := wgtunnel.GenKey()
- if err != nil {
- log.Fatalf("fatal: could not generate WireGuard key: %v", err)
- }
- cfg.WireGuard.PublicKey = pub.String()
- cfg.WireGuard.PrivateKey = priv.String()
- if err := conf.Write(cfg); err != nil {
- log.Fatalf("fatal: could not write updated config: %v", err)
- }
- log.Println("encore: successfully generated and persisted WireGuard key")
- }
-
- pubKey, err1 := wgtypes.ParseKey(cfg.WireGuard.PublicKey)
- privKey, err2 := wgtypes.ParseKey(cfg.WireGuard.PrivateKey)
- if err1 != nil || err2 != nil {
- fatalf("could not parse public/private key: %v/%v", err1, err2)
- }
-
- log.Printf("encore: registering device with server...")
- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
- defer cancel()
- ip, err := wgtunnel.RegisterDevice(ctx, pubKey)
- if err != nil {
- log.Fatalf("fatal: could not register device: %v", err)
- }
- log.Printf("encore: successfully registered device, assigned address %s", ip)
-
- log.Printf("encore: starting WireGuard tunnel...")
- cc := &wgtunnel.ClientConfig{
- Addr: ip,
- PrivKey: privKey,
- }
- if err := wgtunnel.Start(cc, nil); err != nil {
- log.Fatalf("fatal: could not start tunnel: %v", err)
- }
- log.Printf("encore: successfully started WireGuard tunnel")
- },
- }
- vpnCmd.AddCommand(startCmd)
-
- stopCmd := &cobra.Command{
- Use: "stop",
- Short: "Stops the VPN connection",
- Run: func(cmd *cobra.Command, args []string) {
- if err := wgtunnel.Stop(); os.IsPermission(err) {
- log.Fatal("fatal: permission denied to stop tunnel (use 'sudo'?)")
- } else if err != nil {
- log.Fatalf("fatal: could not stop tunnel: %v", err)
- }
- log.Printf("encore: stopped WireGuard tunnel")
- },
- }
- vpnCmd.AddCommand(stopCmd)
-
- statusCmd := &cobra.Command{
- Use: "status",
- Short: "Determines the status of the VPN connection",
- Run: func(cmd *cobra.Command, args []string) {
- if running, err := wgtunnel.Status(); os.IsPermission(err) {
- log.Fatal("fatal: permission denied to check tunnel status (use 'sudo'?)")
- } else if err != nil {
- log.Fatalf("fatal: could not check tunnel status: %v", err)
- } else if running {
- fmt.Fprintln(os.Stdout, "running")
- } else {
- fmt.Fprintln(os.Stdout, "not running")
- }
- },
- }
- vpnCmd.AddCommand(statusCmd)
-}
diff --git a/cli/cmd/encore/vpn_darwin.go b/cli/cmd/encore/vpn_darwin.go
deleted file mode 100644
index 0c85936d6e..0000000000
--- a/cli/cmd/encore/vpn_darwin.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package main
-
-import (
- "log"
-
- "encr.dev/cli/internal/wgtunnel"
- "encr.dev/cli/internal/xos"
- "github.com/spf13/cobra"
-)
-
-func init() {
- runCmd := &cobra.Command{
- Use: "__run",
- Short: "Runs the WireGuard tunnel synchronously.",
- Hidden: true,
- Run: func(cmd *cobra.Command, args []string) {
- if admin, err := xos.IsAdminUser(); err == nil && !admin {
- log.Fatalf("fatal: must start VPN as root user (use 'sudo'?)")
- }
- if err := wgtunnel.Run(); err != nil {
- fatal(err)
- }
- },
- }
-
- vpnCmd.AddCommand(runCmd)
-}
diff --git a/cli/cmd/encore/vpn_windows.go b/cli/cmd/encore/vpn_windows.go
deleted file mode 100644
index 7ba2d477e3..0000000000
--- a/cli/cmd/encore/vpn_windows.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package main
-
-import (
- "fmt"
- "os"
-
- "encr.dev/cli/internal/winsvc"
- "github.com/spf13/cobra"
-)
-
-func init() {
- installCmd := &cobra.Command{
- Hidden: true,
- Use: "svc-install",
- Short: "Installs the windows service for the WireGuard tunnel",
- Args: cobra.ExactArgs(1),
- Run: func(cmd *cobra.Command, args []string) {
- if err := winsvc.Install(args[0]); err != nil {
- fatal(err)
- }
- },
- }
- vpnCmd.AddCommand(installCmd)
-
- uninstallCmd := &cobra.Command{
- Hidden: true,
- Use: "svc-uninstall",
- Short: "Uninstalls the windows service",
- Args: cobra.ExactArgs(1),
- Run: func(cmd *cobra.Command, args []string) {
- if err := winsvc.Uninstall(args[0]); err != nil {
- fatal(err)
- }
- },
- }
- vpnCmd.AddCommand(uninstallCmd)
-
- statusCmd := &cobra.Command{
- Hidden: true,
- Use: "svc-status",
- Short: "Uninstalls the windows service",
- Args: cobra.ExactArgs(1),
- Run: func(cmd *cobra.Command, args []string) {
- installed, err := winsvc.Status(args[0])
- if err != nil {
- fatal(err)
- }
- if installed {
- fmt.Fprintln(os.Stdout, "installed")
- } else {
- fmt.Fprintln(os.Stdout, "not installed")
- }
- },
- }
- vpnCmd.AddCommand(statusCmd)
-
- runCmd := &cobra.Command{
- Hidden: true,
- Use: "svc-run",
- Short: "Runs the windows service",
- Args: cobra.ExactArgs(1),
- Run: func(cmd *cobra.Command, args []string) {
- if err := winsvc.Run(args[0]); err != nil {
- fatal(err)
- }
- },
- }
- vpnCmd.AddCommand(runCmd)
-}
diff --git a/cli/cmd/git-remote-encore/main.go b/cli/cmd/git-remote-encore/main.go
index b26a36dae1..c22bdcc0d9 100644
--- a/cli/cmd/git-remote-encore/main.go
+++ b/cli/cmd/git-remote-encore/main.go
@@ -9,9 +9,10 @@ import (
"net/url"
"os"
"os/exec"
+ "path/filepath"
"strings"
- "encr.dev/cli/internal/conf"
+ "encr.dev/internal/conf"
)
func main() {
@@ -21,6 +22,20 @@ func main() {
}
}
+var isLocalTest = (func() bool {
+ return filepath.Base(os.Args[0]) == "git-remote-encorelocal"
+})()
+
+// remoteScheme is the remote scheme we expect.
+// It's "encore" in general but "encorelocal" for local development.
+var remoteScheme = (func() string {
+ if isLocalTest {
+ return "encorelocal"
+ } else {
+ return "encore"
+ }
+})()
+
func run(args []string) error {
stdin := bufio.NewReader(os.Stdin)
stdout := os.Stdout
@@ -52,12 +67,12 @@ func connect(args []string, svc string) error {
uri, err := url.Parse(args[2])
if err != nil {
return fmt.Errorf("connect %s: invalid remote uri: %v", os.Args[2], err)
- } else if uri.Scheme != "encore" {
- return fmt.Errorf("connect %s: expected remote scheme \"encore\", got %q", os.Args[2], uri.Scheme)
+ } else if uri.Scheme != remoteScheme {
+ return fmt.Errorf("connect %s: expected remote scheme %q, got %q", os.Args[2], remoteScheme, uri.Scheme)
}
appID := uri.Hostname()
- ts := &conf.TokenSource{}
+ ts := conf.NewTokenSource()
tok, err := ts.Token()
if err != nil {
return fmt.Errorf("could not get Encore auth token: %v", err)
@@ -68,25 +83,46 @@ func connect(args []string, svc string) error {
return err
}
keyPath := f.Name()
- defer os.Remove(keyPath)
+ defer func() { _ = os.Remove(keyPath) }()
+
if err := f.Chmod(0600); err != nil {
- f.Close()
+ _ = f.Close()
return err
} else if _, err := f.Write([]byte(SentinelPrivateKey)); err != nil {
- f.Close()
+ _ = f.Close()
return err
} else if err := f.Close(); err != nil {
return err
}
+ // Create a dummy config file so that we can work around any host overrides
+ // present on the system.
+ cfg, err := os.CreateTemp("", "encore-dummy-ssh-config")
+ if err != nil {
+ return err
+ }
+ cfgPath := cfg.Name()
+ defer func() { _ = os.Remove(cfgPath) }()
+
// Communicate to Git that the connection is established.
- os.Stdout.Write([]byte("\n"))
+ _, _ = os.Stdout.Write([]byte("\n"))
+
+ sshServer, port := "git.encore.dev", "22"
+ if isLocalTest {
+ sshServer, port = "localhost", "9040"
+ }
// Set up an SSH tunnel with a sentinel key as a way to signal
// Encore to use token-based authentication, and pass the token
// as part of the command.
- cmd := exec.Command("ssh", "-x", "-T", "-o", "IdentitiesOnly=yes", "-i", keyPath,
- "git.encore.dev", fmt.Sprintf("token=%s %s '%s'", tok.AccessToken, svc, appID))
+ cmd := exec.Command("ssh",
+ "-x", "-T",
+ "-F", cfgPath,
+ "-o", "IdentitiesOnly=yes",
+ "-i", keyPath,
+ "-p", port,
+ sshServer,
+ fmt.Sprintf("token=%s %s '%s'", tok.AccessToken, svc, appID))
cmd.Env = []string{}
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
@@ -100,20 +136,12 @@ func connect(args []string, svc string) error {
//
// NOTE: This is not a security problem. The key is meant to be public
// and does not serve as a means of authentication.
+// nosemgrep
const SentinelPrivateKey = `-----BEGIN OPENSSH PRIVATE KEY-----
-b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAlwAAAAdzc2gtcn
-NhAAAAAwEAAQAAAIEA1ZrV6bnLgKI7cZHGn3Z93jTATaGjw6ytPdSorrnwYRP3K833BC19
-ANPSWAoXcYXNDIR90j/V+sd5ILv5NUoctdV1+2J8jzW/hedj0HuDou1YruNHVowfE3JFYr
-6eMK15kvc/K9EsIl/TfH9/RiWVnWq1wHwOdZtH2UZE9QdT+r0AAAIIrcJlP63CZT8AAAAH
-c3NoLXJzYQAAAIEA1ZrV6bnLgKI7cZHGn3Z93jTATaGjw6ytPdSorrnwYRP3K833BC19AN
-PSWAoXcYXNDIR90j/V+sd5ILv5NUoctdV1+2J8jzW/hedj0HuDou1YruNHVowfE3JFYr6e
-MK15kvc/K9EsIl/TfH9/RiWVnWq1wHwOdZtH2UZE9QdT+r0AAAADAQABAAAAgBndpgmndf
-0dqBUYkfS9ZICD4sWDzVDkmBXkqoh9+53FzSiAyGi5GWoAPHhswGn+ydW6NYJAOKklfoV4
-PbU2REOHwXYblAZmDmPksSN1IbjDdFZ+0vXFUmS2k30eqIgIEGOrN1tnLXoK+B4kwFQ1IN
-UMMpB39vRyhyrEGv+S4gQBAAAAQFiOrnRAtY50ZiqXND3SdCnQxnjmUxcE7pcQaaQK6KMP
-A7bQpMNzJop/UpNRIjLb5bPG9FPgTzQ5+5l4fGL5OwYAAABBAP4V8q7KQLqoPsHaWG7pga
-iE9cUzE9hle2zXiRCcXt2qXxB7P1U9DQVdzVwarfAggIGRsqjJmEDe69F/I4QAkj8AAABB
-ANc20AXzRmnneRyZuOEUhTsdNWcQf9qv+tQh3DDr7SW7NhuSKW9CqC18nbDckEp0yOCjIR
-k5HAPXd2pDop0UvAMAAAAPZWFuZHJlQG0xLmxvY2FsAQIDBA==
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACCyj3F5Tp1eBIp7rMohszumYzlys/BFfmX/LVkXJS8magAAAJjsp3yz7Kd8
+swAAAAtzc2gtZWQyNTUxOQAAACCyj3F5Tp1eBIp7rMohszumYzlys/BFfmX/LVkXJS8mag
+AAAEDMiwRrf5WET2mTKjKjX7z6vox3n6hKGKbP7V4MDtVre7KPcXlOnV4EinusyiGzO6Zj
+OXKz8EV+Zf8tWRclLyZqAAAAE2VuY29yZS1zZW50aW5lbC1rZXkBAg==
-----END OPENSSH PRIVATE KEY-----
`
diff --git a/cli/cmd/tsbundler-encore/main.go b/cli/cmd/tsbundler-encore/main.go
new file mode 100644
index 0000000000..90aa1e4fa3
--- /dev/null
+++ b/cli/cmd/tsbundler-encore/main.go
@@ -0,0 +1,297 @@
+package main
+
+import (
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/evanw/esbuild/pkg/api"
+
+ "encr.dev/internal/version"
+
+ flag "github.com/spf13/pflag"
+)
+
+var (
+ entryPoints []string
+ specifiedEngines []string
+ // replacementFile string
+ outDir string
+ bundle bool
+ minify bool
+ help bool
+ logLevel int
+)
+
+// main is the entry point for the tsbundler-encore command.
+//
+// It is responsible for parsing the command line flags, validating the input, and then triggering esbuild.
+//
+// Run with --help for more information.
+func main() {
+ // Required flags
+ // flag.StringVar(&replacementFile, "replacements", "", "Replacement file or json object (default read from stdin)")
+
+ // Optional flags
+ flag.StringVar(&outDir, "outdir", "dist", "Output directory")
+ flag.BoolVar(&bundle, "bundle", true, "Bundle all dependencies")
+ flag.BoolVar(&minify, "minify", false, "Minify output (default false)")
+ flag.StringArrayVar(&specifiedEngines, "engine", []string{"node:21"}, "Target engine")
+ flag.CountVarP(&logLevel, "verbose", "v", "Increase logging level (can be specified multiple times)")
+ flag.BoolVarP(&help, "help", "h", false, "Print help")
+ flag.Usage = printHelp
+ flag.Parse()
+
+ entryPoints = flag.Args()
+ if help {
+ printHelp()
+ os.Exit(0)
+ }
+
+ // Validate input (note: these functions will exit on error)
+ validateEntrypointParams()
+ engines := readEngines()
+ // replacements := readReplacementMapping()
+
+ // Create our transformer plugin
+ // rewritePlugin := api.Plugin{
+ // Name: "encore-codegen-transformer",
+ // Setup: func(build api.PluginBuild) {
+ // build.OnLoad(
+ // api.OnLoadOptions{Filter: `\.(ts|js)(x?)$`},
+ // func(args api.OnLoadArgs) (api.OnLoadResult, error) {
+ // replacement, found := replacements[args.Path]
+ // if !found {
+ // return api.OnLoadResult{}, nil
+ // }
+
+ // contentsBytes, err := os.ReadFile(replacement)
+ // if err != nil {
+ // return api.OnLoadResult{}, fmt.Errorf("error reading replacement file: %w", err)
+ // }
+ // content := string(contentsBytes)
+
+ // return api.OnLoadResult{
+ // PluginName: "encore-codegen-transformer",
+ // Contents: &content,
+ // Loader: api.LoaderTS,
+ // }, nil
+ // },
+ // )
+ // },
+ // }
+
+ banner := `// This file was bundled by Encore ` + version.Version + `
+//
+// https://encore.dev`
+
+ outBase := ""
+ if len(entryPoints) == 1 {
+ // If there's a single entrypoint, use its directory as the outbase
+ // as otherwise esbuild won't include the "[dir]" token in the output.
+ outBase = filepath.Dir(filepath.Dir(entryPoints[0]))
+ }
+
+ // Trigger esbuild
+ result := api.Build(api.BuildOptions{
+ // Setup base settings
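+ // Each -v lowers the threshold below Warning, surfacing esbuild's Info, Debug, and Verbose output.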
+ LogLevel: api.LogLevelWarning - api.LogLevel(logLevel),
+ Banner: map[string]string{"js": banner},
+ Charset: api.CharsetUTF8,
+ Sourcemap: api.SourceMapLinked,
+ Packages: api.PackagesExternal,
+ Plugins: []api.Plugin{
+ // rewritePlugin,
+ },
+ TreeShaking: api.TreeShakingTrue,
+
+ // Set our build target
+ Platform: api.PlatformNode,
+ Format: api.FormatESModule,
+ Target: api.ES2022,
+ Engines: engines,
+
+ // Minification settings
+ MinifyWhitespace: minify,
+ MinifySyntax: minify,
+ MinifyIdentifiers: minify,
+
+ // Pass in what we want to build
+ EntryNames: "[dir]/[name]",
+ EntryPoints: entryPoints,
+ Bundle: bundle,
+ Outdir: outDir,
+ Outbase: outBase,
+ Write: true, // Write to outdir
+ OutExtension: map[string]string{
+ ".js": ".mjs",
+ },
+ Define: map[string]string{
+ "ENCORE_DROP_TESTS": "true",
+ },
+ })
+
+ if len(result.Errors) > 0 {
+ os.Exit(1)
+ }
+}
+
+func printHelp() {
+ binary := filepath.Base(os.Args[0])
+
+ // Base usage help
+ versionStr := fmt.Sprintf("tsbundler-encore (%s)", version.Version)
+ _, _ = fmt.Fprintf(os.Stderr, "%s\n%s\n", versionStr, strings.Repeat("=", len(versionStr)))
+ _, _ = fmt.Fprintf(os.Stderr, "\nUsage: %s [options]\n", binary)
+ flag.PrintDefaults()
+
+ // Replacements help
+ // _, _ = fmt.Fprintf(os.Stderr, "\nReplacements JSON Format:\n")
+ // _, _ = fmt.Fprintf(os.Stderr, " {\n")
+ // _, _ = fmt.Fprintf(os.Stderr, " \"/absolute/path/to/file.ts\": \"/path/to/replacement.ts\",\n")
+ // _, _ = fmt.Fprintf(os.Stderr, " \"/absolute/path/to/file2.ts\": \"/path/to/replacement2.ts\"\n")
+ // _, _ = fmt.Fprintf(os.Stderr, " }\n")
+
+ // Engine help
+ _, _ = fmt.Fprintf(os.Stderr, "\nEngines:\n\nEngines can be specified as a name, or a name and version separated by a colon,\nfor example \"node:21\" or \"node\". Multiple engines can be specified if required.\n\nThe supported engines are:\n")
+ _, _ = fmt.Fprintf(os.Stderr, " - node\n")
+ _, _ = fmt.Fprintf(os.Stderr, " - bun\n")
+ _, _ = fmt.Fprintf(os.Stderr, " - deno\n")
+ _, _ = fmt.Fprintf(os.Stderr, " - rhino\n")
+}
+
+// validateEntrypointParams validates that the entry points parameters was specified and that all entry points exist
+// and are readable on the file system.
+func validateEntrypointParams() {
+ if len(entryPoints) == 0 {
+ _, _ = fmt.Fprintf(os.Stderr, "Error: at least one entry point must be specified\n\n")
+ printHelp()
+ os.Exit(1)
+ }
+
+ for _, entryPoint := range entryPoints {
+ if st, err := os.Stat(entryPoint); errors.Is(err, fs.ErrNotExist) {
+ _, _ = fmt.Fprintf(os.Stderr, "Error: entry point %s does not exist\n", entryPoint)
+ os.Exit(1)
+ } else if err != nil {
+ _, _ = fmt.Fprintf(os.Stderr, "Error: error reading entry point %s: %s\n", entryPoint, err)
+ os.Exit(1)
+ } else if st.IsDir() {
+ _, _ = fmt.Fprintf(os.Stderr, "Error: entry point %s is a directory\n", entryPoint)
+ os.Exit(1)
+ }
+ }
+}
+
+// readReplacementMapping reads a replacement mapping from either a file or stdin depending
+// on if the replacementFile flag was specified.
+//
+// It then validates that all the keys are valid paths to files and the values are valid paths to files.
+// func readReplacementMapping() map[string]string {
+// out := make(map[string]string)
+
+// // If a replacement file was specified, read it
+// replacementFile = strings.TrimSpace(replacementFile)
+// if replacementFile != "" {
+// if replacementFile[0] == '{' {
+// err := json.Unmarshal([]byte(replacementFile), &out)
+// if err != nil {
+// _, _ = fmt.Fprintf(os.Stderr, "Error parsing replacement object: %s\n", err)
+// os.Exit(1)
+// }
+// } else {
+// data, err := os.ReadFile(replacementFile)
+// if err != nil {
+// _, _ = fmt.Fprintf(os.Stderr, "Error reading replacement file: %s\n", err)
+// os.Exit(1)
+// }
+
+// err = json.Unmarshal(data, &out)
+// if err != nil {
+// _, _ = fmt.Fprintf(os.Stderr, "Error parsing replacement file: %s\n", err)
+// os.Exit(1)
+// }
+// }
+// } else {
+// // Check something is being piped in
+// info, _ := os.Stdin.Stat()
+// if (info.Mode()&os.ModeCharDevice) != 0 || info.Size() <= 0 {
+// _, _ = fmt.Fprintf(os.Stderr, "Error: no replacement file specified and nothing piped in\n")
+// os.Exit(1)
+// }
+
+// // Otherwise, read from stdin
+// if err := json.NewDecoder(os.Stdin).Decode(&out); err != nil {
+// _, _ = fmt.Fprintf(os.Stderr, "Error reading replacement file from stdin: %s\n", err)
+// os.Exit(1)
+// }
+// }
+
+// // Validate that all the keys are valid paths to files and the values are valid paths to files
+// for key, value := range out {
+// // Validate key
+// if st, err := os.Stat(key); errors.Is(err, fs.ErrNotExist) {
+// _, _ = fmt.Fprintf(os.Stderr, "Error: replacement key %s does not exist\n", key)
+// os.Exit(1)
+// } else if err != nil {
+// _, _ = fmt.Fprintf(os.Stderr, "Error: error reading replacement key %s: %s\n", key, err)
+// os.Exit(1)
+// } else if st.IsDir() {
+// _, _ = fmt.Fprintf(os.Stderr, "Error: replacement key %s is a directory\n", key)
+// os.Exit(1)
+// } else if !filepath.IsAbs(key) {
+// _, _ = fmt.Fprintf(os.Stderr, "Error: replacement key %s is not an absolute path\n", key)
+// os.Exit(1)
+// }
+
+// // Validate value
+// if st, err := os.Stat(value); errors.Is(err, fs.ErrNotExist) {
+// _, _ = fmt.Fprintf(os.Stderr, "Error: replacement value %s does not exist\n", value)
+// os.Exit(1)
+// } else if err != nil {
+// _, _ = fmt.Fprintf(os.Stderr, "Error: error reading replacement value %s: %s\n", value, err)
+// os.Exit(1)
+// } else if st.IsDir() {
+// _, _ = fmt.Fprintf(os.Stderr, "Error: replacement value %s is a directory\n", value)
+// os.Exit(1)
+// }
+// }
+
+// return out
+// }
+
+// readEngines reads the engines from the specified flag and returns a list of engines.
+func readEngines() []api.Engine {
+ if len(specifiedEngines) == 0 {
+ _, _ = fmt.Fprintf(os.Stderr, "Error: at least one engine must be specified\n\n")
+ printHelp()
+ os.Exit(1)
+ }
+
+ var engines []api.Engine
+ for _, engineName := range specifiedEngines {
+ engineName = strings.ToLower(strings.TrimSpace(engineName))
+ engineName, engineVersion, _ := strings.Cut(engineName, ":")
+
+ var eng api.Engine
+ switch engineName {
+ case "node", "bun": // Note: esbuild doesn't have a "bun" engine (yet), but to future proof we'll alias it to node
+ eng = api.Engine{Name: api.EngineNode, Version: engineVersion}
+ case "deno":
+ eng = api.Engine{Name: api.EngineDeno, Version: engineVersion}
+ case "rhino":
+ eng = api.Engine{Name: api.EngineRhino, Version: engineVersion}
+ default:
+ _, _ = fmt.Fprintf(os.Stderr, "Error: unknown/unsupported engine %s\n\n", engineName)
+ printHelp()
+ os.Exit(1)
+ }
+
+ engines = append(engines, eng)
+ }
+
+ return engines
+}
diff --git a/cli/daemon/apps/apps.go b/cli/daemon/apps/apps.go
new file mode 100644
index 0000000000..8169a9f66f
--- /dev/null
+++ b/cli/daemon/apps/apps.go
@@ -0,0 +1,566 @@
+package apps
+
+import (
+ "database/sql"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/cockroachdb/errors"
+ "github.com/golang/protobuf/proto"
+ "github.com/rs/zerolog/log"
+ "go4.org/syncutil"
+
+ "encore.dev/appruntime/exported/experiments"
+ "encr.dev/cli/internal/manifest"
+ "encr.dev/internal/conf"
+ "encr.dev/internal/env"
+ "encr.dev/internal/goldfish"
+ "encr.dev/pkg/appfile"
+ "encr.dev/pkg/fns"
+ "encr.dev/pkg/watcher"
+ "encr.dev/pkg/xos"
+ meta "encr.dev/proto/encore/parser/meta/v1"
+)
+
+var ErrNotFound = errors.New("app not found")
+
+func NewManager(db *sql.DB) *Manager {
+ return &Manager{
+ db: db,
+ instances: make(map[string]*Instance),
+ }
+}
+
+// Manager keeps track of known apps and watches them for changes.
+type Manager struct {
+ db *sql.DB
+ setupWatch syncutil.Once
+
+ appRegMu sync.Mutex
+ appListeners []func(*Instance)
+
+ watchMu sync.Mutex
+ watchers []WatchFunc
+
+ instanceMu sync.Mutex
+ instances map[string]*Instance // root -> instance
+}
+
+type TrackOption func(*Instance) error
+
+func WithTutorial(tutorial string) TrackOption {
+ return func(i *Instance) error {
+ err := manifest.SetTutorial(i.root, tutorial)
+ if err != nil {
+ return errors.Wrap(err, "set tutorial")
+ }
+ i.tutorial = tutorial
+ return nil
+ }
+}
+
+// Track begins tracking an app, and marks it as updated
+// if the app is already tracked.
+func (mgr *Manager) Track(appRoot string, options ...TrackOption) (*Instance, error) {
+ app, err := mgr.resolve(appRoot)
+ if err != nil {
+ // Resolve failed; app is nil so we must bail before applying options.
+ return nil, err
+ }
+ for _, opt := range options {
+ if err := opt(app); err != nil {
+ return nil, err
+ }
+ }
+ _, err = mgr.db.Exec(`
+ INSERT OR REPLACE INTO app (root, local_id, platform_id, updated_at)
+ VALUES (?, ?, ?, ?)
+ `, app.root, app.localID, app.PlatformID(), time.Now())
+ if err != nil {
+ return nil, errors.Wrap(err, "update app store")
+ }
+ log.Info().Str("app_id", app.PlatformOrLocalID()).Msg("tracking app")
+ return app, nil
+}
+
+// FindLatestByPlatformID finds the most recently updated app instance with the given platformID.
+// If no such app is found it reports an error matching ErrNotFound.
+func (mgr *Manager) FindLatestByPlatformID(platformID string) (*Instance, error) {
+ var root string
+ err := mgr.db.QueryRow(`
+ SELECT root
+ FROM app
+ WHERE platform_id = ?
+ ORDER BY updated_at DESC
+ LIMIT 1
+ `, platformID).Scan(&root)
+ if errors.Is(err, sql.ErrNoRows) {
+ return nil, errors.WithStack(ErrNotFound)
+ } else if err != nil {
+ return nil, errors.Wrap(err, "query app store")
+ }
+
+ return mgr.resolve(root)
+}
+
+func (mgr *Manager) FindLatestByPlatformOrLocalID(id string) (*Instance, error) {
+ // Local IDs do not contain hyphens; platform IDs always do.
+ if strings.Contains(id, "-") {
+ return mgr.FindLatestByPlatformID(id)
+ }
+
+ var root string
+ err := mgr.db.QueryRow(`
+ SELECT root
+ FROM app
+ WHERE local_id = ?
+ ORDER BY updated_at DESC
+ LIMIT 1
+ `, id).Scan(&root)
+ if errors.Is(err, sql.ErrNoRows) {
+ return nil, errors.WithStack(ErrNotFound)
+ } else if err != nil {
+ return nil, errors.Wrap(err, "query app store")
+ }
+
+ return mgr.resolve(root)
+}
+
+// List lists all known apps.
+func (mgr *Manager) List() ([]*Instance, error) {
+ roots, err := mgr.listRoots()
+ if err != nil {
+ return nil, err
+ }
+
+ var apps []*Instance
+ for _, root := range roots {
+ app, err := mgr.resolve(root)
+ if errors.Is(err, fs.ErrNotExist) {
+ log.Debug().Str("root", root).Msg("app no longer exists, skipping")
+ // Delete the stale entry since the app no longer exists on disk.
+ _, _ = mgr.db.Exec(`DELETE FROM app WHERE root = ?`, root)
+ continue
+ } else if err != nil {
+ log.Error().Err(err).Str("root", root).Msg("unable to resolve app")
+ continue
+ }
+ apps = append(apps, app)
+ }
+
+ return apps, nil
+}
+
+func (mgr *Manager) listRoots() ([]string, error) {
+ rows, err := mgr.db.Query(`SELECT root FROM app`)
+ if err != nil {
+ return nil, errors.Wrap(err, "query app roots")
+ }
+ defer fns.CloseIgnore(rows)
+
+ var roots []string
+ for rows.Next() {
+ var root string
+ if err := rows.Scan(&root); err != nil {
+ return nil, errors.Wrap(err, "scan row")
+ }
+ roots = append(roots, root)
+ }
+ err = errors.Wrap(rows.Err(), "iterate rows")
+ return roots, err
+}
+
+// RegisterAppListener registers a callback that gets invoked every time
+// an app is tracked.
+func (mgr *Manager) RegisterAppListener(fn func(*Instance)) {
+ mgr.instanceMu.Lock()
+ defer mgr.instanceMu.Unlock()
+
+ mgr.appRegMu.Lock()
+ mgr.appListeners = append(mgr.appListeners, fn)
+ mgr.appRegMu.Unlock()
+
+ // Call the handler for all existing apps
+ for _, inst := range mgr.instances {
+ fn(inst)
+ }
+}
+
+// WatchFunc is the signature of functions registered as app watchers.
+type WatchFunc func(*Instance, []watcher.Event)
+
+// WatchAll watches all apps for changes.
+func (mgr *Manager) WatchAll(fn WatchFunc) error {
+ err := mgr.setupWatch.Do(func() error {
+ // Begin tracking all known apps by calling List (since it calls resolve).
+ _, err := mgr.List()
+ return err
+ })
+ if err != nil {
+ return err
+ }
+
+ mgr.watchMu.Lock()
+ mgr.watchers = append(mgr.watchers, fn)
+ mgr.watchMu.Unlock()
+ return nil
+}
+
+func (mgr *Manager) onWatchEvent(i *Instance, ev []watcher.Event) {
+ mgr.watchMu.Lock()
+ watchers := mgr.watchers
+ mgr.watchMu.Unlock()
+ for _, fn := range watchers {
+ fn(i, ev)
+ }
+}
+
+// resolve resolves the current information about the app located at appRoot.
+// If the app does not exist (either because appRoot does not exist,
+// or because encore.app does not exist within it), it reports an error
+// matching fs.ErrNotExist.
+func (mgr *Manager) resolve(appRoot string) (*Instance, error) {
+ mgr.instanceMu.Lock()
+ defer mgr.instanceMu.Unlock()
+
+ if existing, ok := mgr.instances[appRoot]; ok {
+ return existing, nil
+ }
+
+ platformID, err := readPlatformID(appRoot)
+ if err != nil {
+ return nil, err
+ }
+
+ // Parse the manifest file
+ man, err := manifest.ReadOrCreate(appRoot)
+ if err != nil {
+ return nil, errors.Wrap(err, "parse manifest")
+ }
+
+ i := NewInstance(appRoot, man.LocalID, platformID)
+ i.tutorial = man.Tutorial
+ i.mgr = mgr
+ if err := i.beginWatch(); err != nil && !errors.Is(err, fs.ErrNotExist) {
+ log.Error().Err(err).Str("id", i.PlatformOrLocalID()).Msg("unable to begin watching app")
+ }
+ mgr.instances[appRoot] = i
+
+ // Notify any listeners about the new app
+ for _, fn := range mgr.appListeners {
+ fn(i)
+ }
+
+ return i, nil
+}
+
+func (mgr *Manager) Close() error {
+ mgr.instanceMu.Lock()
+ defer mgr.instanceMu.Unlock()
+
+ for _, inst := range mgr.instances {
+ if err := inst.Close(); err != nil {
+ log.Err(err).Str("id", inst.PlatformOrLocalID()).Msg("unable to close app instance")
+ // do not return an error here as we want to close all instances
+ }
+ }
+
+ return nil
+}
+
+// Instance describes an app instance known by the Encore daemon.
+type Instance struct {
+ root string
+ localID string
+ platformID *goldfish.Cache[string]
+ tutorial string
+
+ // mgr is a reference to the manager that created it.
+ // It may be nil if an instance was created without a manager.
+ mgr *Manager
+ watcher *watcher.Watcher
+
+ setupWatch syncutil.Once
+ watchMu sync.Mutex
+ nextWatchID WatchSubscriptionID
+ watchers map[WatchSubscriptionID]*watchSubscription
+
+ mdMu sync.Mutex
+ cachedMd *meta.Data
+}
+
+func NewInstance(root, localID, platformID string) *Instance {
+ i := &Instance{
+ root: root,
+ localID: localID,
+ watchers: make(map[WatchSubscriptionID]*watchSubscription),
+ }
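+ // Cache the platform ID with a short TTL so frequent lookups avoid re-reading encore.app on every call.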
+ i.platformID = goldfish.New[string](1*time.Second, i.fetchPlatformID)
+ if platformID != "" {
+ i.platformID.Set(platformID)
+ }
+ return i
+}
+
+func (i *Instance) Tutorial() string {
+ return i.tutorial
+}
+
+// Root returns the filesystem path for the app root.
+// It always returns a non-empty string.
+func (i *Instance) Root() string { return i.root }
+
+// LocalID reports a local, random id unique for this app,
+// as persisted in the .encore/manifest.json file.
+// It always returns a non-empty string.
+func (i *Instance) LocalID() string { return i.localID }
+
+// PlatformID reports the Encore Platform's ID for this app.
+// If the app is not linked it reports the empty string.
+func (i *Instance) PlatformID() string {
+ val, _ := i.platformID.Get()
+ return val
+}
+
+// PlatformOrLocalID reports PlatformID() if set and otherwise LocalID().
+func (i *Instance) PlatformOrLocalID() string {
+ if id := i.PlatformID(); id != "" {
+ return id
+ }
+ return i.localID
+}
+
+// Name returns the platform ID for the app, or if there isn't one
+// it returns the folder name the app is in.
+func (i *Instance) Name() string {
+ if id := i.PlatformID(); id != "" {
+ return id
+ }
+
+ return filepath.Base(i.root)
+}
+
+func (i *Instance) fetchPlatformID() (string, error) {
+ return readPlatformID(i.root)
+}
+
+func readPlatformID(appRoot string) (string, error) {
+ // Parse the encore.app file
+ path := filepath.Join(appRoot, appfile.Name)
+ data, err := os.ReadFile(path)
+ if err != nil {
+ return "", err
+ }
+ encore, err := appfile.Parse(data)
+ if err != nil {
+ return "", errors.Wrap(err, "parse encore.app")
+ }
+ return encore.ID, nil
+}
+
+// Experiments returns the enabled experiments for this app.
+//
+// Note: we read the app file here instead of a cached value so we
+// can detect changes between runs of the compiler if we're in
+// watch mode.
+func (i *Instance) Experiments(environ []string) (*experiments.Set, error) {
+ exp, err := appfile.Experiments(i.root)
+ if err != nil {
+ return nil, err
+ }
+
+ return experiments.FromAppFileAndEnviron(exp, environ)
+}
+
+func (i *Instance) Lang() appfile.Lang {
+ appFile, err := appfile.ParseFile(filepath.Join(i.root, appfile.Name))
+ if err != nil {
+ return appfile.LangGo
+ }
+ return appFile.Lang
+}
+
+func (i *Instance) AppFile() (*appfile.File, error) {
+ return appfile.ParseFile(filepath.Join(i.root, appfile.Name))
+}
+
+func (i *Instance) BuildSettings() (appfile.Build, error) {
+ appFile, err := appfile.ParseFile(filepath.Join(i.root, appfile.Name))
+ if err != nil {
+ return appfile.Build{}, err
+ }
+ return appFile.Build, nil
+}
+
+// GlobalCORS returns the CORS configuration for the app, which
+// will be applied to all API gateways serving the app.
+func (i *Instance) GlobalCORS() (appfile.CORS, error) {
+ cors, err := appfile.GlobalCORS(i.root)
+ if err != nil {
+ return appfile.CORS{}, err
+ }
+
+ // If no global CORS config is set, return the default.
+ if cors == nil {
+ return appfile.CORS{}, nil
+ }
+
+ return *cors, nil
+}
+
+func (i *Instance) Watch(fn WatchFunc) (WatchSubscriptionID, error) {
+ if err := i.beginWatch(); err != nil {
+ return 0, err
+ }
+
+ i.watchMu.Lock()
+ i.nextWatchID++
+ id := i.nextWatchID
+ i.watchers[id] = &watchSubscription{id, fn}
+ i.watchMu.Unlock()
+ return id, nil
+}
+
+func (i *Instance) Unwatch(id WatchSubscriptionID) {
+ i.watchMu.Lock()
+ delete(i.watchers, id)
+ i.watchMu.Unlock()
+}
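+
+// Subscription lifecycle sketch: subscribe, receive batched file events via
+// the WatchFunc, then unsubscribe. The underlying watcher is started lazily
+// on the first Watch call:
+//
+//	id, err := inst.Watch(onFilesChanged) // onFilesChanged is a WatchFunc
+//	...
+//	inst.Unwatch(id)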
+
+func (i *Instance) beginWatch() error {
+ return i.setupWatch.Do(func() error {
+ watch, err := watcher.New(i.PlatformOrLocalID())
+ if err != nil {
+ return errors.Wrap(err, "unable to create watcher")
+ }
+ i.watcher = watch
+
+ if err := i.watcher.RecursivelyWatch(i.root); err != nil {
+ return errors.Wrap(err, "unable to watch app")
+ }
+
+ // If we're in dev mode, we want to watch the runtime
+ // too, so we can develop changes to the runtime without
+ // needing to restart the application.
+ if conf.DevDaemon {
+ if err := i.watcher.RecursivelyWatch(env.EncoreRuntimesPath()); err != nil {
+ return errors.Wrap(err, "unable to watch runtime")
+ }
+ }
+
+ go func() {
+ for {
+ events, ok := i.watcher.WaitForEvents()
+ if !ok {
+ // We're done watching.
+ return
+ }
+
+ if i.mgr != nil {
+ i.mgr.onWatchEvent(i, events)
+ }
+
+ i.watchMu.Lock()
+ watchers := i.watchers
+ i.watchMu.Unlock()
+ for _, sub := range watchers {
+ sub.f(i, events)
+ }
+ }
+ }()
+
+ return nil
+ })
+}
+
+// CachePath returns the path to the cache directory for this app.
+// It creates the directory if it does not exist.
+func (i *Instance) CachePath() (string, error) {
+ cacheDir, err := conf.CacheDir()
+ if err != nil {
+ return "", errors.Wrap(err, "unable to get encore cache dir")
+ }
+
+ // Use the local ID so the cache path stays stable even if the app is later linked to the platform.
+ cacheDir = filepath.Join(cacheDir, i.localID)
+ if err := os.MkdirAll(cacheDir, 0755); err != nil {
+ return "", errors.Wrap(err, "unable to create app cache dir")
+ }
+
+ return cacheDir, nil
+}
+
+// CacheMetadata caches the metadata for this app to the file system.
+func (i *Instance) CacheMetadata(md *meta.Data) error {
+ i.mdMu.Lock()
+ defer i.mdMu.Unlock()
+
+ i.cachedMd = md
+
+ cacheDir, err := i.CachePath()
+ if err != nil {
+ return err
+ }
+
+ data, err := proto.Marshal(md)
+ if err != nil {
+ return errors.Wrap(err, "unable to marshal metadata")
+ }
+
+ err = xos.WriteFile(filepath.Join(cacheDir, "metadata.pb"), data, 0644)
+ if err != nil {
+ return errors.Wrap(err, "unable to write metadata")
+ }
+
+ return nil
+}
+
+// CachedMetadata returns the cached metadata for this app, if any
+func (i *Instance) CachedMetadata() (*meta.Data, error) {
+ i.mdMu.Lock()
+ defer i.mdMu.Unlock()
+
+ if i.cachedMd != nil {
+ return i.cachedMd, nil
+ }
+
+ cacheDir, err := i.CachePath()
+ if err != nil {
+ return nil, err
+ }
+
+ data, err := os.ReadFile(filepath.Join(cacheDir, "metadata.pb"))
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ return nil, nil
+ }
+ return nil, errors.Wrap(err, "unable to read metadata")
+ }
+
+ md := &meta.Data{}
+ err = proto.Unmarshal(data, md)
+ if err != nil {
+ return nil, errors.Wrap(err, "unable to unmarshal metadata")
+ }
+
+ i.cachedMd = md
+ return md, nil
+}
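+
+// Round-trip sketch: CacheMetadata persists to <cache-dir>/<localID>/metadata.pb,
+// and a subsequent CachedMetadata is served from memory without a file read:
+//
+//	_ = inst.CacheMetadata(md)
+//	md2, _ := inst.CachedMetadata() // in-memory hit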
+
+func (i *Instance) Close() error {
+ if i.watcher != nil {
+ return i.watcher.Close()
+ }
+ return nil
+}
+
+type WatchSubscriptionID int64
+
+type watchSubscription struct {
+ id WatchSubscriptionID
+ f WatchFunc
+}
diff --git a/cli/daemon/check.go b/cli/daemon/check.go
new file mode 100644
index 0000000000..09f306cddd
--- /dev/null
+++ b/cli/daemon/check.go
@@ -0,0 +1,39 @@
+package daemon
+
+import (
+ "encr.dev/cli/daemon/run"
+ daemonpb "encr.dev/proto/encore/daemon"
+)
+
+// Check checks the app for compilation errors.
+func (s *Server) Check(req *daemonpb.CheckRequest, stream daemonpb.Daemon_CheckServer) error {
+ slog := &streamLog{stream: stream, buffered: false}
+ log := newStreamLogger(slog)
+
+ app, err := s.apps.Track(req.AppRoot)
+ if err != nil {
+ log.Error().Err(err).Msg("failed to resolve app")
+ streamExit(stream, 1)
+ return nil
+ }
+
+ buildDir, err := s.mgr.Check(stream.Context(), run.CheckParams{
+ App: app,
+ WorkingDir: req.WorkingDir,
+ CodegenDebug: req.CodegenDebug,
+ Environ: req.Environ,
+ Tests: req.ParseTests,
+ })
+
+ exitCode := 0
+ if err != nil {
+ exitCode = 1
+ log.Error().Msg(err.Error())
+ }
+
+ if req.CodegenDebug && buildDir != "" {
+ log.Info().Msgf("wrote generated code to: %s", buildDir)
+ }
+ streamExit(stream, exitCode)
+ return nil
+}
diff --git a/cli/daemon/common.go b/cli/daemon/common.go
new file mode 100644
index 0000000000..0465b5dc32
--- /dev/null
+++ b/cli/daemon/common.go
@@ -0,0 +1,192 @@
+package daemon
+
+import (
+ "io"
+ "net"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "syscall"
+
+ "github.com/logrusorgru/aurora/v3"
+
+ "encr.dev/cli/daemon/run"
+ "encr.dev/cli/internal/onboarding"
+ "encr.dev/pkg/errlist"
+ meta "encr.dev/proto/encore/parser/meta/v1"
+)
+
+// OnStart implements run.EventListener.
+func (s *Server) OnStart(r *run.Run) {}
+
+func (s *Server) OnCompileStart(r *run.Run) {}
+
+// OnReload implements run.EventListener.
+func (s *Server) OnReload(r *run.Run) {}
+
+// OnStop implements run.EventListener.
+func (s *Server) OnStop(r *run.Run) {}
+
+// OnStdout implements run.EventListener.
+func (s *Server) OnStdout(r *run.Run, line []byte) {
+ s.mu.Lock()
+ slog, ok := s.streams[r.ID]
+ s.mu.Unlock()
+
+ if ok {
+ _, _ = slog.Stdout(true).Write(line)
+ }
+}
+
+// OnStderr implements run.EventListener.
+func (s *Server) OnStderr(r *run.Run, line []byte) {
+ s.mu.Lock()
+ slog, ok := s.streams[r.ID]
+ s.mu.Unlock()
+
+ if ok {
+ _, _ = slog.Stderr(true).Write(line)
+ }
+}
+
+func (s *Server) OnError(r *run.Run, err *errlist.List) {
+ s.mu.Lock()
+ slog, ok := s.streams[r.ID]
+ s.mu.Unlock()
+
+ if ok {
+ slog.Error(err)
+ }
+}
+
+func showFirstRunExperience(run *run.Run, md *meta.Data, stdout io.Writer) {
+ if state, err := onboarding.Load(); err == nil {
+ if !state.FirstRun.IsSet() {
+ // Is there a suitable endpoint to call?
+ var rpc *meta.RPC
+ var command string
+ for _, svc := range md.Svcs {
+ for _, r := range svc.Rpcs {
+ if cmd := genCurlCommand(run, md, r); rpc == nil || len(command) < len(cmd) {
+ rpc = r
+ command = cmd
+ }
+ }
+ }
+ if rpc != nil {
+ state.FirstRun.Set()
+ if err := state.Write(); err == nil {
+ _, _ = stdout.Write([]byte(aurora.Sprintf("\nHint: make an API call by running: %s\n", aurora.Cyan(command))))
+ }
+ }
+ }
+ }
+}
+
+// findAvailableAddr attempts to find an available host:port that's near
+// the given startAddr.
+func findAvailableAddr(startAddr string) (host string, port int, ok bool) {
+ host, portStr, err := net.SplitHostPort(startAddr)
+ if err != nil {
+ host = "localhost"
+ portStr = "4000"
+ }
+ startPort, err := strconv.Atoi(portStr)
+ if err != nil {
+ startPort = 4000
+ }
+
+ for p := startPort + 1; p <= startPort+10 && p <= 65535; p++ {
+ addr := host + ":" + strconv.Itoa(p)
+ ln, err := net.Listen("tcp", addr)
+ if err == nil {
+ _ = ln.Close()
+ return host, p, true
+ }
+ }
+ return "", 0, false
+}
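+
+// Usage sketch (hypothetical addresses): probe for a nearby free port when
+// the preferred one is taken:
+//
+//	if host, port, ok := findAvailableAddr("localhost:4000"); ok {
+//		// suggest e.g. "localhost:4001"
+//	}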
+
+func genCurlCommand(run *run.Run, md *meta.Data, rpc *meta.RPC) string {
+ var payload []byte
+ method := rpc.HttpMethods[0]
+ switch method {
+ case "GET", "HEAD", "DELETE":
+ // doesn't use HTTP body payloads
+ default:
+ payload = genSchema(md, rpc.RequestSchema)
+ }
+
+ var segments []string
+ for _, seg := range rpc.Path.Segments {
+ var v string
+ switch seg.Type {
+ default:
+ v = "foo"
+ case meta.PathSegment_LITERAL:
+ v = seg.Value
+ case meta.PathSegment_WILDCARD, meta.PathSegment_FALLBACK:
+ v = "foo"
+ case meta.PathSegment_PARAM:
+ switch seg.ValueType {
+ case meta.PathSegment_STRING:
+ v = "foo"
+ case meta.PathSegment_BOOL:
+ v = "true"
+ case meta.PathSegment_INT8, meta.PathSegment_INT16, meta.PathSegment_INT32, meta.PathSegment_INT64,
+ meta.PathSegment_UINT8, meta.PathSegment_UINT16, meta.PathSegment_UINT32, meta.PathSegment_UINT64:
+ v = "1"
+ case meta.PathSegment_UUID:
+ v = "be23a21f-d12c-432c-91ec-fb8a52e23967" // some random UUID
+ default:
+ v = "foo"
+ }
+ }
+ segments = append(segments, v)
+ }
+
+ parts := []string{"curl"}
+ if (payload != nil && method != "POST") || (payload == nil && method != "GET") {
+ parts = append(parts, " -X ", method)
+ }
+ // nosemgrep
+ path := "/" + strings.Join(segments, "/")
+ parts = append(parts, " http://", run.ListenAddr, path)
+ if payload != nil {
+ parts = append(parts, " -d '", string(payload), "'")
+ }
+ return strings.Join(parts, "")
+}
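+
+// For a hypothetical POST endpoint with path /blog/:id and a request schema,
+// this produces something like:
+//
+//	curl http://localhost:4000/blog/foo -d '{...}'
+//
+// -X is omitted because POST is already implied by the -d payload.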
+
+// errIsAddrInUse reports whether the error is due to the address already being in use.
+func errIsAddrInUse(err error) bool {
+ if opErr, ok := err.(*net.OpError); ok {
+ if syscallErr, ok := opErr.Err.(*os.SyscallError); ok {
+ if errno, ok := syscallErr.Err.(syscall.Errno); ok {
+ const WSAEADDRINUSE = 10048
+ switch {
+ case errno == syscall.EADDRINUSE:
+ return true
+ case runtime.GOOS == "windows" && errno == WSAEADDRINUSE:
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
diff --git a/cli/daemon/create.go b/cli/daemon/create.go
new file mode 100644
index 0000000000..06b064dd89
--- /dev/null
+++ b/cli/daemon/create.go
@@ -0,0 +1,21 @@
+package daemon
+
+import (
+ "context"
+
+ "encr.dev/cli/daemon/apps"
+ daemonpb "encr.dev/proto/encore/daemon"
+)
+
+// CreateApp adds tracking for a new app
+func (s *Server) CreateApp(ctx context.Context, req *daemonpb.CreateAppRequest) (*daemonpb.CreateAppResponse, error) {
+ var options []apps.TrackOption
+ if req.Tutorial {
+ options = append(options, apps.WithTutorial(req.Template))
+ }
+ app, err := s.apps.Track(req.AppRoot, options...)
+ if err != nil {
+ return nil, err
+ }
+ return &daemonpb.CreateAppResponse{AppId: app.PlatformOrLocalID()}, nil
+}
diff --git a/cli/daemon/daemon.go b/cli/daemon/daemon.go
index 3bc1ad74f6..463ed23e6b 100644
--- a/cli/daemon/daemon.go
+++ b/cli/daemon/daemon.go
@@ -2,164 +2,228 @@
package daemon
import (
+ "bytes"
"context"
- "fmt"
"io"
+ "strings"
"sync"
+ "sync/atomic"
+ "time"
- "encr.dev/cli/daemon/internal/appfile"
- "encr.dev/cli/daemon/run"
- "encr.dev/cli/daemon/secret"
- "encr.dev/cli/daemon/sqldb"
- "encr.dev/cli/internal/codegen"
- daemonpb "encr.dev/proto/encore/daemon"
- meta "encr.dev/proto/encore/parser/meta/v1"
- "encr.dev/proto/encore/server/remote"
+ "github.com/cockroachdb/errors"
"github.com/golang/protobuf/ptypes/empty"
"github.com/rs/zerolog"
+ "github.com/rs/zerolog/log"
"google.golang.org/genproto/googleapis/rpc/errdetails"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
+
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/cli/daemon/mcp"
+ "encr.dev/cli/daemon/namespace"
+ "encr.dev/cli/daemon/run"
+ "encr.dev/cli/daemon/secret"
+ "encr.dev/cli/daemon/sqldb"
+ "encr.dev/cli/internal/platform"
+ "encr.dev/cli/internal/update"
+ "encr.dev/internal/version"
+ "encr.dev/pkg/builder"
+ "encr.dev/pkg/builder/builderimpl"
+ "encr.dev/pkg/clientgen"
+ "encr.dev/pkg/clientgen/clientgentypes"
+ "encr.dev/pkg/errlist"
+ "encr.dev/pkg/fns"
+ daemonpb "encr.dev/proto/encore/daemon"
+ meta "encr.dev/proto/encore/parser/meta/v1"
)
var _ daemonpb.DaemonServer = (*Server)(nil)
// Server implements daemonpb.DaemonServer.
type Server struct {
- version string
- mgr *run.Manager
- cm *sqldb.ClusterManager
- sm *secret.Manager
- rc remote.RemoteClient
+ apps *apps.Manager
+ mgr *run.Manager
+ cm *sqldb.ClusterManager
+ sm *secret.Manager
+ ns *namespace.Manager
+ mcp *mcp.Manager
+
+ mu sync.Mutex
+ streams map[string]*streamLog // run id -> stream
- mu sync.Mutex
- streams map[string]daemonpb.Daemon_RunServer // run id -> stream
- appRoots map[string]string // cache of app id -> app root
+ availableVerInit sync.Once
+ availableVer atomic.Value // string
+
+ appDebounceMu sync.Mutex
+ appDebouncers map[*apps.Instance]*regenerateCodeDebouncer
daemonpb.UnimplementedDaemonServer
}
// New creates a new Server.
-func New(version string, mgr *run.Manager, cm *sqldb.ClusterManager, sm *secret.Manager, rc remote.RemoteClient) *Server {
+func New(appsMgr *apps.Manager, mgr *run.Manager, cm *sqldb.ClusterManager, sm *secret.Manager, ns *namespace.Manager, mcp *mcp.Manager) *Server {
srv := &Server{
- version: version,
- mgr: mgr,
- cm: cm,
- sm: sm,
- rc: rc,
- streams: make(map[string]daemonpb.Daemon_RunServer),
- appRoots: make(map[string]string),
+ apps: appsMgr,
+ mgr: mgr,
+ cm: cm,
+ sm: sm,
+ ns: ns,
+ mcp: mcp,
+ streams: make(map[string]*streamLog),
+
+ appDebouncers: make(map[*apps.Instance]*regenerateCodeDebouncer),
}
+
mgr.AddListener(srv)
+
+ // Check immediately for the latest version to avoid blocking 'encore run'
+ go srv.availableUpdate()
+
+ // Begin watching known apps for changes
+ go srv.watchApps()
+
return srv
}
// GenClient generates a client based on the app's API.
func (s *Server) GenClient(ctx context.Context, params *daemonpb.GenClientRequest) (*daemonpb.GenClientResponse, error) {
var md *meta.Data
- if params.EnvName == "local" {
+
+ envName := params.EnvName
+ if envName == "" {
+ envName = "local"
+ }
+
+ if envName == "local" {
// Determine the app root
- s.mu.Lock()
- appRoot, ok := s.appRoots[params.AppId]
- s.mu.Unlock()
- if !ok {
+ app, err := s.apps.FindLatestByPlatformOrLocalID(params.AppId)
+ if errors.Is(err, apps.ErrNotFound) {
return nil, status.Errorf(codes.FailedPrecondition, "the app %s must be run locally before generating a client for the 'local' environment.",
params.AppId)
+ } else if err != nil {
+ return nil, status.Errorf(codes.Internal, "unable to query app info: %v", err)
}
// Get the app metadata
- result, err := s.parseApp(appRoot, ".", false)
+ expSet, err := app.Experiments(nil)
+ if err != nil {
+ return nil, status.Errorf(codes.InvalidArgument, "failed to parse app experiments: %v", err)
+ }
+
+ // Parse the app to figure out what infrastructure is needed.
+ bld := builderimpl.Resolve(app.Lang(), expSet)
+ defer fns.CloseIgnore(bld)
+ parse, err := bld.Parse(ctx, builder.ParseParams{
+ Build: builder.DefaultBuildInfo(),
+ App: app,
+ Experiments: expSet,
+ WorkingDir: ".",
+ ParseTests: false,
+ })
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "failed to parse app metadata: %v", err)
}
- md = result.Meta
+ md = parse.Meta
+
+ if err := app.CacheMetadata(md); err != nil {
+ return nil, status.Errorf(codes.Internal, "failed to cache app metadata: %v", err)
+ }
} else {
- meta, err := s.rc.Meta(ctx, &remote.MetaRequest{
- AppSlug: params.AppId,
- EnvName: params.EnvName,
- })
+ meta, err := platform.GetEnvMeta(ctx, params.AppId, envName)
if err != nil {
- return nil, status.Errorf(status.Code(err), "could not fetch API metadata: %v", err)
+ if strings.Contains(err.Error(), "env_not_found") || strings.Contains(err.Error(), "env_not_deployed") {
+ if envName == "@primary" {
+ return nil, status.Error(codes.NotFound, "You have no deployments of this application.\n\nYou can generate the client for your local code by setting `--env=local`.")
+ }
+ return nil, status.Errorf(codes.NotFound, "A deployed environment called `%s` not found.\n\nYou can generate the client for your local code by setting `--env=local`.", envName)
+ }
+ return nil, status.Errorf(codes.Unavailable, "could not fetch API metadata: %v", err)
}
md = meta
}
- lang := codegen.Lang(params.Lang)
- code, err := codegen.Client(lang, params.AppId, md)
+ lang := clientgen.Lang(params.Lang)
+
+ servicesToGenerate := clientgentypes.NewServiceSet(md, params.Services, params.ExcludedServices)
+ tagSet := clientgentypes.NewTagSet(params.EndpointTags, params.ExcludedEndpointTags)
+ opts := clientgentypes.Options{}
+ if params.OpenapiExcludePrivateEndpoints != nil {
+ opts.OpenAPIExcludePrivateEndpoints = *params.OpenapiExcludePrivateEndpoints
+ }
+ if params.TsSharedTypes != nil {
+ opts.TSSharedTypes = *params.TsSharedTypes
+ }
+ if params.TsClientTarget != nil {
+ opts.TSClientTarget = *params.TsClientTarget
+ }
+ code, err := clientgen.Client(lang, params.AppId, md, servicesToGenerate, tagSet, opts)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
return &daemonpb.GenClientResponse{Code: code}, nil
}
-// SetSecret sets a secret key on the encore.dev platform.
-func (s *Server) SetSecret(ctx context.Context, req *daemonpb.SetSecretRequest) (*daemonpb.SetSecretResponse, error) {
- // Get the app id from the app file
- appSlug, err := appfile.Slug(req.AppRoot)
- if err != nil {
- return nil, status.Errorf(codes.InvalidArgument, err.Error())
- } else if appSlug == "" {
- return nil, errNotLinked
- }
-
- resp, err := s.rc.SetSecret(ctx, &remote.SetSecretRequest{
- AppSlug: appSlug,
- Key: req.Key,
- Value: req.Value,
- Type: remote.SetSecretRequest_Type(req.Type),
- })
+func (s *Server) SecretsRefresh(ctx context.Context, req *daemonpb.SecretsRefreshRequest) (*daemonpb.SecretsRefreshResponse, error) {
+ app, err := s.apps.Track(req.AppRoot)
if err != nil {
return nil, err
}
- go s.sm.UpdateKey(appSlug, req.Key, req.Value)
- return &daemonpb.SetSecretResponse{Created: resp.Created}, nil
+ s.sm.UpdateKey(app.PlatformID(), req.Key, req.Value)
+ return &daemonpb.SecretsRefreshResponse{}, nil
}
// Version reports the daemon version.
func (s *Server) Version(context.Context, *empty.Empty) (*daemonpb.VersionResponse, error) {
- return &daemonpb.VersionResponse{Version: s.version}, nil
-}
-
-// Logs streams logs from the encore.dev platform.
-func (s *Server) Logs(params *daemonpb.LogsRequest, stream daemonpb.Daemon_LogsServer) error {
- appSlug, err := appfile.Slug(params.AppRoot)
+ configHash, err := version.ConfigHash()
if err != nil {
- return status.Errorf(codes.InvalidArgument, err.Error())
- } else if appSlug == "" {
- return errNotLinked
+ return nil, err
}
- logs, err := s.rc.Logs(stream.Context(), &remote.LogsRequest{
- AppSlug: appSlug,
- EnvName: params.EnvName,
- })
- if err != nil {
- return err
- }
- for {
- msg, err := logs.Recv()
- if status.Code(err) == codes.Canceled {
- return nil
- } else if err != nil {
- return err
- }
- err = stream.Send(&daemonpb.LogsMessage{
- Lines: msg.Lines,
- DropNotice: msg.DropNotice,
- })
+ return &daemonpb.VersionResponse{
+ Version: version.Version,
+ ConfigHash: configHash,
+ }, nil
+}
+
+// availableUpdate reports the latest Encore release if it is newer than the
+// running version, and nil otherwise. The result is cached and refreshed hourly.
+func (s *Server) availableUpdate() *update.LatestVersion {
+ check := func() *update.LatestVersion {
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ ver, err := update.Check(ctx)
if err != nil {
- return err
+ log.Error().Err(err).Msg("could not check for new encore release")
}
+ return ver
}
-}
-// cacheAppRoot adds the appID -> appRoot mapping to the app root cache.
-func (s *Server) cacheAppRoot(appID, appRoot string) {
- s.mu.Lock()
- defer s.mu.Unlock()
- s.appRoots[appID] = appRoot
+ s.availableVerInit.Do(func() {
+ ver := check()
+ s.availableVer.Store(ver)
+ go func() {
+ for {
+ time.Sleep(1 * time.Hour)
+ if ver := check(); ver != nil {
+ s.availableVer.Store(ver)
+ }
+ }
+ }()
+ })
+
+ curr := version.Version
+ latest := s.availableVer.Load().(*update.LatestVersion)
+ if latest.IsNewer(curr) {
+ return latest
+ }
+ return nil
}
+var errDatabaseNotFound = (func() error {
+ st := status.New(codes.NotFound, "database not found")
+ return st.Err()
+})()
+
var errNotLinked = (func() error {
st, err := status.New(codes.FailedPrecondition, "app not linked").WithDetails(
&errdetails.PreconditionFailure{
@@ -181,35 +245,32 @@ type commandStream interface {
Send(msg *daemonpb.CommandMessage) error
}
-func newStreamLogger(stream commandStream) zerolog.Logger {
- return zerolog.New(zerolog.ConsoleWriter{Out: zerolog.SyncWriter(streamWriter{stream: stream})})
+func newStreamLogger(slog *streamLog) zerolog.Logger {
+ return zerolog.New(zerolog.SyncWriter(slog.Stderr(false))).With().Timestamp().Logger()
}
type streamWriter struct {
- stream commandStream
+ mu *sync.Mutex
+ sl *streamLog
stderr bool // if true write to stderr, otherwise stdout
+ buffer bool
}
func (w streamWriter) Write(b []byte) (int, error) {
- out := &daemonpb.CommandOutput{}
- if w.stderr {
- out.Stderr = b
- } else {
- out.Stdout = b
- }
- err := w.stream.Send(&daemonpb.CommandMessage{
- Msg: &daemonpb.CommandMessage_Output{
- Output: out,
- },
- })
- if err != nil {
- return 0, err
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if w.buffer && w.sl.buffered {
+ if w.stderr {
+ return w.sl.writeBuffered(&w.sl.stderr, b)
+ } else {
+ return w.sl.writeBuffered(&w.sl.stdout, b)
+ }
}
- return len(b), nil
+ return w.sl.writeStream(w.stderr, b)
}
func streamExit(stream commandStream, code int) {
- stream.Send(&daemonpb.CommandMessage{Msg: &daemonpb.CommandMessage_Exit{
+ _ = stream.Send(&daemonpb.CommandMessage{Msg: &daemonpb.CommandMessage_Exit{
Exit: &daemonpb.CommandExit{
Code: int32(code),
},
@@ -218,31 +279,66 @@ func streamExit(stream commandStream, code int) {
type streamLog struct {
stream commandStream
+ mu sync.Mutex
+
+ buffered bool
+ stdout *bytes.Buffer // lazily allocated
+ stderr *bytes.Buffer // lazily allocated
}
-func (log streamLog) Stdout() io.Writer {
- return streamWriter{stream: log.stream, stderr: false}
+func (log *streamLog) Stdout(buffer bool) io.Writer {
+ return streamWriter{mu: &log.mu, sl: log, stderr: false, buffer: buffer}
}
-func (log streamLog) Stderr() io.Writer {
- return streamWriter{stream: log.stream, stderr: true}
+func (log *streamLog) Stderr(buffer bool) io.Writer {
+ return streamWriter{mu: &log.mu, sl: log, stderr: true, buffer: buffer}
}
-type runStreamAdapter struct {
- stream daemonpb.Daemon_RunServer
+func (log *streamLog) Error(err *errlist.List) {
+ log.mu.Lock()
+ defer log.mu.Unlock()
+ _ = err.SendToStream(log.stream)
}
-func (a runStreamAdapter) Send(msg *daemonpb.CommandMessage) error {
- switch msg := msg.Msg.(type) {
- case *daemonpb.CommandMessage_Output:
- return a.stream.Send(&daemonpb.RunMessage{
- Msg: &daemonpb.RunMessage_Output{Output: msg.Output},
- })
- case *daemonpb.CommandMessage_Exit:
- return a.stream.Send(&daemonpb.RunMessage{
- Msg: &daemonpb.RunMessage_Exit{Exit: msg.Exit},
- })
- default:
- panic(fmt.Sprintf("unknown CommandMessage type %T", msg))
+func (log *streamLog) FlushBuffers() {
+ var stdout, stderr []byte
+ log.mu.Lock()
+ defer log.mu.Unlock()
+ if b := log.stdout; b != nil {
+ stdout = b.Bytes()
+ log.stdout = nil
+ }
+ if b := log.stderr; b != nil {
+ stderr = b.Bytes()
+ log.stderr = nil
+ }
+
+ _, _ = log.writeStream(false, stdout)
+ _, _ = log.writeStream(true, stderr)
+ log.buffered = false
+}
+
+func (log *streamLog) writeBuffered(b **bytes.Buffer, p []byte) (int, error) {
+ if *b == nil {
+ *b = &bytes.Buffer{}
}
+ return (*b).Write(p)
+}
+
+func (log *streamLog) writeStream(stderr bool, b []byte) (int, error) {
+ out := &daemonpb.CommandOutput{}
+ if stderr {
+ out.Stderr = b
+ } else {
+ out.Stdout = b
+ }
+ err := log.stream.Send(&daemonpb.CommandMessage{
+ Msg: &daemonpb.CommandMessage_Output{
+ Output: out,
+ },
+ })
+ if err != nil {
+ return 0, err
+ }
+ return len(b), nil
}
diff --git a/cli/daemon/dash/ai/assembler.go b/cli/daemon/dash/ai/assembler.go
new file mode 100644
index 0000000000..289930e1bb
--- /dev/null
+++ b/cli/daemon/dash/ai/assembler.go
@@ -0,0 +1,285 @@
+package ai
+
+import (
+ "context"
+ "slices"
+ "strings"
+
+ "encr.dev/pkg/fns"
+ "encr.dev/pkg/idents"
+ "encr.dev/v2/parser/apis/api/apienc"
+)
+
+// partialEndpoint is a helper struct that is used to assemble the endpoint
+// from the incoming websocket updates.
+type partialEndpoint struct {
+ service string
+ endpoint *Endpoint
+}
+
+// notification generates a partially assembled endpoint structure to return to the client
+func (e *partialEndpoint) notification() LocalEndpointUpdate {
+ e.endpoint.EndpointSource = e.endpoint.Render()
+ e.endpoint.TypeSource = ""
+ for i, s := range e.endpoint.Types {
+ if i > 0 {
+ e.endpoint.TypeSource += "\n\n"
+ }
+ e.endpoint.TypeSource += s.Render()
+ }
+ return LocalEndpointUpdate{
+ Service: e.service,
+ Endpoint: e.endpoint,
+ Type: "EndpointUpdate",
+ }
+}
+
+func (e *partialEndpoint) upsertType(name, doc string) *Type {
+ if name == "" {
+ return nil
+ }
+ for _, s := range e.endpoint.Types {
+ if s.Name == name {
+ if doc != "" {
+ s.Doc = wrapDoc(doc, 77)
+ }
+ return s
+ }
+ }
+ si := &Type{Name: name, Doc: wrapDoc(doc, 77)}
+ e.endpoint.Types = append(e.endpoint.Types, si)
+ return si
+}
+
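+// wrapDoc flattens the doc string to a single line and then greedily inserts
+// line breaks so that no line exceeds the given width, breaking at spaces;
+// words longer than width overflow their line rather than being split.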
+func wrapDoc(doc string, width int) string {
+ doc = strings.ReplaceAll(doc, "\n", " ")
+ doc = strings.TrimSpace(doc)
+ bytes := []byte(doc)
+ i := 0
+ for {
+ start := i
+ if start+width >= len(bytes) {
+ break
+ }
+ i += width
+ for i > start && bytes[i] != ' ' {
+ i--
+ }
+ if i > start {
+ bytes[i] = '\n'
+ } else {
+ for i < len(bytes) && bytes[i] != ' ' {
+ i++
+ }
+ if i < len(bytes) {
+ bytes[i] = '\n'
+ }
+ }
+ }
+ return string(bytes)
+}
+
+func (e *partialEndpoint) upsertError(err ErrorUpdate) *Error {
+ for _, s := range e.endpoint.Errors {
+ if s.Code == err.Code {
+ if err.Doc != "" {
+ s.Doc = wrapDoc(err.Doc, 60)
+ }
+ return s
+ }
+ }
+ si := &Error{Code: err.Code, Doc: wrapDoc(err.Doc, 60)}
+ e.endpoint.Errors = append(e.endpoint.Errors, si)
+ return si
+}
+
+func (e *partialEndpoint) upsertPathParam(up PathParamUpdate) PathSegment {
+ for i, s := range e.endpoint.Path {
+ if s.Value != nil && *s.Value == up.Param {
+ if up.Doc != "" {
+ e.endpoint.Path[i].Doc = wrapDoc(up.Doc, 73)
+ }
+ return s
+ }
+ }
+ seg := PathSegment{
+ Type: SegmentTypeParam,
+ ValueType: ptr[SegmentValueType]("string"),
+ Value: &up.Param,
+ Doc: wrapDoc(up.Doc, 73),
+ }
+ e.endpoint.Path = append(e.endpoint.Path, seg)
+ return seg
+}
+
+func (e *partialEndpoint) upsertField(up TypeFieldUpdate) *Type {
+ if up.Struct == "" {
+ return nil
+ }
+ s := e.upsertType(up.Struct, "")
+ for _, f := range s.Fields {
+ if f.Name == up.Name {
+ if up.Doc != "" {
+ f.Doc = wrapDoc(up.Doc, 73)
+ }
+ if up.Type != "" {
+ f.Type = up.Type
+ }
+ return s
+ }
+ }
+ defaultLoc := apienc.Body
+ isRequest := up.Struct == e.endpoint.RequestType
+ if slices.Contains([]string{"GET", "HEAD", "DELETE"}, e.endpoint.Method) && isRequest {
+ defaultLoc = apienc.Query
+ }
+ fi := &TypeField{
+ Name: up.Name,
+ Doc: wrapDoc(up.Doc, 73),
+ Type: up.Type,
+ Location: defaultLoc,
+ WireName: idents.Convert(up.Name, idents.CamelCase),
+ }
+ s.Fields = append(s.Fields, fi)
+ return s
+}
+
+// The endpointsAssembler is a helper struct that is used to assemble the endpoint
+// from the incoming websocket updates. It keeps track of the existing endpoints and services
+// and updates them accordingly.
+type endpointsAssembler struct {
+ eps map[string]*partialEndpoint
+}
+
+func newEndpointAssembler(existing []Service) *endpointsAssembler {
+ eas := &endpointsAssembler{
+ eps: make(map[string]*partialEndpoint),
+ }
+ for _, svc := range existing {
+ for _, ep := range svc.Endpoints {
+ key := svc.Name + "." + ep.Name
+ eas.eps[key] = &partialEndpoint{
+ service: svc.Name,
+ endpoint: ep,
+ }
+ }
+ }
+ return eas
+}
+
+func (s *endpointsAssembler) upsertEndpoint(e EndpointUpdate) *partialEndpoint {
+ for _, ep := range s.eps {
+ if ep.service != e.Service || ep.endpoint.Name != e.Name {
+ continue
+ }
+ if e.Doc != "" {
+ ep.endpoint.Doc = wrapDoc(e.Doc, 77)
+ }
+ if e.Method != "" {
+ ep.endpoint.Method = e.Method
+ }
+ if e.Visibility != "" {
+ ep.endpoint.Visibility = e.Visibility
+ }
+ if len(e.Path) > 0 {
+ ep.endpoint.Path = e.Path
+ }
+ if e.RequestType != "" {
+ ep.endpoint.RequestType = e.RequestType
+ ep.upsertType(e.RequestType, "")
+ }
+ if e.ResponseType != "" {
+ ep.endpoint.ResponseType = e.ResponseType
+ ep.upsertType(e.ResponseType, "")
+ }
+ if e.Errors != nil {
+ ep.endpoint.Errors = fns.Map(e.Errors, func(e string) *Error {
+ return &Error{Code: e}
+ })
+ }
+ return ep
+ }
+ ep := &partialEndpoint{
+ service: e.Service,
+ endpoint: &Endpoint{
+ Name: e.Name,
+ Doc: wrapDoc(e.Doc, 77),
+ Method: e.Method,
+ Visibility: e.Visibility,
+ Path: e.Path,
+ RequestType: e.RequestType,
+ ResponseType: e.ResponseType,
+ Errors: fns.Map(e.Errors, func(e string) *Error {
+ return &Error{Code: e}
+ }),
+ Language: "GO",
+ },
+ }
+ s.eps[e.Service+"."+e.Name] = ep
+ return ep
+}
+
+func (s *endpointsAssembler) endpoint(service, endpoint string) *partialEndpoint {
+ key := service + "." + endpoint
+ ep, ok := s.eps[key]
+ if !ok {
+ ep := &partialEndpoint{
+ service: service,
+ endpoint: &Endpoint{Name: endpoint},
+ }
+ s.eps[key] = ep
+ }
+ return ep
+}
+
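+// newEndpointAssemblerHandler wraps notifier so that raw AI updates are folded
+// into partially assembled endpoints before being forwarded. When epComplete is
+// set, a synthetic "EndpointComplete" message is also emitted whenever the
+// stream moves on from one endpoint to the next.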
+func newEndpointAssemblerHandler(existing []Service, notifier AINotifier, epComplete bool) AINotifier {
+ epCache := newEndpointAssembler(existing)
+ var lastEp *partialEndpoint
+ return func(ctx context.Context, msg *AINotification) error {
+ var ep *partialEndpoint
+ msgVal := msg.Value
+ switch val := msg.Value.(type) {
+ case TypeUpdate:
+ ep = epCache.endpoint(val.Service, val.Endpoint)
+ ep.upsertType(val.Name, val.Doc)
+ msgVal = ep.notification()
+ case TypeFieldUpdate:
+ ep = epCache.endpoint(val.Service, val.Endpoint)
+ ep.upsertField(val)
+ msgVal = ep.notification()
+ case EndpointUpdate:
+ ep = epCache.upsertEndpoint(val)
+ msgVal = ep.notification()
+ case ErrorUpdate:
+ ep = epCache.endpoint(val.Service, val.Endpoint)
+ ep.upsertError(val)
+ msgVal = ep.notification()
+ case PathParamUpdate:
+ ep = epCache.endpoint(val.Service, val.Endpoint)
+ ep.upsertPathParam(val)
+ msgVal = ep.notification()
+ }
+ if epComplete && lastEp != ep {
+ if lastEp != nil {
+ msg.Value = struct {
+ Type string `json:"type"`
+ Service string `json:"service"`
+ Endpoint string `json:"endpoint"`
+ }{"EndpointComplete", lastEp.service, lastEp.endpoint.Name}
+ if err := notifier(ctx, msg); err != nil || msg.Finished {
+ return err
+ }
+ }
+ lastEp = ep
+ }
+ msg.Value = msgVal
+ return notifier(ctx, msg)
+ }
+}
diff --git a/cli/daemon/dash/ai/client.go b/cli/daemon/dash/ai/client.go
new file mode 100644
index 0000000000..18762a4c31
--- /dev/null
+++ b/cli/daemon/dash/ai/client.go
@@ -0,0 +1,156 @@
+package ai
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/cockroachdb/errors"
+ "github.com/hasura/go-graphql-client"
+ "github.com/hasura/go-graphql-client/pkg/jsonutil"
+ "github.com/rs/zerolog/log"
+
+ "encr.dev/internal/conf"
+)
+
+type TaskMessage struct {
+ Type string `graphql:"__typename"`
+
+ ServiceUpdate `graphql:"... on ServiceUpdate"`
+ TypeUpdate `graphql:"... on TypeUpdate"`
+ TypeFieldUpdate `graphql:"... on TypeFieldUpdate"`
+ ErrorUpdate `graphql:"... on ErrorUpdate"`
+ EndpointUpdate `graphql:"... on EndpointUpdate"`
+ SessionUpdate `graphql:"... on SessionUpdate"`
+ TitleUpdate `graphql:"... on TitleUpdate"`
+ PathParamUpdate `graphql:"... on PathParamUpdate"`
+}
+
+func (u *TaskMessage) GetValue() AIUpdateType {
+ switch u.Type {
+ case "ServiceUpdate":
+ return u.ServiceUpdate
+ case "TypeUpdate":
+ return u.TypeUpdate
+ case "TypeFieldUpdate":
+ return u.TypeFieldUpdate
+ case "ErrorUpdate":
+ return u.ErrorUpdate
+ case "EndpointUpdate":
+ return u.EndpointUpdate
+ case "SessionUpdate":
+ return u.SessionUpdate
+ case "TitleUpdate":
+ return u.TitleUpdate
+ case "PathParamUpdate":
+ return u.PathParamUpdate
+ }
+ return nil
+}
+
+type AIStreamMessage struct {
+ Value TaskMessage
+ Error string
+ Finished bool
+}
+
+type aiTask struct {
+ Message *AIStreamMessage `graphql:"result"`
+}
+
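+// getClient dials the platform's GraphQL subscription endpoint over a
+// websocket using the authenticated conf.AuthClient, and runs the client
+// loop in a background goroutine, reporting terminal errors to errHandler.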
+func getClient(errHandler func(err error)) *graphql.SubscriptionClient {
+ client := graphql.NewSubscriptionClient(conf.WSBaseURL + "/graphql").
+ WithRetryTimeout(5 * time.Second).
+ WithRetryDelay(2 * time.Second).
+ WithRetryStatusCodes("500-599").
+ WithWebSocketOptions(
+ graphql.WebsocketOptions{
+ HTTPClient: conf.AuthClient,
+ }).WithSyncMode(true)
+ go func() {
+ log.Info().Msg("starting ai client")
+ err := client.Run()
+ log.Info().Msg("closed ai client")
+ if err != nil {
+ errHandler(err)
+ }
+ }()
+ return client
+}
+
+type AITask struct {
+ SubscriptionID string
+ client *graphql.SubscriptionClient
+}
+
+func (t *AITask) Stop() error {
+ return t.client.Unsubscribe(t.SubscriptionID)
+}
+
+// startAITask is a helper function to initiate an AI query to the Encore platform. The query
+// should be assembled to stream a 'result' GraphQL field that is an AIStreamMessage.
+func startAITask[Query any](ctx context.Context, params map[string]interface{}, notifier AINotifier) (*AITask, error) {
+ var subId string
+ var errStrReply = func(msg string, code any) error {
+ log.Error().Msgf("ai error: %s (%v)", msg, code)
+ _ = notifier(ctx, &AINotification{
+ SubscriptionID: subId,
+ Error: &AIError{Message: msg, Code: fmt.Sprintf("%v", code)},
+ Finished: true,
+ })
+ return graphql.ErrSubscriptionStopped
+ }
+ var errReply = func(err error) error {
+ var graphqlErr graphql.Errors
+ if errors.As(err, &graphqlErr) {
+ for _, e := range graphqlErr {
+ _ = errStrReply(e.Message, e.Extensions["code"])
+ }
+ return graphql.ErrSubscriptionStopped
+ }
+ return errStrReply(err.Error(), "")
+ }
+ var query Query
+ client := getClient(func(err error) { _ = errReply(err) })
+ subId, err := client.Subscribe(&query, params, func(message []byte, err error) error {
+ if err != nil {
+ return errReply(err)
+ }
+ var result aiTask
+ err = jsonutil.UnmarshalGraphQL(message, &result)
+ if err != nil {
+ return errReply(err)
+ }
+ if result.Message.Error != "" {
+ return errStrReply(result.Message.Error, "")
+ }
+ err = notifier(ctx, &AINotification{
+ SubscriptionID: subId,
+ Value: result.Message.Value.GetValue(),
+ Finished: result.Message.Finished,
+ })
+ if err != nil {
+ return errReply(err)
+ }
+ return nil
+ })
+ return &AITask{SubscriptionID: subId, client: client}, err
+}
+
+// AINotification is a wrapper around messages and errors from the Encore platform AI service.
+type AINotification struct {
+ SubscriptionID string `json:"subscriptionId,omitempty"`
+ Value any `json:"value,omitempty"`
+ Error *AIError `json:"error,omitempty"`
+ Finished bool `json:"finished,omitempty"`
+}
+
+type AIError struct {
+ Message string `json:"message"`
+ Code string `json:"code"`
+}
+
+type AINotifier func(context.Context, *AINotification) error
diff --git a/cli/daemon/dash/ai/codegen.go b/cli/daemon/dash/ai/codegen.go
new file mode 100644
index 0000000000..ab167b5a55
--- /dev/null
+++ b/cli/daemon/dash/ai/codegen.go
@@ -0,0 +1,433 @@
+package ai
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "os"
+ "path"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "golang.org/x/exp/maps"
+ "golang.org/x/tools/go/packages"
+ "golang.org/x/tools/imports"
+
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/internal/env"
+ "encr.dev/pkg/fns"
+ "encr.dev/pkg/paths"
+ "encr.dev/v2/codegen/rewrite"
+ "encr.dev/v2/internals/perr"
+ "encr.dev/v2/internals/pkginfo"
+ "encr.dev/v2/parser/apis/api/apienc"
+ "encr.dev/v2/parser/apis/directive"
+)
+
+const defAuthHandler = `package auth
+
+import (
+ "context"
+
+ "encore.dev/beta/auth"
+)
+
+type Data struct {
+ Username string
+}
+
+//encore:authhandler
+func AuthHandler(ctx context.Context, token string) (auth.UID, *Data, error) {
+ panic("not yet implemented")
+}`
+
+const (
+ PathDocPrefix = "Path Parameters"
+ ErrDocPrefix = "Errors"
+)
+
+func (p PathSegments) Render() (docPath string, goParams []string) {
+ var params []string
+ return "/" + path.Join(fns.Map(p, func(s PathSegment) string {
+ switch s.Type {
+ case SegmentTypeLiteral:
+ return *s.Value
+ case SegmentTypeParam:
+ params = append(params, fmt.Sprintf("%s %s", *s.Value, *s.ValueType))
+ return fmt.Sprintf(":%s", *s.Value)
+ case SegmentTypeWildcard:
+ params = append(params, fmt.Sprintf("%s %s", *s.Value, SegmentValueTypeString))
+ return fmt.Sprintf("*%s", *s.Value)
+ case SegmentTypeFallback:
+ params = append(params, fmt.Sprintf("%s %s", *s.Value, SegmentValueTypeString))
+ return fmt.Sprintf("!%s", *s.Value)
+ default:
+ panic(fmt.Sprintf("unknown path segment type: %s", s.Type))
+ }
+ })...), params
+}
+
+func (s *Type) Render() string {
+ rtn := strings.Builder{}
+ if s.Doc != "" {
+ rtn.WriteString(fmtComment(strings.TrimSpace(s.Doc), 0, 1))
+ }
+ rtn.WriteString(fmt.Sprintf("type %s struct {\n", s.Name))
+ for i, f := range s.Fields {
+ if i > 0 {
+ rtn.WriteString("\n")
+ }
+ if f.Doc != "" {
+ rtn.WriteString(fmtComment(strings.TrimSpace(f.Doc), 2, 1))
+ }
+ tags := ""
+ switch f.Location {
+ case apienc.Body:
+ tags = fmt.Sprintf(" `json:\"%s\"`", f.WireName)
+ case apienc.Query:
+ tags = fmt.Sprintf(" `query:\"%s\"`", f.WireName)
+ case apienc.Header:
+ tags = fmt.Sprintf(" `header:\"%s\"`", f.WireName)
+ }
+ rtn.WriteString(fmt.Sprintf(" %s %s%s\n", f.Name, f.Type, tags))
+ }
+ rtn.WriteString("}")
+ return rtn.String()
+}
+
+func (e *Endpoint) Render() string {
+ buf := strings.Builder{}
+ if e.Doc != "" {
+ buf.WriteString(fmtComment(strings.TrimSpace(e.Doc)+"\n", 0, 1))
+ }
+ buf.WriteString(renderDocList(PathDocPrefix, e.Path))
+ buf.WriteString(renderDocList(ErrDocPrefix, e.Errors))
+ pathStr, pathParams := e.Path.Render()
+ params := []string{"ctx context.Context"}
+ params = append(params, pathParams...)
+ if e.RequestType != "" {
+ params = append(params, "req *"+e.RequestType)
+ }
+ var rtnParams []string
+ if e.ResponseType != "" {
+ rtnParams = append(rtnParams, "*"+e.ResponseType)
+ }
+ rtnParams = append(rtnParams, "error")
+ buf.WriteString(fmtComment("encore:api %s method=%s path=%s", 0, 0, e.Visibility, e.Method, pathStr))
+ paramsStr := strings.Join(params, ", ")
+ rtnParamsStr := strings.Join(rtnParams, ", ")
+ if len(rtnParams) > 1 {
+ rtnParamsStr = fmt.Sprintf("(%s)", rtnParamsStr)
+ }
+ buf.WriteString(fmt.Sprintf("func %s(%s) %s", e.Name, paramsStr, rtnParamsStr))
+ return buf.String()
+}
+
+func indentItem(header, comment string) string {
+ buf := strings.Builder{}
+ buf.WriteString(header)
+ for i, line := range strings.Split(strings.TrimSpace(comment), "\n") {
+ indent := ""
+ if i > 0 {
+ indent = strings.Repeat(" ", len(header))
+ }
+ buf.WriteString(fmt.Sprintf("%s%s\n", indent, line))
+ }
+ return buf.String()
+}
+
+func renderDocList[T interface{ DocItem() (string, string) }](header string, items []T) string {
+ maxLen := 0
+ items = fns.Filter(items, func(p T) bool {
+ key, val := p.DocItem()
+ if val == "" {
+ return false
+ }
+ maxLen = max(maxLen, len(key))
+ return true
+ })
+ buf := strings.Builder{}
+ for i, item := range items {
+ if i == 0 {
+ buf.WriteString(header)
+ buf.WriteString(":\n")
+ }
+ key, value := item.DocItem()
+ spacing := strings.Repeat(" ", maxLen-len(key))
+ itemHeader := fmt.Sprintf(" - %s: %s", key, spacing)
+ buf.WriteString(indentItem(itemHeader, value))
+ }
+ return fmtComment(buf.String(), 0, 1)
+}
+
+// fmtComment prepends '//' to each line of the given comment, indented by
+// `before` spaces before the slashes and padded by `after` spaces after them,
+// and applies any printf-style args to the result.
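+//
+// For example, fmtComment("encore:api %s", 0, 0, "public") yields
+// "//encore:api public\n".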
+func fmtComment(comment string, before, after int, args ...any) string {
+ if comment == "" {
+ return ""
+ }
+ prefix := fmt.Sprintf("%s//%s", strings.Repeat(" ", before), strings.Repeat(" ", after))
+ result := prefix + strings.ReplaceAll(comment, "\n", "\n"+prefix)
+ return fmt.Sprintf(result, args...) + "\n"
+}
+
+// generateSrcFiles generates source files for the given services.
+func generateSrcFiles(services []Service, app *apps.Instance) (map[paths.RelSlash]string, error) {
+ svcPaths, err := newServicePaths(app)
+ if err != nil {
+ return nil, err
+ }
+ needAuth := fns.Any(fns.FlatMap(services, Service.GetEndpoints), (*Endpoint).Auth)
+ files := map[paths.RelSlash]string{}
+ if needAuth {
+ md, err := app.CachedMetadata()
+ if err != nil {
+ return nil, err
+ }
+ if md.AuthHandler == nil {
+ relFile, err := svcPaths.RelFileName("auth", "handler")
+ if err != nil {
+ return nil, err
+ }
+ file := paths.FS(app.Root()).JoinSlash(relFile)
+ err = os.MkdirAll(file.Dir().ToIO(), 0755)
+ if err != nil {
+ return nil, err
+ }
+ files[relFile] = string(defAuthHandler)
+ }
+ }
+ for _, s := range services {
+ if svcPaths.IsNew(s.Name) {
+ relFile, err := svcPaths.RelFileName(s.Name, s.Name)
+ if err != nil {
+ return nil, err
+ }
+ file := paths.FS(app.Root()).JoinSlash(relFile)
+ err = os.MkdirAll(file.Dir().ToIO(), 0755)
+ if err != nil {
+ return nil, err
+ }
+ files[relFile] = fmt.Sprintf("%spackage %s\n", fmtComment(s.Doc, 0, 1), strings.ToLower(s.Name))
+ }
+ for _, e := range s.Endpoints {
+ relFile, err := svcPaths.RelFileName(s.Name, e.Name)
+ if err != nil {
+ return nil, err
+ }
+ filePath := paths.FS(app.Root()).JoinSlash(relFile)
+ _, content := toSrcFile(filePath, s.Name, e.EndpointSource, e.TypeSource)
+ files[relFile], err = addMissingFuncBodies(content)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ return files, nil
+}
+
+// addMissingFuncBodies adds a panic statement to functions that are missing a body.
+// This is used to generate a valid Go source file when the user has not implemented
+// the body of the endpoint functions.
+func addMissingFuncBodies(content []byte) (string, error) {
+ set := token.NewFileSet()
+ rewriter := rewrite.New(content, 0)
+ file, err := parser.ParseFile(set, "", content, parser.ParseComments|parser.AllErrors)
+ if err != nil {
+ return "", err
+ }
+ ast.Inspect(file, func(n ast.Node) bool {
+ switch n := n.(type) {
+ case *ast.FuncDecl:
+ if n.Body != nil {
+ break
+ }
+ rewriter.Insert(n.End()-1, []byte(" {\n panic(\"not yet implemented\")\n}\n"))
+ }
+ return true
+ })
+ return string(rewriter.Data()), err
+}
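+
+// For example, a parsed declaration such as
+//
+//	func Get(ctx context.Context) (*Response, error)
+//
+// gains a panic("not yet implemented") body, matching the stub style used by
+// defAuthHandler above.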
+
+// writeFiles writes the generated source files to disk.
+func writeFiles(services []Service, app *apps.Instance) ([]paths.RelSlash, error) {
+ files, err := generateSrcFiles(services, app)
+ if err != nil {
+ return nil, err
+ }
+ for fileName, content := range files {
+ root := paths.FS(app.Root())
+ err = os.WriteFile(root.JoinSlash(fileName).ToIO(), []byte(content), 0644)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return maps.Keys(files), nil
+}
+
+// toSrcFile wraps a code fragment in a package declaration and adds missing imports
+// using the goimports tool.
+func toSrcFile(filePath paths.FS, svc string, srcs ...string) (offset token.Position, data []byte) {
+ const divider = "// @code-start\n"
+ header := fmt.Sprintf("package %s\n\n", strings.ToLower(svc))
+ src := []byte(header + divider + strings.Join(srcs, "\n"))
+ importedSrc, err := imports.Process(filePath.ToIO(), src, &imports.Options{
+ Comments: true,
+ TabIndent: false,
+ TabWidth: 4,
+ })
+ // We don't need to handle the error here, as we'll catch parser/scanner errors in a later
+ // phase. This is just a best effort to import missing packages.
+ if err == nil {
+ src = importedSrc
+ }
+ codeOffset := bytes.Index(src, []byte(divider))
+ // Remove the divider and any formatting made by the imports tool
+ src = append(src[:codeOffset], strings.Join(srcs, "\n")...)
+ // Compute offset of the user defined code
+ lines := strings.Split(string(src[:codeOffset]), "\n")
+ return token.Position{
+ Filename: filePath.ToIO(),
+ Offset: codeOffset,
+ Line: len(lines) - 1,
+ Column: 0,
+ }, src
+}
+
+// updateCode updates the source code fields of the EndpointInputs in the given services.
+// If overwrite is set, the code is regenerated from scratch and replaces the existing code;
+// otherwise the existing code is modified in place.
+func updateCode(ctx context.Context, services []Service, app *apps.Instance, overwrite bool) (rtn *SyncResult, err error) {
+ overlays, err := newOverlays(app, overwrite, services...)
+ if err != nil {
+ return nil, err
+ }
+ fset := token.NewFileSet()
+ perrs := perr.NewList(ctx, fset, overlays.ReadFile)
+ defer func() {
+ perr.CatchBailout(recover())
+ if rtn == nil {
+ rtn = &SyncResult{
+ Services: services,
+ }
+ }
+ rtn.Errors = overlays.validationErrors(perrs)
+ }()
+ for p, olay := range overlays.items {
+ astFile, err := parser.ParseFile(fset, p.ToIO(), olay.content, parser.ParseComments|parser.AllErrors)
+ if err != nil {
+ perrs.AddStd(err)
+ }
+ rewriter := rewrite.New(olay.content, int(astFile.FileStart))
+ typeByName := map[string]*ast.GenDecl{}
+ funcByName := map[string]*ast.FuncDecl{}
+ for _, decl := range astFile.Decls {
+ switch decl := decl.(type) {
+ case *ast.GenDecl:
+ if decl.Tok != token.TYPE {
+ continue
+ }
+ for _, spec := range decl.Specs {
+ typeSpec := spec.(*ast.TypeSpec)
+ typeByName[typeSpec.Name.Name] = decl
+ }
+ case *ast.FuncDecl:
+ funcByName[decl.Name.Name] = decl
+ }
+ }
+ if olay.codeType == CodeTypeEndpoint {
+ funcDecl, ok := funcByName[olay.endpoint.Name]
+ if !ok {
+ for _, f := range funcByName {
+ dir, _, _ := directive.Parse(perrs, f.Doc)
+ if dir != nil && dir.Name == "api" {
+ funcDecl = f
+ break
+ }
+ }
+ }
+ if funcDecl != nil {
+ start := funcDecl.Pos()
+ if funcDecl.Doc != nil {
+ start = funcDecl.Doc.Pos()
+ }
+ end := funcDecl.End()
+ if funcDecl.Body != nil {
+ end = funcDecl.Body.Lbrace
+ }
+ rewriter.Replace(start, end, []byte(olay.endpoint.Render()))
+ } else {
+ if len(funcByName) > 0 {
+ rewriter.Append([]byte("\n"))
+ }
+ rewriter.Append([]byte(olay.endpoint.Render()))
+ }
+ olay.content = rewriter.Data()
+ content := string(olay.content[olay.headerOffset.Offset:])
+ olay.endpoint.EndpointSource = strings.TrimSpace(content)
+ } else {
+ for _, typ := range olay.endpoint.Types {
+ typeSpec := typeByName[typ.Name]
+ code := typ.Render()
+ if typeSpec != nil {
+ start := typeSpec.Pos()
+ if typeSpec.Doc != nil {
+ start = typeSpec.Doc.Pos()
+ }
+ rewriter.Replace(start, typeSpec.End(), []byte(code))
+ } else {
+ rewriter.Append([]byte("\n\n" + code))
+ }
+ }
+ olay.content = rewriter.Data()
+ content := string(olay.content[olay.headerOffset.Offset:])
+ olay.endpoint.TypeSource = strings.TrimSpace(content)
+ }
+ }
+ goRoot := paths.RootedFSPath(env.EncoreGoRoot(), ".")
+
+ // Parse the end result to catch any syntax errors
+ pkginfo.UpdateGoPath(goRoot)
+ pkgs, err := packages.Load(&packages.Config{
+ Mode: packages.NeedTypes | packages.NeedSyntax,
+ Dir: app.Root(),
+ Env: append(os.Environ(),
+ "GOOS="+runtime.GOOS,
+ "GOARCH="+runtime.GOARCH,
+ "GOROOT="+goRoot.ToIO(),
+ "PATH="+goRoot.Join("bin").ToIO()+string(filepath.ListSeparator)+os.Getenv("PATH"),
+ ),
+ Fset: fset,
+ Overlay: overlays.PkgOverlay(),
+ }, fns.Map(overlays.pkgPaths(), paths.Pkg.String)...)
+ if err != nil {
+ return nil, err
+ }
+ for _, pkg := range pkgs {
+ for _, err := range pkg.Errors {
+ // ignore missing function bodies error (it's allowed)
+ if strings.Contains(err.Error(), "missing function body") {
+ continue
+ }
+ perrs.AddStd(err)
+ }
+ }
+ return &SyncResult{
+ Services: services,
+ }, nil
+}
diff --git a/cli/daemon/dash/ai/conv.go b/cli/daemon/dash/ai/conv.go
new file mode 100644
index 0000000000..e149fd8ea0
--- /dev/null
+++ b/cli/daemon/dash/ai/conv.go
@@ -0,0 +1,144 @@
+package ai
+
+import (
+ "slices"
+ "strings"
+
+ "encr.dev/pkg/clientgen"
+ meta "encr.dev/proto/encore/parser/meta/v1"
+ schema "encr.dev/proto/encore/parser/schema/v1"
+ "encr.dev/v2/internals/resourcepaths"
+)
+
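+// toPathSegments converts a parsed resource path into the AI wire format,
+// attaching any per-parameter docs keyed by segment value.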
+func toPathSegments(p *resourcepaths.Path, docs map[string]string) []PathSegment {
+ rtn := make([]PathSegment, 0, len(p.Segments))
+ for _, s := range p.Segments {
+ switch s.Type {
+ case resourcepaths.Literal:
+ rtn = append(rtn, PathSegment{Type: SegmentTypeLiteral, Value: ptr(s.Value)})
+ case resourcepaths.Param:
+ rtn = append(rtn, PathSegment{
+ Type: SegmentTypeParam,
+ Value: ptr(s.Value),
+ ValueType: ptr(SegmentValueType(strings.ToLower(s.ValueType.String()))),
+ Doc: docs[s.Value],
+ })
+ case resourcepaths.Wildcard:
+ rtn = append(rtn, PathSegment{
+ Type: SegmentTypeWildcard,
+ Value: ptr(s.Value),
+ ValueType: ptr(SegmentValueType(strings.ToLower(s.ValueType.String()))),
+ Doc: docs[s.Value],
+ })
+ case resourcepaths.Fallback:
+ rtn = append(rtn, PathSegment{
+ Type: SegmentTypeFallback,
+ Value: ptr(s.Value),
+ ValueType: ptr(SegmentValueType(strings.ToLower(s.ValueType.String()))),
+ Doc: docs[s.Value],
+ })
+ }
+ }
+ return rtn
+}
+
+func metaPathToPathSegments(metaPath *meta.Path) []PathSegment {
+ var segments []PathSegment
+ for _, seg := range metaPath.Segments {
+ segments = append(segments, PathSegment{
+ Type: toSegmentType(seg.Type),
+ Value: ptr(seg.Value),
+ ValueType: ptr(toSegmentValueType(seg.ValueType)),
+ })
+ }
+ return segments
+}
+
+func toSegmentValueType(valueType meta.PathSegment_ParamType) SegmentValueType {
+ switch valueType {
+ case meta.PathSegment_UUID:
+ return "string"
+ default:
+ return SegmentValueType(strings.ToLower(valueType.String()))
+ }
+}
+
+func toSegmentType(segmentType meta.PathSegment_SegmentType) SegmentType {
+ switch segmentType {
+ case meta.PathSegment_LITERAL:
+ return SegmentTypeLiteral
+ case meta.PathSegment_PARAM:
+ return SegmentTypeParam
+ case meta.PathSegment_WILDCARD:
+ return SegmentTypeWildcard
+ case meta.PathSegment_FALLBACK:
+ return SegmentTypeFallback
+ default:
+ panic("unknown segment type")
+ }
+}
+
+func toVisibility(accessType meta.RPC_AccessType) VisibilityType {
+ switch accessType {
+ case meta.RPC_PUBLIC:
+ return VisibilityTypePublic
+ case meta.RPC_PRIVATE:
+ return VisibilityTypePrivate
+ case meta.RPC_AUTH:
+ return ""
+ default:
+ panic("unknown access type")
+ }
+}
+
+func renderTypesFromMetadata(md *meta.Data, svcs ...string) string {
+ var types []*schema.Decl
+ for _, metaSvc := range md.Svcs {
+ if len(svcs) > 0 && !slices.Contains(svcs, metaSvc.Name) {
+ continue
+ }
+ for _, rpc := range metaSvc.Rpcs {
+ if rpc.RequestSchema != nil {
+ types = append(types, md.Decls[rpc.RequestSchema.GetNamed().Id])
+ }
+ if rpc.ResponseSchema != nil {
+ types = append(types, md.Decls[rpc.ResponseSchema.GetNamed().Id])
+ }
+ }
+ }
+ src, _ := clientgen.GenTypes(md, types...)
+ return string(src)
+}
+
+func parseServicesFromMetadata(md *meta.Data, svcs ...string) []ServiceInput {
+ services := []ServiceInput{}
+ for _, metaSvc := range md.Svcs {
+ if len(svcs) > 0 && !slices.Contains(svcs, metaSvc.Name) {
+ continue
+ }
+ svc := ServiceInput{
+ Name: metaSvc.Name,
+ }
+ for _, rpc := range metaSvc.Rpcs {
+ ep := &Endpoint{
+ Name: rpc.Name,
+ Method: rpc.HttpMethods[0],
+ Visibility: toVisibility(rpc.AccessType),
+ Path: metaPathToPathSegments(rpc.Path),
+ }
+ if rpc.RequestSchema != nil {
+ decl := md.Decls[rpc.RequestSchema.GetNamed().Id]
+ ep.RequestType = decl.Name
+ }
+ if rpc.ResponseSchema != nil {
+ decl := md.Decls[rpc.ResponseSchema.GetNamed().Id]
+ ep.ResponseType = decl.Name
+ }
+ svc.Endpoints = append(svc.Endpoints, ep)
+ }
+ services = append(services, svc)
+ }
+ return services
+}
diff --git a/cli/daemon/dash/ai/manager.go b/cli/daemon/dash/ai/manager.go
new file mode 100644
index 0000000000..3d740a457d
--- /dev/null
+++ b/cli/daemon/dash/ai/manager.go
@@ -0,0 +1,93 @@
+package ai
+
+import (
+ "context"
+
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/pkg/fns"
+ "encr.dev/pkg/paths"
+ meta "encr.dev/proto/encore/parser/meta/v1"
+)
+
+var ErrorCodeMap = map[string]int64{
+ "ai_task_limit_reached": 100,
+}
+
+// Manager exposes the AI functionality to the local dashboard.
+type Manager struct{}
+
+func NewAIManager() *Manager {
+ return &Manager{}
+}
+
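+// DefineEndpoints asks the platform AI service to define endpoints for the
+// proposed services, forwarding assembled updates to notifier.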
+func (m *Manager) DefineEndpoints(ctx context.Context, appSlug string, sessionID AISessionID, prompt string, md *meta.Data, proposed []Service, notifier AINotifier) (*AITask, error) {
+ svcs := fns.Map(proposed, Service.GetName)
+ return startAITask[struct {
+ Message *AIStreamMessage `graphql:"result: defineEndpoints(appSlug: $appSlug, sessionID: $sessionID, prompt: $prompt, current: $current, proposedDesign: $proposedDesign, existingTypes: $existingTypes)"`
+ }](ctx, map[string]interface{}{
+ "appSlug": appSlug,
+ "prompt": prompt,
+ "current": parseServicesFromMetadata(md, svcs...),
+ "proposedDesign": fns.Map(proposed, Service.GraphQL),
+ "sessionID": sessionID,
+ "existingTypes": renderTypesFromMetadata(md, svcs...),
+ }, newEndpointAssemblerHandler(proposed, notifier, true))
+}
+
+func (m *Manager) ProposeSystemDesign(ctx context.Context, appSlug, prompt string, md *meta.Data, notifier AINotifier) (*AITask, error) {
+ return startAITask[struct {
+ Message *AIStreamMessage `graphql:"result: proposeSystemDesign(appSlug: $appSlug, prompt: $prompt, current: $current)"`
+ }](ctx, map[string]interface{}{
+ "appSlug": appSlug,
+ "prompt": prompt,
+ "current": parseServicesFromMetadata(md),
+ }, newEndpointAssemblerHandler(nil, notifier, false))
+}
+
+func (m *Manager) ModifySystemDesign(ctx context.Context, appSlug string, sessionID AISessionID, originalPrompt string, proposed []Service, newPrompt string, md *meta.Data, notifier AINotifier) (*AITask, error) {
+ return startAITask[struct {
+ Message *AIStreamMessage `graphql:"result: modifySystemDesign(appSlug: $appSlug, sessionID: $sessionID, originalPrompt: $originalPrompt, proposedDesign: $proposedDesign, newPrompt: $newPrompt, current: $current)"`
+ }](ctx, map[string]interface{}{
+ "appSlug": appSlug,
+ "originalPrompt": originalPrompt,
+ "proposedDesign": fns.Map(proposed, Service.GraphQL),
+ "current": parseServicesFromMetadata(md),
+ "newPrompt": newPrompt,
+ "sessionID": sessionID,
+ }, newEndpointAssemblerHandler(proposed, notifier, false))
+}
+
+func (m *Manager) ParseCode(ctx context.Context, services []Service, app *apps.Instance) (*SyncResult, error) {
+ return parseCode(ctx, app, services)
+}
+
+func (m *Manager) UpdateCode(ctx context.Context, services []Service, app *apps.Instance, overwrite bool) (*SyncResult, error) {
+ return updateCode(ctx, services, app, overwrite)
+}
+
+type WriteFilesResponse struct {
+ FilesPaths []paths.RelSlash `json:"paths"`
+}
+
+func (m *Manager) WriteFiles(ctx context.Context, services []Service, app *apps.Instance) (*WriteFilesResponse, error) {
+ files, err := writeFiles(services, app)
+ return &WriteFilesResponse{FilesPaths: files}, err
+}
+
+type PreviewFile struct {
+ Path paths.RelSlash `json:"path"`
+ Content string `json:"content"`
+}
+
+type PreviewFilesResponse struct {
+ Files []PreviewFile `json:"files"`
+}
+
+func (m *Manager) PreviewFiles(ctx context.Context, services []Service, app *apps.Instance) (*PreviewFilesResponse, error) {
+ files, err := generateSrcFiles(services, app)
+ return &PreviewFilesResponse{Files: fns.TransformMapToSlice(files, func(k paths.RelSlash, v string) PreviewFile {
+ return PreviewFile{Path: k, Content: v}
+ })}, err
+}
diff --git a/cli/daemon/dash/ai/overlay.go b/cli/daemon/dash/ai/overlay.go
new file mode 100644
index 0000000000..8976445522
--- /dev/null
+++ b/cli/daemon/dash/ai/overlay.go
@@ -0,0 +1,350 @@
+package ai
+
+import (
+ "bytes"
+ "fmt"
+ "go/token"
+ "io"
+ "os"
+ "strings"
+ "time"
+
+ "golang.org/x/exp/maps"
+
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/pkg/errinsrc"
+ "encr.dev/pkg/fns"
+ "encr.dev/pkg/idents"
+ "encr.dev/pkg/paths"
+ meta "encr.dev/proto/encore/parser/meta/v1"
+ "encr.dev/v2/internals/parsectx"
+ "encr.dev/v2/internals/perr"
+)
+
+// servicePaths is a helper struct that manages the mapping between service names, pkg paths, and file paths.
+// It's created by parsing the metadata of the app.
+type servicePaths struct {
+ relPaths map[string]paths.RelSlash
+ root paths.FS
+ module paths.Mod
+}
+
+func (s *servicePaths) IsNew(svc string) bool {
+ _, ok := s.relPaths[svc]
+ return !ok
+}
+
+func (s *servicePaths) Add(svc string, path paths.RelSlash) *servicePaths {
+ s.relPaths[svc] = path
+ return s
+}
+
+func (s *servicePaths) PkgPath(svc string) paths.Pkg {
+ rel := s.RelPath(svc)
+ return s.module.Pkg(rel)
+}
+
+func (s *servicePaths) FullPath(svc string) paths.FS {
+ rel := s.RelPath(svc)
+ return s.root.JoinSlash(rel)
+}
+
+func (s *servicePaths) RelPath(svc string) paths.RelSlash {
+ pkgName, ok := s.relPaths[svc]
+ if !ok {
+ pkgName = paths.RelSlash(strings.ToLower(svc))
+ }
+ return pkgName
+}
+
+func (s *servicePaths) FileName(svc, name string) (paths.FS, error) {
+ relPath, err := s.RelFileName(svc, name)
+ if err != nil {
+ return "", err
+ }
+ return s.root.JoinSlash(relPath), nil
+}
+
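+// RelFileName returns a file name not yet in use for the given service,
+// starting from the snake_cased name and appending _1, _2, ... until no
+// file with that name exists on disk.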
+func (s *servicePaths) RelFileName(svc, name string) (paths.RelSlash, error) {
+ pkgPath := s.FullPath(svc)
+ name = idents.Convert(name, idents.SnakeCase)
+ fileName := name + ".go"
+ var i int
+ for {
+ fspath := pkgPath.Join(fileName)
+ if _, err := os.Stat(fspath.ToIO()); os.IsNotExist(err) {
+ return s.RelPath(svc).Join(fileName), nil
+ } else if err != nil {
+ return "", err
+ }
+ i++
+ fileName = fmt.Sprintf("%s_%d.go", name, i)
+ }
+}
+
+func newServicePaths(app *apps.Instance) (*servicePaths, error) {
+ md, err := app.CachedMetadata()
+ if err != nil {
+ return nil, err
+ }
+ pkgRelPath := fns.ToMap(md.Pkgs, func(p *meta.Package) string { return p.RelPath })
+ svcPaths := &servicePaths{
+ relPaths: map[string]paths.RelSlash{},
+ root: paths.FS(app.Root()),
+ module: paths.Mod(md.ModulePath),
+ }
+ for _, svc := range md.Svcs {
+ if pkgRelPath[svc.RelPath] != nil {
+ svcPaths.Add(svc.Name, paths.RelSlash(pkgRelPath[svc.RelPath].RelPath))
+ }
+ }
+ return svcPaths, nil
+}
+
+// An overlay is a virtual file used to store the source code of an endpoint or
+// its types. It automatically generates a header with the package name and
+// imports, and it implements the os.FileInfo and os.DirEntry interfaces.
+type overlay struct {
+ path paths.FS
+ endpoint *Endpoint
+ service *Service
+ codeType CodeType
+ content []byte
+ headerOffset token.Position
+}
+
+func (o *overlay) Type() os.FileMode {
+ return o.Mode()
+}
+
+func (o *overlay) Info() (os.FileInfo, error) {
+ return o, nil
+}
+
+func (o *overlay) Name() string {
+ return o.path.Base()
+}
+
+func (o *overlay) Size() int64 {
+ return int64(len(o.content))
+}
+
+func (o *overlay) Mode() os.FileMode {
+ return os.ModePerm
+}
+
+func (o *overlay) ModTime() time.Time {
+ return time.Now()
+}
+
+func (o *overlay) IsDir() bool {
+ return false
+}
+
+func (o *overlay) Sys() any {
+ // Overlays have no underlying data source; os.FileInfo allows Sys to return nil.
+ return nil
+}
+
+func (o *overlay) Stat() (os.FileInfo, error) {
+ return o, nil
+}
+
+func (o *overlay) Reader() io.ReadCloser {
+ return &overlayReader{o, bytes.NewReader(o.content)}
+}
+
+// overlayReader is a wrapper around the overlay to implement io.ReadCloser
+type overlayReader struct {
+ *overlay
+ *bytes.Reader
+}
+
+func (o *overlayReader) Close() error { return nil }
+
+var (
+ _ os.FileInfo = (*overlay)(nil)
+ _ os.DirEntry = (*overlay)(nil)
+)
+
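+// newOverlays builds the overlay set for the given services. If overwrite is
+// set, any existing endpoint and type source on the endpoints is cleared first.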
+func newOverlays(app *apps.Instance, overwrite bool, services ...Service) (*overlays, error) {
+ svcPaths, err := newServicePaths(app)
+ if err != nil {
+ return nil, err
+ }
+ o := &overlays{
+ items: map[paths.FS]*overlay{},
+ paths: svcPaths,
+ }
+ for _, s := range services {
+ for _, e := range s.Endpoints {
+ if overwrite {
+ e.TypeSource = ""
+ e.EndpointSource = ""
+ }
+ if err := o.add(s, e); err != nil {
+ return nil, err
+ }
+ }
+ }
+ return o, nil
+}
+
+// overlays is a collection of virtual files that are used to store the source code of endpoints and types
+// in memory. It's modelled as a replacement for the os package.
+type overlays struct {
+ items map[paths.FS]*overlay
+ paths *servicePaths
+}
+
+func (o *overlays) Stat(name string) (os.FileInfo, error) {
+ f, ok := o.items[paths.FS(name)]
+ if !ok {
+ // not an overlay; fall back to the real filesystem
+ return os.Stat(name)
+ }
+ return f, nil
+}
+
+func (o *overlays) ReadDir(name string) ([]os.DirEntry, error) {
+ entries := map[string]os.DirEntry{}
+ osFiles, err := os.ReadDir(name)
+ for _, f := range osFiles {
+ entries[f.Name()] = f
+ }
+ dir := paths.FS(name)
+ for _, info := range o.items {
+ if dir == info.path.Dir() {
+ entries[info.path.Base()] = info
+ }
+ }
+ if len(entries) == 0 && err != nil {
+ return nil, err
+ }
+ return maps.Values(entries), nil
+}
+
+func (o *overlays) PkgOverlay() map[string][]byte {
+ files := map[string][]byte{}
+ for f, info := range o.items {
+ files[f.ToIO()] = info.content
+ }
+ return files
+}
+
+func (o *overlays) ReadFile(name string) ([]byte, error) {
+ f, ok := o.items[paths.FS(name)]
+ if !ok {
+ // not an overlay; fall back to the real filesystem
+ return os.ReadFile(name)
+ }
+ return f.content, nil
+}
+
+func (o *overlays) Open(name string) (io.ReadCloser, error) {
+ f, ok := o.items[paths.FS(name)]
+ if !ok {
+ // not an overlay; fall back to the real filesystem
+ return os.Open(name)
+ }
+ return f.Reader(), nil
+}
+
+func (o *overlays) pkgPaths() []paths.Pkg {
+ pkgs := map[paths.Pkg]struct{}{}
+ for _, info := range o.items {
+ pkgs[o.paths.PkgPath(info.service.Name)] = struct{}{}
+ }
+ return maps.Keys(pkgs)
+}
+
+func (o *overlays) get(p paths.FS) (*overlay, bool) {
+ rtn, ok := o.items[p]
+ return rtn, ok
+}
+
+// validationErrors converts a perr.List into a slice of ValidationErrors
+func (o *overlays) validationErrors(list *perr.List) []ValidationError {
+ var rtn []ValidationError
+ for i := 0; i < list.Len(); i++ {
+ err := list.At(i)
+ rtn = append(rtn, o.validationError(err)...)
+ }
+ return rtn
+}
+
+// validationError translates an errinsrc.ErrInSrc into ValidationErrors, a
+// simplified representation used for displaying errors in the dashboard.
+func (o *overlays) validationError(err *errinsrc.ErrInSrc) []ValidationError {
+ if err.Params.Locations == nil {
+ return []ValidationError{{
+ Message: err.Params.Summary,
+ }}
+ }
+ var rtn []ValidationError
+ for _, loc := range err.Params.Locations {
+ o, ok := o.get(paths.FS(loc.File.FullPath))
+ if !ok {
+ rtn = append(rtn, ValidationError{
+ Message: err.Params.Summary,
+ })
+ continue
+ }
+ rtn = append(rtn, ValidationError{
+ Service: o.service.ID,
+ Endpoint: o.endpoint.ID,
+ CodeType: o.codeType,
+ Message: err.Params.Summary,
+ Start: &Pos{
+ Line: loc.Start.Line - o.headerOffset.Line,
+ Column: loc.Start.Col - o.headerOffset.Column,
+ },
+ End: &Pos{
+ Line: loc.End.Line - o.headerOffset.Line,
+ Column: loc.End.Col - o.headerOffset.Column,
+ },
+ })
+ }
+ return rtn
+}
+
+// add creates new overlays for an endpoint and its types.
+// We create separate overlays for each endpoint and its types to allow for easier parsing and code generation.
+func (o *overlays) add(s Service, e *Endpoint) error {
+ p, err := o.paths.FileName(s.Name, e.Name+"_func")
+ if err != nil {
+ return err
+ }
+ offset, content := toSrcFile(p, s.Name, e.EndpointSource)
+ e.EndpointSource = string(content[offset.Offset:])
+ o.items[p] = &overlay{
+ path: p,
+ endpoint: e,
+ service: &s,
+ codeType: CodeTypeEndpoint,
+ content: content,
+ headerOffset: offset,
+ }
+ p, err = o.paths.FileName(s.Name, e.Name+"_types")
+ if err != nil {
+ return err
+ }
+ offset, content = toSrcFile(p, s.Name, e.TypeSource)
+ e.TypeSource = string(content[offset.Offset:])
+ o.items[p] = &overlay{
+ path: p,
+ endpoint: e,
+ service: &s,
+ codeType: CodeTypeTypes,
+ content: content,
+ headerOffset: offset,
+ }
+ return nil
+}
+
+var (
+ _ parsectx.OverlaidOSFS = (*overlays)(nil)
+)
diff --git a/cli/daemon/dash/ai/parser.go b/cli/daemon/dash/ai/parser.go
new file mode 100644
index 0000000000..331a0bd50a
--- /dev/null
+++ b/cli/daemon/dash/ai/parser.go
@@ -0,0 +1,315 @@
+package ai
+
+import (
+ "context"
+ "go/ast"
+ "go/token"
+ "runtime"
+ "slices"
+ "strings"
+
+ "github.com/rs/zerolog"
+
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/internal/env"
+ "encr.dev/pkg/fns"
+ "encr.dev/pkg/paths"
+ "encr.dev/v2/internals/parsectx"
+ "encr.dev/v2/internals/perr"
+ "encr.dev/v2/internals/pkginfo"
+ "encr.dev/v2/internals/schema"
+ "encr.dev/v2/parser/apis"
+ "encr.dev/v2/parser/apis/api"
+ "encr.dev/v2/parser/apis/api/apienc"
+ "encr.dev/v2/parser/resource/resourceparser"
+)
+
+// parseErrorList parses a list of error docs from a doc string.
+func parseErrorList(doc string) (string, []*Error) {
+ doc, errs := parseDocList(doc, ErrDocPrefix)
+ return doc, fns.Map(errs, func(e docListItem) *Error {
+ return &Error{
+ Code: e.Key,
+ Doc: e.Doc,
+ }
+ })
+}
+
+// parsePathList parses a list of path docs from a doc string.
+func parsePathList(doc string) (string, map[string]string) {
+ doc, docs := parseDocList(doc, PathDocPrefix)
+ rtn := map[string]string{}
+ for _, d := range docs {
+ rtn[d.Key] = d.Doc
+ }
+ return doc, rtn
+}
+
+// parseDocList parses a list of key-value pairs from a doc string.
+// e.g.
+//
+// Errors:
+// - NotFound: The requested resource was not found.
+// - InvalidArgument: The request had invalid arguments.
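+//
+// yields the items {NotFound, "The requested resource was not found."} and
+// {InvalidArgument, "The request had invalid arguments."}, and returns the
+// doc string with the section stripped out.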
+func parseDocList(doc, section string) (string, []docListItem) {
+ var errs []docListItem
+ lines := strings.Split(doc, "\n")
+ start := -1
+ end := -1
+loop:
+ for i, line := range lines {
+ end = i
+ if strings.HasPrefix(strings.TrimSpace(line), section+":") {
+ start = i
+ } else if start == -1 {
+ continue
+ } else if len(line) > 2 {
+ switch strings.TrimSpace(line[:2]) {
+ case "-", "":
+ default:
+ // A non-list line ends the section; a bare break would only exit
+ // the switch, so break out of the scan loop by label instead.
+ end = i - 1
+ break loop
+ }
+ }
+ lines[i] = strings.TrimSpace(line)
+ if line == "" && lines[i-1] == "" {
+ break
+ }
+ }
+ if start == -1 {
+ return doc, errs
+ }
+
+ for _, line := range lines[start+1 : end+1] {
+ key, doc, ok := strings.Cut(line, ":")
+ key = strings.TrimPrefix(key, "-")
+ key = strings.TrimSpace(key)
+ if ok {
+ errs = append(errs, docListItem{
+ Key: key,
+ Doc: strings.TrimSpace(doc),
+ })
+ } else if len(errs) > 0 && line != "" {
+ errs[len(errs)-1].Doc += "\n" + line
+ }
+ }
+ return strings.Join(lines[:start], "\n"), errs
+}
+
+// docListItem represents a key-value pair in a doc list.
+type docListItem struct {
+ Key string
+ Doc string
+}
+
+// deref returns the underlying type of a pointer type.
+func deref(p schema.Type) schema.Type {
+ for {
+ if pt, ok := p.(schema.PointerType); ok {
+ p = pt.Elem
+ } else {
+ return p
+ }
+ }
+}
+
+// parseCode updates the structured Endpoint data based on the code in the
+// Endpoint.TypeSource and Endpoint.EndpointSource fields.
+func parseCode(ctx context.Context, app *apps.Instance, services []Service) (rtn *SyncResult, err error) {
+ // assemble an overlay with all our newly defined endpoints
+ overlays, err := newOverlays(app, false, services...)
+ if err != nil {
+ return nil, err
+ }
+
+ fs := token.NewFileSet()
+ errs := perr.NewList(ctx, fs, overlays.ReadFile)
+ rootDir := paths.RootedFSPath(app.Root(), ".")
+ pc := &parsectx.Context{
+ Ctx: ctx,
+ Log: zerolog.Logger{},
+ Build: parsectx.BuildInfo{
+ Experiments: nil,
+ GOROOT: paths.RootedFSPath(env.EncoreGoRoot(), "."),
+ GOARCH: runtime.GOARCH,
+ GOOS: runtime.GOOS,
+ },
+ MainModuleDir: rootDir,
+ FS: fs,
+ ParseTests: false,
+ Errs: errs,
+ Overlay: overlays,
+ }
+
+ // Catch parser bailouts and convert them to ValidationErrors
+ defer func() {
+ perr.CatchBailout(recover())
+ if rtn == nil {
+ rtn = &SyncResult{
+ Services: services,
+ }
+ }
+ rtn.Errors = overlays.validationErrors(errs)
+ }()
+
+ // Load overlay packages using the encore loader
+ loader := pkginfo.New(pc)
+ pkgs := map[paths.Pkg]*pkginfo.Package{}
+ for _, pkgPath := range overlays.pkgPaths() {
+ pkg, ok := loader.LoadPkg(token.NoPos, pkgPath)
+ if ok {
+ pkgs[pkgPath] = pkg
+ }
+ }
+
+ // Create a schema parser to help us parse the types
+ schemaParser := schema.NewParser(pc, loader)
+
+ for _, pkg := range pkgs {
+ // Use the API parser to parse the endpoints for each overlaid package
+ pass := &resourceparser.Pass{
+ Context: pc,
+ SchemaParser: schemaParser,
+ Pkg: pkg,
+ }
+ apis.Parser.Run(pass)
+ for _, r := range pass.Resources() {
+ switch r := r.(type) {
+ case *api.Endpoint:
+ // We're only interested in endpoints that are in our overlays
+ overlay, ok := overlays.get(r.File.FSPath)
+ if !ok {
+ continue
+ }
+ e := overlay.endpoint
+
+ pathDocs := map[string]string{}
+ e.Doc, e.Errors = parseErrorList(r.Doc)
+ e.Doc, pathDocs = parsePathList(e.Doc)
+ e.Name = r.Name
+ e.Method = r.HTTPMethods[0]
+ e.Visibility = VisibilityType(r.Access)
+ e.Language = "GO"
+ e.Path = toPathSegments(r.Path, pathDocs)
+
+ // Clear the types as we will reparse them
+ e.Types = []*Type{}
+ if nr, ok := deref(r.Request).(schema.NamedType); ok {
+ e.RequestType = nr.String()
+ // If the request type is in the overlays, we should parse it and
+ // add it to the endpoint associated with the overlay
+ ov, ok := overlays.get(nr.DeclInfo.File.FSPath)
+ if len(r.RequestEncoding()) > 0 && ok {
+ e = ov.endpoint
+ e.Types = append(e.Types, &Type{
+ Name: nr.String(),
+ Doc: strings.TrimSpace(nr.DeclInfo.Doc),
+ Fields: fns.Map(r.RequestEncoding()[0].AllParameters(), func(f *apienc.ParameterEncoding) *TypeField {
+ return &TypeField{
+ Name: f.SrcName,
+ WireName: f.WireName,
+ Location: f.Location,
+ Type: f.Type.String(),
+ Doc: strings.TrimSpace(f.Doc),
+ }
+ }),
+ })
+ }
+ }
+ if nr, ok := deref(r.Response).(schema.NamedType); ok {
+ e.ResponseType = nr.String()
+ // If the response type is in the overlays, we should parse it and
+ // add it to the endpoint associated with the overlay
+ ov, ok := overlays.get(nr.DeclInfo.File.FSPath)
+ if r.ResponseEncoding() != nil && ok {
+ e = ov.endpoint
+ e.Types = append(e.Types, &Type{
+ Name: nr.String(),
+ Doc: strings.TrimSpace(nr.DeclInfo.Doc),
+ Fields: fns.Map(r.ResponseEncoding().AllParameters(), func(f *apienc.ParameterEncoding) *TypeField {
+ return &TypeField{
+ Name: f.SrcName,
+ WireName: f.WireName,
+ Location: f.Location,
+ Type: f.Type.String(),
+ Doc: strings.TrimSpace(f.Doc),
+ }
+ }),
+ })
+ }
+ }
+ }
+ }
+ // Parse types which are in the overlays but not used in request/response
+ for _, file := range pkg.Files {
+ ast.Inspect(file.AST(), func(node ast.Node) bool {
+ switch node := node.(type) {
+ case *ast.GenDecl:
+ // We're only interested in type declarations
+ if node.Tok != token.TYPE {
+ return true
+ }
+ for _, spec := range node.Specs {
+ d := spec.(*ast.TypeSpec)
+ // If the type is not defined in our overlays, skip it.
+ olay, ok := overlays.get(file.FSPath)
+ if !ok {
+ continue
+ }
+
+ // If it's not a struct type, skip it.
+ s, ok := schemaParser.ParseType(file, d.Type).(schema.StructType)
+ if !ok {
+ continue
+ }
+
+ e := olay.endpoint
+ // If the type has already been parsed, skip it.
+ if slices.ContainsFunc(e.Types, func(t *Type) bool { return t.Name == d.Name.Name }) {
+ continue
+ }
+
+ // Otherwise we should add it
+ e.Types = append(e.Types, &Type{
+ Name: d.Name.Name,
+ Doc: docText(node.Doc),
+ Fields: fns.MapAndFilter(s.Fields, parseTypeField),
+ })
+ }
+ }
+ return true
+ })
+ }
+ }
+ return &SyncResult{
+ Services: services,
+ }, nil
+}
+
+// parseTypeField is a helper function to parse a schema field into a TypeField.
+func parseTypeField(f schema.StructField) (*TypeField, bool) {
+ name, ok := f.Name.Get()
+ if !ok {
+ return nil, false
+ }
+ // Fields parsed by this function are not part of a request or response type,
+ // so we can assume the wire name is the JSON tag name.
+ wireName := name
+ if tag, err := f.Tag.Get("json"); err == nil {
+ wireName = tag.Name
+ }
+ return &TypeField{
+ Name: name,
+ Type: f.Type.String(),
+ Doc: f.Doc,
+ WireName: wireName,
+ }, true
+}
+
+// docText extracts the text from a comment group, or returns "" if it is nil.
+func docText(c *ast.CommentGroup) string {
+ if c == nil {
+ return ""
+ }
+ return strings.TrimSpace(c.Text())
+}
diff --git a/cli/daemon/dash/ai/sql.go b/cli/daemon/dash/ai/sql.go
new file mode 100644
index 0000000000..48ac26485d
--- /dev/null
+++ b/cli/daemon/dash/ai/sql.go
@@ -0,0 +1,28 @@
+package ai
+
+import (
+ "os"
+ "os/exec"
+ "path/filepath"
+
+ "github.com/golang/protobuf/proto"
+
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/proto/encore/daemon"
+)
+
+// ParseSQLSchema uses SQLC to parse the migration files for an Encore database
+// and returns the parsed catalog.
+func ParseSQLSchema(app *apps.Instance, schema string) (*daemon.SQLCPlugin_Catalog, error) {
+ schemaPath := filepath.Join(app.Root(), schema)
+ cmd := exec.Command(os.Args[0], "generate-sql-schema", "--proto", schemaPath)
+ output, err := cmd.Output()
+ if err != nil {
+ return nil, err
+ }
+ var req daemon.SQLCPlugin_GenerateRequest
+ if err := proto.Unmarshal(output, &req); err != nil {
+ return nil, err
+ }
+ return req.Catalog, nil
+}
diff --git a/cli/daemon/dash/ai/types.go b/cli/daemon/dash/ai/types.go
new file mode 100644
index 0000000000..8b0ea15568
--- /dev/null
+++ b/cli/daemon/dash/ai/types.go
@@ -0,0 +1,248 @@
+package ai
+
+import (
+ "encr.dev/v2/parser/apis/api/apienc"
+)
+
+type VisibilityType string
+
+const (
+ VisibilityTypePublic VisibilityType = "public"
+ VisibilityTypePrivate VisibilityType = "private"
+ VisibilityTypeAuth VisibilityType = "auth"
+)
+
+type SegmentType string
+
+const (
+ SegmentTypeLiteral SegmentType = "literal"
+ SegmentTypeParam SegmentType = "param"
+ SegmentTypeWildcard SegmentType = "wildcard"
+ SegmentTypeFallback SegmentType = "fallback"
+)
+
+type SegmentValueType string
+
+const SegmentValueTypeString SegmentValueType = "string"
+
+type PathSegments []PathSegment
+
+type PathSegment struct {
+ Type SegmentType `json:"type,omitempty"`
+ Value *string `json:"value,omitempty"`
+ ValueType *SegmentValueType `json:"valueType,omitempty"`
+ Doc string `graphql:"-" json:"doc,omitempty"`
+}
+
+func (p PathSegment) DocItem() (string, string) {
+ return *p.Value, p.Doc
+}
+
+type Endpoint struct {
+ ID string `json:"id,omitempty"`
+ Name string `json:"name"`
+ Doc string `json:"doc"`
+ Method string `json:"method"`
+ Visibility VisibilityType `json:"visibility"`
+ Path PathSegments `json:"path"`
+ RequestType string `json:"requestType,omitempty"`
+ ResponseType string `json:"responseType,omitempty"`
+ Errors []*Error `json:"errors,omitempty"`
+ Types []*Type `json:"types,omitempty"`
+ Language string `json:"language,omitempty"`
+ TypeSource string `json:"typeSource,omitempty"`
+ EndpointSource string `json:"endpointSource,omitempty"`
+}
+
+func (s *Endpoint) Auth() bool {
+ return s.Visibility == VisibilityTypeAuth
+}
+
+// GraphQL scrubs data that is not needed by the GraphQL client
+func (s *Endpoint) GraphQL() *Endpoint {
+ s.ID = ""
+ s.EndpointSource = ""
+ s.TypeSource = ""
+ s.Types = nil
+ s.Language = ""
+ for i := range s.Path {
+ s.Path[i].Doc = ""
+ }
+ return s
+}
+
+type Type struct {
+ Name string `json:"name,omitempty"`
+ Doc string `json:"doc,omitempty"`
+ Fields []*TypeField `json:"fields,omitempty"`
+}
+
+type Service struct {
+ ID string `json:"id,omitempty"`
+ Name string `json:"name,omitempty"`
+ Doc string `json:"doc,omitempty"`
+ Endpoints []*Endpoint `json:"endpoints,omitempty"`
+}
+
+func (s Service) GetName() string {
+ return s.Name
+}
+
+func (s Service) GetEndpoints() []*Endpoint {
+ return s.Endpoints
+}
+
+// ServiceInput is the GraphQL input type for our queries.
+// The GraphQL client we use requires the Go type name to
+// match the GraphQL type name.
+type ServiceInput Service
+
+// GraphQL scrubs data that is not needed by the GraphQL client
+func (s Service) GraphQL() ServiceInput {
+ s.ID = ""
+ for _, e := range s.Endpoints {
+ e.GraphQL()
+ }
+ return ServiceInput(s)
+}
+
+type BaseAIUpdateType struct {
+ Type string `graphql:"__typename" json:"type"`
+}
+
+func (b BaseAIUpdateType) IsAIUpdateType() {}
+
+type AIUpdateType interface {
+ IsAIUpdateType()
+}
+
+type AIStreamUpdate = Result[AIUpdateType]
+
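+// ptr returns a pointer to val.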
+func ptr[T any](val T) *T {
+ return &val
+}
+
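+// Result pairs a streamed value with optional completion and error state.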
+type Result[T any] struct {
+ Value T
+ Finished *bool
+ Error *string
+}
+
+type EndpointUpdate struct {
+ BaseAIUpdateType
+ Service string `json:"service,omitempty"`
+ Name string `json:"name,omitempty"`
+ Doc string `json:"doc,omitempty"`
+ Method string `json:"method,omitempty"`
+ Visibility VisibilityType `json:"visibility,omitempty"`
+ Path []PathSegment `json:"path,omitempty"`
+ RequestType string `json:"requestType,omitempty"`
+ ResponseType string `json:"responseType,omitempty"`
+ Errors []string `json:"errors,omitempty"`
+}
+
+type ServiceUpdate struct {
+ BaseAIUpdateType
+ Name string `json:"name,omitempty"`
+ Doc string `json:"doc,omitempty"`
+}
+
+type TypeUpdate struct {
+ BaseAIUpdateType
+ Service string `json:"service,omitempty"`
+ Endpoint string `json:"endpoint,omitempty"`
+ Name string `json:"name,omitempty"`
+ Doc string `graphql:"mdoc: doc" json:"doc,omitempty"`
+}
+
+type AISessionID string
+
+type SessionUpdate struct {
+ BaseAIUpdateType
+ Id AISessionID
+}
+
+type TitleUpdate struct {
+ BaseAIUpdateType
+ Title string
+}
+
+type LocalEndpointUpdate struct {
+ Type string `json:"type,omitempty"`
+ Service string `json:"service,omitempty"`
+ Endpoint *Endpoint `json:"endpoint,omitempty"`
+}
+
+type TypeField struct {
+ Name string `json:"name,omitempty"`
+ WireName string `json:"wireName,omitempty"`
+ Type string `json:"type,omitempty"`
+ Location apienc.WireLoc `json:"location,omitempty"`
+ Doc string `json:"doc,omitempty"`
+}
+
+type TypeFieldUpdate struct {
+ BaseAIUpdateType
+ Service string `json:"service,omitempty"`
+ Endpoint string `json:"endpoint,omitempty"`
+ Struct string `json:"struct,omitempty"`
+ Name string `json:"name,omitempty"`
+ Type string `json:"type,omitempty"`
+ Doc string `graphql:"mdoc: doc" json:"doc,omitempty"`
+}
+
+type Error struct {
+ Code string `json:"code,omitempty"`
+ Doc string `json:"doc,omitempty"`
+}
+
+func (e Error) DocItem() (string, string) {
+ return e.Code, e.Doc
+}
+
+func (e Error) String() string {
+ return e.Code
+}
+
+type ErrorUpdate struct {
+ BaseAIUpdateType
+ Code string `json:"code,omitempty"`
+ Doc string `json:"doc,omitempty"`
+ Service string `json:"service,omitempty"`
+ Endpoint string `json:"endpoint,omitempty"`
+}
+
+type PathParamUpdate struct {
+ BaseAIUpdateType
+ Service string `json:"service,omitempty"`
+ Endpoint string `json:"endpoint,omitempty"`
+ Param string `json:"param,omitempty"`
+ Doc string `json:"doc,omitempty"`
+}
+
+type SyncResult struct {
+ Services []Service `json:"services"`
+ Errors []ValidationError `json:"errors"`
+}
+
+// ValidationError is a simplified ErrInSrc to return to the dashboard
+type ValidationError struct {
+ Service string `json:"service"`
+ Endpoint string `json:"endpoint"`
+ CodeType CodeType `json:"codeType"`
+ Message string `json:"message"`
+ Start *Pos `json:"start,omitempty"`
+ End *Pos `json:"end,omitempty"`
+}
+
+type CodeType string
+
+const (
+ CodeTypeEndpoint CodeType = "endpoint"
+ CodeTypeTypes CodeType = "types"
+)
+
+type Pos struct {
+ Line int `json:"line"`
+ Column int `json:"column"`
+}
diff --git a/cli/daemon/dash/ai/types_test.go b/cli/daemon/dash/ai/types_test.go
new file mode 100644
index 0000000000..cb513853db
--- /dev/null
+++ b/cli/daemon/dash/ai/types_test.go
@@ -0,0 +1,39 @@
+package ai
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+)
+
+func TestWrapDoc(t *testing.T) {
+ var wrapTests = []struct {
+ width int
+ string string
+ }{
+ {1, "Lorem ipsum dolor sit amet"},
+ {80, "Lorem ipsum dolor sit amet"},
+ {80, "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."},
+ {80, "Lorem Loremipsumdolorsitamet,consecteturadipiscingelit,seddoeiusmodtemporincididuntutlaboreetdoloremagna"},
+ {30, "Loremipsumdolorsitamet,consecteturadipiscingelit,seddoeiusmodtemporincididuntutlaboreetdoloremagna"},
+ {80, ""},
+ {80, "a\nb\nc\nd"},
+ }
+ for _, test := range wrapTests {
+ t.Run(fmt.Sprintf("WrapDoc(%d, %s)", test.width, test.string), func(t *testing.T) {
+ result := wrapDoc(test.string, test.width)
+ lines := strings.Split(result, "\n")
+ for i, line := range lines {
+ if len(line) > test.width && strings.Contains(line, " ") {
+ t.Errorf("Line too long: %s", line)
+ }
+ if i+1 < len(lines) {
+ nextWord, _, _ := strings.Cut(lines[i+1], " ")
+ if len(line)+len(nextWord) < test.width {
+ t.Errorf("Line too short: %s", line)
+ }
+ }
+ }
+ })
+ }
+}
diff --git a/cli/daemon/dash/apiproxy/apiproxy.go b/cli/daemon/dash/apiproxy/apiproxy.go
new file mode 100644
index 0000000000..f17232bff7
--- /dev/null
+++ b/cli/daemon/dash/apiproxy/apiproxy.go
@@ -0,0 +1,45 @@
+package apiproxy
+
+import (
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "runtime"
+
+ "github.com/cockroachdb/errors"
+ "golang.org/x/oauth2"
+
+ "encr.dev/internal/conf"
+ "encr.dev/internal/version"
+)
+
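+// New returns a reverse proxy that forwards requests to targetURL, attaching
+// the user's Encore credentials and version metadata to each request.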
+func New(targetURL string) (*httputil.ReverseProxy, error) {
+ target, err := url.Parse(targetURL)
+ if err != nil {
+ return nil, errors.Wrap(err, "parse target url")
+ }
+
+ proxy := &httputil.ReverseProxy{
+ Transport: &oauth2.Transport{
+ Base: http.DefaultTransport,
+ Source: oauth2.ReuseTokenSource(nil, conf.DefaultTokenSource),
+ },
+ ErrorHandler: func(writer http.ResponseWriter, request *http.Request, err error) {
+ if errors.Is(err, conf.ErrNotLoggedIn) {
+ writer.WriteHeader(http.StatusUnauthorized)
+ return
+ }
+ writer.WriteHeader(http.StatusBadGateway)
+ },
+ Rewrite: func(r *httputil.ProxyRequest) {
+ r.Out.URL = target
+ r.Out.Header.Set("User-Agent", "EncoreCLI/"+version.Version)
+ r.Out.Header.Set("X-Encore-Dev-Dash", "true")
+ r.Out.Header.Set("X-Encore-Version", version.Version)
+ r.Out.Header.Set("X-Encore-GOOS", runtime.GOOS)
+ r.Out.Header.Set("X-Encore-GOARCH", runtime.GOARCH)
+ },
+ }
+ return proxy, nil
+}
diff --git a/cli/daemon/dash/dash.go b/cli/daemon/dash/dash.go
index d8ae14dae6..bff59e1c4d 100644
--- a/cli/daemon/dash/dash.go
+++ b/cli/daemon/dash/dash.go
@@ -2,28 +2,87 @@
package dash
import (
- "bytes"
"context"
"encoding/json"
+ "errors"
"fmt"
- "io/ioutil"
- "net/http"
"path/filepath"
+ "slices"
+ "strings"
+ "sync"
+ "time"
- "encr.dev/cli/daemon/run"
- "encr.dev/cli/daemon/runtime/trace"
- "encr.dev/cli/internal/jsonrpc2"
"github.com/golang/protobuf/jsonpb"
"github.com/rs/zerolog/log"
+
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/cli/daemon/dash/ai"
+ "encr.dev/cli/daemon/engine/trace2"
+ "encr.dev/cli/daemon/namespace"
+ "encr.dev/cli/daemon/run"
+ "encr.dev/cli/daemon/sqldb"
+ "encr.dev/cli/internal/browser"
+ "encr.dev/cli/internal/jsonrpc2"
+ "encr.dev/cli/internal/onboarding"
+ "encr.dev/cli/internal/telemetry"
+ "encr.dev/internal/version"
+ "encr.dev/parser/encoding"
+ "encr.dev/pkg/editors"
+ "encr.dev/pkg/errlist"
+ "encr.dev/pkg/jsonext"
+ tracepb2 "encr.dev/proto/encore/engine/trace2"
+ meta "encr.dev/proto/encore/parser/meta/v1"
)
type handler struct {
- rpc jsonrpc2.Conn
- run *run.Manager
- tr *trace.Store
+ rpc jsonrpc2.Conn
+ apps *apps.Manager
+ run *run.Manager
+ ns *namespace.Manager
+ ai *ai.Manager
+ tr trace2.Store
+}
+
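+// GetMeta returns the metadata for an app, preferring the metadata of a
+// running instance and falling back to the cached metadata on disk.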
+func (h *handler) GetMeta(appID string) (*meta.Data, error) {
+ runInstance := h.run.FindRunByAppID(appID)
+ var md *meta.Data
+ if runInstance != nil && runInstance.ProcGroup() != nil {
+ md = runInstance.ProcGroup().Meta
+ } else {
+ app, err := h.apps.FindLatestByPlatformOrLocalID(appID)
+ if err != nil {
+ return nil, err
+ }
+ md, err = app.CachedMetadata()
+ if err != nil {
+ return nil, err
+ } else if md == nil {
+ return nil, fmt.Errorf("no cached metadata found for app %s", appID)
+ }
+ }
+ return md, nil
+}
+
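+// GetNamespace returns the namespace of the running instance, or the app's
+// active namespace if it isn't running.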
+func (h *handler) GetNamespace(ctx context.Context, appID string) (*namespace.Namespace, error) {
+ runInstance := h.run.FindRunByAppID(appID)
+ if runInstance != nil && runInstance.ProcGroup() != nil {
+ return runInstance.NS, nil
+ } else {
+ app, err := h.apps.FindLatestByPlatformOrLocalID(appID)
+ if err != nil {
+ return nil, err
+ }
+ ns, err := h.ns.GetActive(ctx, app)
+ if err != nil {
+ return nil, err
+ }
+ return ns, nil
+ }
}
func (h *handler) Handle(ctx context.Context, reply jsonrpc2.Replier, r jsonrpc2.Request) error {
+ reply = makeProtoReplier(reply)
+
unmarshal := func(dst interface{}) error {
if r.Params() == nil {
return fmt.Errorf("missing params")
@@ -32,46 +91,210 @@ func (h *handler) Handle(ctx context.Context, reply jsonrpc2.Replier, r jsonrpc2
}
switch r.Method() {
+ case "db/query":
+ var p QueryRequest
+ if err := unmarshal(&p); err != nil {
+ return reply(ctx, nil, err)
+ }
+ res, err := h.Query(ctx, p)
+ return reply(ctx, res, err)
+ case "db/transaction":
+ var p TransactionRequest
+ if err := unmarshal(&p); err != nil {
+ return reply(ctx, nil, err)
+ }
+ res, err := h.Transaction(ctx, p)
+ return reply(ctx, res, err)
+ case "onboarding/get":
+ state, err := onboarding.Load()
+ if err != nil {
+ return reply(ctx, nil, err)
+ }
+ resp := map[string]time.Time{}
+ for key, val := range state.EventMap {
+ if val.IsSet() {
+ resp[key] = val.UTC()
+ }
+ }
+ return reply(ctx, resp, nil)
+ case "onboarding/set":
+ type params struct {
+ Properties []string `json:"properties"`
+ }
+ var p params
+ if err := unmarshal(&p); err != nil {
+ return reply(ctx, nil, err)
+ }
+ state, err := onboarding.Load()
+ if err != nil {
+ return reply(ctx, nil, err)
+ }
+ for _, prop := range p.Properties {
+ state.Property(prop).Set()
+ }
+ err = state.Write()
+ if err != nil {
+ return reply(ctx, nil, err)
+ }
+ return reply(ctx, nil, nil)
+ case "telemetry":
+ type params struct {
+ Event string `json:"event"`
+ Properties map[string]interface{} `json:"properties"`
+ Once bool `json:"once,omitempty"`
+ }
+ var p params
+ if err := unmarshal(&p); err != nil {
+ return reply(ctx, nil, err)
+ }
+ if p.Once {
+ telemetry.SendOnce(p.Event, p.Properties)
+ } else {
+ telemetry.Send(p.Event, p.Properties)
+ }
+ return reply(ctx, "ok", nil)
+ case "version":
+ type versionResp struct {
+ Version string `json:"version"`
+ Channel string `json:"channel"`
+ }
+
+ rtn := versionResp{
+ Version: version.Version,
+ Channel: string(version.Channel),
+ }
+
+ return reply(ctx, rtn, nil)
+
case "list-apps":
type app struct {
- ID string `json:"id"`
- Name string `json:"name"`
+ ID string `json:"id"`
+ Name string `json:"name"`
+ AppRoot string `json:"app_root"`
+ Offline bool `json:"offline,omitempty"`
}
- runs := h.run.ListRuns()
+
apps := []app{} // prevent marshalling as null
- seen := make(map[string]bool)
- for _, r := range runs {
- id := r.AppID
- name := r.AppSlug
- if name == "" {
- name = filepath.Base(r.Root)
+
+ // Load all the apps we know about
+ allApp, err := h.apps.List()
+ if err != nil {
+ return reply(ctx, nil, err)
+ }
+ for _, instance := range allApp {
+ data := app{
+ ID: instance.PlatformOrLocalID(),
+ Name: instance.Name(),
+ AppRoot: instance.Root(),
+ Offline: true,
}
- if !seen[id] {
- seen[id] = true
- apps = append(apps, app{ID: id, Name: name})
+
+ if run := h.run.FindRunByAppID(instance.PlatformOrLocalID()); run != nil {
+ data.Offline = false
}
+
+ apps = append(apps, data)
}
+
+ // Sort the apps by offline status, then by name
+ slices.SortStableFunc(apps, func(a, b app) int {
+ if a.Offline == b.Offline {
+ return strings.Compare(a.Name, b.Name)
+ }
+ if a.Offline {
+ return 1
+ }
+ return -1
+ })
+
return reply(ctx, apps, nil)
+ case "traces/clear":
+ telemetry.Send("traces.clear")
+ var params struct {
+ AppID string `json:"app_id"`
+ }
+ if err := unmarshal(¶ms); err != nil {
+ return reply(ctx, nil, err)
+ }
+ err := h.tr.Clear(ctx, params.AppID)
+ return reply(ctx, "ok", err)
+ case "traces/list":
+ telemetry.Send("traces.list")
+ var params struct {
+ AppID string `json:"app_id"`
+ MessageID string `json:"message_id"`
+ TestTraces *bool `json:"test_traces,omitempty"`
+ }
+ if err := unmarshal(¶ms); err != nil {
+ return reply(ctx, nil, err)
+ }
+
+ query := &trace2.Query{
+ AppID: params.AppID,
+ TestFilter: params.TestTraces,
+ MessageID: params.MessageID,
+ Limit: 100,
+ }
+ var list []*tracepb2.SpanSummary
+ iter := func(s *tracepb2.SpanSummary) bool {
+ list = append(list, s)
+ return true
+ }
+ err := h.tr.List(ctx, query, iter)
+ if err != nil {
+ log.Error().Err(err).Msg("dash: could not list traces")
+ }
+ return reply(ctx, list, err)
+
+ case "traces/get":
+ telemetry.Send("traces.get")
+ var params struct {
+ AppID string `json:"app_id"`
+ TraceID string `json:"trace_id"`
+ }
+ if err := unmarshal(¶ms); err != nil {
+ return reply(ctx, nil, err)
+ }
- case "list-traces":
+ var events []*tracepb2.TraceEvent
+ iter := func(ev *tracepb2.TraceEvent) bool {
+ events = append(events, ev)
+ return true
+ }
+ err := h.tr.Get(ctx, params.AppID, params.TraceID, iter)
+ if err != nil {
+ log.Error().Err(err).Msg("dash: could not list trace events")
+ }
+ return reply(ctx, events, err)
+
+ case "status":
var params struct {
AppID string
}
if err := unmarshal(¶ms); err != nil {
return reply(ctx, nil, err)
}
- traces := h.tr.List(params.AppID)
- tr := make([]*Trace, len(traces))
- for i, t := range traces {
- tt, err := TransformTrace(t)
- if err != nil {
+
+ // Find the latest app by platform ID or local ID.
+ app, err := h.apps.FindLatestByPlatformOrLocalID(params.AppID)
+ if err != nil {
+ if errors.Is(err, apps.ErrNotFound) {
+ return reply(ctx, map[string]interface{}{"running": false}, nil)
+ } else {
return reply(ctx, nil, err)
}
- tr[i] = tt
}
- return reply(ctx, tr, nil)
- case "status":
+ // Now find the running instance(s)
+ runInstance := h.run.FindRunByAppID(params.AppID)
+ status, err := buildAppStatus(app, runInstance)
+ if err != nil {
+ log.Error().Err(err).Msg("dash: could not build app status")
+ return reply(ctx, nil, err)
+ }
+
+ return reply(ctx, status, nil)
+ case "db-migration-status":
var params struct {
AppID string
}
@@ -79,82 +302,290 @@ func (h *handler) Handle(ctx context.Context, reply jsonrpc2.Replier, r jsonrpc2
return reply(ctx, nil, err)
}
- run := h.run.FindRunByAppID(params.AppID)
- if run == nil {
- return reply(ctx, map[string]interface{}{"running": false}, nil)
- }
- proc := run.Proc()
- if proc == nil {
- return reply(ctx, map[string]interface{}{"running": false}, nil)
+ // Find the latest app by platform ID or local ID.
+ app, err := h.apps.FindLatestByPlatformOrLocalID(params.AppID)
+ if err != nil {
+ return reply(ctx, nil, err)
}
- m := &jsonpb.Marshaler{OrigName: true, EmitDefaults: true}
- for _, svc := range proc.Meta.Svcs {
- for _, rpc := range svc.Rpcs {
- log.Info().Str("rpc", svc.Name+"."+rpc.Name).Msg("rpc")
- }
+ appMeta, err := h.GetMeta(params.AppID)
+ if err != nil {
+ return reply(ctx, nil, err)
}
- str, err := m.MarshalToString(proc.Meta)
+ namespace, err := h.GetNamespace(ctx, params.AppID)
if err != nil {
- log.Error().Err(err).Msg("dash: could not marshal app metadata")
return reply(ctx, nil, err)
}
- return reply(ctx, map[string]interface{}{
- "running": true,
- "appID": run.AppID,
- "pid": run.ID,
- "meta": json.RawMessage(str),
- "port": run.Port,
- }, nil)
+ clusterType := sqldb.Run
+ cluster, ok := h.run.ClusterMgr.Get(sqldb.GetClusterID(app, clusterType, namespace))
+ if !ok {
+ return reply(ctx, nil, fmt.Errorf("failed to get database cluster of type %s", clusterType))
+ }
+ status := buildDbMigrationStatus(ctx, appMeta, cluster)
+
+ return reply(ctx, status, nil)
case "api-call":
- var params struct {
- AppID string
- Endpoint string
- Payload []byte
- AuthToken string
+ telemetry.Send("api.call")
+ var params run.ApiCallParams
+ if err := unmarshal(¶ms); err != nil {
+ return reply(ctx, nil, err)
+ }
+ res, err := run.CallAPI(ctx, h.run.FindRunByAppID(params.AppID), ¶ms)
+ return reply(ctx, res, err)
+
+ case "editors/list":
+ var resp struct {
+ Editors []string `json:"editors"`
}
+ found, err := editors.Resolve(ctx)
+ if err != nil {
+ log.Err(err).Msg("dash: could not list editors")
+ return reply(ctx, nil, err)
+ }
+
+ for _, e := range found {
+ resp.Editors = append(resp.Editors, string(e.Editor))
+ }
+ return reply(ctx, resp, nil)
+ case "ai/propose-system-design":
+ telemetry.Send("ai.propose")
+ log.Debug().Msg("dash: propose-system-design")
+ var params struct {
+ AppID string `json:"app_id"`
+ Prompt string `json:"prompt"`
+ }
if err := unmarshal(¶ms); err != nil {
return reply(ctx, nil, err)
}
- run := h.run.FindRunByAppID(params.AppID)
- if run == nil {
- log.Error().Str("appID", params.AppID).Msg("dash: cannot make api call: app not running")
- return reply(ctx, nil, fmt.Errorf("app not running"))
+ md, err := h.GetMeta(params.AppID)
+ if err != nil {
+ return reply(ctx, nil, err)
+ }
+ sessionCh := make(chan *ai.AINotification)
+ defer close(sessionCh)
+ var idResp sync.Once
+ task, err := h.ai.ProposeSystemDesign(ctx, params.AppID, params.Prompt, md, func(ctx context.Context, msg *ai.AINotification) error {
+ if _, ok := msg.Value.(ai.SessionUpdate); ok || msg.Error != nil {
+ idResp.Do(func() {
+ sessionCh <- msg
+ })
+ if ok {
+ return nil
+ }
+ }
+ return h.rpc.Notify(ctx, r.Method()+"/stream", msg)
+ })
+ if err != nil {
+ return reply(ctx, nil, err)
}
- url := fmt.Sprintf("http://localhost:%d/%s", run.Port, params.Endpoint)
- log := log.With().Str("appID", params.AppID).Str("endpoint", params.Endpoint).Logger()
+ select {
+ case msg := <-sessionCh:
+ su, ok := msg.Value.(ai.SessionUpdate)
+ if !ok || msg.Error != nil {
+ if msg.Error != nil {
+ err = jsonrpc2.NewError(ai.ErrorCodeMap[msg.Error.Code], msg.Error.Message)
+ } else {
+ err = jsonrpc2.NewError(1, "missing session_id")
+ }
+ return reply(ctx, nil, err)
+ }
+ return reply(ctx, map[string]string{
+ "session_id": string(su.Id),
+ "subscription_id": task.SubscriptionID,
+ }, nil)
+ case <-ctx.Done():
+ return reply(ctx, nil, ctx.Err())
+ case <-time.NewTimer(10 * time.Second).C:
+ _ = task.Stop()
+ return reply(ctx, nil, errors.New("timed out waiting for response"))
+ }
- req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(params.Payload))
+ case "ai/modify-system-design":
+ telemetry.Send("ai.modify")
+ log.Debug().Msg("dash: modify-system-design")
+ var params struct {
+ AppID string `json:"app_id"`
+ SessionID ai.AISessionID `json:"session_id"`
+ OriginalPrompt string `json:"original_prompt"`
+ Prompt string `json:"prompt"`
+ Proposed []ai.Service `json:"proposed"`
+ }
+ if err := unmarshal(¶ms); err != nil {
+ return reply(ctx, nil, err)
+ }
+ md, err := h.GetMeta(params.AppID)
+ if err != nil {
+ return reply(ctx, nil, err)
+ }
+ task, err := h.ai.ModifySystemDesign(ctx, params.AppID, params.SessionID, params.OriginalPrompt, params.Proposed, params.Prompt, md, func(ctx context.Context, msg *ai.AINotification) error {
+ return h.rpc.Notify(ctx, r.Method()+"/stream", msg)
+ })
+ return reply(ctx, task.SubscriptionID, err)
+ case "ai/define-endpoints":
+ telemetry.Send("ai.details")
+ log.Debug().Msg("dash: define-endpoints")
+ var params struct {
+ AppID string `json:"app_id"`
+ SessionID ai.AISessionID `json:"session_id"`
+ Prompt string `json:"prompt"`
+ Proposed []ai.Service `json:"proposed"`
+ }
+ if err := unmarshal(¶ms); err != nil {
+ return reply(ctx, nil, err)
+ }
+ md, err := h.GetMeta(params.AppID)
+ if err != nil {
+ return reply(ctx, nil, err)
+ }
+ task, err := h.ai.DefineEndpoints(ctx, params.AppID, params.SessionID, params.Prompt, md, params.Proposed, func(ctx context.Context, msg *ai.AINotification) error {
+ return h.rpc.Notify(ctx, r.Method()+"/stream", msg)
+ })
+ return reply(ctx, task.SubscriptionID, err)
+ case "ai/parse-code":
+ log.Debug().Msg("dash: parse-code")
+ var params struct {
+ AppID string `json:"app_id"`
+ Services []ai.Service `json:"services"`
+ }
+ if err := unmarshal(¶ms); err != nil {
+ return reply(ctx, nil, err)
+ }
+ app, err := h.apps.FindLatestByPlatformOrLocalID(params.AppID)
+ if err != nil {
+ return reply(ctx, nil, err)
+ }
+ results, err := h.ai.ParseCode(ctx, params.Services, app)
+ return reply(ctx, results, err)
+ case "ai/update-code":
+ log.Debug().Msg("dash: update-code")
+ var params struct {
+ AppID string `json:"app_id"`
+ Services []ai.Service `json:"services"`
+ Overwrite bool `json:"overwrite"` // Overwrite any existing endpoint code
+ }
+ if err := unmarshal(¶ms); err != nil {
+ return reply(ctx, nil, err)
+ }
+ app, err := h.apps.FindLatestByPlatformOrLocalID(params.AppID)
+ if err != nil {
+ return reply(ctx, nil, err)
+ }
+ results, err := h.ai.UpdateCode(ctx, params.Services, app, params.Overwrite)
+ return reply(ctx, results, err)
+ case "ai/preview-files":
+ telemetry.Send("ai.preview")
+ log.Debug().Msg("dash: preview-files")
+ var params struct {
+ AppID string `json:"app_id"`
+ Services []ai.Service `json:"services"`
+ }
+ if err := unmarshal(¶ms); err != nil {
+ return reply(ctx, nil, err)
+ }
+ app, err := h.apps.FindLatestByPlatformOrLocalID(params.AppID)
+ if err != nil {
+ return reply(ctx, nil, err)
+ }
+ result, err := h.ai.PreviewFiles(ctx, params.Services, app)
+ return reply(ctx, result, err)
+ case "ai/write-files":
+ telemetry.Send("ai.write")
+ log.Debug().Msg("dash: write-files")
+ var params struct {
+ AppID string `json:"app_id"`
+ Services []ai.Service `json:"services"`
+ }
+ if err := unmarshal(¶ms); err != nil {
+ return reply(ctx, nil, err)
+ }
+ app, err := h.apps.FindLatestByPlatformOrLocalID(params.AppID)
+ if err != nil {
+ return reply(ctx, nil, err)
+ }
+ result, err := h.ai.WriteFiles(ctx, params.Services, app)
+ return reply(ctx, result, err)
+ case "ai/parse-sql-schema":
+ var params struct {
+ AppID string `json:"app_id"`
+ }
+ if err := unmarshal(¶ms); err != nil {
+ return reply(ctx, nil, err)
+ }
+ app, err := h.apps.FindLatestByPlatformOrLocalID(params.AppID)
+ if err != nil {
+ return reply(ctx, nil, err)
+ }
+ md, err := h.GetMeta(params.AppID)
if err != nil {
- log.Err(err).Msg("dash: api call failed")
return reply(ctx, nil, err)
}
- if tok := params.AuthToken; tok != "" {
- req.Header.Set("Authorization", "Bearer "+tok)
+ for _, db := range md.SqlDatabases {
+ _, err := ai.ParseSQLSchema(app, *db.MigrationRelPath)
+ if err != nil {
+ return reply(ctx, nil, err)
+ }
+ }
+ return reply(ctx, true, nil)
+ case "editors/open":
+ telemetry.Send("editors.open")
+ var params struct {
+ AppID string `json:"app_id"`
+ Editor editors.EditorName `json:"editor"`
+ File string `json:"file"`
+ StartLine int `json:"start_line,omitempty"`
+ StartCol int `json:"start_col,omitempty"`
+ EndLine int `json:"end_line,omitempty"`
+ EndCol int `json:"end_col,omitempty"`
+ }
+ if err := unmarshal(¶ms); err != nil {
+ log.Warn().Err(err).Msg("dash: could not parse open command")
+ return reply(ctx, nil, err)
+ }
+
+ editor, err := editors.Find(ctx, params.Editor)
+ if err != nil {
+ log.Err(err).Str("editor", string(params.Editor)).Msg("dash: could not find editor")
+ return reply(ctx, nil, err)
}
- resp, err := http.DefaultClient.Do(req)
+
+ app, err := h.apps.FindLatestByPlatformOrLocalID(params.AppID)
if err != nil {
- log.Error().Err(err).Msg("dash: api call failed")
+ if errors.Is(err, apps.ErrNotFound) {
+ return reply(ctx, nil, fmt.Errorf("app not found, try running encore run"))
+ }
+ log.Err(err).Str("app_id", params.AppID).Msg("dash: could not find app")
return reply(ctx, nil, err)
}
- body, _ := ioutil.ReadAll(resp.Body)
- resp.Body.Close()
- log.Info().Int("status", resp.StatusCode).Msg("dash: api call completed")
- return reply(ctx, map[string]interface{}{
- "status": resp.Status,
- "status_code": resp.StatusCode,
- "body": body,
- }, nil)
+
+ if !filepath.IsLocal(params.File) {
+ log.Warn().Str("file", params.File).Msg("dash: file was not local to the repo")
+ return reply(ctx, nil, errors.New("file path must be local"))
+ }
+ params.File = filepath.Join(app.Root(), params.File)
+
+ if err := editors.LaunchExternalEditor(params.File, params.StartLine, params.StartCol, editor); err != nil {
+ log.Err(err).Str("editor", string(params.Editor)).Msg("dash: could not open file")
+ return reply(ctx, nil, err)
+ }
+
+ type openResp struct{}
+ return reply(ctx, openResp{}, nil)
}
return jsonrpc2.MethodNotFound(ctx, reply, r)
}
+type sourceContextResponse struct {
+ Lines []string `json:"lines"`
+ Start int `json:"start"`
+}
+
func (h *handler) listenNotify(ctx context.Context, ch <-chan *notification) {
for {
select {
@@ -169,9 +600,8 @@ func (h *handler) listenNotify(ctx context.Context, ch <-chan *notification) {
}
func (s *Server) listenTraces() {
- for tt := range s.traceCh {
- // Transforming a trace is fairly expensive, so only do it
- // if somebody is listening.
+ for sp := range s.traceCh {
+ // Only marshal the trace if someone's listening.
s.mu.Lock()
hasClients := len(s.clients) > 0
s.mu.Unlock()
@@ -179,14 +609,19 @@ func (s *Server) listenTraces() {
continue
}
- tr, err := TransformTrace(tt)
+ data, err := jsonext.ProtoEncoder.Marshal(sp.Span)
if err != nil {
- log.Error().Err(err).Msg("dash: could not process trace")
+ log.Error().Err(err).Msg("dash: could not marshal trace")
continue
}
+
s.notify(¬ification{
Method: "trace/new",
- Params: tr,
+ Params: map[string]any{
+ "app_id": sp.AppID,
+ "test_trace": sp.TestTrace,
+ "span": json.RawMessage(data),
+ },
})
}
}
@@ -195,52 +630,65 @@ var _ run.EventListener = (*Server)(nil)
// OnStart notifies active websocket clients about the started run.
func (s *Server) OnStart(r *run.Run) {
- m := &jsonpb.Marshaler{OrigName: true, EmitDefaults: true}
- proc := r.Proc()
- str, err := m.MarshalToString(proc.Meta)
+ status, err := buildAppStatus(r.App, r)
if err != nil {
- log.Error().Err(err).Msg("dash: could not marshal app meta")
+ log.Error().Err(err).Msg("dash: could not build app status")
return
}
+ // Open the browser if needed.
+ browserMode := r.Params.Browser
+ if browserMode == run.BrowserModeAlways || (browserMode == run.BrowserModeAuto && !s.hasClients()) {
+ u := fmt.Sprintf("http://localhost:%d/%s", s.dashPort, r.App.PlatformOrLocalID())
+ browser.Open(u)
+ }
+
s.notify(¬ification{
Method: "process/start",
- Params: map[string]interface{}{
- "appID": r.AppID,
- "pid": r.ID,
- "port": r.Port,
- "meta": json.RawMessage(str),
- },
+ Params: status,
+ })
+}
+
+func (s *Server) OnCompileStart(r *run.Run) {
+ status, err := buildAppStatus(r.App, r)
+ if err != nil {
+ log.Error().Err(err).Msg("dash: could not build app status")
+ return
+ }
+
+ status.Compiling = true
+
+ s.notify(¬ification{
+ Method: "process/compile-start",
+ Params: status,
})
}
// OnReload notifies active websocket clients about the reloaded run.
func (s *Server) OnReload(r *run.Run) {
- m := &jsonpb.Marshaler{OrigName: true, EmitDefaults: true}
- proc := r.Proc()
- str, err := m.MarshalToString(proc.Meta)
+ status, err := buildAppStatus(r.App, r)
if err != nil {
- log.Error().Err(err).Msg("dash: could not marshal app meta")
+ log.Error().Err(err).Msg("dash: could not build app status")
return
}
+
s.notify(¬ification{
Method: "process/reload",
- Params: map[string]interface{}{
- "appID": r.AppID,
- "pid": r.ID,
- "meta": json.RawMessage(str),
- },
+ Params: status,
})
}
// OnStop notifies active websocket clients about the stopped run.
func (s *Server) OnStop(r *run.Run) {
+ status, err := buildAppStatus(r.App, nil)
+ if err != nil {
+ log.Error().Err(err).Msg("dash: could not build app status")
+ return
+ }
+
s.notify(¬ification{
Method: "process/stop",
- Params: map[string]interface{}{
- "appID": r.AppID,
- "pid": r.ID,
- },
+ Params: status,
})
}
@@ -254,6 +702,27 @@ func (s *Server) OnStderr(r *run.Run, out []byte) {
s.onOutput(r, out)
}
+func (s *Server) OnError(r *run.Run, err *errlist.List) {
+ if err == nil {
+ return
+ }
+
+ status, statusErr := buildAppStatus(r.App, nil)
+ if statusErr != nil {
+ log.Error().Err(statusErr).Msg("dash: could not build app status")
+ return
+ }
+
+ err.MakeRelative(r.App.Root(), "")
+
+ status.CompileError = err.Error()
+
+ s.notify(¬ification{
+ Method: "process/compile-error",
+ Params: status,
+ })
+}
+
func (s *Server) onOutput(r *run.Run, out []byte) {
// Copy to a new slice since we cannot retain it after the call ends, and notify is async.
out2 := make([]byte, len(out))
@@ -261,9 +730,151 @@ func (s *Server) onOutput(r *run.Run, out []byte) {
s.notify(¬ification{
Method: "process/output",
Params: map[string]interface{}{
- "appID": r.AppID,
+ "appID": r.App.PlatformOrLocalID(),
"pid": r.ID,
"output": out2,
},
})
}
+
+// makeProtoReplier returns a jsonrpc2.Replier that wraps another replier and
+// serializes any protobuf message in the result with protojson.
+func makeProtoReplier(rep jsonrpc2.Replier) jsonrpc2.Replier {
+ return func(ctx context.Context, result any, err error) error {
+ if err != nil {
+ return rep(ctx, nil, err)
+ }
+ jsonData, err := jsonext.ProtoEncoder.Marshal(result)
+ return rep(ctx, json.RawMessage(jsonData), err)
+ }
+}
+
+// appStatus is the shared data structure to communicate app status to the client.
+//
+// It is mirrored in the frontend at src/lib/client/dev-dash-client.ts as `AppStatus`.
+type appStatus struct {
+ Running bool `json:"running"`
+ Tutorial string `json:"tutorial,omitempty"`
+ AppID string `json:"appID"`
+ PlatformID string `json:"platformID,omitempty"`
+ AppRoot string `json:"appRoot"`
+ PID string `json:"pid,omitempty"`
+ Meta json.RawMessage `json:"meta,omitempty"`
+ Addr string `json:"addr,omitempty"`
+ APIEncoding *encoding.APIEncoding `json:"apiEncoding,omitempty"`
+ Compiling bool `json:"compiling"`
+ CompileError string `json:"compileError,omitempty"`
+}
+
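+// dbMigrationHistory describes the migration state of a single database,
+// with migrations ordered from latest to earliest.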
+type dbMigrationHistory struct {
+ DatabaseName string `json:"databaseName"`
+ Migrations []dbMigration `json:"migrations"`
+}
+
+type dbMigration struct {
+ Filename string `json:"filename"`
+ Number uint64 `json:"number"`
+ Description string `json:"description"`
+ Applied bool `json:"applied"`
+}
+
+func buildAppStatus(app *apps.Instance, runInstance *run.Run) (s appStatus, err error) {
+ // Now try to grab the latest metadata for the app
+ var md *meta.Data
+ if runInstance != nil {
+ proc := runInstance.ProcGroup()
+ if proc != nil {
+ md = proc.Meta
+ }
+ }
+
+ if md == nil {
+ md, err = app.CachedMetadata()
+ if err != nil {
+ return appStatus{}, err
+ }
+ }
+
+ // Convert the metadata into a format we can send to the client
+ mdStr := "null"
+ var apiEnc *encoding.APIEncoding
+ if md != nil {
+ m := &jsonpb.Marshaler{OrigName: true, EmitDefaults: true}
+
+ mdStr, err = m.MarshalToString(md)
+ if err != nil {
+ return appStatus{}, err
+ }
+
+ apiEnc = encoding.DescribeAPI(md)
+ }
+
+ // Build the response
+ resp := appStatus{
+ Running: false,
+ Tutorial: app.Tutorial(),
+ AppID: app.PlatformOrLocalID(),
+ PlatformID: app.PlatformID(),
+ Meta: json.RawMessage(mdStr),
+ AppRoot: app.Root(),
+ APIEncoding: apiEnc,
+ }
+ if runInstance != nil {
+ resp.Running = true
+ resp.PID = runInstance.ID
+ resp.Addr = runInstance.ListenAddr
+ }
+
+ return resp, nil
+}
+
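+// buildDbMigrationStatus collects the migration history for each database in
+// the app metadata, skipping databases not present in the local cluster.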
+func buildDbMigrationStatus(ctx context.Context, appMeta *meta.Data, cluster *sqldb.Cluster) []dbMigrationHistory {
+ var statuses []dbMigrationHistory
+ for _, dbMeta := range appMeta.SqlDatabases {
+ db, ok := cluster.GetDB(dbMeta.Name)
+ if !ok {
+ // Remote database migration status is not supported yet
+ continue
+ }
+ appliedVersions, err := db.ListAppliedMigrations(ctx)
+ if err != nil {
+ log.Error().Msgf("failed to list applied migrations for database %s: %v", dbMeta.Name, err)
+ continue
+ }
+ statuses = append(statuses, buildMigrationHistory(dbMeta, appliedVersions))
+ }
+ return statuses
+}
+
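+// buildMigrationHistory merges the migrations declared in the app metadata
+// with the versions the database reports as applied, latest first.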
+func buildMigrationHistory(dbMeta *meta.SQLDatabase, appliedVersions map[uint64]bool) dbMigrationHistory {
+ history := dbMigrationHistory{
+ DatabaseName: dbMeta.Name,
+ Migrations: []dbMigration{},
+ }
+ // Go over migrations from latest to earliest
+ sortedMigrations := make([]*meta.DBMigration, len(dbMeta.Migrations))
+ copy(sortedMigrations, dbMeta.Migrations)
+ slices.SortStableFunc(sortedMigrations, func(a, b *meta.DBMigration) int {
+ // Compare explicitly to avoid uint64 wrap-around when subtracting.
+ switch {
+ case b.Number > a.Number:
+ return 1
+ case b.Number < a.Number:
+ return -1
+ default:
+ return 0
+ }
+ })
+ implicitlyApplied := false
+ for _, migration := range sortedMigrations {
+ dirty, attempted := appliedVersions[migration.Number]
+ applied := attempted && !dirty
+ // If the database doesn't allow non-sequential migrations,
+ // then any migrations before the last applied will also have
+ // been applied even if we don't see them in the database.
+ if !dbMeta.AllowNonSequentialMigrations && applied {
+ implicitlyApplied = true
+ }
+
+ status := dbMigration{
+ Filename: migration.Filename,
+ Number: migration.Number,
+ Description: migration.Description,
+ Applied: applied || implicitlyApplied,
+ }
+ history.Migrations = append(history.Migrations, status)
+ }
+ return history
+}
diff --git a/cli/daemon/dash/dash_test.go b/cli/daemon/dash/dash_test.go
new file mode 100644
index 0000000000..b5fa38acd9
--- /dev/null
+++ b/cli/daemon/dash/dash_test.go
@@ -0,0 +1,138 @@
+package dash
+
+import (
+ "reflect"
+ "testing"
+
+ meta "encr.dev/proto/encore/parser/meta/v1"
+)
+
+func TestBuildMigrationHistory(t *testing.T) {
+ tests := []struct {
+ name string
+ dbMeta *meta.SQLDatabase
+ appliedVersions map[uint64]bool
+ want dbMigrationHistory
+ }{
+ {
+ name: "sequential migrations all applied cleanly",
+ dbMeta: &meta.SQLDatabase{
+ Name: "test-db",
+ Migrations: []*meta.DBMigration{
+ {Number: 1, Filename: "001.sql", Description: "first"},
+ {Number: 2, Filename: "002.sql", Description: "second"},
+ {Number: 3, Filename: "003.sql", Description: "third"},
+ },
+ AllowNonSequentialMigrations: false,
+ },
+ appliedVersions: map[uint64]bool{
+ 1: false, // clean
+ 2: false, // clean
+ 3: false, // clean
+ },
+ want: dbMigrationHistory{
+ DatabaseName: "test-db",
+ Migrations: []dbMigration{
+ {Number: 3, Filename: "003.sql", Description: "third", Applied: true},
+ {Number: 2, Filename: "002.sql", Description: "second", Applied: true},
+ {Number: 1, Filename: "001.sql", Description: "first", Applied: true},
+ },
+ },
+ },
+ {
+ name: "sequential migrations with dirty migration",
+ dbMeta: &meta.SQLDatabase{
+ Name: "test-db",
+ Migrations: []*meta.DBMigration{
+ {Number: 1, Filename: "001.sql", Description: "first"},
+ {Number: 2, Filename: "002.sql", Description: "second"},
+ {Number: 3, Filename: "003.sql", Description: "third"},
+ },
+ AllowNonSequentialMigrations: false,
+ },
+ appliedVersions: map[uint64]bool{
+ 1: false, // clean
+ 2: true, // dirty
+ },
+ want: dbMigrationHistory{
+ DatabaseName: "test-db",
+ Migrations: []dbMigration{
+ {Number: 3, Filename: "003.sql", Description: "third", Applied: false},
+ {Number: 2, Filename: "002.sql", Description: "second", Applied: false},
+ {Number: 1, Filename: "001.sql", Description: "first", Applied: true},
+ },
+ },
+ },
+ {
+ name: "sequential migrations partially applied",
+ dbMeta: &meta.SQLDatabase{
+ Name: "test-db",
+ Migrations: []*meta.DBMigration{
+ {Number: 1, Filename: "001.sql", Description: "first"},
+ {Number: 2, Filename: "002.sql", Description: "second"},
+ {Number: 3, Filename: "003.sql", Description: "third"},
+ },
+ AllowNonSequentialMigrations: false,
+ },
+ appliedVersions: map[uint64]bool{
+ 1: false, // clean
+ 2: false, // clean
+ },
+ want: dbMigrationHistory{
+ DatabaseName: "test-db",
+ Migrations: []dbMigration{
+ {Number: 3, Filename: "003.sql", Description: "third", Applied: false},
+ {Number: 2, Filename: "002.sql", Description: "second", Applied: true},
+ {Number: 1, Filename: "001.sql", Description: "first", Applied: true},
+ },
+ },
+ },
+ {
+ name: "non-sequential migrations with mix of clean and dirty",
+ dbMeta: &meta.SQLDatabase{
+ Name: "test-db",
+ Migrations: []*meta.DBMigration{
+ {Number: 1, Filename: "001.sql", Description: "first"},
+ {Number: 2, Filename: "002.sql", Description: "second"},
+ {Number: 3, Filename: "003.sql", Description: "third"},
+ },
+ AllowNonSequentialMigrations: true,
+ },
+ appliedVersions: map[uint64]bool{
+ 1: false, // clean
+ 2: true, // dirty
+ 3: false, // clean
+ },
+ want: dbMigrationHistory{
+ DatabaseName: "test-db",
+ Migrations: []dbMigration{
+ {Number: 3, Filename: "003.sql", Description: "third", Applied: true},
+ {Number: 2, Filename: "002.sql", Description: "second", Applied: false},
+ {Number: 1, Filename: "001.sql", Description: "first", Applied: true},
+ },
+ },
+ },
+ {
+ name: "empty migrations list",
+ dbMeta: &meta.SQLDatabase{
+ Name: "test-db",
+ Migrations: []*meta.DBMigration{},
+ AllowNonSequentialMigrations: false,
+ },
+ appliedVersions: map[uint64]bool{},
+ want: dbMigrationHistory{
+ DatabaseName: "test-db",
+ Migrations: []dbMigration{},
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := buildMigrationHistory(tt.dbMeta, tt.appliedVersions)
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("buildMigrationHistory() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/cli/daemon/dash/dashapp/.gitignore b/cli/daemon/dash/dashapp/.gitignore
deleted file mode 100644
index d451ff16c1..0000000000
--- a/cli/daemon/dash/dashapp/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-node_modules
-.DS_Store
-dist
-dist-ssr
-*.local
diff --git a/cli/daemon/dash/dashapp/index.html b/cli/daemon/dash/dashapp/index.html
deleted file mode 100644
index aa72ddc665..0000000000
--- a/cli/daemon/dash/dashapp/index.html
+++ /dev/null
@@ -1,13 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
- <head>
- <meta charset="UTF-8" />
- <meta name="viewport" content="width=device-width, initial-scale=1.0" />
- <title>Encore Dashboard</title>
- </head>
- <body>
- <div id="root"></div>
- <script type="module" src="/src/main.tsx"></script>
- </body>
-</html>
-
diff --git a/cli/daemon/dash/dashapp/package-lock.json b/cli/daemon/dash/dashapp/package-lock.json
deleted file mode 100644
index ef5fc1dd9b..0000000000
--- a/cli/daemon/dash/dashapp/package-lock.json
+++ /dev/null
@@ -1,2883 +0,0 @@
-{
- "name": "dashapp",
- "version": "0.0.0",
- "lockfileVersion": 2,
- "requires": true,
- "packages": {
- "": {
- "version": "0.0.0",
- "dependencies": {
- "@headlessui/react": "^0.2.0",
- "@tailwindcss/forms": "^0.2.1",
- "codemirror": "^5.59.2",
- "events": "^3.2.0",
- "json-rpc-protocol": "^0.13.1",
- "luxon": "^1.25.0",
- "react": "^17.0.0",
- "react-dom": "^17.0.0",
- "react-router-dom": "^5.2.0",
- "tailwindcss": "^2.0.2"
- },
- "devDependencies": {
- "@types/codemirror": "^0.0.108",
- "@types/events": "^3.0.0",
- "@types/luxon": "^1.25.1",
- "@types/node": "^14.14.25",
- "@types/react": "^17.0.0",
- "@types/react-dom": "^17.0.0",
- "@types/react-router-dom": "^5.1.7",
- "@vitejs/plugin-react-refresh": "^1.1.0",
- "autoprefixer": "^10.2.4",
- "postcss": "^8.2.5",
- "typescript": "^4.1.2",
- "vite": "^2.0.0-beta.64"
- }
- },
- "node_modules/@babel/code-frame": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.13.tgz",
- "integrity": "sha512-HV1Cm0Q3ZrpCR93tkWOYiuYIgLxZXZFVG2VgK+MBWjUqZTundupbfx2aXarXuw5Ko5aMcjtJgbSs4vUGBS5v6g==",
- "dev": true,
- "dependencies": {
- "@babel/highlight": "^7.12.13"
- }
- },
- "node_modules/@babel/core": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.13.tgz",
- "integrity": "sha512-BQKE9kXkPlXHPeqissfxo0lySWJcYdEP0hdtJOH/iJfDdhOCcgtNCjftCJg3qqauB4h+lz2N6ixM++b9DN1Tcw==",
- "dev": true,
- "dependencies": {
- "@babel/code-frame": "^7.12.13",
- "@babel/generator": "^7.12.13",
- "@babel/helper-module-transforms": "^7.12.13",
- "@babel/helpers": "^7.12.13",
- "@babel/parser": "^7.12.13",
- "@babel/template": "^7.12.13",
- "@babel/traverse": "^7.12.13",
- "@babel/types": "^7.12.13",
- "convert-source-map": "^1.7.0",
- "debug": "^4.1.0",
- "gensync": "^1.0.0-beta.1",
- "json5": "^2.1.2",
- "lodash": "^4.17.19",
- "semver": "^5.4.1",
- "source-map": "^0.5.0"
- },
- "engines": {
- "node": ">=6.9.0"
- }
- },
- "node_modules/@babel/generator": {
- "version": "7.12.15",
- "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.12.15.tgz",
- "integrity": "sha512-6F2xHxBiFXWNSGb7vyCUTBF8RCLY66rS0zEPcP8t/nQyXjha5EuK4z7H5o7fWG8B4M7y6mqVWq1J+1PuwRhecQ==",
- "dev": true,
- "dependencies": {
- "@babel/types": "^7.12.13",
- "jsesc": "^2.5.1",
- "source-map": "^0.5.0"
- }
- },
- "node_modules/@babel/helper-function-name": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.12.13.tgz",
- "integrity": "sha512-TZvmPn0UOqmvi5G4vvw0qZTpVptGkB1GL61R6lKvrSdIxGm5Pky7Q3fpKiIkQCAtRCBUwB0PaThlx9vebCDSwA==",
- "dev": true,
- "dependencies": {
- "@babel/helper-get-function-arity": "^7.12.13",
- "@babel/template": "^7.12.13",
- "@babel/types": "^7.12.13"
- }
- },
- "node_modules/@babel/helper-get-function-arity": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.12.13.tgz",
- "integrity": "sha512-DjEVzQNz5LICkzN0REdpD5prGoidvbdYk1BVgRUOINaWJP2t6avB27X1guXK1kXNrX0WMfsrm1A/ZBthYuIMQg==",
- "dev": true,
- "dependencies": {
- "@babel/types": "^7.12.13"
- }
- },
- "node_modules/@babel/helper-member-expression-to-functions": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.12.13.tgz",
- "integrity": "sha512-B+7nN0gIL8FZ8SvMcF+EPyB21KnCcZHQZFczCxbiNGV/O0rsrSBlWGLzmtBJ3GMjSVMIm4lpFhR+VdVBuIsUcQ==",
- "dev": true,
- "dependencies": {
- "@babel/types": "^7.12.13"
- }
- },
- "node_modules/@babel/helper-module-imports": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.12.13.tgz",
- "integrity": "sha512-NGmfvRp9Rqxy0uHSSVP+SRIW1q31a7Ji10cLBcqSDUngGentY4FRiHOFZFE1CLU5eiL0oE8reH7Tg1y99TDM/g==",
- "dev": true,
- "dependencies": {
- "@babel/types": "^7.12.13"
- }
- },
- "node_modules/@babel/helper-module-transforms": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.12.13.tgz",
- "integrity": "sha512-acKF7EjqOR67ASIlDTupwkKM1eUisNAjaSduo5Cz+793ikfnpe7p4Q7B7EWU2PCoSTPWsQkR7hRUWEIZPiVLGA==",
- "dev": true,
- "dependencies": {
- "@babel/helper-module-imports": "^7.12.13",
- "@babel/helper-replace-supers": "^7.12.13",
- "@babel/helper-simple-access": "^7.12.13",
- "@babel/helper-split-export-declaration": "^7.12.13",
- "@babel/helper-validator-identifier": "^7.12.11",
- "@babel/template": "^7.12.13",
- "@babel/traverse": "^7.12.13",
- "@babel/types": "^7.12.13",
- "lodash": "^4.17.19"
- }
- },
- "node_modules/@babel/helper-optimise-call-expression": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.12.13.tgz",
- "integrity": "sha512-BdWQhoVJkp6nVjB7nkFWcn43dkprYauqtk++Py2eaf/GRDFm5BxRqEIZCiHlZUGAVmtwKcsVL1dC68WmzeFmiA==",
- "dev": true,
- "dependencies": {
- "@babel/types": "^7.12.13"
- }
- },
- "node_modules/@babel/helper-plugin-utils": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.12.13.tgz",
- "integrity": "sha512-C+10MXCXJLiR6IeG9+Wiejt9jmtFpxUc3MQqCmPY8hfCjyUGl9kT+B2okzEZrtykiwrc4dbCPdDoz0A/HQbDaA==",
- "dev": true
- },
- "node_modules/@babel/helper-replace-supers": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.12.13.tgz",
- "integrity": "sha512-pctAOIAMVStI2TMLhozPKbf5yTEXc0OJa0eENheb4w09SrgOWEs+P4nTOZYJQCqs8JlErGLDPDJTiGIp3ygbLg==",
- "dev": true,
- "dependencies": {
- "@babel/helper-member-expression-to-functions": "^7.12.13",
- "@babel/helper-optimise-call-expression": "^7.12.13",
- "@babel/traverse": "^7.12.13",
- "@babel/types": "^7.12.13"
- }
- },
- "node_modules/@babel/helper-simple-access": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.12.13.tgz",
- "integrity": "sha512-0ski5dyYIHEfwpWGx5GPWhH35j342JaflmCeQmsPWcrOQDtCN6C1zKAVRFVbK53lPW2c9TsuLLSUDf0tIGJ5hA==",
- "dev": true,
- "dependencies": {
- "@babel/types": "^7.12.13"
- }
- },
- "node_modules/@babel/helper-split-export-declaration": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.12.13.tgz",
- "integrity": "sha512-tCJDltF83htUtXx5NLcaDqRmknv652ZWCHyoTETf1CXYJdPC7nohZohjUgieXhv0hTJdRf2FjDueFehdNucpzg==",
- "dev": true,
- "dependencies": {
- "@babel/types": "^7.12.13"
- }
- },
- "node_modules/@babel/helper-validator-identifier": {
- "version": "7.12.11",
- "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.12.11.tgz",
- "integrity": "sha512-np/lG3uARFybkoHokJUmf1QfEvRVCPbmQeUQpKow5cQ3xWrV9i3rUHodKDJPQfTVX61qKi+UdYk8kik84n7XOw==",
- "dev": true
- },
- "node_modules/@babel/helpers": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.12.13.tgz",
- "integrity": "sha512-oohVzLRZ3GQEk4Cjhfs9YkJA4TdIDTObdBEZGrd6F/T0GPSnuV6l22eMcxlvcvzVIPH3VTtxbseudM1zIE+rPQ==",
- "dev": true,
- "dependencies": {
- "@babel/template": "^7.12.13",
- "@babel/traverse": "^7.12.13",
- "@babel/types": "^7.12.13"
- }
- },
- "node_modules/@babel/highlight": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.12.13.tgz",
- "integrity": "sha512-kocDQvIbgMKlWxXe9fof3TQ+gkIPOUSEYhJjqUjvKMez3krV7vbzYCDq39Oj11UAVK7JqPVGQPlgE85dPNlQww==",
- "dev": true,
- "dependencies": {
- "@babel/helper-validator-identifier": "^7.12.11",
- "chalk": "^2.0.0",
- "js-tokens": "^4.0.0"
- }
- },
- "node_modules/@babel/parser": {
- "version": "7.12.15",
- "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.12.15.tgz",
- "integrity": "sha512-AQBOU2Z9kWwSZMd6lNjCX0GUgFonL1wAM1db8L8PMk9UDaGsRCArBkU4Sc+UCM3AE4hjbXx+h58Lb3QT4oRmrA==",
- "dev": true,
- "bin": {
- "parser": "bin/babel-parser.js"
- },
- "engines": {
- "node": ">=6.0.0"
- }
- },
- "node_modules/@babel/plugin-syntax-import-meta": {
- "version": "7.10.4",
- "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz",
- "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==",
- "dev": true,
- "dependencies": {
- "@babel/helper-plugin-utils": "^7.10.4"
- }
- },
- "node_modules/@babel/runtime": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.12.13.tgz",
- "integrity": "sha512-8+3UMPBrjFa/6TtKi/7sehPKqfAm4g6K+YQjyyFOLUTxzOngcRZTlAVY8sc2CORJYqdHQY8gRPHmn+qo15rCBw==",
- "dependencies": {
- "regenerator-runtime": "^0.13.4"
- }
- },
- "node_modules/@babel/template": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.12.13.tgz",
- "integrity": "sha512-/7xxiGA57xMo/P2GVvdEumr8ONhFOhfgq2ihK3h1e6THqzTAkHbkXgB0xI9yeTfIUoH3+oAeHhqm/I43OTbbjA==",
- "dev": true,
- "dependencies": {
- "@babel/code-frame": "^7.12.13",
- "@babel/parser": "^7.12.13",
- "@babel/types": "^7.12.13"
- }
- },
- "node_modules/@babel/traverse": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.12.13.tgz",
- "integrity": "sha512-3Zb4w7eE/OslI0fTp8c7b286/cQps3+vdLW3UcwC8VSJC6GbKn55aeVVu2QJNuCDoeKyptLOFrPq8WqZZBodyA==",
- "dev": true,
- "dependencies": {
- "@babel/code-frame": "^7.12.13",
- "@babel/generator": "^7.12.13",
- "@babel/helper-function-name": "^7.12.13",
- "@babel/helper-split-export-declaration": "^7.12.13",
- "@babel/parser": "^7.12.13",
- "@babel/types": "^7.12.13",
- "debug": "^4.1.0",
- "globals": "^11.1.0",
- "lodash": "^4.17.19"
- }
- },
- "node_modules/@babel/types": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.12.13.tgz",
- "integrity": "sha512-oKrdZTld2im1z8bDwTOQvUbxKwE+854zc16qWZQlcTqMN00pWxHQ4ZeOq0yDMnisOpRykH2/5Qqcrk/OlbAjiQ==",
- "dev": true,
- "dependencies": {
- "@babel/helper-validator-identifier": "^7.12.11",
- "lodash": "^4.17.19",
- "to-fast-properties": "^2.0.0"
- }
- },
- "node_modules/@fullhuman/postcss-purgecss": {
- "version": "3.1.3",
- "resolved": "https://registry.npmjs.org/@fullhuman/postcss-purgecss/-/postcss-purgecss-3.1.3.tgz",
- "integrity": "sha512-kwOXw8fZ0Lt1QmeOOrd+o4Ibvp4UTEBFQbzvWldjlKv5n+G9sXfIPn1hh63IQIL8K8vbvv1oYMJiIUbuy9bGaA==",
- "dependencies": {
- "purgecss": "^3.1.3"
- }
- },
- "node_modules/@headlessui/react": {
- "version": "0.2.0",
- "resolved": "https://registry.npmjs.org/@headlessui/react/-/react-0.2.0.tgz",
- "integrity": "sha512-YV+vF+QhTRcspydPdHF3ZXe+FkOiJpRdqMjjFIIX9bSdT2O2T7GurgKQdGgamNUM+B99MZBOTRqxS8Dlh485eg==",
- "engines": {
- "node": ">=10"
- }
- },
- "node_modules/@tailwindcss/forms": {
- "version": "0.2.1",
- "resolved": "https://registry.npmjs.org/@tailwindcss/forms/-/forms-0.2.1.tgz",
- "integrity": "sha512-czfvEdY+J2Ogfd6RUSr/ZSUmDxTujr34M++YLnp2cCPC3oJ4kFvFMaRXA6cEXKw7F1hJuapdjXRjsXIEXGgORg==",
- "dependencies": {
- "mini-svg-data-uri": "^1.2.3"
- }
- },
- "node_modules/@types/codemirror": {
- "version": "0.0.108",
- "resolved": "https://registry.npmjs.org/@types/codemirror/-/codemirror-0.0.108.tgz",
- "integrity": "sha512-3FGFcus0P7C2UOGCNUVENqObEb4SFk+S8Dnxq7K6aIsLVs/vDtlangl3PEO0ykaKXyK56swVF6Nho7VsA44uhw==",
- "dev": true,
- "dependencies": {
- "@types/tern": "*"
- }
- },
- "node_modules/@types/estree": {
- "version": "0.0.46",
- "resolved": "https://registry.npmjs.org/@types/estree/-/estree-0.0.46.tgz",
- "integrity": "sha512-laIjwTQaD+5DukBZaygQ79K1Z0jb1bPEMRrkXSLjtCcZm+abyp5YbrqpSLzD42FwWW6gK/aS4NYpJ804nG2brg==",
- "dev": true
- },
- "node_modules/@types/events": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/@types/events/-/events-3.0.0.tgz",
- "integrity": "sha512-EaObqwIvayI5a8dCzhFrjKzVwKLxjoG9T6Ppd5CEo07LRKfQ8Yokw54r5+Wq7FaBQ+yXRvQAYPrHwya1/UFt9g==",
- "dev": true
- },
- "node_modules/@types/history": {
- "version": "4.7.8",
- "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.8.tgz",
- "integrity": "sha512-S78QIYirQcUoo6UJZx9CSP0O2ix9IaeAXwQi26Rhr/+mg7qqPy8TzaxHSUut7eGjL8WmLccT7/MXf304WjqHcA==",
- "dev": true
- },
- "node_modules/@types/luxon": {
- "version": "1.25.1",
- "resolved": "https://registry.npmjs.org/@types/luxon/-/luxon-1.25.1.tgz",
- "integrity": "sha512-enkMO4WJcbdkhK1eZrItF616buau02wtrSN+DDt9Qj9U23boSAXNJm0fMlgwpTDaRHq3S0D/SPIRbxy4YxBjiA==",
- "dev": true
- },
- "node_modules/@types/node": {
- "version": "14.14.25",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-14.14.25.tgz",
- "integrity": "sha512-EPpXLOVqDvisVxtlbvzfyqSsFeQxltFbluZNRndIb8tr9KiBnYNLzrc1N3pyKUCww2RNrfHDViqDWWE1LCJQtQ==",
- "dev": true
- },
- "node_modules/@types/prop-types": {
- "version": "15.7.3",
- "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.3.tgz",
- "integrity": "sha512-KfRL3PuHmqQLOG+2tGpRO26Ctg+Cq1E01D2DMriKEATHgWLfeNDmq9e29Q9WIky0dQ3NPkd1mzYH8Lm936Z9qw==",
- "dev": true
- },
- "node_modules/@types/react": {
- "version": "17.0.1",
- "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.1.tgz",
- "integrity": "sha512-w8t9f53B2ei4jeOqf/gxtc2Sswnc3LBK5s0DyJcg5xd10tMHXts2N31cKjWfH9IC/JvEPa/YF1U4YeP1t4R6HQ==",
- "dev": true,
- "dependencies": {
- "@types/prop-types": "*",
- "csstype": "^3.0.2"
- }
- },
- "node_modules/@types/react-dom": {
- "version": "17.0.0",
- "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.0.tgz",
- "integrity": "sha512-lUqY7OlkF/RbNtD5nIq7ot8NquXrdFrjSOR6+w9a9RFQevGi1oZO1dcJbXMeONAPKtZ2UrZOEJ5UOCVsxbLk/g==",
- "dev": true,
- "dependencies": {
- "@types/react": "*"
- }
- },
- "node_modules/@types/react-router": {
- "version": "5.1.11",
- "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.11.tgz",
- "integrity": "sha512-ofHbZMlp0Y2baOHgsWBQ4K3AttxY61bDMkwTiBOkPg7U6C/3UwwB5WaIx28JmSVi/eX3uFEMRo61BV22fDQIvg==",
- "dev": true,
- "dependencies": {
- "@types/history": "*",
- "@types/react": "*"
- }
- },
- "node_modules/@types/react-router-dom": {
- "version": "5.1.7",
- "resolved": "https://registry.npmjs.org/@types/react-router-dom/-/react-router-dom-5.1.7.tgz",
- "integrity": "sha512-D5mHD6TbdV/DNHYsnwBTv+y73ei+mMjrkGrla86HthE4/PVvL1J94Bu3qABU+COXzpL23T1EZapVVpwHuBXiUg==",
- "dev": true,
- "dependencies": {
- "@types/history": "*",
- "@types/react": "*",
- "@types/react-router": "*"
- }
- },
- "node_modules/@types/tern": {
- "version": "0.23.3",
- "resolved": "https://registry.npmjs.org/@types/tern/-/tern-0.23.3.tgz",
- "integrity": "sha512-imDtS4TAoTcXk0g7u4kkWqedB3E4qpjXzCpD2LU5M5NAXHzCDsypyvXSaG7mM8DKYkCRa7tFp4tS/lp/Wo7Q3w==",
- "dev": true,
- "dependencies": {
- "@types/estree": "*"
- }
- },
- "node_modules/@vitejs/plugin-react-refresh": {
- "version": "1.2.2",
- "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-refresh/-/plugin-react-refresh-1.2.2.tgz",
- "integrity": "sha512-MEVSqncF/u1nvfeZsBJtPc3pLZWccN77CjY0itW7/Vji5BMmttW25a1kjSmooE+4JK4kaF3ElwF3LbV2kiVZWw==",
- "dev": true,
- "dependencies": {
- "@babel/core": "^7.12.10",
- "@babel/plugin-syntax-import-meta": "^7.10.4",
- "react-refresh": "^0.9.0"
- },
- "engines": {
- "node": ">=12.0.0"
- }
- },
- "node_modules/acorn": {
- "version": "7.4.1",
- "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz",
- "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==",
- "bin": {
- "acorn": "bin/acorn"
- },
- "engines": {
- "node": ">=0.4.0"
- }
- },
- "node_modules/acorn-node": {
- "version": "1.8.2",
- "resolved": "https://registry.npmjs.org/acorn-node/-/acorn-node-1.8.2.tgz",
- "integrity": "sha512-8mt+fslDufLYntIoPAaIMUe/lrbrehIiwmR3t2k9LljIzoigEPF27eLk2hy8zSGzmR/ogr7zbRKINMo1u0yh5A==",
- "dependencies": {
- "acorn": "^7.0.0",
- "acorn-walk": "^7.0.0",
- "xtend": "^4.0.2"
- }
- },
- "node_modules/acorn-walk": {
- "version": "7.2.0",
- "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-7.2.0.tgz",
- "integrity": "sha512-OPdCF6GsMIP+Az+aWfAAOEt2/+iVDKE7oy6lJ098aoe59oAmK76qV6Gw60SbZ8jHuG2wH058GF4pLFbYamYrVA==",
- "engines": {
- "node": ">=0.4.0"
- }
- },
- "node_modules/ansi-styles": {
- "version": "3.2.1",
- "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
- "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
- "dependencies": {
- "color-convert": "^1.9.0"
- },
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/at-least-node": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz",
- "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==",
- "engines": {
- "node": ">= 4.0.0"
- }
- },
- "node_modules/autoprefixer": {
- "version": "10.2.4",
- "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.2.4.tgz",
- "integrity": "sha512-DCCdUQiMD+P/as8m3XkeTUkUKuuRqLGcwD0nll7wevhqoJfMRpJlkFd1+MQh1pvupjiQuip42lc/VFvfUTMSKw==",
- "dev": true,
- "dependencies": {
- "browserslist": "^4.16.1",
- "caniuse-lite": "^1.0.30001181",
- "colorette": "^1.2.1",
- "fraction.js": "^4.0.13",
- "normalize-range": "^0.1.2",
- "postcss-value-parser": "^4.1.0"
- },
- "bin": {
- "autoprefixer": "bin/autoprefixer"
- },
- "engines": {
- "node": "^10 || ^12 || >=14"
- }
- },
- "node_modules/balanced-match": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz",
- "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c="
- },
- "node_modules/brace-expansion": {
- "version": "1.1.11",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
- "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
- "dependencies": {
- "balanced-match": "^1.0.0",
- "concat-map": "0.0.1"
- }
- },
- "node_modules/browserslist": {
- "version": "4.16.3",
- "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.16.3.tgz",
- "integrity": "sha512-vIyhWmIkULaq04Gt93txdh+j02yX/JzlyhLYbV3YQCn/zvES3JnY7TifHHvvr1w5hTDluNKMkV05cs4vy8Q7sw==",
- "dev": true,
- "dependencies": {
- "caniuse-lite": "^1.0.30001181",
- "colorette": "^1.2.1",
- "electron-to-chromium": "^1.3.649",
- "escalade": "^3.1.1",
- "node-releases": "^1.1.70"
- },
- "bin": {
- "browserslist": "cli.js"
- },
- "engines": {
- "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
- }
- },
- "node_modules/bytes": {
- "version": "3.1.0",
- "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz",
- "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==",
- "engines": {
- "node": ">= 0.8"
- }
- },
- "node_modules/camelcase-css": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz",
- "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==",
- "engines": {
- "node": ">= 6"
- }
- },
- "node_modules/caniuse-lite": {
- "version": "1.0.30001185",
- "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001185.tgz",
- "integrity": "sha512-Fpi4kVNtNvJ15H0F6vwmXtb3tukv3Zg3qhKkOGUq7KJ1J6b9kf4dnNgtEAFXhRsJo0gNj9W60+wBvn0JcTvdTg==",
- "dev": true
- },
- "node_modules/chalk": {
- "version": "2.4.2",
- "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
- "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
- "dependencies": {
- "ansi-styles": "^3.2.1",
- "escape-string-regexp": "^1.0.5",
- "supports-color": "^5.3.0"
- },
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/codemirror": {
- "version": "5.59.2",
- "resolved": "https://registry.npmjs.org/codemirror/-/codemirror-5.59.2.tgz",
- "integrity": "sha512-/D5PcsKyzthtSy2NNKCyJi3b+htRkoKv3idswR/tR6UAvMNKA7SrmyZy6fOONJxSRs1JlUWEDAbxqfdArbK8iA=="
- },
- "node_modules/color": {
- "version": "3.1.3",
- "resolved": "https://registry.npmjs.org/color/-/color-3.1.3.tgz",
- "integrity": "sha512-xgXAcTHa2HeFCGLE9Xs/R82hujGtu9Jd9x4NW3T34+OMs7VoPsjwzRczKHvTAHeJwWFwX5j15+MgAppE8ztObQ==",
- "dependencies": {
- "color-convert": "^1.9.1",
- "color-string": "^1.5.4"
- }
- },
- "node_modules/color-convert": {
- "version": "1.9.3",
- "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
- "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
- "dependencies": {
- "color-name": "1.1.3"
- }
- },
- "node_modules/color-name": {
- "version": "1.1.3",
- "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
- "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU="
- },
- "node_modules/color-string": {
- "version": "1.5.4",
- "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.4.tgz",
- "integrity": "sha512-57yF5yt8Xa3czSEW1jfQDE79Idk0+AkN/4KWad6tbdxUmAs3MvjxlWSWD4deYytcRfoZ9nhKyFl1kj5tBvidbw==",
- "dependencies": {
- "color-name": "^1.0.0",
- "simple-swizzle": "^0.2.2"
- }
- },
- "node_modules/colorette": {
- "version": "1.2.1",
- "resolved": "https://registry.npmjs.org/colorette/-/colorette-1.2.1.tgz",
- "integrity": "sha512-puCDz0CzydiSYOrnXpz/PKd69zRrribezjtE9yd4zvytoRc8+RY/KJPvtPFKZS3E3wP6neGyMe0vOTlHO5L3Pw=="
- },
- "node_modules/commander": {
- "version": "6.2.1",
- "resolved": "https://registry.npmjs.org/commander/-/commander-6.2.1.tgz",
- "integrity": "sha512-U7VdrJFnJgo4xjrHpTzu0yrHPGImdsmD95ZlgYSEajAn2JKzDhDTPG9kBTefmObL2w/ngeZnilk+OV9CG3d7UA==",
- "engines": {
- "node": ">= 6"
- }
- },
- "node_modules/concat-map": {
- "version": "0.0.1",
- "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
- "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s="
- },
- "node_modules/convert-source-map": {
- "version": "1.7.0",
- "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.7.0.tgz",
- "integrity": "sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA==",
- "dev": true,
- "dependencies": {
- "safe-buffer": "~5.1.1"
- }
- },
- "node_modules/css-unit-converter": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/css-unit-converter/-/css-unit-converter-1.1.2.tgz",
- "integrity": "sha512-IiJwMC8rdZE0+xiEZHeru6YoONC4rfPMqGm2W85jMIbkFvv5nFTwJVFHam2eFrN6txmoUYFAFXiv8ICVeTO0MA=="
- },
- "node_modules/cssesc": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz",
- "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==",
- "bin": {
- "cssesc": "bin/cssesc"
- },
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/csstype": {
- "version": "3.0.6",
- "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.0.6.tgz",
- "integrity": "sha512-+ZAmfyWMT7TiIlzdqJgjMb7S4f1beorDbWbsocyK4RaiqA5RTX3K14bnBWmmA9QEM0gRdsjyyrEmcyga8Zsxmw==",
- "dev": true
- },
- "node_modules/debug": {
- "version": "4.3.1",
- "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
- "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
- "dev": true,
- "dependencies": {
- "ms": "2.1.2"
- },
- "engines": {
- "node": ">=6.0"
- }
- },
- "node_modules/defined": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/defined/-/defined-1.0.0.tgz",
- "integrity": "sha1-yY2bzvdWdBiOEQlpFRGZ45sfppM="
- },
- "node_modules/detective": {
- "version": "5.2.0",
- "resolved": "https://registry.npmjs.org/detective/-/detective-5.2.0.tgz",
- "integrity": "sha512-6SsIx+nUUbuK0EthKjv0zrdnajCCXVYGmbYYiYjFVpzcjwEs/JMDZ8tPRG29J/HhN56t3GJp2cGSWDRjjot8Pg==",
- "dependencies": {
- "acorn-node": "^1.6.1",
- "defined": "^1.0.0",
- "minimist": "^1.1.1"
- },
- "bin": {
- "detective": "bin/detective.js"
- },
- "engines": {
- "node": ">=0.8.0"
- }
- },
- "node_modules/didyoumean": {
- "version": "1.2.1",
- "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.1.tgz",
- "integrity": "sha1-6S7f2tplN9SE1zwBcv0eugxJdv8="
- },
- "node_modules/electron-to-chromium": {
- "version": "1.3.657",
- "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.657.tgz",
- "integrity": "sha512-/9ROOyvEflEbaZFUeGofD+Tqs/WynbSTbNgNF+/TJJxH1ePD/e6VjZlDJpW3FFFd3nj5l3Hd8ki2vRwy+gyRFw==",
- "dev": true
- },
- "node_modules/esbuild": {
- "version": "0.8.42",
- "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.8.42.tgz",
- "integrity": "sha512-zUtj5RMqROCCCH0vV/a7cd8YQg8I0GWBhV3A3PklWRT+oM/YwVbnrtFnITzE1otGdnXplWHWdZ4OcYiV0PN+JQ==",
- "dev": true,
- "bin": {
- "esbuild": "bin/esbuild"
- }
- },
- "node_modules/escalade": {
- "version": "3.1.1",
- "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
- "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==",
- "dev": true,
- "engines": {
- "node": ">=6"
- }
- },
- "node_modules/escape-string-regexp": {
- "version": "1.0.5",
- "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
- "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=",
- "engines": {
- "node": ">=0.8.0"
- }
- },
- "node_modules/events": {
- "version": "3.2.0",
- "resolved": "https://registry.npmjs.org/events/-/events-3.2.0.tgz",
- "integrity": "sha512-/46HWwbfCX2xTawVfkKLGxMifJYQBWMwY1mjywRtb4c9x8l5NP3KoJtnIOiL1hfdRkIuYhETxQlo62IF8tcnlg==",
- "engines": {
- "node": ">=0.8.x"
- }
- },
- "node_modules/fraction.js": {
- "version": "4.0.13",
- "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.0.13.tgz",
- "integrity": "sha512-E1fz2Xs9ltlUp+qbiyx9wmt2n9dRzPsS11Jtdb8D2o+cC7wr9xkkKsVKJuBX0ST+LVS+LhLO+SbLJNtfWcJvXA==",
- "dev": true,
- "engines": {
- "node": "*"
- }
- },
- "node_modules/fs-extra": {
- "version": "9.1.0",
- "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz",
- "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==",
- "dependencies": {
- "at-least-node": "^1.0.0",
- "graceful-fs": "^4.2.0",
- "jsonfile": "^6.0.1",
- "universalify": "^2.0.0"
- },
- "engines": {
- "node": ">=10"
- }
- },
- "node_modules/fs.realpath": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
- "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8="
- },
- "node_modules/fsevents": {
- "version": "2.1.3",
- "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.1.3.tgz",
- "integrity": "sha512-Auw9a4AxqWpa9GUfj370BMPzzyncfBABW8Mab7BGWBYDj4Isgq+cDKtx0i6u9jcX9pQDnswsaaOTgTmA5pEjuQ==",
- "dev": true,
- "optional": true,
- "os": [
- "darwin"
- ],
- "engines": {
- "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
- }
- },
- "node_modules/function-bind": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz",
- "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A=="
- },
- "node_modules/gensync": {
- "version": "1.0.0-beta.2",
- "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
- "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
- "dev": true,
- "engines": {
- "node": ">=6.9.0"
- }
- },
- "node_modules/glob": {
- "version": "7.1.6",
- "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz",
- "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==",
- "dependencies": {
- "fs.realpath": "^1.0.0",
- "inflight": "^1.0.4",
- "inherits": "2",
- "minimatch": "^3.0.4",
- "once": "^1.3.0",
- "path-is-absolute": "^1.0.0"
- },
- "engines": {
- "node": "*"
- }
- },
- "node_modules/globals": {
- "version": "11.12.0",
- "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz",
- "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==",
- "dev": true,
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/graceful-fs": {
- "version": "4.2.5",
- "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.5.tgz",
- "integrity": "sha512-kBBSQbz2K0Nyn+31j/w36fUfxkBW9/gfwRWdUY1ULReH3iokVJgddZAFcD1D0xlgTmFxJCbUkUclAlc6/IDJkw=="
- },
- "node_modules/has": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz",
- "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==",
- "dependencies": {
- "function-bind": "^1.1.1"
- },
- "engines": {
- "node": ">= 0.4.0"
- }
- },
- "node_modules/has-flag": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
- "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=",
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/history": {
- "version": "4.10.1",
- "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz",
- "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==",
- "dependencies": {
- "@babel/runtime": "^7.1.2",
- "loose-envify": "^1.2.0",
- "resolve-pathname": "^3.0.0",
- "tiny-invariant": "^1.0.2",
- "tiny-warning": "^1.0.0",
- "value-equal": "^1.0.1"
- }
- },
- "node_modules/hoist-non-react-statics": {
- "version": "3.3.2",
- "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz",
- "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==",
- "dependencies": {
- "react-is": "^16.7.0"
- }
- },
- "node_modules/html-tags": {
- "version": "3.1.0",
- "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.1.0.tgz",
- "integrity": "sha512-1qYz89hW3lFDEazhjW0yVAV87lw8lVkrJocr72XmBkMKsoSVJCQx3W8BXsC7hO2qAt8BoVjYjtAcZ9perqGnNg==",
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/indexes-of": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/indexes-of/-/indexes-of-1.0.1.tgz",
- "integrity": "sha1-8w9xbI4r00bHtn0985FVZqfAVgc="
- },
- "node_modules/inflight": {
- "version": "1.0.6",
- "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
- "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=",
- "dependencies": {
- "once": "^1.3.0",
- "wrappy": "1"
- }
- },
- "node_modules/inherits": {
- "version": "2.0.4",
- "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
- "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
- },
- "node_modules/is-arrayish": {
- "version": "0.3.2",
- "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz",
- "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ=="
- },
- "node_modules/is-core-module": {
- "version": "2.2.0",
- "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.2.0.tgz",
- "integrity": "sha512-XRAfAdyyY5F5cOXn7hYQDqh2Xmii+DEfIcQGxK/uNwMHhIkPWO0g8msXcbzLe+MpGoR951MlqM/2iIlU4vKDdQ==",
- "dependencies": {
- "has": "^1.0.3"
- }
- },
- "node_modules/isarray": {
- "version": "0.0.1",
- "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
- "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8="
- },
- "node_modules/js-tokens": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
- "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="
- },
- "node_modules/jsesc": {
- "version": "2.5.2",
- "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz",
- "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==",
- "dev": true,
- "bin": {
- "jsesc": "bin/jsesc"
- },
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/json-rpc-protocol": {
- "version": "0.13.1",
- "resolved": "https://registry.npmjs.org/json-rpc-protocol/-/json-rpc-protocol-0.13.1.tgz",
- "integrity": "sha512-gdb0TnNf0ITRjLm0QGNODgK1E6ORLhe+6L+RV8owg4X3d6x8jAfyHQC+xMG4T/qU2SPaNLpav29QRLC+3oF6gg==",
- "dependencies": {
- "make-error": "^1.3.0"
- },
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/json5": {
- "version": "2.2.0",
- "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.0.tgz",
- "integrity": "sha512-f+8cldu7X/y7RAJurMEJmdoKXGB/X550w2Nr3tTbezL6RwEE/iMcm+tZnXeoZtKuOq6ft8+CqzEkrIgx1fPoQA==",
- "dev": true,
- "dependencies": {
- "minimist": "^1.2.5"
- },
- "bin": {
- "json5": "lib/cli.js"
- },
- "engines": {
- "node": ">=6"
- }
- },
- "node_modules/jsonfile": {
- "version": "6.1.0",
- "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
- "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==",
- "dependencies": {
- "graceful-fs": "^4.1.6",
- "universalify": "^2.0.0"
- }
- },
- "node_modules/lodash": {
- "version": "4.17.20",
- "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.20.tgz",
- "integrity": "sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA=="
- },
- "node_modules/lodash.toarray": {
- "version": "4.4.0",
- "resolved": "https://registry.npmjs.org/lodash.toarray/-/lodash.toarray-4.4.0.tgz",
- "integrity": "sha1-JMS/zWsvuji/0FlNsRedjptlZWE="
- },
- "node_modules/loose-envify": {
- "version": "1.4.0",
- "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
- "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
- "dependencies": {
- "js-tokens": "^3.0.0 || ^4.0.0"
- },
- "bin": {
- "loose-envify": "cli.js"
- }
- },
- "node_modules/luxon": {
- "version": "1.25.0",
- "resolved": "https://registry.npmjs.org/luxon/-/luxon-1.25.0.tgz",
- "integrity": "sha512-hEgLurSH8kQRjY6i4YLey+mcKVAWXbDNlZRmM6AgWDJ1cY3atl8Ztf5wEY7VBReFbmGnwQPz7KYJblL8B2k0jQ==",
- "engines": {
- "node": "*"
- }
- },
- "node_modules/make-error": {
- "version": "1.3.6",
- "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz",
- "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw=="
- },
- "node_modules/mini-create-react-context": {
- "version": "0.4.1",
- "resolved": "https://registry.npmjs.org/mini-create-react-context/-/mini-create-react-context-0.4.1.tgz",
- "integrity": "sha512-YWCYEmd5CQeHGSAKrYvXgmzzkrvssZcuuQDDeqkT+PziKGMgE+0MCCtcKbROzocGBG1meBLl2FotlRwf4gAzbQ==",
- "dependencies": {
- "@babel/runtime": "^7.12.1",
- "tiny-warning": "^1.0.3"
- }
- },
- "node_modules/mini-svg-data-uri": {
- "version": "1.2.3",
- "resolved": "https://registry.npmjs.org/mini-svg-data-uri/-/mini-svg-data-uri-1.2.3.tgz",
- "integrity": "sha512-zd6KCAyXgmq6FV1mR10oKXYtvmA9vRoB6xPSTUJTbFApCtkefDnYueVR1gkof3KcdLZo1Y8mjF2DFmQMIxsHNQ=="
- },
- "node_modules/minimatch": {
- "version": "3.0.4",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz",
- "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==",
- "dependencies": {
- "brace-expansion": "^1.1.7"
- },
- "engines": {
- "node": "*"
- }
- },
- "node_modules/minimist": {
- "version": "1.2.5",
- "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz",
- "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw=="
- },
- "node_modules/modern-normalize": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/modern-normalize/-/modern-normalize-1.0.0.tgz",
- "integrity": "sha512-1lM+BMLGuDfsdwf3rsgBSrxJwAZHFIrQ8YR61xIqdHo0uNKI9M52wNpHSrliZATJp51On6JD0AfRxd4YGSU0lw==",
- "engines": {
- "node": ">=6"
- }
- },
- "node_modules/ms": {
- "version": "2.1.2",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
- "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
- "dev": true
- },
- "node_modules/nanoid": {
- "version": "3.1.20",
- "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.20.tgz",
- "integrity": "sha512-a1cQNyczgKbLX9jwbS/+d7W8fX/RfgYR7lVWwWOGIPNgK2m0MWvrGF6/m4kk6U3QcFMnZf3RIhL0v2Jgh/0Uxw==",
- "bin": {
- "nanoid": "bin/nanoid.cjs"
- },
- "engines": {
- "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
- }
- },
- "node_modules/node-emoji": {
- "version": "1.10.0",
- "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-1.10.0.tgz",
- "integrity": "sha512-Yt3384If5H6BYGVHiHwTL+99OzJKHhgp82S8/dktEK73T26BazdgZ4JZh92xSVtGNJvz9UbXdNAc5hcrXV42vw==",
- "dependencies": {
- "lodash.toarray": "^4.4.0"
- }
- },
- "node_modules/node-releases": {
- "version": "1.1.70",
- "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.70.tgz",
- "integrity": "sha512-Slf2s69+2/uAD79pVVQo8uSiC34+g8GWY8UH2Qtqv34ZfhYrxpYpfzs9Js9d6O0mbDmALuxaTlplnBTnSELcrw==",
- "dev": true
- },
- "node_modules/normalize-range": {
- "version": "0.1.2",
- "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz",
- "integrity": "sha1-LRDAa9/TEuqXd2laTShDlFa3WUI=",
- "dev": true,
- "engines": {
- "node": ">=0.10.0"
- }
- },
- "node_modules/object-assign": {
- "version": "4.1.1",
- "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
- "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=",
- "engines": {
- "node": ">=0.10.0"
- }
- },
- "node_modules/object-hash": {
- "version": "2.1.1",
- "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-2.1.1.tgz",
- "integrity": "sha512-VOJmgmS+7wvXf8CjbQmimtCnEx3IAoLxI3fp2fbWehxrWBcAQFbk+vcwb6vzR0VZv/eNCJ/27j151ZTwqW/JeQ==",
- "engines": {
- "node": ">= 6"
- }
- },
- "node_modules/once": {
- "version": "1.4.0",
- "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
- "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=",
- "dependencies": {
- "wrappy": "1"
- }
- },
- "node_modules/path-is-absolute": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
- "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=",
- "engines": {
- "node": ">=0.10.0"
- }
- },
- "node_modules/path-parse": {
- "version": "1.0.6",
- "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz",
- "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw=="
- },
- "node_modules/path-to-regexp": {
- "version": "1.8.0",
- "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz",
- "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==",
- "dependencies": {
- "isarray": "0.0.1"
- }
- },
- "node_modules/postcss": {
- "version": "8.2.5",
- "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.2.5.tgz",
- "integrity": "sha512-wMcb7BpDcm3gxQOQx46NDNT36Kk0Ao6PJLLI2ed5vehbbbxCEuslSQzbQ2sfSKy+gkYxhWcGWSeaK+gwm4KIZg==",
- "dependencies": {
- "colorette": "^1.2.1",
- "nanoid": "^3.1.20",
- "source-map": "^0.6.1"
- },
- "engines": {
- "node": "^10 || ^12 || >=14"
- }
- },
- "node_modules/postcss-functions": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/postcss-functions/-/postcss-functions-3.0.0.tgz",
- "integrity": "sha1-DpTQFERwCkgd4g3k1V+yZAVkJQ4=",
- "dependencies": {
- "glob": "^7.1.2",
- "object-assign": "^4.1.1",
- "postcss": "^6.0.9",
- "postcss-value-parser": "^3.3.0"
- }
- },
- "node_modules/postcss-functions/node_modules/postcss": {
- "version": "6.0.23",
- "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.23.tgz",
- "integrity": "sha512-soOk1h6J3VMTZtVeVpv15/Hpdl2cBLX3CAw4TAbkpTJiNPk9YP/zWcD1ND+xEtvyuuvKzbxliTOIyvkSeSJ6ag==",
- "dependencies": {
- "chalk": "^2.4.1",
- "source-map": "^0.6.1",
- "supports-color": "^5.4.0"
- },
- "engines": {
- "node": ">=4.0.0"
- }
- },
- "node_modules/postcss-functions/node_modules/postcss-value-parser": {
- "version": "3.3.1",
- "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
- "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
- },
- "node_modules/postcss-functions/node_modules/source-map": {
- "version": "0.6.1",
- "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
- "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
- "engines": {
- "node": ">=0.10.0"
- }
- },
- "node_modules/postcss-js": {
- "version": "3.0.3",
- "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-3.0.3.tgz",
- "integrity": "sha512-gWnoWQXKFw65Hk/mi2+WTQTHdPD5UJdDXZmX073EY/B3BWnYjO4F4t0VneTCnCGQ5E5GsCdMkzPaTXwl3r5dJw==",
- "dependencies": {
- "camelcase-css": "^2.0.1",
- "postcss": "^8.1.6"
- },
- "engines": {
- "node": ">=10.0"
- }
- },
- "node_modules/postcss-nested": {
- "version": "5.0.3",
- "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-5.0.3.tgz",
- "integrity": "sha512-R2LHPw+u5hFfDgJG748KpGbJyTv7Yr33/2tIMWxquYuHTd9EXu27PYnKi7BxMXLtzKC0a0WVsqHtd7qIluQu/g==",
- "dependencies": {
- "postcss-selector-parser": "^6.0.4"
- },
- "engines": {
- "node": ">=10.0"
- }
- },
- "node_modules/postcss-selector-parser": {
- "version": "6.0.4",
- "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.4.tgz",
- "integrity": "sha512-gjMeXBempyInaBqpp8gODmwZ52WaYsVOsfr4L4lDQ7n3ncD6mEyySiDtgzCT+NYC0mmeOLvtsF8iaEf0YT6dBw==",
- "dependencies": {
- "cssesc": "^3.0.0",
- "indexes-of": "^1.0.1",
- "uniq": "^1.0.1",
- "util-deprecate": "^1.0.2"
- },
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/postcss-value-parser": {
- "version": "4.1.0",
- "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.1.0.tgz",
- "integrity": "sha512-97DXOFbQJhk71ne5/Mt6cOu6yxsSfM0QGQyl0L25Gca4yGWEGJaig7l7gbCX623VqTBNGLRLaVUCnNkcedlRSQ=="
- },
- "node_modules/postcss/node_modules/source-map": {
- "version": "0.6.1",
- "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
- "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
- "engines": {
- "node": ">=0.10.0"
- }
- },
- "node_modules/pretty-hrtime": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/pretty-hrtime/-/pretty-hrtime-1.0.3.tgz",
- "integrity": "sha1-t+PqQkNaTJsnWdmeDyAesZWALuE=",
- "engines": {
- "node": ">= 0.8"
- }
- },
- "node_modules/prop-types": {
- "version": "15.7.2",
- "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.7.2.tgz",
- "integrity": "sha512-8QQikdH7//R2vurIJSutZ1smHYTcLpRWEOlHnzcWHmBYrOGUysKwSsrC89BCiFj3CbrfJ/nXFdJepOVrY1GCHQ==",
- "dependencies": {
- "loose-envify": "^1.4.0",
- "object-assign": "^4.1.1",
- "react-is": "^16.8.1"
- }
- },
- "node_modules/purgecss": {
- "version": "3.1.3",
- "resolved": "https://registry.npmjs.org/purgecss/-/purgecss-3.1.3.tgz",
- "integrity": "sha512-hRSLN9mguJ2lzlIQtW4qmPS2kh6oMnA9RxdIYK8sz18QYqd6ePp4GNDl18oWHA1f2v2NEQIh51CO8s/E3YGckQ==",
- "dependencies": {
- "commander": "^6.0.0",
- "glob": "^7.0.0",
- "postcss": "^8.2.1",
- "postcss-selector-parser": "^6.0.2"
- },
- "bin": {
- "purgecss": "bin/purgecss.js"
- }
- },
- "node_modules/react": {
- "version": "17.0.1",
- "resolved": "https://registry.npmjs.org/react/-/react-17.0.1.tgz",
- "integrity": "sha512-lG9c9UuMHdcAexXtigOZLX8exLWkW0Ku29qPRU8uhF2R9BN96dLCt0psvzPLlHc5OWkgymP3qwTRgbnw5BKx3w==",
- "dependencies": {
- "loose-envify": "^1.1.0",
- "object-assign": "^4.1.1"
- },
- "engines": {
- "node": ">=0.10.0"
- }
- },
- "node_modules/react-dom": {
- "version": "17.0.1",
- "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-17.0.1.tgz",
- "integrity": "sha512-6eV150oJZ9U2t9svnsspTMrWNyHc6chX0KzDeAOXftRa8bNeOKTTfCJ7KorIwenkHd2xqVTBTCZd79yk/lx/Ug==",
- "dependencies": {
- "loose-envify": "^1.1.0",
- "object-assign": "^4.1.1",
- "scheduler": "^0.20.1"
- }
- },
- "node_modules/react-is": {
- "version": "16.13.1",
- "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",
- "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ=="
- },
- "node_modules/react-refresh": {
- "version": "0.9.0",
- "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.9.0.tgz",
- "integrity": "sha512-Gvzk7OZpiqKSkxsQvO/mbTN1poglhmAV7gR/DdIrRrSMXraRQQlfikRJOr3Nb9GTMPC5kof948Zy6jJZIFtDvQ==",
- "dev": true,
- "engines": {
- "node": ">=0.10.0"
- }
- },
- "node_modules/react-router": {
- "version": "5.2.0",
- "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.2.0.tgz",
- "integrity": "sha512-smz1DUuFHRKdcJC0jobGo8cVbhO3x50tCL4icacOlcwDOEQPq4TMqwx3sY1TP+DvtTgz4nm3thuo7A+BK2U0Dw==",
- "dependencies": {
- "@babel/runtime": "^7.1.2",
- "history": "^4.9.0",
- "hoist-non-react-statics": "^3.1.0",
- "loose-envify": "^1.3.1",
- "mini-create-react-context": "^0.4.0",
- "path-to-regexp": "^1.7.0",
- "prop-types": "^15.6.2",
- "react-is": "^16.6.0",
- "tiny-invariant": "^1.0.2",
- "tiny-warning": "^1.0.0"
- }
- },
- "node_modules/react-router-dom": {
- "version": "5.2.0",
- "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-5.2.0.tgz",
- "integrity": "sha512-gxAmfylo2QUjcwxI63RhQ5G85Qqt4voZpUXSEqCwykV0baaOTQDR1f0PmY8AELqIyVc0NEZUj0Gov5lNGcXgsA==",
- "dependencies": {
- "@babel/runtime": "^7.1.2",
- "history": "^4.9.0",
- "loose-envify": "^1.3.1",
- "prop-types": "^15.6.2",
- "react-router": "5.2.0",
- "tiny-invariant": "^1.0.2",
- "tiny-warning": "^1.0.0"
- }
- },
- "node_modules/reduce-css-calc": {
- "version": "2.1.8",
- "resolved": "https://registry.npmjs.org/reduce-css-calc/-/reduce-css-calc-2.1.8.tgz",
- "integrity": "sha512-8liAVezDmUcH+tdzoEGrhfbGcP7nOV4NkGE3a74+qqvE7nt9i4sKLGBuZNOnpI4WiGksiNPklZxva80061QiPg==",
- "dependencies": {
- "css-unit-converter": "^1.1.1",
- "postcss-value-parser": "^3.3.0"
- }
- },
- "node_modules/reduce-css-calc/node_modules/postcss-value-parser": {
- "version": "3.3.1",
- "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
- "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
- },
- "node_modules/regenerator-runtime": {
- "version": "0.13.7",
- "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.7.tgz",
- "integrity": "sha512-a54FxoJDIr27pgf7IgeQGxmqUNYrcV338lf/6gH456HZ/PhX+5BcwHXG9ajESmwe6WRO0tAzRUrRmNONWgkrew=="
- },
- "node_modules/resolve": {
- "version": "1.19.0",
- "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.19.0.tgz",
- "integrity": "sha512-rArEXAgsBG4UgRGcynxWIWKFvh/XZCcS8UJdHhwy91zwAvCZIbcs+vAbflgBnNjYMs/i/i+/Ux6IZhML1yPvxg==",
- "dependencies": {
- "is-core-module": "^2.1.0",
- "path-parse": "^1.0.6"
- }
- },
- "node_modules/resolve-pathname": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz",
- "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng=="
- },
- "node_modules/rollup": {
- "version": "2.38.5",
- "resolved": "https://registry.npmjs.org/rollup/-/rollup-2.38.5.tgz",
- "integrity": "sha512-VoWt8DysFGDVRGWuHTqZzT02J0ASgjVq/hPs9QcBOGMd7B+jfTr/iqMVEyOi901rE3xq+Deq66GzIT1yt7sGwQ==",
- "dev": true,
- "bin": {
- "rollup": "dist/bin/rollup"
- },
- "engines": {
- "node": ">=10.0.0"
- },
- "optionalDependencies": {
- "fsevents": "~2.3.1"
- }
- },
- "node_modules/rollup/node_modules/fsevents": {
- "version": "2.3.2",
- "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz",
- "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==",
- "dev": true,
- "optional": true,
- "os": [
- "darwin"
- ],
- "engines": {
- "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
- }
- },
- "node_modules/safe-buffer": {
- "version": "5.1.2",
- "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
- "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==",
- "dev": true
- },
- "node_modules/scheduler": {
- "version": "0.20.1",
- "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.20.1.tgz",
- "integrity": "sha512-LKTe+2xNJBNxu/QhHvDR14wUXHRQbVY5ZOYpOGWRzhydZUqrLb2JBvLPY7cAqFmqrWuDED0Mjk7013SZiOz6Bw==",
- "dependencies": {
- "loose-envify": "^1.1.0",
- "object-assign": "^4.1.1"
- }
- },
- "node_modules/semver": {
- "version": "5.7.1",
- "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
- "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==",
- "dev": true,
- "bin": {
- "semver": "bin/semver"
- }
- },
- "node_modules/simple-swizzle": {
- "version": "0.2.2",
- "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz",
- "integrity": "sha1-pNprY1/8zMoz9w0Xy5JZLeleVXo=",
- "dependencies": {
- "is-arrayish": "^0.3.1"
- }
- },
- "node_modules/source-map": {
- "version": "0.5.7",
- "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
- "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=",
- "dev": true,
- "engines": {
- "node": ">=0.10.0"
- }
- },
- "node_modules/supports-color": {
- "version": "5.5.0",
- "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
- "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
- "dependencies": {
- "has-flag": "^3.0.0"
- },
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/tailwindcss": {
- "version": "2.0.2",
- "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-2.0.2.tgz",
- "integrity": "sha512-nO9JRE1pO7SF9RnYAl6g7uzeHdrmKAFqNjT9NtZUfxqimJZAOOLOEyIEUiMq12+xIc7mC2Ey3Vf90XjHpWKfbw==",
- "dependencies": {
- "@fullhuman/postcss-purgecss": "^3.0.0",
- "bytes": "^3.0.0",
- "chalk": "^4.1.0",
- "color": "^3.1.3",
- "detective": "^5.2.0",
- "didyoumean": "^1.2.1",
- "fs-extra": "^9.0.1",
- "html-tags": "^3.1.0",
- "lodash": "^4.17.20",
- "modern-normalize": "^1.0.0",
- "node-emoji": "^1.8.1",
- "object-hash": "^2.0.3",
- "postcss-functions": "^3",
- "postcss-js": "^3.0.3",
- "postcss-nested": "^5.0.1",
- "postcss-selector-parser": "^6.0.4",
- "postcss-value-parser": "^4.1.0",
- "pretty-hrtime": "^1.0.3",
- "reduce-css-calc": "^2.1.6",
- "resolve": "^1.19.0"
- },
- "bin": {
- "tailwind": "lib/cli.js",
- "tailwindcss": "lib/cli.js"
- },
- "engines": {
- "node": ">=12.13.0"
- }
- },
- "node_modules/tailwindcss/node_modules/ansi-styles": {
- "version": "4.3.0",
- "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
- "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
- "dependencies": {
- "color-convert": "^2.0.1"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/tailwindcss/node_modules/chalk": {
- "version": "4.1.0",
- "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz",
- "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==",
- "dependencies": {
- "ansi-styles": "^4.1.0",
- "supports-color": "^7.1.0"
- },
- "engines": {
- "node": ">=10"
- }
- },
- "node_modules/tailwindcss/node_modules/color-convert": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
- "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
- "dependencies": {
- "color-name": "~1.1.4"
- },
- "engines": {
- "node": ">=7.0.0"
- }
- },
- "node_modules/tailwindcss/node_modules/color-name": {
- "version": "1.1.4",
- "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
- "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
- },
- "node_modules/tailwindcss/node_modules/has-flag": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
- "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/tailwindcss/node_modules/supports-color": {
- "version": "7.2.0",
- "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
- "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
- "dependencies": {
- "has-flag": "^4.0.0"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/tiny-invariant": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.1.0.tgz",
- "integrity": "sha512-ytxQvrb1cPc9WBEI/HSeYYoGD0kWnGEOR8RY6KomWLBVhqz0RgTwVO9dLrGz7dC+nN9llyI7OKAgRq8Vq4ZBSw=="
- },
- "node_modules/tiny-warning": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz",
- "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA=="
- },
- "node_modules/to-fast-properties": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
- "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=",
- "dev": true,
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/typescript": {
- "version": "4.1.3",
- "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.1.3.tgz",
- "integrity": "sha512-B3ZIOf1IKeH2ixgHhj6la6xdwR9QrLC5d1VKeCSY4tvkqhF2eqd9O7txNlS0PO3GrBAFIdr3L1ndNwteUbZLYg==",
- "dev": true,
- "bin": {
- "tsc": "bin/tsc",
- "tsserver": "bin/tsserver"
- },
- "engines": {
- "node": ">=4.2.0"
- }
- },
- "node_modules/uniq": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/uniq/-/uniq-1.0.1.tgz",
- "integrity": "sha1-sxxa6CVIRKOoKBVBzisEuGWnNP8="
- },
- "node_modules/universalify": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz",
- "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==",
- "engines": {
- "node": ">= 10.0.0"
- }
- },
- "node_modules/util-deprecate": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
- "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8="
- },
- "node_modules/value-equal": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz",
- "integrity": "sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw=="
- },
- "node_modules/vite": {
- "version": "2.0.0-beta.65",
- "resolved": "https://registry.npmjs.org/vite/-/vite-2.0.0-beta.65.tgz",
- "integrity": "sha512-mdHNTP6fGeb8m8lWAM3UbSPw1+un1lUv0i4MQJcNiK2/P01RHIY02VjQeXBv3NemkExkgLji88LN9ySFMUQpIw==",
- "dev": true,
- "dependencies": {
- "esbuild": "^0.8.34",
- "postcss": "^8.2.1",
- "resolve": "^1.19.0",
- "rollup": "^2.35.1"
- },
- "bin": {
- "vite": "bin/vite.js"
- },
- "engines": {
- "node": ">=12.0.0"
- },
- "optionalDependencies": {
- "fsevents": "~2.1.2"
- }
- },
- "node_modules/wrappy": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
- "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8="
- },
- "node_modules/xtend": {
- "version": "4.0.2",
- "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
- "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==",
- "engines": {
- "node": ">=0.4"
- }
- }
- },
- "dependencies": {
- "@babel/code-frame": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.13.tgz",
- "integrity": "sha512-HV1Cm0Q3ZrpCR93tkWOYiuYIgLxZXZFVG2VgK+MBWjUqZTundupbfx2aXarXuw5Ko5aMcjtJgbSs4vUGBS5v6g==",
- "dev": true,
- "requires": {
- "@babel/highlight": "^7.12.13"
- }
- },
- "@babel/core": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.13.tgz",
- "integrity": "sha512-BQKE9kXkPlXHPeqissfxo0lySWJcYdEP0hdtJOH/iJfDdhOCcgtNCjftCJg3qqauB4h+lz2N6ixM++b9DN1Tcw==",
- "dev": true,
- "requires": {
- "@babel/code-frame": "^7.12.13",
- "@babel/generator": "^7.12.13",
- "@babel/helper-module-transforms": "^7.12.13",
- "@babel/helpers": "^7.12.13",
- "@babel/parser": "^7.12.13",
- "@babel/template": "^7.12.13",
- "@babel/traverse": "^7.12.13",
- "@babel/types": "^7.12.13",
- "convert-source-map": "^1.7.0",
- "debug": "^4.1.0",
- "gensync": "^1.0.0-beta.1",
- "json5": "^2.1.2",
- "lodash": "^4.17.19",
- "semver": "^5.4.1",
- "source-map": "^0.5.0"
- }
- },
- "@babel/generator": {
- "version": "7.12.15",
- "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.12.15.tgz",
- "integrity": "sha512-6F2xHxBiFXWNSGb7vyCUTBF8RCLY66rS0zEPcP8t/nQyXjha5EuK4z7H5o7fWG8B4M7y6mqVWq1J+1PuwRhecQ==",
- "dev": true,
- "requires": {
- "@babel/types": "^7.12.13",
- "jsesc": "^2.5.1",
- "source-map": "^0.5.0"
- }
- },
- "@babel/helper-function-name": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.12.13.tgz",
- "integrity": "sha512-TZvmPn0UOqmvi5G4vvw0qZTpVptGkB1GL61R6lKvrSdIxGm5Pky7Q3fpKiIkQCAtRCBUwB0PaThlx9vebCDSwA==",
- "dev": true,
- "requires": {
- "@babel/helper-get-function-arity": "^7.12.13",
- "@babel/template": "^7.12.13",
- "@babel/types": "^7.12.13"
- }
- },
- "@babel/helper-get-function-arity": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.12.13.tgz",
- "integrity": "sha512-DjEVzQNz5LICkzN0REdpD5prGoidvbdYk1BVgRUOINaWJP2t6avB27X1guXK1kXNrX0WMfsrm1A/ZBthYuIMQg==",
- "dev": true,
- "requires": {
- "@babel/types": "^7.12.13"
- }
- },
- "@babel/helper-member-expression-to-functions": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.12.13.tgz",
- "integrity": "sha512-B+7nN0gIL8FZ8SvMcF+EPyB21KnCcZHQZFczCxbiNGV/O0rsrSBlWGLzmtBJ3GMjSVMIm4lpFhR+VdVBuIsUcQ==",
- "dev": true,
- "requires": {
- "@babel/types": "^7.12.13"
- }
- },
- "@babel/helper-module-imports": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.12.13.tgz",
- "integrity": "sha512-NGmfvRp9Rqxy0uHSSVP+SRIW1q31a7Ji10cLBcqSDUngGentY4FRiHOFZFE1CLU5eiL0oE8reH7Tg1y99TDM/g==",
- "dev": true,
- "requires": {
- "@babel/types": "^7.12.13"
- }
- },
- "@babel/helper-module-transforms": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.12.13.tgz",
- "integrity": "sha512-acKF7EjqOR67ASIlDTupwkKM1eUisNAjaSduo5Cz+793ikfnpe7p4Q7B7EWU2PCoSTPWsQkR7hRUWEIZPiVLGA==",
- "dev": true,
- "requires": {
- "@babel/helper-module-imports": "^7.12.13",
- "@babel/helper-replace-supers": "^7.12.13",
- "@babel/helper-simple-access": "^7.12.13",
- "@babel/helper-split-export-declaration": "^7.12.13",
- "@babel/helper-validator-identifier": "^7.12.11",
- "@babel/template": "^7.12.13",
- "@babel/traverse": "^7.12.13",
- "@babel/types": "^7.12.13",
- "lodash": "^4.17.19"
- }
- },
- "@babel/helper-optimise-call-expression": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.12.13.tgz",
- "integrity": "sha512-BdWQhoVJkp6nVjB7nkFWcn43dkprYauqtk++Py2eaf/GRDFm5BxRqEIZCiHlZUGAVmtwKcsVL1dC68WmzeFmiA==",
- "dev": true,
- "requires": {
- "@babel/types": "^7.12.13"
- }
- },
- "@babel/helper-plugin-utils": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.12.13.tgz",
- "integrity": "sha512-C+10MXCXJLiR6IeG9+Wiejt9jmtFpxUc3MQqCmPY8hfCjyUGl9kT+B2okzEZrtykiwrc4dbCPdDoz0A/HQbDaA==",
- "dev": true
- },
- "@babel/helper-replace-supers": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.12.13.tgz",
- "integrity": "sha512-pctAOIAMVStI2TMLhozPKbf5yTEXc0OJa0eENheb4w09SrgOWEs+P4nTOZYJQCqs8JlErGLDPDJTiGIp3ygbLg==",
- "dev": true,
- "requires": {
- "@babel/helper-member-expression-to-functions": "^7.12.13",
- "@babel/helper-optimise-call-expression": "^7.12.13",
- "@babel/traverse": "^7.12.13",
- "@babel/types": "^7.12.13"
- }
- },
- "@babel/helper-simple-access": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.12.13.tgz",
- "integrity": "sha512-0ski5dyYIHEfwpWGx5GPWhH35j342JaflmCeQmsPWcrOQDtCN6C1zKAVRFVbK53lPW2c9TsuLLSUDf0tIGJ5hA==",
- "dev": true,
- "requires": {
- "@babel/types": "^7.12.13"
- }
- },
- "@babel/helper-split-export-declaration": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.12.13.tgz",
- "integrity": "sha512-tCJDltF83htUtXx5NLcaDqRmknv652ZWCHyoTETf1CXYJdPC7nohZohjUgieXhv0hTJdRf2FjDueFehdNucpzg==",
- "dev": true,
- "requires": {
- "@babel/types": "^7.12.13"
- }
- },
- "@babel/helper-validator-identifier": {
- "version": "7.12.11",
- "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.12.11.tgz",
- "integrity": "sha512-np/lG3uARFybkoHokJUmf1QfEvRVCPbmQeUQpKow5cQ3xWrV9i3rUHodKDJPQfTVX61qKi+UdYk8kik84n7XOw==",
- "dev": true
- },
- "@babel/helpers": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.12.13.tgz",
- "integrity": "sha512-oohVzLRZ3GQEk4Cjhfs9YkJA4TdIDTObdBEZGrd6F/T0GPSnuV6l22eMcxlvcvzVIPH3VTtxbseudM1zIE+rPQ==",
- "dev": true,
- "requires": {
- "@babel/template": "^7.12.13",
- "@babel/traverse": "^7.12.13",
- "@babel/types": "^7.12.13"
- }
- },
- "@babel/highlight": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.12.13.tgz",
- "integrity": "sha512-kocDQvIbgMKlWxXe9fof3TQ+gkIPOUSEYhJjqUjvKMez3krV7vbzYCDq39Oj11UAVK7JqPVGQPlgE85dPNlQww==",
- "dev": true,
- "requires": {
- "@babel/helper-validator-identifier": "^7.12.11",
- "chalk": "^2.0.0",
- "js-tokens": "^4.0.0"
- }
- },
- "@babel/parser": {
- "version": "7.12.15",
- "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.12.15.tgz",
- "integrity": "sha512-AQBOU2Z9kWwSZMd6lNjCX0GUgFonL1wAM1db8L8PMk9UDaGsRCArBkU4Sc+UCM3AE4hjbXx+h58Lb3QT4oRmrA==",
- "dev": true
- },
- "@babel/plugin-syntax-import-meta": {
- "version": "7.10.4",
- "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz",
- "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==",
- "dev": true,
- "requires": {
- "@babel/helper-plugin-utils": "^7.10.4"
- }
- },
- "@babel/runtime": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.12.13.tgz",
- "integrity": "sha512-8+3UMPBrjFa/6TtKi/7sehPKqfAm4g6K+YQjyyFOLUTxzOngcRZTlAVY8sc2CORJYqdHQY8gRPHmn+qo15rCBw==",
- "requires": {
- "regenerator-runtime": "^0.13.4"
- }
- },
- "@babel/template": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.12.13.tgz",
- "integrity": "sha512-/7xxiGA57xMo/P2GVvdEumr8ONhFOhfgq2ihK3h1e6THqzTAkHbkXgB0xI9yeTfIUoH3+oAeHhqm/I43OTbbjA==",
- "dev": true,
- "requires": {
- "@babel/code-frame": "^7.12.13",
- "@babel/parser": "^7.12.13",
- "@babel/types": "^7.12.13"
- }
- },
- "@babel/traverse": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.12.13.tgz",
- "integrity": "sha512-3Zb4w7eE/OslI0fTp8c7b286/cQps3+vdLW3UcwC8VSJC6GbKn55aeVVu2QJNuCDoeKyptLOFrPq8WqZZBodyA==",
- "dev": true,
- "requires": {
- "@babel/code-frame": "^7.12.13",
- "@babel/generator": "^7.12.13",
- "@babel/helper-function-name": "^7.12.13",
- "@babel/helper-split-export-declaration": "^7.12.13",
- "@babel/parser": "^7.12.13",
- "@babel/types": "^7.12.13",
- "debug": "^4.1.0",
- "globals": "^11.1.0",
- "lodash": "^4.17.19"
- }
- },
- "@babel/types": {
- "version": "7.12.13",
- "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.12.13.tgz",
- "integrity": "sha512-oKrdZTld2im1z8bDwTOQvUbxKwE+854zc16qWZQlcTqMN00pWxHQ4ZeOq0yDMnisOpRykH2/5Qqcrk/OlbAjiQ==",
- "dev": true,
- "requires": {
- "@babel/helper-validator-identifier": "^7.12.11",
- "lodash": "^4.17.19",
- "to-fast-properties": "^2.0.0"
- }
- },
- "@fullhuman/postcss-purgecss": {
- "version": "3.1.3",
- "resolved": "https://registry.npmjs.org/@fullhuman/postcss-purgecss/-/postcss-purgecss-3.1.3.tgz",
- "integrity": "sha512-kwOXw8fZ0Lt1QmeOOrd+o4Ibvp4UTEBFQbzvWldjlKv5n+G9sXfIPn1hh63IQIL8K8vbvv1oYMJiIUbuy9bGaA==",
- "requires": {
- "purgecss": "^3.1.3"
- }
- },
- "@headlessui/react": {
- "version": "0.2.0",
- "resolved": "https://registry.npmjs.org/@headlessui/react/-/react-0.2.0.tgz",
- "integrity": "sha512-YV+vF+QhTRcspydPdHF3ZXe+FkOiJpRdqMjjFIIX9bSdT2O2T7GurgKQdGgamNUM+B99MZBOTRqxS8Dlh485eg=="
- },
- "@tailwindcss/forms": {
- "version": "0.2.1",
- "resolved": "https://registry.npmjs.org/@tailwindcss/forms/-/forms-0.2.1.tgz",
- "integrity": "sha512-czfvEdY+J2Ogfd6RUSr/ZSUmDxTujr34M++YLnp2cCPC3oJ4kFvFMaRXA6cEXKw7F1hJuapdjXRjsXIEXGgORg==",
- "requires": {
- "mini-svg-data-uri": "^1.2.3"
- }
- },
- "@types/codemirror": {
- "version": "0.0.108",
- "resolved": "https://registry.npmjs.org/@types/codemirror/-/codemirror-0.0.108.tgz",
- "integrity": "sha512-3FGFcus0P7C2UOGCNUVENqObEb4SFk+S8Dnxq7K6aIsLVs/vDtlangl3PEO0ykaKXyK56swVF6Nho7VsA44uhw==",
- "dev": true,
- "requires": {
- "@types/tern": "*"
- }
- },
- "@types/estree": {
- "version": "0.0.46",
- "resolved": "https://registry.npmjs.org/@types/estree/-/estree-0.0.46.tgz",
- "integrity": "sha512-laIjwTQaD+5DukBZaygQ79K1Z0jb1bPEMRrkXSLjtCcZm+abyp5YbrqpSLzD42FwWW6gK/aS4NYpJ804nG2brg==",
- "dev": true
- },
- "@types/events": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/@types/events/-/events-3.0.0.tgz",
- "integrity": "sha512-EaObqwIvayI5a8dCzhFrjKzVwKLxjoG9T6Ppd5CEo07LRKfQ8Yokw54r5+Wq7FaBQ+yXRvQAYPrHwya1/UFt9g==",
- "dev": true
- },
- "@types/history": {
- "version": "4.7.8",
- "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.8.tgz",
- "integrity": "sha512-S78QIYirQcUoo6UJZx9CSP0O2ix9IaeAXwQi26Rhr/+mg7qqPy8TzaxHSUut7eGjL8WmLccT7/MXf304WjqHcA==",
- "dev": true
- },
- "@types/luxon": {
- "version": "1.25.1",
- "resolved": "https://registry.npmjs.org/@types/luxon/-/luxon-1.25.1.tgz",
- "integrity": "sha512-enkMO4WJcbdkhK1eZrItF616buau02wtrSN+DDt9Qj9U23boSAXNJm0fMlgwpTDaRHq3S0D/SPIRbxy4YxBjiA==",
- "dev": true
- },
- "@types/node": {
- "version": "14.14.25",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-14.14.25.tgz",
- "integrity": "sha512-EPpXLOVqDvisVxtlbvzfyqSsFeQxltFbluZNRndIb8tr9KiBnYNLzrc1N3pyKUCww2RNrfHDViqDWWE1LCJQtQ==",
- "dev": true
- },
- "@types/prop-types": {
- "version": "15.7.3",
- "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.3.tgz",
- "integrity": "sha512-KfRL3PuHmqQLOG+2tGpRO26Ctg+Cq1E01D2DMriKEATHgWLfeNDmq9e29Q9WIky0dQ3NPkd1mzYH8Lm936Z9qw==",
- "dev": true
- },
- "@types/react": {
- "version": "17.0.1",
- "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.1.tgz",
- "integrity": "sha512-w8t9f53B2ei4jeOqf/gxtc2Sswnc3LBK5s0DyJcg5xd10tMHXts2N31cKjWfH9IC/JvEPa/YF1U4YeP1t4R6HQ==",
- "dev": true,
- "requires": {
- "@types/prop-types": "*",
- "csstype": "^3.0.2"
- }
- },
- "@types/react-dom": {
- "version": "17.0.0",
- "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.0.tgz",
- "integrity": "sha512-lUqY7OlkF/RbNtD5nIq7ot8NquXrdFrjSOR6+w9a9RFQevGi1oZO1dcJbXMeONAPKtZ2UrZOEJ5UOCVsxbLk/g==",
- "dev": true,
- "requires": {
- "@types/react": "*"
- }
- },
- "@types/react-router": {
- "version": "5.1.11",
- "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.11.tgz",
- "integrity": "sha512-ofHbZMlp0Y2baOHgsWBQ4K3AttxY61bDMkwTiBOkPg7U6C/3UwwB5WaIx28JmSVi/eX3uFEMRo61BV22fDQIvg==",
- "dev": true,
- "requires": {
- "@types/history": "*",
- "@types/react": "*"
- }
- },
- "@types/react-router-dom": {
- "version": "5.1.7",
- "resolved": "https://registry.npmjs.org/@types/react-router-dom/-/react-router-dom-5.1.7.tgz",
- "integrity": "sha512-D5mHD6TbdV/DNHYsnwBTv+y73ei+mMjrkGrla86HthE4/PVvL1J94Bu3qABU+COXzpL23T1EZapVVpwHuBXiUg==",
- "dev": true,
- "requires": {
- "@types/history": "*",
- "@types/react": "*",
- "@types/react-router": "*"
- }
- },
- "@types/tern": {
- "version": "0.23.3",
- "resolved": "https://registry.npmjs.org/@types/tern/-/tern-0.23.3.tgz",
- "integrity": "sha512-imDtS4TAoTcXk0g7u4kkWqedB3E4qpjXzCpD2LU5M5NAXHzCDsypyvXSaG7mM8DKYkCRa7tFp4tS/lp/Wo7Q3w==",
- "dev": true,
- "requires": {
- "@types/estree": "*"
- }
- },
- "@vitejs/plugin-react-refresh": {
- "version": "1.2.2",
- "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-refresh/-/plugin-react-refresh-1.2.2.tgz",
- "integrity": "sha512-MEVSqncF/u1nvfeZsBJtPc3pLZWccN77CjY0itW7/Vji5BMmttW25a1kjSmooE+4JK4kaF3ElwF3LbV2kiVZWw==",
- "dev": true,
- "requires": {
- "@babel/core": "^7.12.10",
- "@babel/plugin-syntax-import-meta": "^7.10.4",
- "react-refresh": "^0.9.0"
- }
- },
- "acorn": {
- "version": "7.4.1",
- "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz",
- "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A=="
- },
- "acorn-node": {
- "version": "1.8.2",
- "resolved": "https://registry.npmjs.org/acorn-node/-/acorn-node-1.8.2.tgz",
- "integrity": "sha512-8mt+fslDufLYntIoPAaIMUe/lrbrehIiwmR3t2k9LljIzoigEPF27eLk2hy8zSGzmR/ogr7zbRKINMo1u0yh5A==",
- "requires": {
- "acorn": "^7.0.0",
- "acorn-walk": "^7.0.0",
- "xtend": "^4.0.2"
- }
- },
- "acorn-walk": {
- "version": "7.2.0",
- "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-7.2.0.tgz",
- "integrity": "sha512-OPdCF6GsMIP+Az+aWfAAOEt2/+iVDKE7oy6lJ098aoe59oAmK76qV6Gw60SbZ8jHuG2wH058GF4pLFbYamYrVA=="
- },
- "ansi-styles": {
- "version": "3.2.1",
- "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
- "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
- "requires": {
- "color-convert": "^1.9.0"
- }
- },
- "at-least-node": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz",
- "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg=="
- },
- "autoprefixer": {
- "version": "10.2.4",
- "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.2.4.tgz",
- "integrity": "sha512-DCCdUQiMD+P/as8m3XkeTUkUKuuRqLGcwD0nll7wevhqoJfMRpJlkFd1+MQh1pvupjiQuip42lc/VFvfUTMSKw==",
- "dev": true,
- "requires": {
- "browserslist": "^4.16.1",
- "caniuse-lite": "^1.0.30001181",
- "colorette": "^1.2.1",
- "fraction.js": "^4.0.13",
- "normalize-range": "^0.1.2",
- "postcss-value-parser": "^4.1.0"
- }
- },
- "balanced-match": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz",
- "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c="
- },
- "brace-expansion": {
- "version": "1.1.11",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
- "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
- "requires": {
- "balanced-match": "^1.0.0",
- "concat-map": "0.0.1"
- }
- },
- "browserslist": {
- "version": "4.16.3",
- "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.16.3.tgz",
- "integrity": "sha512-vIyhWmIkULaq04Gt93txdh+j02yX/JzlyhLYbV3YQCn/zvES3JnY7TifHHvvr1w5hTDluNKMkV05cs4vy8Q7sw==",
- "dev": true,
- "requires": {
- "caniuse-lite": "^1.0.30001181",
- "colorette": "^1.2.1",
- "electron-to-chromium": "^1.3.649",
- "escalade": "^3.1.1",
- "node-releases": "^1.1.70"
- }
- },
- "bytes": {
- "version": "3.1.0",
- "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz",
- "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg=="
- },
- "camelcase-css": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz",
- "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA=="
- },
- "caniuse-lite": {
- "version": "1.0.30001185",
- "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001185.tgz",
- "integrity": "sha512-Fpi4kVNtNvJ15H0F6vwmXtb3tukv3Zg3qhKkOGUq7KJ1J6b9kf4dnNgtEAFXhRsJo0gNj9W60+wBvn0JcTvdTg==",
- "dev": true
- },
- "chalk": {
- "version": "2.4.2",
- "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
- "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
- "requires": {
- "ansi-styles": "^3.2.1",
- "escape-string-regexp": "^1.0.5",
- "supports-color": "^5.3.0"
- }
- },
- "codemirror": {
- "version": "5.59.2",
- "resolved": "https://registry.npmjs.org/codemirror/-/codemirror-5.59.2.tgz",
- "integrity": "sha512-/D5PcsKyzthtSy2NNKCyJi3b+htRkoKv3idswR/tR6UAvMNKA7SrmyZy6fOONJxSRs1JlUWEDAbxqfdArbK8iA=="
- },
- "color": {
- "version": "3.1.3",
- "resolved": "https://registry.npmjs.org/color/-/color-3.1.3.tgz",
- "integrity": "sha512-xgXAcTHa2HeFCGLE9Xs/R82hujGtu9Jd9x4NW3T34+OMs7VoPsjwzRczKHvTAHeJwWFwX5j15+MgAppE8ztObQ==",
- "requires": {
- "color-convert": "^1.9.1",
- "color-string": "^1.5.4"
- }
- },
- "color-convert": {
- "version": "1.9.3",
- "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
- "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
- "requires": {
- "color-name": "1.1.3"
- }
- },
- "color-name": {
- "version": "1.1.3",
- "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
- "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU="
- },
- "color-string": {
- "version": "1.5.4",
- "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.4.tgz",
- "integrity": "sha512-57yF5yt8Xa3czSEW1jfQDE79Idk0+AkN/4KWad6tbdxUmAs3MvjxlWSWD4deYytcRfoZ9nhKyFl1kj5tBvidbw==",
- "requires": {
- "color-name": "^1.0.0",
- "simple-swizzle": "^0.2.2"
- }
- },
- "colorette": {
- "version": "1.2.1",
- "resolved": "https://registry.npmjs.org/colorette/-/colorette-1.2.1.tgz",
- "integrity": "sha512-puCDz0CzydiSYOrnXpz/PKd69zRrribezjtE9yd4zvytoRc8+RY/KJPvtPFKZS3E3wP6neGyMe0vOTlHO5L3Pw=="
- },
- "commander": {
- "version": "6.2.1",
- "resolved": "https://registry.npmjs.org/commander/-/commander-6.2.1.tgz",
- "integrity": "sha512-U7VdrJFnJgo4xjrHpTzu0yrHPGImdsmD95ZlgYSEajAn2JKzDhDTPG9kBTefmObL2w/ngeZnilk+OV9CG3d7UA=="
- },
- "concat-map": {
- "version": "0.0.1",
- "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
- "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s="
- },
- "convert-source-map": {
- "version": "1.7.0",
- "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.7.0.tgz",
- "integrity": "sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA==",
- "dev": true,
- "requires": {
- "safe-buffer": "~5.1.1"
- }
- },
- "css-unit-converter": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/css-unit-converter/-/css-unit-converter-1.1.2.tgz",
- "integrity": "sha512-IiJwMC8rdZE0+xiEZHeru6YoONC4rfPMqGm2W85jMIbkFvv5nFTwJVFHam2eFrN6txmoUYFAFXiv8ICVeTO0MA=="
- },
- "cssesc": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz",
- "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg=="
- },
- "csstype": {
- "version": "3.0.6",
- "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.0.6.tgz",
- "integrity": "sha512-+ZAmfyWMT7TiIlzdqJgjMb7S4f1beorDbWbsocyK4RaiqA5RTX3K14bnBWmmA9QEM0gRdsjyyrEmcyga8Zsxmw==",
- "dev": true
- },
- "debug": {
- "version": "4.3.1",
- "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
- "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
- "dev": true,
- "requires": {
- "ms": "2.1.2"
- }
- },
- "defined": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/defined/-/defined-1.0.0.tgz",
- "integrity": "sha1-yY2bzvdWdBiOEQlpFRGZ45sfppM="
- },
- "detective": {
- "version": "5.2.0",
- "resolved": "https://registry.npmjs.org/detective/-/detective-5.2.0.tgz",
- "integrity": "sha512-6SsIx+nUUbuK0EthKjv0zrdnajCCXVYGmbYYiYjFVpzcjwEs/JMDZ8tPRG29J/HhN56t3GJp2cGSWDRjjot8Pg==",
- "requires": {
- "acorn-node": "^1.6.1",
- "defined": "^1.0.0",
- "minimist": "^1.1.1"
- }
- },
- "didyoumean": {
- "version": "1.2.1",
- "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.1.tgz",
- "integrity": "sha1-6S7f2tplN9SE1zwBcv0eugxJdv8="
- },
- "electron-to-chromium": {
- "version": "1.3.657",
- "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.657.tgz",
- "integrity": "sha512-/9ROOyvEflEbaZFUeGofD+Tqs/WynbSTbNgNF+/TJJxH1ePD/e6VjZlDJpW3FFFd3nj5l3Hd8ki2vRwy+gyRFw==",
- "dev": true
- },
- "esbuild": {
- "version": "0.8.42",
- "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.8.42.tgz",
- "integrity": "sha512-zUtj5RMqROCCCH0vV/a7cd8YQg8I0GWBhV3A3PklWRT+oM/YwVbnrtFnITzE1otGdnXplWHWdZ4OcYiV0PN+JQ==",
- "dev": true
- },
- "escalade": {
- "version": "3.1.1",
- "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
- "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==",
- "dev": true
- },
- "escape-string-regexp": {
- "version": "1.0.5",
- "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
- "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ="
- },
- "events": {
- "version": "3.2.0",
- "resolved": "https://registry.npmjs.org/events/-/events-3.2.0.tgz",
- "integrity": "sha512-/46HWwbfCX2xTawVfkKLGxMifJYQBWMwY1mjywRtb4c9x8l5NP3KoJtnIOiL1hfdRkIuYhETxQlo62IF8tcnlg=="
- },
- "fraction.js": {
- "version": "4.0.13",
- "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.0.13.tgz",
- "integrity": "sha512-E1fz2Xs9ltlUp+qbiyx9wmt2n9dRzPsS11Jtdb8D2o+cC7wr9xkkKsVKJuBX0ST+LVS+LhLO+SbLJNtfWcJvXA==",
- "dev": true
- },
- "fs-extra": {
- "version": "9.1.0",
- "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz",
- "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==",
- "requires": {
- "at-least-node": "^1.0.0",
- "graceful-fs": "^4.2.0",
- "jsonfile": "^6.0.1",
- "universalify": "^2.0.0"
- }
- },
- "fs.realpath": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
- "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8="
- },
- "fsevents": {
- "version": "2.1.3",
- "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.1.3.tgz",
- "integrity": "sha512-Auw9a4AxqWpa9GUfj370BMPzzyncfBABW8Mab7BGWBYDj4Isgq+cDKtx0i6u9jcX9pQDnswsaaOTgTmA5pEjuQ==",
- "dev": true,
- "optional": true
- },
- "function-bind": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz",
- "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A=="
- },
- "gensync": {
- "version": "1.0.0-beta.2",
- "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
- "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
- "dev": true
- },
- "glob": {
- "version": "7.1.6",
- "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz",
- "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==",
- "requires": {
- "fs.realpath": "^1.0.0",
- "inflight": "^1.0.4",
- "inherits": "2",
- "minimatch": "^3.0.4",
- "once": "^1.3.0",
- "path-is-absolute": "^1.0.0"
- }
- },
- "globals": {
- "version": "11.12.0",
- "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz",
- "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==",
- "dev": true
- },
- "graceful-fs": {
- "version": "4.2.5",
- "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.5.tgz",
- "integrity": "sha512-kBBSQbz2K0Nyn+31j/w36fUfxkBW9/gfwRWdUY1ULReH3iokVJgddZAFcD1D0xlgTmFxJCbUkUclAlc6/IDJkw=="
- },
- "has": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz",
- "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==",
- "requires": {
- "function-bind": "^1.1.1"
- }
- },
- "has-flag": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
- "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0="
- },
- "history": {
- "version": "4.10.1",
- "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz",
- "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==",
- "requires": {
- "@babel/runtime": "^7.1.2",
- "loose-envify": "^1.2.0",
- "resolve-pathname": "^3.0.0",
- "tiny-invariant": "^1.0.2",
- "tiny-warning": "^1.0.0",
- "value-equal": "^1.0.1"
- }
- },
- "hoist-non-react-statics": {
- "version": "3.3.2",
- "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz",
- "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==",
- "requires": {
- "react-is": "^16.7.0"
- }
- },
- "html-tags": {
- "version": "3.1.0",
- "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.1.0.tgz",
- "integrity": "sha512-1qYz89hW3lFDEazhjW0yVAV87lw8lVkrJocr72XmBkMKsoSVJCQx3W8BXsC7hO2qAt8BoVjYjtAcZ9perqGnNg=="
- },
- "indexes-of": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/indexes-of/-/indexes-of-1.0.1.tgz",
- "integrity": "sha1-8w9xbI4r00bHtn0985FVZqfAVgc="
- },
- "inflight": {
- "version": "1.0.6",
- "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
- "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=",
- "requires": {
- "once": "^1.3.0",
- "wrappy": "1"
- }
- },
- "inherits": {
- "version": "2.0.4",
- "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
- "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
- },
- "is-arrayish": {
- "version": "0.3.2",
- "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz",
- "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ=="
- },
- "is-core-module": {
- "version": "2.2.0",
- "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.2.0.tgz",
- "integrity": "sha512-XRAfAdyyY5F5cOXn7hYQDqh2Xmii+DEfIcQGxK/uNwMHhIkPWO0g8msXcbzLe+MpGoR951MlqM/2iIlU4vKDdQ==",
- "requires": {
- "has": "^1.0.3"
- }
- },
- "isarray": {
- "version": "0.0.1",
- "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
- "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8="
- },
- "js-tokens": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
- "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="
- },
- "jsesc": {
- "version": "2.5.2",
- "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz",
- "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==",
- "dev": true
- },
- "json-rpc-protocol": {
- "version": "0.13.1",
- "resolved": "https://registry.npmjs.org/json-rpc-protocol/-/json-rpc-protocol-0.13.1.tgz",
- "integrity": "sha512-gdb0TnNf0ITRjLm0QGNODgK1E6ORLhe+6L+RV8owg4X3d6x8jAfyHQC+xMG4T/qU2SPaNLpav29QRLC+3oF6gg==",
- "requires": {
- "make-error": "^1.3.0"
- }
- },
- "json5": {
- "version": "2.2.0",
- "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.0.tgz",
- "integrity": "sha512-f+8cldu7X/y7RAJurMEJmdoKXGB/X550w2Nr3tTbezL6RwEE/iMcm+tZnXeoZtKuOq6ft8+CqzEkrIgx1fPoQA==",
- "dev": true,
- "requires": {
- "minimist": "^1.2.5"
- }
- },
- "jsonfile": {
- "version": "6.1.0",
- "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
- "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==",
- "requires": {
- "graceful-fs": "^4.1.6",
- "universalify": "^2.0.0"
- }
- },
- "lodash": {
- "version": "4.17.20",
- "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.20.tgz",
- "integrity": "sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA=="
- },
- "lodash.toarray": {
- "version": "4.4.0",
- "resolved": "https://registry.npmjs.org/lodash.toarray/-/lodash.toarray-4.4.0.tgz",
- "integrity": "sha1-JMS/zWsvuji/0FlNsRedjptlZWE="
- },
- "loose-envify": {
- "version": "1.4.0",
- "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
- "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
- "requires": {
- "js-tokens": "^3.0.0 || ^4.0.0"
- }
- },
- "luxon": {
- "version": "1.25.0",
- "resolved": "https://registry.npmjs.org/luxon/-/luxon-1.25.0.tgz",
- "integrity": "sha512-hEgLurSH8kQRjY6i4YLey+mcKVAWXbDNlZRmM6AgWDJ1cY3atl8Ztf5wEY7VBReFbmGnwQPz7KYJblL8B2k0jQ=="
- },
- "make-error": {
- "version": "1.3.6",
- "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz",
- "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw=="
- },
- "mini-create-react-context": {
- "version": "0.4.1",
- "resolved": "https://registry.npmjs.org/mini-create-react-context/-/mini-create-react-context-0.4.1.tgz",
- "integrity": "sha512-YWCYEmd5CQeHGSAKrYvXgmzzkrvssZcuuQDDeqkT+PziKGMgE+0MCCtcKbROzocGBG1meBLl2FotlRwf4gAzbQ==",
- "requires": {
- "@babel/runtime": "^7.12.1",
- "tiny-warning": "^1.0.3"
- }
- },
- "mini-svg-data-uri": {
- "version": "1.2.3",
- "resolved": "https://registry.npmjs.org/mini-svg-data-uri/-/mini-svg-data-uri-1.2.3.tgz",
- "integrity": "sha512-zd6KCAyXgmq6FV1mR10oKXYtvmA9vRoB6xPSTUJTbFApCtkefDnYueVR1gkof3KcdLZo1Y8mjF2DFmQMIxsHNQ=="
- },
- "minimatch": {
- "version": "3.0.4",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz",
- "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==",
- "requires": {
- "brace-expansion": "^1.1.7"
- }
- },
- "minimist": {
- "version": "1.2.5",
- "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz",
- "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw=="
- },
- "modern-normalize": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/modern-normalize/-/modern-normalize-1.0.0.tgz",
- "integrity": "sha512-1lM+BMLGuDfsdwf3rsgBSrxJwAZHFIrQ8YR61xIqdHo0uNKI9M52wNpHSrliZATJp51On6JD0AfRxd4YGSU0lw=="
- },
- "ms": {
- "version": "2.1.2",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
- "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
- "dev": true
- },
- "nanoid": {
- "version": "3.1.20",
- "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.20.tgz",
- "integrity": "sha512-a1cQNyczgKbLX9jwbS/+d7W8fX/RfgYR7lVWwWOGIPNgK2m0MWvrGF6/m4kk6U3QcFMnZf3RIhL0v2Jgh/0Uxw=="
- },
- "node-emoji": {
- "version": "1.10.0",
- "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-1.10.0.tgz",
- "integrity": "sha512-Yt3384If5H6BYGVHiHwTL+99OzJKHhgp82S8/dktEK73T26BazdgZ4JZh92xSVtGNJvz9UbXdNAc5hcrXV42vw==",
- "requires": {
- "lodash.toarray": "^4.4.0"
- }
- },
- "node-releases": {
- "version": "1.1.70",
- "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.70.tgz",
- "integrity": "sha512-Slf2s69+2/uAD79pVVQo8uSiC34+g8GWY8UH2Qtqv34ZfhYrxpYpfzs9Js9d6O0mbDmALuxaTlplnBTnSELcrw==",
- "dev": true
- },
- "normalize-range": {
- "version": "0.1.2",
- "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz",
- "integrity": "sha1-LRDAa9/TEuqXd2laTShDlFa3WUI=",
- "dev": true
- },
- "object-assign": {
- "version": "4.1.1",
- "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
- "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM="
- },
- "object-hash": {
- "version": "2.1.1",
- "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-2.1.1.tgz",
- "integrity": "sha512-VOJmgmS+7wvXf8CjbQmimtCnEx3IAoLxI3fp2fbWehxrWBcAQFbk+vcwb6vzR0VZv/eNCJ/27j151ZTwqW/JeQ=="
- },
- "once": {
- "version": "1.4.0",
- "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
- "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=",
- "requires": {
- "wrappy": "1"
- }
- },
- "path-is-absolute": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
- "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18="
- },
- "path-parse": {
- "version": "1.0.6",
- "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz",
- "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw=="
- },
- "path-to-regexp": {
- "version": "1.8.0",
- "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz",
- "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==",
- "requires": {
- "isarray": "0.0.1"
- }
- },
- "postcss": {
- "version": "8.2.5",
- "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.2.5.tgz",
- "integrity": "sha512-wMcb7BpDcm3gxQOQx46NDNT36Kk0Ao6PJLLI2ed5vehbbbxCEuslSQzbQ2sfSKy+gkYxhWcGWSeaK+gwm4KIZg==",
- "requires": {
- "colorette": "^1.2.1",
- "nanoid": "^3.1.20",
- "source-map": "^0.6.1"
- },
- "dependencies": {
- "source-map": {
- "version": "0.6.1",
- "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
- "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
- }
- }
- },
- "postcss-functions": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/postcss-functions/-/postcss-functions-3.0.0.tgz",
- "integrity": "sha1-DpTQFERwCkgd4g3k1V+yZAVkJQ4=",
- "requires": {
- "glob": "^7.1.2",
- "object-assign": "^4.1.1",
- "postcss": "^6.0.9",
- "postcss-value-parser": "^3.3.0"
- },
- "dependencies": {
- "postcss": {
- "version": "6.0.23",
- "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.23.tgz",
- "integrity": "sha512-soOk1h6J3VMTZtVeVpv15/Hpdl2cBLX3CAw4TAbkpTJiNPk9YP/zWcD1ND+xEtvyuuvKzbxliTOIyvkSeSJ6ag==",
- "requires": {
- "chalk": "^2.4.1",
- "source-map": "^0.6.1",
- "supports-color": "^5.4.0"
- }
- },
- "postcss-value-parser": {
- "version": "3.3.1",
- "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
- "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
- },
- "source-map": {
- "version": "0.6.1",
- "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
- "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
- }
- }
- },
- "postcss-js": {
- "version": "3.0.3",
- "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-3.0.3.tgz",
- "integrity": "sha512-gWnoWQXKFw65Hk/mi2+WTQTHdPD5UJdDXZmX073EY/B3BWnYjO4F4t0VneTCnCGQ5E5GsCdMkzPaTXwl3r5dJw==",
- "requires": {
- "camelcase-css": "^2.0.1",
- "postcss": "^8.1.6"
- }
- },
- "postcss-nested": {
- "version": "5.0.3",
- "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-5.0.3.tgz",
- "integrity": "sha512-R2LHPw+u5hFfDgJG748KpGbJyTv7Yr33/2tIMWxquYuHTd9EXu27PYnKi7BxMXLtzKC0a0WVsqHtd7qIluQu/g==",
- "requires": {
- "postcss-selector-parser": "^6.0.4"
- }
- },
- "postcss-selector-parser": {
- "version": "6.0.4",
- "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.4.tgz",
- "integrity": "sha512-gjMeXBempyInaBqpp8gODmwZ52WaYsVOsfr4L4lDQ7n3ncD6mEyySiDtgzCT+NYC0mmeOLvtsF8iaEf0YT6dBw==",
- "requires": {
- "cssesc": "^3.0.0",
- "indexes-of": "^1.0.1",
- "uniq": "^1.0.1",
- "util-deprecate": "^1.0.2"
- }
- },
- "postcss-value-parser": {
- "version": "4.1.0",
- "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.1.0.tgz",
- "integrity": "sha512-97DXOFbQJhk71ne5/Mt6cOu6yxsSfM0QGQyl0L25Gca4yGWEGJaig7l7gbCX623VqTBNGLRLaVUCnNkcedlRSQ=="
- },
- "pretty-hrtime": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/pretty-hrtime/-/pretty-hrtime-1.0.3.tgz",
- "integrity": "sha1-t+PqQkNaTJsnWdmeDyAesZWALuE="
- },
- "prop-types": {
- "version": "15.7.2",
- "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.7.2.tgz",
- "integrity": "sha512-8QQikdH7//R2vurIJSutZ1smHYTcLpRWEOlHnzcWHmBYrOGUysKwSsrC89BCiFj3CbrfJ/nXFdJepOVrY1GCHQ==",
- "requires": {
- "loose-envify": "^1.4.0",
- "object-assign": "^4.1.1",
- "react-is": "^16.8.1"
- }
- },
- "purgecss": {
- "version": "3.1.3",
- "resolved": "https://registry.npmjs.org/purgecss/-/purgecss-3.1.3.tgz",
- "integrity": "sha512-hRSLN9mguJ2lzlIQtW4qmPS2kh6oMnA9RxdIYK8sz18QYqd6ePp4GNDl18oWHA1f2v2NEQIh51CO8s/E3YGckQ==",
- "requires": {
- "commander": "^6.0.0",
- "glob": "^7.0.0",
- "postcss": "^8.2.1",
- "postcss-selector-parser": "^6.0.2"
- }
- },
- "react": {
- "version": "17.0.1",
- "resolved": "https://registry.npmjs.org/react/-/react-17.0.1.tgz",
- "integrity": "sha512-lG9c9UuMHdcAexXtigOZLX8exLWkW0Ku29qPRU8uhF2R9BN96dLCt0psvzPLlHc5OWkgymP3qwTRgbnw5BKx3w==",
- "requires": {
- "loose-envify": "^1.1.0",
- "object-assign": "^4.1.1"
- }
- },
- "react-dom": {
- "version": "17.0.1",
- "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-17.0.1.tgz",
- "integrity": "sha512-6eV150oJZ9U2t9svnsspTMrWNyHc6chX0KzDeAOXftRa8bNeOKTTfCJ7KorIwenkHd2xqVTBTCZd79yk/lx/Ug==",
- "requires": {
- "loose-envify": "^1.1.0",
- "object-assign": "^4.1.1",
- "scheduler": "^0.20.1"
- }
- },
- "react-is": {
- "version": "16.13.1",
- "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",
- "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ=="
- },
- "react-refresh": {
- "version": "0.9.0",
- "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.9.0.tgz",
- "integrity": "sha512-Gvzk7OZpiqKSkxsQvO/mbTN1poglhmAV7gR/DdIrRrSMXraRQQlfikRJOr3Nb9GTMPC5kof948Zy6jJZIFtDvQ==",
- "dev": true
- },
- "react-router": {
- "version": "5.2.0",
- "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.2.0.tgz",
- "integrity": "sha512-smz1DUuFHRKdcJC0jobGo8cVbhO3x50tCL4icacOlcwDOEQPq4TMqwx3sY1TP+DvtTgz4nm3thuo7A+BK2U0Dw==",
- "requires": {
- "@babel/runtime": "^7.1.2",
- "history": "^4.9.0",
- "hoist-non-react-statics": "^3.1.0",
- "loose-envify": "^1.3.1",
- "mini-create-react-context": "^0.4.0",
- "path-to-regexp": "^1.7.0",
- "prop-types": "^15.6.2",
- "react-is": "^16.6.0",
- "tiny-invariant": "^1.0.2",
- "tiny-warning": "^1.0.0"
- }
- },
- "react-router-dom": {
- "version": "5.2.0",
- "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-5.2.0.tgz",
- "integrity": "sha512-gxAmfylo2QUjcwxI63RhQ5G85Qqt4voZpUXSEqCwykV0baaOTQDR1f0PmY8AELqIyVc0NEZUj0Gov5lNGcXgsA==",
- "requires": {
- "@babel/runtime": "^7.1.2",
- "history": "^4.9.0",
- "loose-envify": "^1.3.1",
- "prop-types": "^15.6.2",
- "react-router": "5.2.0",
- "tiny-invariant": "^1.0.2",
- "tiny-warning": "^1.0.0"
- }
- },
- "reduce-css-calc": {
- "version": "2.1.8",
- "resolved": "https://registry.npmjs.org/reduce-css-calc/-/reduce-css-calc-2.1.8.tgz",
- "integrity": "sha512-8liAVezDmUcH+tdzoEGrhfbGcP7nOV4NkGE3a74+qqvE7nt9i4sKLGBuZNOnpI4WiGksiNPklZxva80061QiPg==",
- "requires": {
- "css-unit-converter": "^1.1.1",
- "postcss-value-parser": "^3.3.0"
- },
- "dependencies": {
- "postcss-value-parser": {
- "version": "3.3.1",
- "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
- "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
- }
- }
- },
- "regenerator-runtime": {
- "version": "0.13.7",
- "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.7.tgz",
- "integrity": "sha512-a54FxoJDIr27pgf7IgeQGxmqUNYrcV338lf/6gH456HZ/PhX+5BcwHXG9ajESmwe6WRO0tAzRUrRmNONWgkrew=="
- },
- "resolve": {
- "version": "1.19.0",
- "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.19.0.tgz",
- "integrity": "sha512-rArEXAgsBG4UgRGcynxWIWKFvh/XZCcS8UJdHhwy91zwAvCZIbcs+vAbflgBnNjYMs/i/i+/Ux6IZhML1yPvxg==",
- "requires": {
- "is-core-module": "^2.1.0",
- "path-parse": "^1.0.6"
- }
- },
- "resolve-pathname": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz",
- "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng=="
- },
- "rollup": {
- "version": "2.38.5",
- "resolved": "https://registry.npmjs.org/rollup/-/rollup-2.38.5.tgz",
- "integrity": "sha512-VoWt8DysFGDVRGWuHTqZzT02J0ASgjVq/hPs9QcBOGMd7B+jfTr/iqMVEyOi901rE3xq+Deq66GzIT1yt7sGwQ==",
- "dev": true,
- "requires": {
- "fsevents": "~2.3.1"
- },
- "dependencies": {
- "fsevents": {
- "version": "2.3.2",
- "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz",
- "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==",
- "dev": true,
- "optional": true
- }
- }
- },
- "safe-buffer": {
- "version": "5.1.2",
- "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
- "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==",
- "dev": true
- },
- "scheduler": {
- "version": "0.20.1",
- "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.20.1.tgz",
- "integrity": "sha512-LKTe+2xNJBNxu/QhHvDR14wUXHRQbVY5ZOYpOGWRzhydZUqrLb2JBvLPY7cAqFmqrWuDED0Mjk7013SZiOz6Bw==",
- "requires": {
- "loose-envify": "^1.1.0",
- "object-assign": "^4.1.1"
- }
- },
- "semver": {
- "version": "5.7.1",
- "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
- "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==",
- "dev": true
- },
- "simple-swizzle": {
- "version": "0.2.2",
- "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz",
- "integrity": "sha1-pNprY1/8zMoz9w0Xy5JZLeleVXo=",
- "requires": {
- "is-arrayish": "^0.3.1"
- }
- },
- "source-map": {
- "version": "0.5.7",
- "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
- "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=",
- "dev": true
- },
- "supports-color": {
- "version": "5.5.0",
- "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
- "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
- "requires": {
- "has-flag": "^3.0.0"
- }
- },
- "tailwindcss": {
- "version": "2.0.2",
- "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-2.0.2.tgz",
- "integrity": "sha512-nO9JRE1pO7SF9RnYAl6g7uzeHdrmKAFqNjT9NtZUfxqimJZAOOLOEyIEUiMq12+xIc7mC2Ey3Vf90XjHpWKfbw==",
- "requires": {
- "@fullhuman/postcss-purgecss": "^3.0.0",
- "bytes": "^3.0.0",
- "chalk": "^4.1.0",
- "color": "^3.1.3",
- "detective": "^5.2.0",
- "didyoumean": "^1.2.1",
- "fs-extra": "^9.0.1",
- "html-tags": "^3.1.0",
- "lodash": "^4.17.20",
- "modern-normalize": "^1.0.0",
- "node-emoji": "^1.8.1",
- "object-hash": "^2.0.3",
- "postcss-functions": "^3",
- "postcss-js": "^3.0.3",
- "postcss-nested": "^5.0.1",
- "postcss-selector-parser": "^6.0.4",
- "postcss-value-parser": "^4.1.0",
- "pretty-hrtime": "^1.0.3",
- "reduce-css-calc": "^2.1.6",
- "resolve": "^1.19.0"
- },
- "dependencies": {
- "ansi-styles": {
- "version": "4.3.0",
- "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
- "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
- "requires": {
- "color-convert": "^2.0.1"
- }
- },
- "chalk": {
- "version": "4.1.0",
- "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz",
- "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==",
- "requires": {
- "ansi-styles": "^4.1.0",
- "supports-color": "^7.1.0"
- }
- },
- "color-convert": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
- "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
- "requires": {
- "color-name": "~1.1.4"
- }
- },
- "color-name": {
- "version": "1.1.4",
- "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
- "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
- },
- "has-flag": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
- "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ=="
- },
- "supports-color": {
- "version": "7.2.0",
- "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
- "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
- "requires": {
- "has-flag": "^4.0.0"
- }
- }
- }
- },
- "tiny-invariant": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.1.0.tgz",
- "integrity": "sha512-ytxQvrb1cPc9WBEI/HSeYYoGD0kWnGEOR8RY6KomWLBVhqz0RgTwVO9dLrGz7dC+nN9llyI7OKAgRq8Vq4ZBSw=="
- },
- "tiny-warning": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz",
- "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA=="
- },
- "to-fast-properties": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
- "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=",
- "dev": true
- },
- "typescript": {
- "version": "4.1.3",
- "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.1.3.tgz",
- "integrity": "sha512-B3ZIOf1IKeH2ixgHhj6la6xdwR9QrLC5d1VKeCSY4tvkqhF2eqd9O7txNlS0PO3GrBAFIdr3L1ndNwteUbZLYg==",
- "dev": true
- },
- "uniq": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/uniq/-/uniq-1.0.1.tgz",
- "integrity": "sha1-sxxa6CVIRKOoKBVBzisEuGWnNP8="
- },
- "universalify": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz",
- "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ=="
- },
- "util-deprecate": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
- "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8="
- },
- "value-equal": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz",
- "integrity": "sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw=="
- },
- "vite": {
- "version": "2.0.0-beta.65",
- "resolved": "https://registry.npmjs.org/vite/-/vite-2.0.0-beta.65.tgz",
- "integrity": "sha512-mdHNTP6fGeb8m8lWAM3UbSPw1+un1lUv0i4MQJcNiK2/P01RHIY02VjQeXBv3NemkExkgLji88LN9ySFMUQpIw==",
- "dev": true,
- "requires": {
- "esbuild": "^0.8.34",
- "fsevents": "~2.1.2",
- "postcss": "^8.2.1",
- "resolve": "^1.19.0",
- "rollup": "^2.35.1"
- }
- },
- "wrappy": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
- "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8="
- },
- "xtend": {
- "version": "4.0.2",
- "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
- "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ=="
- }
- }
-}
diff --git a/cli/daemon/dash/dashapp/package.json b/cli/daemon/dash/dashapp/package.json
deleted file mode 100644
index 872e85d9db..0000000000
--- a/cli/daemon/dash/dashapp/package.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "name": "dashapp",
- "version": "0.0.0",
- "scripts": {
- "dev": "vite",
- "build": "tsc && vite build",
- "serve": "vite preview"
- },
- "dependencies": {
- "@headlessui/react": "^0.2.0",
- "@tailwindcss/forms": "^0.2.1",
- "codemirror": "^5.59.2",
- "events": "^3.2.0",
- "json-rpc-protocol": "^0.13.1",
- "luxon": "^1.25.0",
- "react": "^17.0.0",
- "react-dom": "^17.0.0",
- "react-router-dom": "^5.2.0",
- "tailwindcss": "^2.0.2"
- },
- "devDependencies": {
- "@types/codemirror": "^0.0.108",
- "@types/events": "^3.0.0",
- "@types/luxon": "^1.25.1",
- "@types/node": "^14.14.25",
- "@types/react": "^17.0.0",
- "@types/react-dom": "^17.0.0",
- "@types/react-router-dom": "^5.1.7",
- "@vitejs/plugin-react-refresh": "^1.1.0",
- "autoprefixer": "^10.2.4",
- "postcss": "^8.2.5",
- "typescript": "^4.1.2",
- "vite": "^2.0.0-beta.64"
- }
-}
diff --git a/cli/daemon/dash/dashapp/postcss.config.js b/cli/daemon/dash/dashapp/postcss.config.js
deleted file mode 100644
index 33ad091d26..0000000000
--- a/cli/daemon/dash/dashapp/postcss.config.js
+++ /dev/null
@@ -1,6 +0,0 @@
-module.exports = {
- plugins: {
- tailwindcss: {},
- autoprefixer: {},
- },
-}
diff --git a/cli/daemon/dash/dashapp/public/favicon.ico b/cli/daemon/dash/dashapp/public/favicon.ico
deleted file mode 100644
index f8a3d8683e..0000000000
Binary files a/cli/daemon/dash/dashapp/public/favicon.ico and /dev/null differ
diff --git a/cli/daemon/dash/dashapp/src/App.tsx b/cli/daemon/dash/dashapp/src/App.tsx
deleted file mode 100644
index 6f449b0844..0000000000
--- a/cli/daemon/dash/dashapp/src/App.tsx
+++ /dev/null
@@ -1,42 +0,0 @@
-import React, { useEffect, useRef, useState } from 'react'
-import {
- BrowserRouter as Router,
- Route, Switch
-} from "react-router-dom";
-import Client from '~lib/client/client';
-import JSONRPCConn from '~lib/client/jsonrpc';
-import AppList from '~p/AppList';
-import AppHome from '~p/AppHome';
-import { ConnContext } from '~lib/ctx';
-import AppAPI from '~p/AppAPI';
-
-function App() {
- const [conn, setConn] = useState<JSONRPCConn | undefined>(undefined)
- const [err, setErr] = useState<Error | undefined>(undefined)
- const mounted = useRef(true)
-
- useEffect(() => {
- const client = new Client()
- client.base.jsonrpc("/__encore").then(
- conn => mounted.current && setConn(conn)
- ).catch(err => mounted.current && setErr(err))
- return () => { conn?.close(); mounted.current = false }
- }, [])
-
- if (err) return <div>Error: {err.message}</div>
- if (!conn) return <div>Loading...</div>
-
- return (
- <ConnContext.Provider value={conn}>
- <Router>
- <Switch>
- <Route path="/:appID/api"><AppAPI /></Route>
- <Route path="/:appID"><AppHome /></Route>
- <Route path="/"><AppList /></Route>
- </Switch>
- </Router>
- </ConnContext.Provider>
- )
-}
-
-export default App
diff --git a/cli/daemon/dash/dashapp/src/components/Button.tsx b/cli/daemon/dash/dashapp/src/components/Button.tsx
deleted file mode 100644
index 9369e961eb..0000000000
--- a/cli/daemon/dash/dashapp/src/components/Button.tsx
+++ /dev/null
@@ -1,49 +0,0 @@
-import React, { FunctionComponent, MouseEventHandler } from "react";
-
-export interface Props {
- theme: "purple" | "purple:secondary" | "purple:border" | "white" | "red" | "red:secondary" | "gray" | "gray:border";
- size: "xxs" | "xs" | "sm" | "md" | "lg" | "xl";
- cls?: string;
- disabled?: boolean;
- onClick?: MouseEventHandler;
- type?: "button" | "submit";
-}
-
-const sizeClasses = {
- "xxs": "px-1 py-0.5 text-xs leading-4 rounded",
- "xs": "px-2.5 py-1.5 text-xs leading-4 rounded",
- "sm": "px-3 py-2 text-sm leading-4 rounded-md",
- "md": "px-4 py-2 text-sm leading-5 rounded-md",
- "lg": "px-4 py-2 text-base leading-6 rounded-md",
- "xl": "px-6 py-3 text-base leading-6 rounded-md",
-}
-
-const enabledClasses = {
- "purple": "border-transparent text-white bg-purple-600 hover:bg-purple-500 focus:outline-none focus:border-purple-700 focus:shadow-outline-purple active:bg-purple-700",
- "purple:secondary": "border-transparent text-purple-700 bg-purple-100 hover:bg-purple-50 focus:outline-none focus:border-purple-300 focus:shadow-outline-purple active:bg-purple-200",
- "purple:border": "border-purple-600 text-purple-700 bg-white hover:text-purple-500 hover:bg-purple-50 focus:outline-none focus:border-purple-500 focus:shadow-outline-purple active:text-purple-800 active:bg-gray-50",
- "white": "border-gray-300 text-gray-700 bg-white hover:text-gray-500 focus:outline-none focus:border-purple-300 focus:shadow-outline-purple active:text-gray-800 active:bg-gray-50",
- "red": "border-transparent bg-red-600 text-white hover:bg-red-500 focus:outline-none focus:border-red-700 focus:shadow-outline-red active:bg-red-700",
- "red:secondary": "border-red-600 text-red-700 bg-white hover:text-white hover:bg-red-600 focus:outline-none focus:border-red-500 focus:shadow-outline-red active:text-white active:bg-red-600",
- "gray": "border-transparent text-white bg-gray-700 hover:bg-gray-600 focus:outline-none active:bg-gray-800",
- "gray:border": "border-gray-700 text-gray-800 bg-white hover:text-gray-600 hover:bg-gray-50 focus:outline-none focus:border-gray-600 active:text-gray-800 active:bg-gray-50",
-}
-
-const disabledClasses = {
- "purple": "border-transparent text-white bg-purple-500 opacity-50 cursor-not-allowed focus:outline-none",
- "purple:secondary": "border-transparent text-purple-700 bg-purple-100 opacity-50 cursor-not-allowed focus:outline-none",
- "purple:border": "border-gray-200 text-gray-600 bg-white opacity-50 cursor-not-allowed focus:outline-none",
- "white": "border-gray-200 text-gray-600 bg-white opacity-50 cursor-not-allowed focus:outline-none",
- "red": "border-transparent text-white bg-red-500 opacity-50 cursor-not-allowed focus:outline-none",
- "red:secondary": "border-gray-200 text-red-800 bg-white opacity-50 cursor-not-allowed focus:outline-none",
- "gray": "border-transparent text-white bg-gray-500 opacity-50 cursor-not-allowed focus:outline-none",
- "gray:border": "border-gray-200 text-gray-600 bg-white opacity-50 cursor-not-allowed focus:outline-none",
-}
-
- const Button: FunctionComponent<Props> = (props) => {
- const baseCls = "inline-flex justify-center items-center border font-medium transition duration-150 ease-in-out"
- const cls = `${baseCls} ${props.disabled ? disabledClasses[props.theme] : enabledClasses[props.theme]} ${sizeClasses[props.size]} ${props.cls || ""}`
- return <button type={props.type || "button"} disabled={props.disabled} onClick={props.onClick} className={cls}>{props.children}</button>
-}
-
-export default Button
\ No newline at end of file
diff --git a/cli/daemon/dash/dashapp/src/components/Input.tsx b/cli/daemon/dash/dashapp/src/components/Input.tsx
deleted file mode 100644
index 4e5a050b7a..0000000000
--- a/cli/daemon/dash/dashapp/src/components/Input.tsx
+++ /dev/null
@@ -1,318 +0,0 @@
-import React, { FunctionComponent, useRef, useState, useEffect } from 'react'
-
-type Type = "text" | "number" | "email" | "password";
-
-export interface InputProps {
- id: string;
- value: string;
- type?: Type;
- onChange?: (value: string) => void;
-
- required?: boolean;
- label?: string;
- desc?: string;
- htmlDesc?: string;
- placeholder?: string;
- error?: string;
- prefix?: string;
- cls?: string;
- disabled?: boolean;
-}
-
-
- const Input: FunctionComponent<InputProps> = (props: InputProps) => {
- const typ = props.type || "text"
- const onChange = (event: React.ChangeEvent<HTMLInputElement>) => {
- if (props.onChange) {
- props.onChange(event.target.value)
- }
- }
-
- const extraCls = props.disabled ? "bg-gray-100 text-gray-600" : ""
-
- return (
-
- {props.label &&
-
- {props.label}
- }
-
- {props.error ? (
- <>
- {props.prefix ? (
-
-
- {props.prefix}
-
-
-
-
- ) : (
-
- )}
-
- {props.error}
- >
- ) : (
- <>
- {props.prefix ? (
-
-
- {props.prefix}
-
-
-
- ) : (
-
-
-
- )}
-
- {props.desc ? (
-
- {props.desc}
- ) : props.htmlDesc ? (
-
- ) : null}
- >
- )}
-
- )
-}
-
-export interface TextAreaProps {
- id: string;
- value: string;
- onChange?: (value: string) => void;
-
- rows?: number;
- required?: boolean;
- label?: string;
- desc?: string;
- htmlDesc?: string;
- placeholder?: string;
- error?: string;
- cls?: string;
- disabled?: boolean;
-}
-
-
- export const TextArea: FunctionComponent<TextAreaProps> = (props) => {
- const onChange = (event: React.ChangeEvent<HTMLTextAreaElement>) => {
- if (props.onChange) {
- props.onChange(event.target.value)
- }
- }
-
- const extraCls = props.disabled ? "bg-gray-100 text-gray-600" : ""
-
- return (
-
- {props.label &&
-
- {props.label}
- }
-
- {props.error ? (
- <>
-
-
-
-
- {props.error}
- >
- ) : (
- <>
-
-
-
-
- {props.desc ? (
-
- {props.desc}
- ) : props.htmlDesc ? (
-
- ) : null}
- >
- )}
-
- )
-}
-
-interface RangeProps {
- id?: string;
- value: number;
- min: number;
- max: number;
- onChange: (value: number, frac: number) => void;
-
- minLabel?: string;
- maxLabel?: string;
- valueLabel?: string;
- title?: string;
-}
-
- export const Range: FunctionComponent<RangeProps> = (props) => {
- const filled = (((props.value - props.min) / (props.max - props.min)) * 100) + "%"
- const slider = useRef<HTMLDivElement>(null)
- const [dragging, setDragging] = useState(false)
-
- const update = (event: {pageX: number}) => {
- const rect = slider.current?.getBoundingClientRect()
- if (rect) {
- let frac = (event.pageX - rect.left) / rect.width
- frac = Math.max(Math.min(frac, 1), 0)
- const newValue = props.min + Math.round(frac * (props.max - props.min))
- props.onChange(newValue, frac)
- }
- }
-
- const onMouseUp = (event: MouseEvent) => {
- if (event.button !== 0 || !dragging) {
- return
- }
-
- const rect = slider.current?.getBoundingClientRect()
- const x = event.pageX
- const y = event.pageY
- if (rect && rect.left <= x && x <= rect.right && rect.top <= y && y <= rect.bottom) {
- update(event)
- }
-
- setDragging(false)
- event.stopPropagation()
- event.preventDefault()
- }
-
- const onMouseDown = (event: React.MouseEvent) => {
- if (event.button !== 0) {
- return
- }
- setDragging(true)
- event.stopPropagation()
- event.preventDefault()
- }
-
- const onClick = (event: React.MouseEvent) => {
- if (event.button !== 0) {
- return
- }
- update(event)
- event.stopPropagation()
- event.preventDefault()
- }
-
- useEffect(() => {
- document.addEventListener("mouseup", onMouseUp)
- return () => document.removeEventListener("mouseup", onMouseUp)
- })
-
- const onMouseMove = (event: React.MouseEvent) => {
- if (dragging && props.onChange) {
- const rect = slider.current?.getBoundingClientRect()
- if (rect) {
- let frac = (event.pageX - rect.left) / rect.width
- frac = Math.max(Math.min(frac, 1), 0)
- const newValue = props.min + Math.round(frac * (props.max - props.min))
- props.onChange(newValue, frac)
- }
- }
- event.stopPropagation()
- event.preventDefault()
- }
-
- return (
-
-
-
-
-
-
-
-
-
-
-
{props.valueLabel ?? props.value}
-
-
-
-
-
-
-
-
{props.minLabel ?? props.min}
-
{props.valueLabel ?? props.value}
-
{props.maxLabel ?? props.max}
-
-
-
- )
-}
-
-interface CounterProps {
- id?: string;
- label?: string;
- min?: number;
- max?: number;
- value: number;
- onChange: (val: number) => void;
- increment?: (val: number) => number;
- decrement?: (val: number) => number;
-}
-
-export const Counter: FunctionComponent<CounterProps> = (props) => {
- const inc = props.increment ?? ((val: number) => val+1)
- const dec = props.decrement ?? ((val: number) => val-1)
- const update = (val: number) => {
- if (props.max !== undefined && val > props.max) {
- val = props.max
- }
- if (props.min !== undefined && val < props.min) {
- val = props.min
- }
- props.onChange(val)
- }
-
- return (
-
-
- {props.label &&
-
{props.label}
- }
-
- update(dec(props.value))} className="bg-gray-300 text-gray-600 hover:text-gray-700 hover:bg-gray-400 h-full w-20 rounded-l cursor-pointer outline-none">
- −
-
- update(parseInt(e.target.value))}/>
- update(inc(props.value))} className="bg-gray-300 text-gray-600 hover:text-gray-700 hover:bg-gray-400 h-full w-20 rounded-r cursor-pointer">
- +
-
-
-
- )
-}
-
-export default Input
\ No newline at end of file
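
All form controls in the deleted Input.tsx are controlled components: the parent owns the value and gets plain values back through onChange. A hedged usage sketch; ProfileForm and its fields are hypothetical, while the props match the interfaces above:

```tsx
import React, { useState } from "react";
import Input, { TextArea } from "~c/Input"; // "~c" alias as used elsewhere in this diff

const ProfileForm: React.FC = () => {
  const [name, setName] = useState("");
  const [bio, setBio] = useState("");
  return (
    <form>
      {/* Value flows down; onChange hands the raw string back up. */}
      <Input id="name" value={name} onChange={setName} label="Name" required />
      <TextArea id="bio" value={bio} onChange={setBio} rows={4} label="Bio" />
    </form>
  );
};
```
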
diff --git a/cli/daemon/dash/dashapp/src/components/Modal.tsx b/cli/daemon/dash/dashapp/src/components/Modal.tsx
deleted file mode 100644
index 1efff435e8..0000000000
--- a/cli/daemon/dash/dashapp/src/components/Modal.tsx
+++ /dev/null
@@ -1,97 +0,0 @@
-import React from "react";
-import ReactDOM from "react-dom";
-import { Transition } from "@headlessui/react"
-
-interface Props {
- show: boolean;
- close?: () => void;
- width?: string;
-}
-
-export class Modal extends React.Component<Props> {
- el?: HTMLDivElement;
- root?: HTMLElement;
- bgRef: React.RefObject<HTMLDivElement>;
-
- constructor(props: Props) {
- super(props)
- this.handleClick = this.handleClick.bind(this)
- this.handleKeyPress = this.handleKeyPress.bind(this)
- this.bgRef = React.createRef()
- }
-
- componentDidMount() {
- this.el = document.createElement("div");
- const root = document.getElementById("modal-root")
- if (root === null) {
- throw new Error("could not find #modal-root element")
- }
- this.root = root
- this.root.appendChild(this.el);
- window.addEventListener("keyup", this.handleKeyPress)
- }
-
- componentWillUnmount() {
- window.removeEventListener("keyup", this.handleKeyPress)
- if (this.root && this.el) {
- this.root.removeChild(this.el)
- }
- }
-
- handleClick(ev: React.MouseEvent) {
- ev.stopPropagation()
- if (ev.target === this.bgRef.current && this.props.close) {
- this.props.close()
- }
- }
-
- handleKeyPress(e: KeyboardEvent) {
- if(e.key === "Escape" && this.props.close) {
- this.props.close()
- }
- }
-
- render() {
- if (!this.el) {
- return null
- }
- const width = this.props.width ?? "sm:max-w-lg sm:w-full"
-
- return ReactDOM.createPortal(
- (
-
-
-
-
-
-
-
- {this.props.children}
-
-
-
- ),
- this.el
- );
- }
-}
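
Modal portals its children into a #modal-root element and invokes close on Escape or a backdrop click. A hedged usage sketch: ConfirmDelete is hypothetical, Modal's props are as declared above, and an empty `<div id="modal-root">` is assumed to exist in the host page:

```tsx
import React, { useState } from "react";
import { Modal } from "~c/Modal"; // path assumed from this diff's layout

const ConfirmDelete: React.FC<{ onConfirm: () => void }> = ({ onConfirm }) => {
  const [show, setShow] = useState(false);
  return (
    <>
      <button onClick={() => setShow(true)}>Delete</button>
      {/* close fires on Escape, on a backdrop click, or from either button below */}
      <Modal show={show} close={() => setShow(false)}>
        <p>Really delete this?</p>
        <button onClick={() => { onConfirm(); setShow(false); }}>Yes</button>
        <button onClick={() => setShow(false)}>No</button>
      </Modal>
    </>
  );
};
```
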
diff --git a/cli/daemon/dash/dashapp/src/components/Nav.tsx b/cli/daemon/dash/dashapp/src/components/Nav.tsx
deleted file mode 100644
index c7672537b0..0000000000
--- a/cli/daemon/dash/dashapp/src/components/Nav.tsx
+++ /dev/null
@@ -1,142 +0,0 @@
-import React, { FunctionComponent, useState, useEffect } from "react";
-import { Link, useParams, useRouteMatch } from "react-router-dom"
-import { useConn } from "~lib/ctx"
-import logo from "../logo.svg"
-
-const menuItems: {href: string; name: string}[] = [
- {href: "", name: "Requests"},
- {href: "/api", name: "API Docs"},
-]
-
-const Nav: FunctionComponent = (props) => {
- const { appID } = useParams<{appID: string}>()
- const [menuOpen, setMenuOpen] = useState(false)
- const [appsOpen, setAppsOpen] = useState(false)
- const route = useRouteMatch()
-
- return (
-
- {appsOpen &&
- setAppsOpen(false)} />
- }
-
-
-
-
-
-
-
-
- {menuItems.map(it => {
- const as = `/${appID}${it.href}`
- const selected = route.path === ("/:appID"+it.href)
- return (
-
- {it.name}
-
- )
- })}
-
-
-
-
-
- {/* <-- App dropdown --> */}
-
-
-
-
-
setMenuOpen(!menuOpen)} className="inline-flex items-center justify-center p-2 rounded-md text-gray-400 hover:text-white hover:bg-gray-700 focus:outline-none focus:bg-gray-700 focus:text-white">
-
-
-
-
-
-
-
-
-
-
-
-
-
- {menuItems.map(it => {
- const as = `/${appID}${it.href}`
- const selected = false // TODO
- return (
-
- {it.name}
-
- )
- })}
-
-
-
-
- )
-}
-
-export default Nav
-
-interface AppDropdownProps {
- appID: string;
- open: boolean;
- setOpen: (open: boolean) => void;
-}
-
-const AppDropdown: FunctionComponent<AppDropdownProps> = (props): JSX.Element => {
- interface app {
- id: string;
- name: string;
- }
- const [apps, setApps] = useState<app[] | undefined>(undefined)
- const appName = apps?.find(a => a.id === props.appID)?.name
- const conn = useConn()
-
- useEffect(() => {
- conn.request("list-apps").then(apps => setApps(apps as app[]))
- }, [props.open])
-
- return (
- <>
-
-
-
-
-
- {props.open &&
-
-
-
- {apps !== undefined ? (
- <>
-
Running Apps
- {apps.map(app =>
-
props.setOpen(false)}>
-
{app.name}
-
- )}
- </>
- ) : (
-
Loading...
- )}
-
-
-
- }
-
- </>
- )
-}
\ No newline at end of file
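
Nav marks a menu item selected by comparing the matched route pattern, not the concrete URL, against "/:appID" plus the item's href. Since react-router v5's useRouteMatch().path is the pattern, the check is appID-independent; in isolation:

```tsx
// route.path is the matched pattern, e.g. "/:appID/api", so this
// selects the right tab for any concrete appID in the URL.
const isSelected = (routePath: string, href: string): boolean =>
  routePath === "/:appID" + href;

isSelected("/:appID/api", "/api"); // true -> "API Docs" tab
isSelected("/:appID", "");         // true -> "Requests" tab
```
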
diff --git a/cli/daemon/dash/dashapp/src/components/api/RPCCaller.tsx b/cli/daemon/dash/dashapp/src/components/api/RPCCaller.tsx
deleted file mode 100644
index 01a5c59031..0000000000
--- a/cli/daemon/dash/dashapp/src/components/api/RPCCaller.tsx
+++ /dev/null
@@ -1,352 +0,0 @@
-import { Menu, Transition } from "@headlessui/react";
-import CodeMirror, { EditorConfiguration } from "codemirror";
-import React, { FC } from "react";
-import * as icons from "~c/icons";
-import Input from "~c/Input";
-import { decodeBase64, encodeBase64 } from "~lib/base64";
-import JSONRPCConn from "~lib/client/jsonrpc";
-import { copyToClipboard } from "~lib/clipboard";
-import { APIMeta, RPC, Service } from "./api";
-import CM from "./cm/CM";
-import { BuiltinType, Decl, ListType, MapType, NamedType, StructType, Type } from "./schema";
-
-interface Props {
- conn: JSONRPCConn;
- appID: string;
- md: APIMeta;
- svc: Service;
- rpc: RPC;
- port?: number;
-}
-
-interface State {
- loading: boolean;
- response?: string;
- respErr?: string;
- authToken: string;
-}
-
-export const cfg: EditorConfiguration = {
- theme: "encore",
- mode: "json",
- lineNumbers: false,
- lineWrapping: false,
- indentWithTabs: true,
- indentUnit: 4,
- tabSize: 4,
- autoCloseBrackets: true,
- matchBrackets: true,
- styleActiveLine: false,
-}
-
-export default class RPCCaller extends React.Component<Props, State> {
- cm: React.RefObject<CM>;
- docs: Map<RPC, CodeMirror.Doc>;
-
- constructor(props: Props) {
- super(props)
- this.cm = React.createRef()
- this.docs = new Map()
- this.state = {loading: false, authToken: ""}
- }
-
- componentDidMount() {
- this.open(this.props.rpc)
- }
-
- componentDidUpdate(prevProps: Props) {
- if (prevProps.svc.name !== this.props.svc.name || prevProps.rpc.name !== this.props.rpc.name) {
- this.open(this.props.rpc)
- }
- }
-
- private open(rpc: RPC) {
- if (rpc.request_schema) {
- let doc = this.docs.get(rpc)
- if (doc === undefined) {
- const js = new JSONRenderer(this.props.md).render(rpc.request_schema!)
- doc = new CodeMirror.Doc(js, {
- name: "javascript",
- json: true
- })
- this.docs.set(rpc, doc)
- }
- this.cm.current?.open(doc)
- }
- this.setState({response: undefined, respErr: undefined})
- }
-
- async send() {
- const rpc = this.props.rpc
- let reqBody = ""
- if (rpc.request_schema) {
- const doc = this.docs.get(rpc)
- if (doc === undefined) {
- return
- }
- reqBody = doc.getValue()
- }
-
- const payload = encodeBase64(reqBody)
- const authToken = this.state.authToken
- const endpoint = `${this.props.svc.name}.${rpc.name}`
- try {
- this.setState({loading: true})
- const resp = await this.props.conn.request("api-call", {
- appID: this.props.appID,
- endpoint,
- payload,
- authToken,
- }) as any
-
- let respBody = ""
- if (resp.body.length > 0) {
- respBody = decodeBase64(resp.body)
- }
- if (resp.status_code !== 200) {
- this.setState({response: undefined, respErr: `HTTP ${resp.status_code}: ${respBody}`})
- } else if (rpc.response_schema) {
- this.setState({response: respBody, respErr: undefined})
- } else {
- this.setState({response: "Request completed successfully.", respErr: undefined})
- }
- } catch(err) {
- this.setState({response: undefined, respErr: `Internal Error: ${err}`})
- } finally {
- this.setState({loading: false})
- }
- }
-
- copyCurl() {
- let reqBody = ""
- const rpc = this.props.rpc
- if (rpc.request_schema) {
- const doc = this.docs.get(rpc)
- if (doc === undefined) {
- return
- }
- reqBody = doc.getValue()
- // Convert to JSON and back, if possible, to simplify indentation
- try {
- reqBody = JSON.stringify(JSON.parse(reqBody), undefined, " ")
- } catch(err) { /* do nothing */ }
-
- reqBody = reqBody.replaceAll("'", "'\\''")
- }
-
- let cmd = `curl http://localhost:${this.props.port ?? 4060}/${this.props.svc.name}.${this.props.rpc.name}`
- if (reqBody !== "") {
- cmd += ` -d '${reqBody}'`
- }
- copyToClipboard(cmd)
- }
-
- render() {
- const rpc = this.props.rpc
- return (
-
-
- Request
-
-
-
-
-
- This API takes no request data.
-
-
- {this.props.md.auth_handler &&
-
- this.setState({authToken})} />
-
- }
-
this.send()} copyCurl={() => this.copyCurl()} />
-
-
-
-
- Response {this.state.loading && icons.loading("ml-1 h-5 w-5", "#A081D9", "transparent", 4)}
-
- {this.state.response ? (
-
- {this.state.response}
-
- ) : this.state.respErr ? (
-
- {this.state.respErr}
-
- ) :
Make a request to see the response.
}
-
- )
- }
-}
-
-class JSONRenderer {
- buf: string[];
- level: number;
- md: APIMeta;
- seenDecls: Set<number>;
-
- constructor(md: APIMeta) {
- this.buf = []
- this.level = 0
- this.md = md
- this.seenDecls = new Set()
- }
-
- render(d: Decl): string {
- this.writeType(d.type)
- return this.buf.join("")
- }
-
- private writeType(t: Type) {
- t.struct ? this.renderStruct(t.struct) :
- t.map ? this.renderMap(t.map) :
- t.list ? this.renderList(t.list) :
- t.builtin ? this.write(this.renderBuiltin(t.builtin)) :
- t.named ? this.renderNamed(t.named)
- : this.write("")
- }
-
- private renderNamed(t: NamedType) {
- if (this.seenDecls.has(t.id)) {
- this.write("null")
- return
- }
-
- // Add the decl to our map while recursing to avoid infinite recursion.
- this.seenDecls.add(t.id)
- const decl = this.md.decls[t.id]
- this.writeType(decl.type)
- this.seenDecls.delete(t.id)
- }
-
- private renderStruct(t: StructType) {
- this.writeln("{")
- this.level++
- for (let i = 0; i < t.fields.length; i++) {
- const f = t.fields[i]
- this.indent()
- this.write(`"${f.json_name !== "" ? f.json_name : f.name}": `)
- this.writeType(f.typ)
- if (i < (t.fields.length-1)) {
- this.write(",")
- }
- this.writeln()
- }
- this.level--
- this.indent()
- this.write("}")
- }
-
- private renderMap(t: MapType) {
- this.writeln("{")
- this.level++
- this.indent()
- this.writeType(t.key)
- this.write(": ")
- this.writeType(t.value)
- this.writeln()
- this.write("}")
- }
-
- private renderList(t: ListType) {
- this.write("[")
- this.writeType(t.elem)
- this.write("]")
- }
-
- private renderBuiltin(t: BuiltinType) {
- switch (t) {
- case BuiltinType.Any: return ""
- case BuiltinType.Bool: return "true"
- case BuiltinType.Int8: return "1"
- case BuiltinType.Int16: return "1"
- case BuiltinType.Int32: return "1"
- case BuiltinType.Int64: return "1"
- case BuiltinType.Uint8: return "1"
- case BuiltinType.Uint16: return "1"
- case BuiltinType.Uint32: return "1"
- case BuiltinType.Uint64: return "1"
- case BuiltinType.Float32: return "2.3"
- case BuiltinType.Float64: return "fl2.3"
- case BuiltinType.String: return "\"some string\""
- case BuiltinType.Bytes: return "\"base64-encoded-bytes\""
- case BuiltinType.Time: return "\"2009-11-10T23:00:00Z\""
- case BuiltinType.UUID: return "\"7d42f515-3517-4e76-be13-30880443546f\""
- case BuiltinType.JSON: return "{\"some json data\": true}"
- default: return ""
- }
- }
-
- private indent() {
- this.write(" ".repeat(this.level*4))
- }
-
- private write(...strs: string[]) {
- for (const s of strs) {
- this.buf.push(s)
- }
- }
-
- private writeln(...strs: string[]) {
- this.write(...strs)
- this.write("\n")
- }
-}
-
-const APICallButton: FC<{send: () => void; copyCurl: () => void;}> = (props) => {
- return (
-
- props.send()}>
- Call API
-
-
-
- {({ open }) => (
- <>
-
- Open options
- {icons.chevronDown("h-5 w-5")}
-
-
-
-
-
-
- {({ active }) => (
- props.copyCurl()}
- >
- Copy as curl
-
- )}
-
-
-
-
- </>
- )}
-
-
-
- )
-}
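
copyCurl above wraps the request body in single quotes for the shell, so any embedded single quote needs the standard '\'' escape; note the doubled backslash required inside the TypeScript string literal (corrected above). A standalone sketch:

```tsx
// Close the quoted string, emit a literal quote, reopen: ' becomes '\''
function shellSingleQuote(s: string): string {
  return "'" + s.replaceAll("'", "'\\''") + "'";
}

// shellSingleQuote(`{"msg": "it's fine"}`) returns
//   '{"msg": "it'\''s fine"}'
// which the shell reassembles into the original body.
```
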
diff --git a/cli/daemon/dash/dashapp/src/components/api/SchemaView.tsx b/cli/daemon/dash/dashapp/src/components/api/SchemaView.tsx
deleted file mode 100644
index 7cf619b6dd..0000000000
--- a/cli/daemon/dash/dashapp/src/components/api/SchemaView.tsx
+++ /dev/null
@@ -1,403 +0,0 @@
-import { Type, StructType, MapType, ListType, BuiltinType, Decl, NamedType, Field } from "./schema";
-import React from "react";
-import { APIMeta } from "./api";
-
-export type Dialect = "go" | "typescript" | "json" | "table";
-
-interface Props {
- meta: APIMeta;
- decl: Decl;
- dialect: Dialect;
-}
-
-export default class extends React.Component<Props> {
- render() {
- const d = dialects[this.props.dialect](this.props.meta)
- return d.render(this.props.decl)
- }
-}
-
-abstract class DialectIface {
- meta: APIMeta;
- constructor(meta: APIMeta) {
- this.meta = meta
- }
-
- abstract render(d: Decl): JSX.Element;
-}
-
-class GoDialect extends DialectIface {
- seenDecls: Set<number>;
- constructor(meta: APIMeta) {
- super(meta)
- this.seenDecls = new Set()
- }
-
- render(d: Decl) {
- this.seenDecls.add(d.id)
- const res = <>type {d.name} {this.renderType(d.type, 0)}</>
- this.seenDecls.delete(d.id)
- return res
- }
-
- renderType(t: Type, level: number) {
- return {(
- t.struct ? this.renderStruct(t.struct, level) :
- t.map ? this.renderMap(t.map, level) :
- t.list ? this.renderList(t.list, level) :
- t.builtin ? this.renderBuiltin(t.builtin, level) :
- t.named ? this.renderNamed(t.named, level)
- : ""
- )}
- }
-
- renderNamed(t: NamedType, level: number) {
- const decl = this.meta.decls[t.id]
- if (this.seenDecls.has(t.id)) {
- return <>{`*${decl.loc.pkg_name}.${decl.name}`}</>
- }
-
- // Mark this decl as seen for the duration of this call
- // to avoid infinite recursion.
- this.seenDecls.add(t.id)
- const res = this.renderType(decl.type, level)
- this.seenDecls.delete(t.id)
- return res
- }
-
- renderStruct(t: StructType, level: number) {
- return <>
- {"struct {"}
-
- {t.fields.map(f =>
-
- {f.name} {this.renderType(f.typ, level+1)}
- {this.renderTag(f)}
-
- )}
-
- {"}"}
- </>
- }
-
- renderMap(t: MapType, level: number) {
- return <>
- {"map["}
- {this.renderType(t.key, level)}
- {"]"}
- {this.renderType(t.value, level)}
- </>
- }
-
- renderList(t: ListType, level: number) {
- return <>
- {"[]"}
- {this.renderType(t.elem, level)}
- </>
- }
-
- renderBuiltin(t: BuiltinType, level: number) {
- switch (t) {
- case BuiltinType.Any: return "interface{}"
- case BuiltinType.Bool: return "bool"
- case BuiltinType.Int8: return "int8"
- case BuiltinType.Int16: return "int16"
- case BuiltinType.Int32: return "int32"
- case BuiltinType.Int64: return "int64"
- case BuiltinType.Uint8: return "uint8"
- case BuiltinType.Uint16: return "uint16"
- case BuiltinType.Uint32: return "uint32"
- case BuiltinType.Uint64: return "uint64"
- case BuiltinType.Float32: return "float32"
- case BuiltinType.Float64: return "float64"
- case BuiltinType.String: return "string"
- case BuiltinType.Bytes: return "[]byte"
- case BuiltinType.Time: return "time.Time"
- case BuiltinType.UUID: return "uuid.UUID"
- case BuiltinType.USER_ID: return "auth.UID"
- case BuiltinType.JSON: return "json.RawMessage"
- default: return "unknown"
- }
- }
-
- renderTag(f: Field): string | null {
- let parts = []
- if (f.optional) {
- parts.push(`encore:"optional"`)
- }
- if (f.json_name !== "") {
- parts.push(`json:"${f.json_name}"`)
- }
- if (parts.length === 0) {
- return null
- }
- return " `" + parts.join(" ") + "`"
- }
-}
-
-class TypescriptDialect extends DialectIface {
- seenDecls: Set<number>;
- constructor(meta: APIMeta) {
- super(meta)
- this.seenDecls = new Set()
- }
-
- render(d: Decl) {
- return this.renderType(d.type, 0)
- }
-
- renderType(t: Type, level: number) {
- return {(
- t.struct ? this.renderStruct(t.struct, level) :
- t.map ? this.renderMap(t.map, level) :
- t.list ? this.renderList(t.list, level) :
- t.builtin ? this.renderBuiltin(t.builtin, level) :
- t.named ? this.renderNamed(t.named, level)
- : ""
- )}
- }
-
- renderNamed(t: NamedType, level: number) {
- if (this.seenDecls.has(t.id)) {
- return <>null</>
- }
- const decl = this.meta.decls[t.id]
-
- // Mark this decl as seen for the duration of this call
- // to avoid infinite recursion.
- this.seenDecls.add(t.id)
- const res = this.renderType(decl.type, level)
- this.seenDecls.delete(t.id)
- return res
- }
-
- renderStruct(t: StructType, level: number) {
- return <>
- {"{"}
-
- {t.fields.map(f =>
-
{f.json_name !== "" ? f.json_name : f.name}: {this.renderType(f.typ, level+1)};
- )}
-
- {"}"}
- </>
- }
-
- renderMap(t: MapType, level: number) {
- return <>
- {"{ [key: "}
- {this.renderType(t.key, level)}
- {"]: "}
- {this.renderType(t.value, level)}
- {"}"}
- </>
- }
-
- renderList(t: ListType, level: number) {
- return <>
- {this.renderType(t.elem, level)}
- {"[]"}
- </>
- }
-
- renderBuiltin(t: BuiltinType, level: number) {
- switch (t) {
- case BuiltinType.Any: return "any"
- case BuiltinType.Bool: return "boolean"
- case BuiltinType.Int8: return "int8"
- case BuiltinType.Int16: return "int16"
- case BuiltinType.Int32: return "int32"
- case BuiltinType.Int64: return "int64"
- case BuiltinType.Uint8: return "uint8"
- case BuiltinType.Uint16: return "uint16"
- case BuiltinType.Uint32: return "uint32"
- case BuiltinType.Uint64: return "uint64"
- case BuiltinType.Float32: return "float32"
- case BuiltinType.Float64: return "float64"
- case BuiltinType.String: return "string"
- case BuiltinType.Bytes: return "[]byte"
- case BuiltinType.Time: return "Time"
- case BuiltinType.UUID: return "UUID"
- case BuiltinType.JSON: return "any"
- case BuiltinType.USER_ID: return "UserID"
- default: return "unknown"
- }
- }
-}
-
-class JSONDialect extends DialectIface {
- seenDecls: Set<number>;
- constructor(meta: APIMeta) {
- super(meta)
- this.seenDecls = new Set()
- }
-
- render(d: Decl) {
- return this.renderType(d.type, 0)
- }
-
- renderType(t: Type, level: number) {
- return {(
- t.struct ? this.renderStruct(t.struct, level) :
- t.map ? this.renderMap(t.map, level) :
- t.list ? this.renderList(t.list, level) :
- t.builtin ? this.renderBuiltin(t.builtin, level) :
- t.named ? this.renderNamed(t.named, level)
- : ""
- )}
- }
-
- renderNamed(t: NamedType, level: number) {
- if (this.seenDecls.has(t.id)) {
- return <>null</>
- }
- const decl = this.meta.decls[t.id]
-
- // Mark this decl as seen for the duration of this call
- // to avoid infinite recursion.
- this.seenDecls.add(t.id)
- const res = this.renderType(decl.type, level)
- this.seenDecls.delete(t.id)
- return res
- }
-
- renderStruct(t: StructType, level: number) {
- return <>
- {"{"}
-
- {t.fields.map((f, i) =>
-
- "{f.json_name !== "" ? f.json_name : f.name}": {this.renderType(f.typ, level+1)}
- {
- /* Render trailing comma if it's not the last key */
- (i < (t.fields.length-1)) ? "," : ""
- }
-
- )}
-
- {"}"}
- </>
- }
-
- renderMap(t: MapType, level: number) {
- return <>
- {"{"}
- {this.renderType(t.key, level)}
- {": "}
- {this.renderType(t.value, level)}
- {"}"}
- </>
- }
-
- renderList(t: ListType, level: number) {
- return <>
- {"["}
- {this.renderType(t.elem, level)}
- {"]"}
- </>
- }
-
- renderBuiltin(t: BuiltinType, level: number) {
- switch (t) {
- case BuiltinType.Any: return ""
- case BuiltinType.Bool: return "true"
- case BuiltinType.Int8: return "1"
- case BuiltinType.Int16: return "1"
- case BuiltinType.Int32: return "1"
- case BuiltinType.Int64: return "1"
- case BuiltinType.Uint8: return "1"
- case BuiltinType.Uint16: return "1"
- case BuiltinType.Uint32: return "1"
- case BuiltinType.Uint64: return "1"
- case BuiltinType.Float32: return "2.3"
- case BuiltinType.Float64: return "fl2.3"
- case BuiltinType.String: return "\"some-string\""
- case BuiltinType.Bytes: return "\"base64-encoded-bytes\""
- case BuiltinType.Time: return "\"2009-11-10T23:00:00Z\""
- case BuiltinType.UUID: return "\"7d42f515-3517-4e76-be13-30880443546f\""
- case BuiltinType.JSON: return "{\"some-json-data\": true}"
- case BuiltinType.USER_ID: return "\"some-user-id\""
- default: return ""
- }
- }
-}
-
-class TableDialect extends DialectIface {
- render(d: Decl) {
- const st = d.type.struct
- if (!st) {
- throw new Error("TableDialect can only render named structs")
- }
- return this.renderStruct(st, 0)
- }
-
- renderStruct(t: StructType, level: number): JSX.Element {
- return (
-
- {t.fields.map((f, i) =>
-
0 ? "border-t border-gray-200" : ""}>
-
-
- {f.name}
-
-
{this.describeType(f.typ)}
-
- {f.doc !== "" ? (
-
{f.doc}
- ) : (
-
No description.
- )}
-
- )}
-
- )
- }
-
- describeType(t: Type): string {
- return (
- t.struct ? "struct" :
- t.map ? "map" :
- t.list ? "list of " + this.describeType(t.list.elem) :
- t.builtin ? this.describeBuiltin(t.builtin) :
- t.named ? this.describeNamed(t.named)
- : ""
- )
- }
-
-
- describeBuiltin(t: BuiltinType): string {
- switch (t) {
- case BuiltinType.Any: return ""
- case BuiltinType.Bool: return "boolean"
- case BuiltinType.Int8: return "int"
- case BuiltinType.Int16: return "int"
- case BuiltinType.Int32: return "int"
- case BuiltinType.Int64: return "int"
- case BuiltinType.Uint8: return "uint"
- case BuiltinType.Uint16: return "uint"
- case BuiltinType.Uint32: return "uint"
- case BuiltinType.Uint64: return "uint"
- case BuiltinType.Float32: return "float"
- case BuiltinType.Float64: return "float"
- case BuiltinType.String: return "string"
- case BuiltinType.Bytes: return "bytes"
- case BuiltinType.Time: return "RFC 3339-formatted timestamp"
- case BuiltinType.UUID: return "UUID"
- case BuiltinType.JSON: return "unspecified JSON"
- case BuiltinType.USER_ID: return "User ID"
- default: return ""
- }
- }
-
- describeNamed(named: NamedType): string {
- const decl = this.meta.decls[named.id]
- return decl.loc.pkg_name + "." + decl.name
- }
-}
-
-const dialects: { [key in Dialect]: (meta: APIMeta) => DialectIface} = {
- "go": (meta) => new GoDialect(meta),
- "typescript": (meta) => new TypescriptDialect(meta),
- "json": (meta) => new JSONDialect(meta),
- "table": (meta) => new TableDialect(meta),
-}
\ No newline at end of file
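
Every dialect in SchemaView (like JSONRenderer in RPCCaller above) breaks cycles in named types by tracking declaration ids in a Set for the duration of one descent. That guard, extracted as a sketch:

```tsx
type RenderFn = (id: number) => string;

// Mark the decl id while descending; on re-entry render a placeholder
// instead of recursing forever; unmark so sibling branches still expand.
function withRecursionGuard(seen: Set<number>, id: number, render: RenderFn): string {
  if (seen.has(id)) return "null";
  seen.add(id);
  try {
    return render(id);
  } finally {
    seen.delete(id);
  }
}
```
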
diff --git a/cli/daemon/dash/dashapp/src/components/api/api.ts b/cli/daemon/dash/dashapp/src/components/api/api.ts
deleted file mode 100644
index 12767ffddd..0000000000
--- a/cli/daemon/dash/dashapp/src/components/api/api.ts
+++ /dev/null
@@ -1,56 +0,0 @@
-import { Decl, Loc } from "./schema";
-
-export interface APIMeta {
- module_path: string;
- pkgs: Package[];
- svcs: Service[];
- version: string;
- decls: Decl[];
- auth_handler?: AuthHandler;
-}
-
-export interface Service {
- name: string;
- rel_path: string;
- rpcs: RPC[];
- migrations: DBMigration[];
-}
-
-export interface Package {
- rel_path: string;
- name: string;
- doc: string;
- svc: string; // can be empty
- secrets: string[];
- rpc_calls: QualifiedName[];
-}
-
-export interface RPC {
- name: string;
- doc: string;
- access_type: "PRIVATE" | "PUBLIC" | "AUTH";
- rpc_calls: QualifiedName[];
- request_schema?: Decl;
- response_schema?: Decl;
- proto: "REGULAR" | "RAW";
- loc: Loc;
-}
-
-export interface QualifiedName {
- pkg: string;
- name: string;
-}
-
-export interface DBMigration {
- filename: string;
- number: number;
- description: string;
- up: boolean;
-}
-
-export interface AuthHandler {
- name: string;
- doc: string;
- user_data?: Decl;
- loc: Loc;
-}
\ No newline at end of file
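
A small consumer sketch of these metadata types, flattening services into the "service.rpc" names that RPCCaller above and AppCaller below use to address endpoints (import path assumed from this diff's aliases):

```tsx
import { APIMeta } from "~c/api/api";

function listEndpoints(md: APIMeta): string[] {
  const names: string[] = [];
  for (const svc of md.svcs) {
    for (const rpc of svc.rpcs) {
      names.push(`${svc.name}.${rpc.name}`); // e.g. "blog.GetPost"
    }
  }
  return names.sort((a, b) => a.localeCompare(b));
}
```
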
diff --git a/cli/daemon/dash/dashapp/src/components/api/cm/CM.tsx b/cli/daemon/dash/dashapp/src/components/api/cm/CM.tsx
deleted file mode 100644
index a478a910e4..0000000000
--- a/cli/daemon/dash/dashapp/src/components/api/cm/CM.tsx
+++ /dev/null
@@ -1,84 +0,0 @@
-import React from 'react'
-
-import CodeMirror, { EditorConfiguration } from 'codemirror';
-import './codemirror.css';
-import './codemirror-show-hint.css';
-import './codemirror-encore.css';
-import './codemirror-idea.css';
-import 'codemirror/mode/go/go.js';
-import 'codemirror/mode/sql/sql.js';
-import 'codemirror/mode/javascript/javascript.js';
-import 'codemirror/addon/edit/closebrackets.js';
-import 'codemirror/addon/edit/matchbrackets.js';
-import 'codemirror/addon/selection/active-line.js';
-
-import 'codemirror/addon/hint/show-hint.js';
-
-export interface TextEdit {
- newText: string;
- range: {
- start: {line: number, character: number};
- end: {line: number, character: number};
- };
-}
-
-export const DefaultCfg: EditorConfiguration = {
- theme: "encore",
- mode: "go",
- lineNumbers: true,
- lineWrapping: false,
- indentWithTabs: true,
- indentUnit: 4,
- tabSize: 4,
- autoCloseBrackets: true,
- matchBrackets: true,
- styleActiveLine: false,
- gutters: ['CodeMirror-linenumbers'],
-}
-
-interface Props {
- cfg?: EditorConfiguration
- className?: string;
- onFocus?: () => void;
-}
-
-export default class CM extends React.Component<Props> {
- container: React.RefObject<HTMLDivElement>
- target: React.RefObject<HTMLDivElement>
- cm?: CodeMirror.Editor;
-
- constructor(props: Props) {
- super(props)
- this.container = React.createRef()
- this.target = React.createRef()
- }
-
- componentDidMount() {
- this.cm = CodeMirror(this.target.current!, this.props.cfg ?? DefaultCfg)
- this.cm.on("focus", () => this.props.onFocus?.())
- }
-
- shouldComponentUpdate(): boolean {
- return false
- }
-
- open(doc: CodeMirror.Doc) {
- this.cm!.swapDoc(doc)
- }
-
- render() {
- return (
-
- )
- }
-}
\ No newline at end of file
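
CM.open(doc) is a thin wrapper over CodeMirror's swapDoc, which is what lets RPCCaller keep one Doc per endpoint while reusing a single editor instance. The pattern in isolation:

```tsx
import CodeMirror from "codemirror";

const docs = new Map<string, CodeMirror.Doc>();

// Lazily create one Doc per key, then swap it in; swapDoc preserves
// each Doc's undo history and cursor position across switches.
function openDoc(cm: CodeMirror.Editor, key: string, initial: string): void {
  let doc = docs.get(key);
  if (doc === undefined) {
    doc = new CodeMirror.Doc(initial, { name: "javascript", json: true });
    docs.set(key, doc);
  }
  cm.swapDoc(doc);
}
```
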
diff --git a/cli/daemon/dash/dashapp/src/components/api/cm/codemirror-encore.css b/cli/daemon/dash/dashapp/src/components/api/cm/codemirror-encore.css
deleted file mode 100644
index 8fe54da470..0000000000
--- a/cli/daemon/dash/dashapp/src/components/api/cm/codemirror-encore.css
+++ /dev/null
@@ -1,150 +0,0 @@
-.cm-s-encore {
- --color-bg: #2d3748;
- --color-green: #b5f4a5;
- --color-yellow: #ffe484;
- --color-purple: #d9a9ff;
- --color-red: #ff8383;
- --color-blue: #93ddfd;
- --color-white: #fff;
- --color-selection: #5F87B5;
-
- color: var(--color-white);
- border-radius: 0.25rem;
-}
-
-.cm-s-encore.CodeMirror {
- background-color: var(--color-bg);
-}
-
-.cm-s-encore .CodeMirror-gutters {
- background-color: var(--color-bg);
- border-right: 0;
-}
-
-.cm-s-encore .CodeMirror-linenumber {
- color: #718096;
- padding: 0 5px;
-}
-.cm-s-encore .CodeMirror-guttermarker-subtle { color: #586e75; }
-.cm-s-encore .CodeMirror-guttermarker { color: #ddd; }
-
-.cm-s-encore .CodeMirror-activeline-background {
- background: rgba(255, 255, 255, 0.06);
-}
-
-.cm-s-encore .CodeMirror-cursor {
- border-left: 1px solid #e2e8f0;
- color: #718096;
-}
-
-.cm-s-encore div.CodeMirror-selected { background: var(--color-selection); }
-.cm-s-encore .CodeMirror-line::selection, .cm-s-encore .CodeMirror-line > span::selection, .cm-s-encore .CodeMirror-line > span > span::selection { background: var(--color-selection); }
-.cm-s-encore .CodeMirror-line::-moz-selection, .cm-s-encore .CodeMirror-line > span::-moz-selection, .cm-s-encore .CodeMirror-line > span > span::-moz-selection { background: var(--color-selection); }
-
-.cm-s-encore .cm-header { color: #586e75; }
-.cm-s-encore .cm-quote { color: #93a1a1; }
-.cm-s-encore .cm-keyword { color: var(--color-blue); }
-.cm-s-encore .cm-atom { color: var(--color-purple); }
-.cm-s-encore .cm-number { color: var(--color-yellow); }
-.cm-s-encore .cm-def { color: var(--color-white); }
-.cm-s-encore .cm-variable { color: var(--color-white); }
-.cm-s-encore .cm-variable-2 { color: #b58900; }
-.cm-s-encore .cm-variable-3, .cm-s-encore .cm-type { color: #6c71c4; }
-.cm-s-encore .cm-property { color: var(--color-purple); }
-.cm-s-encore .cm-operator { color: var(--color-blue); }
-.cm-s-encore .cm-comment { color: var(--color-green); }
-.cm-s-encore .cm-string { color: var(--color-green); }
-.cm-s-encore .cm-string-2 { color: #b58900; }
-.cm-s-encore .cm-meta { color: #859900; }
-.cm-s-encore .cm-qualifier { color: #b58900; }
-.cm-s-encore .cm-builtin { color: #d33682; }
-.cm-s-encore .cm-bracket { color: #cb4b16; }
-.cm-s-encore .CodeMirror-matchingbracket { background: rgba(255, 255, 255, 0.1); color: var(--color-red); }
-.cm-s-encore .CodeMirror-nonmatchingbracket { }
-.cm-s-encore .cm-tag { color: #93a1a1; }
-.cm-s-encore .cm-attribute { color: #2aa198; }
-.cm-s-encore .cm-hr {
- color: transparent;
- border-top: 1px solid #586e75;
- display: block;
-}
-.cm-s-encore .cm-link { color: #93a1a1; cursor: pointer; }
-.cm-s-encore .cm-special { color: #6c71c4; }
-.cm-s-encore .cm-em {
- color: #999;
- text-decoration: underline;
- text-decoration-style: dotted;
-}
-.cm-s-encore .cm-error,
-.cm-s-encore .cm-invalidchar {
- background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAQAAAACCAYAAAB/qH1jAAAABmJLR0QA/wD/AP+gvaeTAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH3QUXCToH00Y1UgAAACFJREFUCNdjPMDBUc/AwNDAAAFMTAwMDA0OP34wQgX/AQBYgwYEx4f9lQAAAABJRU5ErkJggg==);
- background-position: bottom left;
- background-repeat: repeat-x;
-}
-
-.cm-s-encore .CodeMirror-gutter.webedit {
- width: 10px;
- height: 24px;
-}
-
-.cm-s-encore .webedit-guttermarker {
- width: 10px;
- height: 10px;
- margin-top: 7px;
- border-radius: 50%;
- background-color: #900;
-}
-
-
-/* TODO Customize */
-.cm-s-solarized .cm-header { color: #586e75; }
-.cm-s-solarized .cm-quote { color: #93a1a1; }
-.cm-s-solarized .cm-keyword { color: #cb4b16; }
-.cm-s-solarized .cm-atom { color: #d33682; }
-.cm-s-solarized .cm-number { color: #d33682; }
-.cm-s-solarized .cm-def { color: #2aa198; }
-.cm-s-solarized .cm-variable { color: #839496; }
-.cm-s-solarized .cm-variable-2 { color: #b58900; }
-.cm-s-solarized .cm-variable-3, .cm-s-solarized .cm-type { color: #6c71c4; }
-.cm-s-solarized .cm-property { color: #2aa198; }
-.cm-s-solarized .cm-operator { color: #6c71c4; }
-.cm-s-solarized .cm-comment { color: #586e75; font-style:italic; }
-.cm-s-solarized .cm-string { color: #859900; }
-.cm-s-solarized .cm-string-2 { color: #b58900; }
-.cm-s-solarized .cm-meta { color: #859900; }
-.cm-s-solarized .cm-qualifier { color: #b58900; }
-.cm-s-solarized .cm-builtin { color: #d33682; }
-.cm-s-solarized .cm-bracket { color: #cb4b16; }
-.cm-s-solarized .CodeMirror-matchingbracket { color: #859900; }
-.cm-s-solarized .CodeMirror-nonmatchingbracket { color: #dc322f; }
-.cm-s-solarized .cm-tag { color: #93a1a1; }
-.cm-s-solarized .cm-attribute { color: #2aa198; }
-.cm-s-solarized .cm-hr {
- color: transparent;
- border-top: 1px solid #586e75;
- display: block;
-}
-.cm-s-solarized .cm-link { color: #93a1a1; cursor: pointer; }
-.cm-s-solarized .cm-special { color: #6c71c4; }
-.cm-s-solarized .cm-em {
- color: #999;
- text-decoration: underline;
- text-decoration-style: dotted;
-}
-.cm-s-solarized .cm-error,
-.cm-s-solarized .cm-invalidchar {
- color: #586e75;
- border-bottom: 1px dotted #dc322f;
-}
-
-/* Gutter colors and line number styling based on color scheme (dark / light) */
-
-/* Dark */
-.cm-s-solarized.cm-s-dark .CodeMirror-gutters {
- background-color: #073642;
-}
-
-.cm-s-solarized.cm-s-dark .CodeMirror-linenumber {
- color: #586e75;
- text-shadow: #021014 0 -1px;
-}
\ No newline at end of file
diff --git a/cli/daemon/dash/dashapp/src/components/api/cm/codemirror-idea.css b/cli/daemon/dash/dashapp/src/components/api/cm/codemirror-idea.css
deleted file mode 100644
index e0a6cef83a..0000000000
--- a/cli/daemon/dash/dashapp/src/components/api/cm/codemirror-idea.css
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- Name: IDEA default theme
- From IntelliJ IDEA by JetBrains
- */
-
-.cm-s-idea span.cm-meta { color: #808000; }
-.cm-s-idea span.cm-number { color: #0000FF; }
-.cm-s-idea span.cm-keyword { line-height: 1em; font-weight: bold; color: #000080; }
-.cm-s-idea span.cm-atom { font-weight: bold; color: #000080; }
-.cm-s-idea span.cm-def { color: #000000; }
-.cm-s-idea span.cm-variable { color: black; }
-.cm-s-idea span.cm-variable-2 { color: black; }
-.cm-s-idea span.cm-variable-3, .cm-s-idea span.cm-type { color: black; }
-.cm-s-idea span.cm-property { color: black; }
-.cm-s-idea span.cm-operator { color: black; }
-.cm-s-idea span.cm-comment { color: #808080; }
-.cm-s-idea span.cm-string { color: #008000; }
-.cm-s-idea span.cm-string-2 { color: #008000; }
-.cm-s-idea span.cm-qualifier { color: #555; }
-.cm-s-idea span.cm-error { color: #FF0000; }
-.cm-s-idea span.cm-attribute { color: #0000FF; }
-.cm-s-idea span.cm-tag { color: #000080; }
-.cm-s-idea span.cm-link { color: #0000FF; }
-.cm-s-idea .CodeMirror-activeline-background { background: #FFFAE3; }
-
-.cm-s-idea span.cm-builtin { color: #30a; }
-.cm-s-idea span.cm-bracket { color: #cc7; }
-.cm-s-idea {
- border-radius: 0.25rem;
-}
-
-
-.cm-s-idea .CodeMirror-matchingbracket { outline:1px solid grey; color:black !important; }
-
-.CodeMirror-hints.idea {
- font-family: Menlo, Monaco, Consolas, 'Courier New', monospace;
- color: #616569;
- background-color: #ebf3fd !important;
-}
-
-.CodeMirror-hints.idea .CodeMirror-hint-active {
- background-color: #a2b8c9 !important;
- color: #5c6065 !important;
-}
\ No newline at end of file
diff --git a/cli/daemon/dash/dashapp/src/components/api/cm/codemirror-show-hint.css b/cli/daemon/dash/dashapp/src/components/api/cm/codemirror-show-hint.css
deleted file mode 100644
index 89e88aad9a..0000000000
--- a/cli/daemon/dash/dashapp/src/components/api/cm/codemirror-show-hint.css
+++ /dev/null
@@ -1,36 +0,0 @@
-.CodeMirror-hints {
- position: absolute;
- z-index: 10;
- overflow: hidden;
- list-style: none;
-
- margin: 0;
- padding: 2px;
-
- -webkit-box-shadow: 2px 3px 5px rgba(0,0,0,.2);
- -moz-box-shadow: 2px 3px 5px rgba(0,0,0,.2);
- box-shadow: 2px 3px 5px rgba(0,0,0,.2);
- border-radius: 3px;
- border: 1px solid silver;
-
- background: white;
- font-size: 90%;
- font-family: monospace;
-
- max-height: 20em;
- overflow-y: auto;
-}
-
-.CodeMirror-hint {
- margin: 0;
- padding: 0 4px;
- border-radius: 2px;
- white-space: pre;
- color: black;
- cursor: pointer;
-}
-
-li.CodeMirror-hint-active {
- background: #08f;
- color: white;
-}
\ No newline at end of file
diff --git a/cli/daemon/dash/dashapp/src/components/api/cm/codemirror-solarized.css b/cli/daemon/dash/dashapp/src/components/api/cm/codemirror-solarized.css
deleted file mode 100644
index 902374a416..0000000000
--- a/cli/daemon/dash/dashapp/src/components/api/cm/codemirror-solarized.css
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
-Solarized theme for code-mirror
-http://ethanschoonover.com/solarized
-*/
-
-/*
-Solarized color palette
-http://ethanschoonover.com/solarized/img/solarized-palette.png
-*/
-
-.solarized.base03 { color: #002b36; }
-.solarized.base02 { color: #073642; }
-.solarized.base01 { color: #586e75; }
-.solarized.base00 { color: #657b83; }
-.solarized.base0 { color: #839496; }
-.solarized.base1 { color: #93a1a1; }
-.solarized.base2 { color: #eee8d5; }
-.solarized.base3 { color: #fdf6e3; }
-.solarized.solar-yellow { color: #b58900; }
-.solarized.solar-orange { color: #cb4b16; }
-.solarized.solar-red { color: #dc322f; }
-.solarized.solar-magenta { color: #d33682; }
-.solarized.solar-violet { color: #6c71c4; }
-.solarized.solar-blue { color: #268bd2; }
-.solarized.solar-cyan { color: #2aa198; }
-.solarized.solar-green { color: #859900; }
-
-/* Color scheme for code-mirror */
-
-.cm-s-solarized {
- line-height: 1.45em;
- color-profile: sRGB;
- rendering-intent: auto;
-}
-.cm-s-solarized.cm-s-dark {
- color: #839496;
- background-color: #002b36;
- text-shadow: #002b36 0 1px;
-}
-.cm-s-solarized.cm-s-light {
- background-color: #fdf6e3;
- color: #657b83;
- text-shadow: #eee8d5 0 1px;
-}
-
-.cm-s-solarized .CodeMirror-widget {
- text-shadow: none;
-}
-
-.cm-s-solarized .cm-header { color: #586e75; }
-.cm-s-solarized .cm-quote { color: #93a1a1; }
-
-.cm-s-solarized .cm-keyword { color: #cb4b16; }
-.cm-s-solarized .cm-atom { color: #d33682; }
-.cm-s-solarized .cm-number { color: #d33682; }
-.cm-s-solarized .cm-def { color: #2aa198; }
-
-.cm-s-solarized .cm-variable { color: #839496; }
-.cm-s-solarized .cm-variable-2 { color: #b58900; }
-.cm-s-solarized .cm-variable-3, .cm-s-solarized .cm-type { color: #6c71c4; }
-
-.cm-s-solarized .cm-property { color: #2aa198; }
-.cm-s-solarized .cm-operator { color: #6c71c4; }
-
-.cm-s-solarized .cm-comment { color: #586e75; font-style:italic; }
-
-.cm-s-solarized .cm-string { color: #859900; }
-.cm-s-solarized .cm-string-2 { color: #b58900; }
-
-.cm-s-solarized .cm-meta { color: #859900; }
-.cm-s-solarized .cm-qualifier { color: #b58900; }
-.cm-s-solarized .cm-builtin { color: #d33682; }
-.cm-s-solarized .cm-bracket { color: #cb4b16; }
-.cm-s-solarized .CodeMirror-matchingbracket { color: #859900; }
-.cm-s-solarized .CodeMirror-nonmatchingbracket { color: #dc322f; }
-.cm-s-solarized .cm-tag { color: #93a1a1; }
-.cm-s-solarized .cm-attribute { color: #2aa198; }
-.cm-s-solarized .cm-hr {
- color: transparent;
- border-top: 1px solid #586e75;
- display: block;
-}
-.cm-s-solarized .cm-link { color: #93a1a1; cursor: pointer; }
-.cm-s-solarized .cm-special { color: #6c71c4; }
-.cm-s-solarized .cm-em {
- color: #999;
- text-decoration: underline;
- text-decoration-style: dotted;
-}
-.cm-s-solarized .cm-error,
-.cm-s-solarized .cm-invalidchar {
- color: #586e75;
- border-bottom: 1px dotted #dc322f;
-}
-
-.cm-s-solarized.cm-s-dark div.CodeMirror-selected { background: #073642; }
-.cm-s-solarized.cm-s-dark.CodeMirror ::selection { background: rgba(7, 54, 66, 0.99); }
-.cm-s-solarized.cm-s-dark .CodeMirror-line::-moz-selection, .cm-s-dark .CodeMirror-line > span::-moz-selection, .cm-s-dark .CodeMirror-line > span > span::-moz-selection { background: rgba(7, 54, 66, 0.99); }
-
-.cm-s-solarized.cm-s-light div.CodeMirror-selected { background: #eee8d5; }
-.cm-s-solarized.cm-s-light .CodeMirror-line::selection, .cm-s-light .CodeMirror-line > span::selection, .cm-s-light .CodeMirror-line > span > span::selection { background: #eee8d5; }
-.cm-s-solarized.cm-s-light .CodeMirror-line::-moz-selection, .cm-s-light .CodeMirror-line > span::-moz-selection, .cm-s-light .CodeMirror-line > span > span::-moz-selection { background: #eee8d5; }
-
-/* Editor styling */
-
-
-
-/* Little shadow on the view-port of the buffer view */
-.cm-s-solarized.CodeMirror {
- -moz-box-shadow: inset 7px 0 12px -6px #000;
- -webkit-box-shadow: inset 7px 0 12px -6px #000;
- box-shadow: inset 7px 0 12px -6px #000;
-}
-
-/* Remove gutter border */
-.cm-s-solarized .CodeMirror-gutters {
- border-right: 0;
-}
-
-/* Gutter colors and line number styling based on color scheme (dark / light) */
-
-/* Dark */
-.cm-s-solarized.cm-s-dark .CodeMirror-gutters {
- background-color: #073642;
-}
-
-.cm-s-solarized.cm-s-dark .CodeMirror-linenumber {
- color: #586e75;
- text-shadow: #021014 0 -1px;
-}
-
-/* Light */
-.cm-s-solarized.cm-s-light .CodeMirror-gutters {
- background-color: #eee8d5;
-}
-
-.cm-s-solarized.cm-s-light .CodeMirror-linenumber {
- color: #839496;
-}
-
-/* Common */
-.cm-s-solarized .CodeMirror-linenumber {
- padding: 0 5px;
-}
-.cm-s-solarized .CodeMirror-guttermarker-subtle { color: #586e75; }
-.cm-s-solarized.cm-s-dark .CodeMirror-guttermarker { color: #ddd; }
-.cm-s-solarized.cm-s-light .CodeMirror-guttermarker { color: #cb4b16; }
-
-.cm-s-solarized .CodeMirror-gutter .CodeMirror-gutter-text {
- color: #586e75;
-}
-
-/* Cursor */
-.cm-s-solarized .CodeMirror-cursor { border-left: 1px solid #819090; }
-
-/* Fat cursor */
-.cm-s-solarized.cm-s-light.cm-fat-cursor .CodeMirror-cursor { background: #77ee77; }
-.cm-s-solarized.cm-s-light .cm-animate-fat-cursor { background-color: #77ee77; }
-.cm-s-solarized.cm-s-dark.cm-fat-cursor .CodeMirror-cursor { background: #586e75; }
-.cm-s-solarized.cm-s-dark .cm-animate-fat-cursor { background-color: #586e75; }
-
-/* Active line */
-.cm-s-solarized.cm-s-dark .CodeMirror-activeline-background {
- background: rgba(255, 255, 255, 0.06);
-}
-.cm-s-solarized.cm-s-light .CodeMirror-activeline-background {
- background: rgba(0, 0, 0, 0.06);
-}
\ No newline at end of file
diff --git a/cli/daemon/dash/dashapp/src/components/api/cm/codemirror.css b/cli/daemon/dash/dashapp/src/components/api/cm/codemirror.css
deleted file mode 100644
index 14ebd6cce2..0000000000
--- a/cli/daemon/dash/dashapp/src/components/api/cm/codemirror.css
+++ /dev/null
@@ -1,343 +0,0 @@
-/* BASICS */
-.CodeMirror {
- /* Set height, width, borders, and global font properties here */
- font-family: monospace;
- height: 300px;
- color: black;
- direction: ltr;
-}
-
-/* PADDING */
-
-.CodeMirror-lines {
- padding: 4px 0; /* Vertical padding around content */
-}
-.CodeMirror pre.CodeMirror-line,
-.CodeMirror pre.CodeMirror-line-like {
- padding: 0 4px; /* Horizontal padding of content */
-}
-
-.CodeMirror-scrollbar-filler, .CodeMirror-gutter-filler {
- background-color: white; /* The little square between H and V scrollbars */
-}
-
-/* GUTTER */
-
-.CodeMirror-gutters {
- border-right: 1px solid #ddd;
- background-color: #f7f7f7;
- white-space: nowrap;
-}
-.CodeMirror-linenumbers {}
-.CodeMirror-linenumber {
- padding: 0 3px 0 5px;
- min-width: 20px;
- text-align: right;
- color: #999;
- white-space: nowrap;
-}
-
-.CodeMirror-guttermarker { color: black; }
-.CodeMirror-guttermarker-subtle { color: #999; }
-
-/* CURSOR */
-
-.CodeMirror-cursor {
- border-left: 1px solid black;
- border-right: none;
- width: 0;
-}
-/* Shown when moving in bi-directional text */
-.CodeMirror div.CodeMirror-secondarycursor {
- border-left: 1px solid silver;
-}
-.cm-fat-cursor .CodeMirror-cursor {
- width: auto;
- border: 0 !important;
- background: #7e7;
-}
-.cm-fat-cursor div.CodeMirror-cursors {
- z-index: 1;
-}
-.cm-fat-cursor-mark {
- background-color: rgba(20, 255, 20, 0.5);
- -webkit-animation: blink 1.06s steps(1) infinite;
- -moz-animation: blink 1.06s steps(1) infinite;
- animation: blink 1.06s steps(1) infinite;
-}
-.cm-animate-fat-cursor {
- width: auto;
- border: 0;
- -webkit-animation: blink 1.06s steps(1) infinite;
- -moz-animation: blink 1.06s steps(1) infinite;
- animation: blink 1.06s steps(1) infinite;
- background-color: #7e7;
-}
-@-moz-keyframes blink {
- 0% {}
- 50% { background-color: transparent; }
- 100% {}
-}
-@-webkit-keyframes blink {
- 0% {}
- 50% { background-color: transparent; }
- 100% {}
-}
-@keyframes blink {
- 0% {}
- 50% { background-color: transparent; }
- 100% {}
-}
-
-/* Can style cursor different in overwrite (non-insert) mode */
-.CodeMirror-overwrite .CodeMirror-cursor {}
-
-.cm-tab { display: inline-block; text-decoration: inherit; }
-
-.CodeMirror-rulers {
- position: absolute;
- left: 0; right: 0; top: -50px; bottom: 0;
- overflow: hidden;
-}
-.CodeMirror-ruler {
- border-left: 1px solid #ccc;
- top: 0; bottom: 0;
- position: absolute;
-}
-
-/* DEFAULT THEME */
-
-.cm-s-default .cm-header {color: blue;}
-.cm-s-default .cm-quote {color: #090;}
-.cm-negative {color: #d44;}
-.cm-positive {color: #292;}
-.cm-header, .cm-strong {font-weight: bold;}
-.cm-em {font-style: italic;}
-.cm-link {text-decoration: underline;}
-.cm-strikethrough {text-decoration: line-through;}
-
-.cm-s-default .cm-keyword {color: #708;}
-.cm-s-default .cm-atom {color: #219;}
-.cm-s-default .cm-number {color: #164;}
-.cm-s-default .cm-def {color: #00f;}
-.cm-s-default .cm-variable,
-.cm-s-default .cm-punctuation,
-.cm-s-default .cm-property,
-.cm-s-default .cm-operator {}
-.cm-s-default .cm-variable-2 {color: #05a;}
-.cm-s-default .cm-variable-3, .cm-s-default .cm-type {color: #085;}
-.cm-s-default .cm-comment {color: #a50;}
-.cm-s-default .cm-string {color: #a11;}
-.cm-s-default .cm-string-2 {color: #f50;}
-.cm-s-default .cm-meta {color: #555;}
-.cm-s-default .cm-qualifier {color: #555;}
-.cm-s-default .cm-builtin {color: #30a;}
-.cm-s-default .cm-bracket {color: #997;}
-.cm-s-default .cm-tag {color: #170;}
-.cm-s-default .cm-attribute {color: #00c;}
-.cm-s-default .cm-hr {color: #999;}
-.cm-s-default .cm-link {color: #00c;}
-
-.cm-s-default .cm-error {color: #f00;}
-.cm-invalidchar {color: #f00;}
-
-.CodeMirror-composing { border-bottom: 2px solid; }
-
-/* Default styles for common addons */
-
-/* STOP */
-
-/* The rest of this file contains styles related to the mechanics of
- the editor. You probably shouldn't touch them. */
-
-.CodeMirror {
- position: relative;
- overflow: hidden;
- background: white;
-}
-
-.CodeMirror-scroll {
- overflow: scroll !important; /* Things will break if this is overridden */
- /* 50px is the magic margin used to hide the element's real scrollbars */
- /* See overflow: hidden in .CodeMirror */
- margin-bottom: -50px; margin-right: -50px;
- padding-bottom: 50px;
- height: 100%;
- outline: none; /* Prevent dragging from highlighting the element */
- position: relative;
-}
-.CodeMirror-sizer {
- position: relative;
- border-right: 50px solid transparent;
-}
-
-/* The fake, visible scrollbars. Used to force redraw during scrolling
- before actual scrolling happens, thus preventing shaking and
- flickering artifacts. */
-.CodeMirror-vscrollbar, .CodeMirror-hscrollbar, .CodeMirror-scrollbar-filler, .CodeMirror-gutter-filler {
- position: absolute;
- z-index: 6;
- display: none;
-}
-.CodeMirror-vscrollbar {
- right: 0; top: 0;
- overflow-x: hidden;
- overflow-y: scroll;
-}
-.CodeMirror-hscrollbar {
- bottom: 0; left: 0;
- overflow-y: hidden;
- overflow-x: scroll;
-}
-.CodeMirror-scrollbar-filler {
- right: 0; bottom: 0;
-}
-.CodeMirror-gutter-filler {
- left: 0; bottom: 0;
-}
-
-.CodeMirror-gutters {
- position: absolute; left: 0; top: 0;
- min-height: 100%;
- z-index: 3;
-}
-.CodeMirror-gutter {
- white-space: normal;
- height: 100%;
- display: inline-block;
- vertical-align: top;
- margin-bottom: -50px;
-}
-.CodeMirror-gutter-wrapper {
- position: absolute;
- z-index: 4;
- background: none !important;
- border: none !important;
-}
-.CodeMirror-gutter-background {
- position: absolute;
- top: 0; bottom: 0;
- z-index: 4;
-}
-.CodeMirror-gutter-elt {
- position: absolute;
- cursor: default;
- z-index: 4;
-}
-.CodeMirror-gutter-wrapper ::selection { background-color: transparent }
-.CodeMirror-gutter-wrapper ::-moz-selection { background-color: transparent }
-
-.CodeMirror-lines {
- cursor: text;
- min-height: 1px; /* prevents collapsing before first draw */
-}
-.CodeMirror pre.CodeMirror-line,
-.CodeMirror pre.CodeMirror-line-like {
- /* Reset some styles that the rest of the page might have set */
- -moz-border-radius: 0; -webkit-border-radius: 0; border-radius: 0;
- border-width: 0;
- background: transparent;
- font-family: inherit;
- font-size: inherit;
- margin: 0;
- white-space: pre;
- word-wrap: normal;
- line-height: inherit;
- color: inherit;
- z-index: 2;
- position: relative;
- overflow: visible;
- -webkit-tap-highlight-color: transparent;
- -webkit-font-variant-ligatures: contextual;
- font-variant-ligatures: contextual;
-}
-.CodeMirror-wrap pre.CodeMirror-line,
-.CodeMirror-wrap pre.CodeMirror-line-like {
- word-wrap: break-word;
- white-space: pre-wrap;
- word-break: normal;
-}
-
-.CodeMirror-linebackground {
- position: absolute;
- left: 0; right: 0; top: 0; bottom: 0;
- z-index: 0;
-}
-
-.CodeMirror-linewidget {
- position: relative;
- z-index: 2;
- padding: 0.1px; /* Force widget margins to stay inside of the container */
-}
-
-.CodeMirror-widget {}
-
-.CodeMirror-rtl pre { direction: rtl; }
-
-.CodeMirror-code {
- outline: none;
-}
-
-/* Force content-box sizing for the elements where we expect it */
-.CodeMirror-scroll,
-.CodeMirror-sizer,
-.CodeMirror-gutter,
-.CodeMirror-gutters,
-.CodeMirror-linenumber {
- -moz-box-sizing: content-box;
- box-sizing: content-box;
-}
-
-.CodeMirror-measure {
- position: absolute;
- width: 100%;
- height: 0;
- overflow: hidden;
- visibility: hidden;
-}
-
-.CodeMirror-cursor {
- position: absolute;
- pointer-events: none;
-}
-.CodeMirror-measure pre { position: static; }
-
-div.CodeMirror-cursors {
- visibility: hidden;
- position: relative;
- z-index: 3;
-}
-div.CodeMirror-dragcursors {
- visibility: visible;
-}
-
-.CodeMirror-focused div.CodeMirror-cursors {
- visibility: visible;
-}
-
-.CodeMirror-selected { background: #d9d9d9; }
-.CodeMirror-focused .CodeMirror-selected { background: #d7d4f0; }
-.CodeMirror-crosshair { cursor: crosshair; }
-.CodeMirror-line::selection, .CodeMirror-line > span::selection, .CodeMirror-line > span > span::selection { background: #d7d4f0; }
-.CodeMirror-line::-moz-selection, .CodeMirror-line > span::-moz-selection, .CodeMirror-line > span > span::-moz-selection { background: #d7d4f0; }
-
-.cm-searching {
- background-color: #ffa;
- background-color: rgba(255, 255, 0, .4);
-}
-
-/* Used to force a border model for a node */
-.cm-force-border { padding-right: .1px; }
-
-@media print {
- /* Hide the cursor when printing */
- .CodeMirror div.CodeMirror-cursors {
- visibility: hidden;
- }
-}
-
-/* See issue #2901 */
-.cm-tab-wrap-hack:after { content: ''; }
-
-/* Help users use markselection to safely style text background */
-span.CodeMirror-selectedtext { background: none; }
\ No newline at end of file
diff --git a/cli/daemon/dash/dashapp/src/components/api/schema.ts b/cli/daemon/dash/dashapp/src/components/api/schema.ts
deleted file mode 100644
index 623408fc57..0000000000
--- a/cli/daemon/dash/dashapp/src/components/api/schema.ts
+++ /dev/null
@@ -1,84 +0,0 @@
-export type TypeName = "struct" | "map" | "list" | "builtin" | "named";
-
-export const typeName: (typ: Type) => TypeName = (typ: Type) => (
- typ.struct ? "struct" :
- typ.map ? "map" :
- typ.list ? "list" :
- typ.named ? "named" :
- "builtin"
-)
-
-export interface Type {
- // oneof these
- struct?: StructType;
- map?: MapType;
- list?: ListType;
- builtin?: BuiltinType;
- named?: NamedType;
-}
-
-export interface StructType {
- fields: Field[];
-}
-
-export interface Field {
- typ: Type;
- name: string;
- doc: string;
- json_name: string;
- optional: boolean;
-}
-
-export interface MapType {
- key: Type;
- value: Type;
-}
-
-export interface ListType {
- elem: Type;
-}
-
-export interface NamedType {
- id: number;
-}
-
-export interface Decl {
- id: number;
- name: string;
- type: Type;
- doc: string;
- loc: Loc;
-}
-
-export interface Loc {
- pkg_path: string;
- pkg_name: string;
- filename: string;
- start_pos: number;
- end_pos: number;
- src_line_start: number;
- src_line_end: number;
- src_col_start: number;
- src_col_end: number;
-}
-
-export enum BuiltinType {
- Any = "ANY",
- Bool = "BOOL",
- Int8 = "INT8",
- Int16 = "INT16",
- Int32 = "INT32",
- Int64 = "INT64",
- Uint8 = "UINT8",
- Uint16 = "UINT16",
- Uint32 = "UINT32",
- Uint64 = "UINT64",
- Float32 = "FLOAT32",
- Float64 = "FLOAT64",
- String = "STRING",
- Bytes = "BYTES",
- Time = "TIME",
- UUID = "UUID",
- JSON = "JSON",
- USER_ID = "USER_ID",
-}
\ No newline at end of file
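
Type here is effectively a oneof: exactly one member is expected to be set, and typeName reports the first match, defaulting to "builtin". A quick check (import path assumed):

```tsx
import { BuiltinType, Type, typeName } from "~c/api/schema";

const t: Type = { list: { elem: { builtin: BuiltinType.String } } };

typeName(t);            // "list"
typeName(t.list!.elem); // "builtin"
```
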
diff --git a/cli/daemon/dash/dashapp/src/components/app/AppAPI.tsx b/cli/daemon/dash/dashapp/src/components/app/AppAPI.tsx
deleted file mode 100644
index 9e68a8fa23..0000000000
--- a/cli/daemon/dash/dashapp/src/components/app/AppAPI.tsx
+++ /dev/null
@@ -1,233 +0,0 @@
-import React, { FunctionComponent, useState } from 'react'
-import { APIMeta, RPC, Service } from '~c/api/api'
-import RPCCaller from "~c/api/RPCCaller"
-import SchemaView, { Dialect } from '~c/api/SchemaView'
-import { ProcessReload } from '~lib/client/client'
-import JSONRPCConn, { NotificationMsg } from '~lib/client/jsonrpc'
-
-interface Props {
- appID: string;
- conn: JSONRPCConn;
-}
-
-interface State {
- meta?: APIMeta;
-}
-
-export default class AppAPI extends React.Component<Props, State> {
- constructor(props: Props) {
- super(props)
- this.state = {}
- this.onNotification = this.onNotification.bind(this)
- }
-
- async componentDidMount() {
- this.props.conn.on("notification", this.onNotification)
- const status: any = await this.props.conn.request("status", {appID: this.props.appID})
- if (status.meta) {
- this.setState({meta: status.meta})
- }
- }
-
- componentWillUnmount() {
- this.props.conn.off("notification", this.onNotification)
- }
-
- onNotification(msg: NotificationMsg) {
- if (msg.method === "process/reload") {
- const data = msg.params as ProcessReload
- if (data.appID === this.props.appID) {
- this.setState({meta: data.meta})
- }
- }
- }
-
- render() {
- return (
-
- {this.state.meta ? this.renderAPI() : (
-
-
- No API schema available yet.
-
-
- )}
-
- )
- }
-
- renderAPI() {
- const meta = this.state.meta!
- const svcPkg = (svc: Service) => {
- return meta.pkgs.find(pkg => pkg.rel_path === svc.rel_path)!
- }
-
- return (
-
-
-
-
-
- {meta.svcs.map(svc => {
- const rootPkg = svcPkg(svc)
- return (
-
-
-
Service {svc.name}
- {rootPkg.doc &&
-
{rootPkg.doc}
- }
-
-
- {svc.rpcs.map(rpc =>
-
-
- func {rpc.name}
-
-
-
- {rpc.doc &&
-
{rpc.doc}
- }
-
- {rpc.proto === "RAW" ? (
-
-
- This API processes unstructured HTTP requests and therefore has no explicit schema.
-
-
- ) : (
- <>
-
-
Parameters
-
- {rpc.request_schema ?
:
-
No parameters.
- }
-
-
-
-
Response
-
- {rpc.response_schema ?
:
-
No response.
- }
-
- </>
- )}
-
- {rpc.proto !== "RAW" &&
-
- }
-
-
- )}
-
- )
- })}
-
-
- )
- }
-}
-
-interface RPCDemoProps {
- conn: JSONRPCConn;
- appID: string;
- meta: APIMeta;
- svc: Service;
- rpc: RPC;
-}
-
-const RPCDemo: FunctionComponent = (props) => {
- const [respDialect, setRespDialect] = useState("json" as Dialect)
-
- type TabType = "schema" | "call"
- const [selectedTab, setSelectedTab] = useState<TabType>("schema")
- const tabs: {title: string; type: TabType}[] = [
- {title: "Schema", type: "schema"},
- {title: "Call", type: "call"},
- ]
-
- return
-
- {tabs.map(t =>
- setSelectedTab(t.type)}>
- {t.title}
-
- )}
-
-
- {selectedTab === "schema" ? <>
- {props.rpc.request_schema &&
-
-
-
Request
-
- setRespDialect(e.target.value as Dialect)}
- className="form-select h-full py-0 border-transparent bg-transparent text-gray-300 text-xs leading-none">
- JSON
- Go
- TypeScript
-
-
-
-
-
-
-
- }
- {props.rpc.response_schema &&
-
-
-
Response
-
- setRespDialect(e.target.value as Dialect)}
- className="form-select h-full py-0 border-transparent bg-transparent text-gray-500 text-xs leading-none">
- JSON
- Go
- TypeScript
-
-
-
-
-
-
-
- }
- </> : (
-
- )}
-
-}
-
-interface SvcMenuProps {
- svcs: Service[];
-}
-
-const SvcMenu: FunctionComponent = (props) => {
- return <>
- {props.svcs.map((svc, i) =>
- 0) ? "border-t border-gray-300" : ""}>
-
-
-
- )}
- </>
-}
\ No newline at end of file
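
AppAPI pairs conn.on/conn.off around the component lifetime using the same bound handler; passing a fresh closure to off would silently fail to unsubscribe. A sketch of the same rule as a reusable helper (watchReloads is hypothetical):

```tsx
import JSONRPCConn, { NotificationMsg } from "~lib/client/jsonrpc";

// Returns a cleanup function, mirroring componentWillUnmount above.
function watchReloads(conn: JSONRPCConn, appID: string, onMeta: (meta: unknown) => void): () => void {
  const handler = (msg: NotificationMsg) => {
    if (msg.method === "process/reload") {
      const data = msg.params as { appID: string; meta: unknown }; // shape per ProcessReload
      if (data.appID === appID) onMeta(data.meta);
    }
  };
  conn.on("notification", handler); // must pass the *same* reference to off
  return () => conn.off("notification", handler);
}
```
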
diff --git a/cli/daemon/dash/dashapp/src/components/app/AppCaller.tsx b/cli/daemon/dash/dashapp/src/components/app/AppCaller.tsx
deleted file mode 100644
index 2dbd2a81c1..0000000000
--- a/cli/daemon/dash/dashapp/src/components/app/AppCaller.tsx
+++ /dev/null
@@ -1,88 +0,0 @@
-import React, { FC, useEffect, useReducer, useState } from 'react'
-import { APIMeta, RPC, Service } from '~c/api/api'
-import RPCCaller from '~c/api/RPCCaller'
-import { ProcessReload, ProcessStart } from '~lib/client/client'
-import JSONRPCConn, { NotificationMsg } from '~lib/client/jsonrpc'
-
-interface API {
- svc: Service;
- rpc: RPC;
- name: string;
-}
-
-const AppCaller: FC<{appID: string; conn: JSONRPCConn}> = ({appID, conn}) => {
- const [port, setPort] = useState(4060)
- interface State {
- md?: APIMeta;
- list?: API[];
- selected?: API;
- }
- function reducer(state: State, action: {type: "meta" | "select"; meta?: APIMeta; name?: string}): State {
- switch (action.type) {
- case "meta":
- // Recompute our API list
- const list: API[] = []
- const md = action.meta!
- md.svcs.forEach(svc => {
- svc.rpcs.forEach(rpc => {
- list.push({svc, rpc, name: `${svc.name}.${rpc.name}`})
- })
- })
- list.sort((a, b) => a.name.localeCompare(b.name))
-
- // Does the selected API still exist?
- const exists = state.selected ? list.findIndex(a => a.name === state.selected!.name) >= 0 : false
- const newSel = exists ? state.selected : list.length > 0 ? list[0] : undefined
- return {md: md, list: list, selected: newSel}
- case "select":
- const sel = state.list?.find(a => a.name === action.name)
- return {...state, selected: sel ?? state.selected}
- }
- }
-
- const [state, dispatch] = useReducer(reducer, {})
- const onNotify = (msg: NotificationMsg) => {
- if (msg.method === "process/start") {
- const data = msg.params as ProcessStart
- if (data.appID === appID) {
- setPort(data.port)
- }
- } else if (msg.method === "process/reload") {
- const data = msg.params as ProcessReload
- if (data.appID === appID) {
- dispatch({type: "meta", meta: data.meta})
- }
- }
- }
-
- useEffect(() => {
- conn.request("status", {appID}).then((resp: any) => {
- if (resp.port) { setPort(resp.port) }
- if (resp.meta) { dispatch({type: "meta", meta: resp.meta}) }
- })
-
- conn.on("notification", onNotify)
- return () => { conn.off("notification", onNotify) }
- }, [])
-
- if (!state.md || !state.selected) { return null }
-
- return (
-
-
- API Endpoint
- dispatch({type: "select", name: e.target.value})}>
- {state.list!.map(a =>
- {a.name}
- )}
-
-
-
-
-
-
- )
-}
-
-export default AppCaller
\ No newline at end of file
diff --git a/cli/daemon/dash/dashapp/src/components/app/AppLogs.tsx b/cli/daemon/dash/dashapp/src/components/app/AppLogs.tsx
deleted file mode 100644
index 42ec46fa4c..0000000000
--- a/cli/daemon/dash/dashapp/src/components/app/AppLogs.tsx
+++ /dev/null
@@ -1,141 +0,0 @@
-import React from 'react'
-import { decodeBase64 } from '~lib/base64'
-import { ProcessOutput, ProcessReload, ProcessStart, ProcessStop } from '~lib/client/client'
-import JSONRPCConn, { NotificationMsg } from '~lib/client/jsonrpc'
-import parseAnsi, { Chunk } from "~lib/parse-ansi"
-
-interface Props {
- appID: string;
- conn: JSONRPCConn;
-}
-
-interface State {
- lines: Chunk[][];
-}
-
-export default class AppLogs extends React.Component<Props, State> {
- constructor(props: Props) {
- super(props)
- this.state = {lines: [[]]}
- this.onNotification = this.onNotification.bind(this)
- }
-
- componentDidMount() {
- this.props.conn.on("notification", this.onNotification)
- }
-
- componentWillUnmount() {
- this.props.conn.off("notification", this.onNotification)
- }
-
- onNotification(msg: NotificationMsg) {
- if (msg.method === "process/start") {
- const data = msg.params as ProcessStart
- if (data.appID === this.props.appID) {
- this.setState(state => {
- return {
- lines: [...state.lines, [
- {type: "text", style: {}, value: "Running on "},
- {type: "text", style: {bold: true}, value: `http://localhost:${data.port}`},
- ], []]
- }
- })
- }
- } else if (msg.method === "process/reload") {
- const data = msg.params as ProcessReload
- if (data.appID === this.props.appID) {
- this.setState(state => {
- return {
- lines: [...state.lines, [
- {type: "text", style: {}, value: "App reloaded!"},
- ], []]
- }
- })
- }
- } else if (msg.method === "process/stop") {
- const data = msg.params as ProcessStop
- if (data.appID === this.props.appID) {
- this.setState(state => {
- return {
- lines: [...state.lines, [
- {type: "text", style: {foregroundColor: "red"}, value: "App stopped."},
- ], []]
- }
- })
- }
- } else if (msg.method === "process/output") {
- const data = msg.params as ProcessOutput
- if (data.appID === this.props.appID) {
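- // Split the decoded output into lines at newline chunks; the first
- // segment continues the current last line rather than starting a new one.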
- let chunks = parseAnsi(decodeBase64(data.output)).chunks as Chunk[]
- let newLines: Chunk[][] = [[]]
- for (const ch of chunks) {
- if (ch.type === "newline") {
- newLines.push([])
- } else if (ch.type === "text") {
- newLines[newLines.length - 1].push(ch)
- }
- }
- this.setState(state => {
- let prev = state.lines.slice(0, -1)
- let curr = state.lines[state.lines.length - 1]
- curr = curr.concat(newLines[0])
- return {
- lines: [...prev, curr, ...newLines.slice(1)],
- }
- })
- }
- }
- }
-
- render() {
- return (
-
-
-
- {this.state.lines.map((line, i) =>
-
- {line.map((ch, j) =>
- {ch.value}
- )}
- {line.length === 0 ? : null}
-
- )}
-
-
- )
- }
-}
-
-function chunkStyle(ch: Chunk): string {
- const cls = []
- if (ch.style.bold) { cls.push("font-bold") }
- if (ch.style.italic) { cls.push("italic") }
- if (ch.style.strikethrough) { cls.push("line-through") }
- if (ch.style.underline) { cls.push("underline") }
-
- const fc = ch.style.foregroundColor
- if (fc) {
- cls.push(
- // Tailwind text color classes; the exact shades here are an assumption.
- fc === "gray" ? "text-gray-500" :
- fc === "green" ? "text-green-400" :
- fc === "red" ? "text-red-400" :
- fc === "blue" ? "text-blue-400" :
- fc === "cyan" ? "text-purple-400" :
- "text-white"
- )
- }
- return cls.join(" ")
-}
\ No newline at end of file
diff --git a/cli/daemon/dash/dashapp/src/components/app/AppTraces.tsx b/cli/daemon/dash/dashapp/src/components/app/AppTraces.tsx
deleted file mode 100644
index 03c842e2e1..0000000000
--- a/cli/daemon/dash/dashapp/src/components/app/AppTraces.tsx
+++ /dev/null
@@ -1,185 +0,0 @@
-import React from 'react'
-import { Modal } from '~c/Modal'
-import { Request, Trace } from '~c/trace/model'
-import SpanDetail from '~c/trace/SpanDetail'
-import SpanList from '~c/trace/SpanList'
-import TraceMap from '~c/trace/TraceMap'
-import { latencyStr } from '~c/trace/util'
-import { decodeBase64 } from '~lib/base64'
-import JSONRPCConn, { NotificationMsg } from '~lib/client/jsonrpc'
-import { timeToDate } from '~lib/time'
-
-interface Props {
- appID: string;
- conn: JSONRPCConn;
-}
-
-interface State {
- traces: Trace[];
- selected?: Trace;
-}
-
-export default class AppTraces extends React.Component<Props, State> {
- constructor(props: Props) {
- super(props)
- this.state = {traces: []}
- this.onNotification = this.onNotification.bind(this)
- }
-
- async componentDidMount() {
- const traces = await this.props.conn.request("list-traces", {appID: this.props.appID}) as Trace[]
- this.setState({traces: traces.reverse()})
- this.props.conn.on("notification", this.onNotification)
- }
-
- componentWillUnmount() {
- this.props.conn.off("notification", this.onNotification)
- }
-
- onNotification(msg: NotificationMsg) {
- if (msg.method === "trace/new") {
- const tr = msg.params as Trace
- this.setState((st) => {
- return {traces: [tr, ...st.traces]}
- })
- }
- }
-
- render() {
- return (
-
-
this.setState({selected: undefined})} width="w-full h-full mt-4">
- {this.state.selected && this.setState({selected: undefined})} /> }
-
-
-
- {this.state.traces.length === 0 && (
-
- No traces yet. Make an API call to see it here!
-
- )}
-
- {this.state.traces.map(tr => {
- const loc = tr.locations[tr.root.def_loc]
- let endpoint = ""
- if ("rpc_def" in loc) {
- endpoint = loc.rpc_def.service_name + "." + loc.rpc_def.rpc_name
- }
- return
- this.setState({selected: tr})}>
-
-
- {endpoint}
-
-
- {tr.root.err === null ? (
-
- Success
-
- ) : (
-
- Error
-
- )}
-
-
-
-
-
-
-
-
-
- View Trace
-
-
-
-
-
-
- {tr.end_time ? latencyStr(tr.end_time - tr.start_time) : "Unknown"}
-
-
-
-
- })}
-
-
-
-
- )
- }
-}
-
-interface TraceViewProps {
- trace: Trace;
- close: () => void;
-}
-
-interface TraceViewState {
- selected: Request;
-}
-
-class TraceView extends React.Component<TraceViewProps, TraceViewState> {
- constructor(props: TraceViewProps) {
- super(props)
- this.state = {
- selected: props.trace.root
- }
- }
-
- render() {
- const tr = this.props.trace
- const dt = timeToDate(tr.date)!
-
- return (
-
-
-
this.props.close()}>
-
-
-
-
-
-
-
-
-
-
- Trace Details
-
-
-
-
- Recorded
- {dt.toFormat("ff")}
-
- {tr.auth !== null && <>
-
- User ID
- {JSON.parse(decodeBase64(tr.auth.outputs[0]))}
-
- >}
-
-
-
-
- this.setState({selected: req})} />
-
-
-
-
- this.setState({selected: req})} />
-
-
-
-
-
-
-
- )
- }
-}
\ No newline at end of file
diff --git a/cli/daemon/dash/dashapp/src/components/icons.tsx b/cli/daemon/dash/dashapp/src/components/icons.tsx
deleted file mode 100644
index a317eaa675..0000000000
--- a/cli/daemon/dash/dashapp/src/components/icons.tsx
+++ /dev/null
@@ -1,332 +0,0 @@
-import React from "react"
-export type Icon = (cls?: string, title?: string) => JSX.Element
-
-export const commit: Icon = (cls, title): JSX.Element =>
-
- {renderTitle(title)}
-
-
-
-export const lightningBolt: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const lightBulb: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const code: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const cloud: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const cloudUpload: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const document: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const documentReport: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const documentDuplicate: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const solidDocument: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const pencil: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const merge: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const calendar: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const search: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const chevronDown: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const chevronRight: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const x: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const check: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const checkCircle: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const xCircle: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const exclamation: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const exclamationCircle: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const playCircle: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const questionMarkCircle: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const dotsCircle: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const plusCircle: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const minusCircle: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const refresh: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const chip: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-
-export const clock: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const puzzle: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const filter: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const slash: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const selector: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const user: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const userAdd: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const userRemove: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const userCircle: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const logout: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const key: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const wrench: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const database: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-
-
-export const errCircle: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const photograph: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const pulse: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-
-export const githubLogo: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const azureLogo: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-export const encoreLogo: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-
-export const gcpLogo: Icon = (cls, title) =>
-
- {renderTitle(title)}
-
-
-
-
-
-
-export const loading = (cls: string, color: string, baseColor: string, borderWidth: number) => (
- <>
-
-
- >
-)
-
-const renderTitle = (title?: string) => (
- title && <title>{title}</title>
-)
\ No newline at end of file
diff --git a/cli/daemon/dash/dashapp/src/components/trace/SpanDetail.tsx b/cli/daemon/dash/dashapp/src/components/trace/SpanDetail.tsx
deleted file mode 100644
index f5b976ff4b..0000000000
--- a/cli/daemon/dash/dashapp/src/components/trace/SpanDetail.tsx
+++ /dev/null
@@ -1,430 +0,0 @@
-import { FunctionComponent, useState, useRef, useEffect } from "react"
-import { Request, Trace, Event, DBTransaction, DBQuery, TraceExpr, RPCCall, AuthCall, HTTPCall } from "./model"
-import { latencyStr, svcColor } from "./util"
-import * as icons from "~c/icons"
-import { decodeBase64, Base64EncodedBytes } from "~lib/base64"
-import React from "react"
-
-interface Props {
- trace: Trace;
- req: Request;
-}
-
-const SpanDetail: FunctionComponent<Props> = (props) => {
- const req = props.req
- const defLoc = props.trace.locations[req.def_loc]
- const callLoc = req.call_loc !== null ? props.trace.locations[req.call_loc] : null
-
- const numCalls = req.children.length
- let numQueries = 0
- for (const e of req.events) {
- if (e.type === "DBQuery" ) { numQueries++ }
- else if (e.type === "DBTransaction" ) { numQueries += e.queries.length }
- }
-
- let svcName = "unknown", rpcName = "Unknown"
- if ("rpc_def" in defLoc) {
- svcName = defLoc.rpc_def.service_name
- rpcName = defLoc.rpc_def.rpc_name
- } else if ("auth_handler_def" in defLoc) {
- svcName = defLoc.auth_handler_def.service_name
- rpcName = defLoc.auth_handler_def.name
- }
-
- return <>
-
-
{svcName}.{rpcName}
-
-
- {defLoc.filepath}:{defLoc.src_line_start}
-
- {callLoc !== null &&
- {" "}
- (Called from {callLoc.filepath}:{callLoc.src_line_start} )
-
- }
-
-
-
-
- {icons.clock("h-5 w-auto")}
- {req.end_time ? latencyStr(req.end_time - req.start_time) : "Unknown"}
- Duration
-
-
-
- {icons.logout("h-5 w-auto")}
- {numCalls}
- API Call{numCalls !== 1 ? "s" : ""}
-
-
-
- {icons.database("h-5 w-auto")}
- {numQueries}
- DB Quer{numQueries !== 1 ? "ies" : "y"}
-
-
-
-
-
-
-
-
- {req.type === "AUTH" ? (
- req.err !== null ? (
-
-
Error
-
{decodeBase64(req.err)}
-
- ) : (
- <>
-
-
User ID
- {renderData([req.outputs[0]])}
-
- {req.outputs.length > 1 &&
-
-
User Data
- {renderData([req.outputs[1]])}
-
- }
- >
- )
- ) : <>
-
-
Request
- {req.inputs.length > 0 ? renderData(req.inputs) :
No request data.
}
-
- {req.err !== null ? (
-
-
Error
-
{decodeBase64(req.err)}
-
- ) : (
-
-
Response
- {req.outputs.length > 0 ? renderData(req.outputs) :
No response data.
}
-
- )}
- >}
-
-
-
- >
-}
-
-export default SpanDetail
-
-type gdata = {goid: number, start: number, end: number | undefined, events: Event[]}
-
-const EventMap: FunctionComponent<{req: Request, trace: Trace}> = (props) => {
- const req = props.req
-
- // Compute the list of interesting goroutines
- const gmap: {[key: number]: gdata} = {
- [req.goid]: {
- goid: req.goid,
- start: req.start_time,
- end: req.end_time,
- events: [],
- }
- }
- const gnums: number[] = [req.goid]
-
- for (const ev of req.events) {
- if (ev.type === "Goroutine") {
- gmap[ev.goid] = { goid: ev.goid, start: ev.start_time, end: ev.end_time, events: [] }
- gnums.push(ev.goid)
- } else if (ev.type === "DBTransaction") {
- let g = gmap[ev.goid]
- g.events = g.events.concat(ev.queries)
- } else {
- gmap[ev.goid].events.push(ev)
- }
- }
-
- const lines = gnums.map(n => gmap[n]).filter(g => (g.events.length > 0 || g.goid === req.goid))
- return (
-
-
- {icons.chip("h-4 w-auto")}
- {lines.length}
- Goroutine{lines.length !== 1 ? "s" : ""}
-
- {lines.map((g, i) =>
-
0 ? "mt-1" : ""}>
-
-
- )}
-
- )
-}
-
-const GoroutineDetail: FunctionComponent<{g: gdata, req: Request, trace: Trace}> = (props) => {
- const req = props.req
- const reqDur = req.end_time! - req.start_time
- const start = Math.round((props.g.start - req.start_time) / reqDur * 100)
- const end = Math.round((props.g.end! - req.start_time) / reqDur * 100)
- const g = props.g
- const gdur = g.end! - g.start
- const lineHeight = 18
-
- const tooltipRef = useRef<HTMLDivElement>(null)
- const goroutineEl = useRef<HTMLDivElement>(null)
- const [hoverObj, setHoverObj] = useState<Request | Event | null>(null)
- const [barOver, setBarOver] = useState(false)
- const [tooltipOver, setTooltipOver] = useState(false)
-
- const setHover = (ev: React.MouseEvent, obj: Request | Event | null) => {
- if (obj === null) {
- setBarOver(false)
- return
- }
-
- const el = tooltipRef.current
- if (!el) {
- return
- }
-
- setBarOver(true)
- setHoverObj(obj)
- const spanEl = (ev.target as HTMLElement)
- el.style.marginTop = `calc(${spanEl.offsetTop}px - 40px)`;
- el.style.marginLeft = `calc(${spanEl.offsetLeft}px)`;
- }
-
- return <>
-
-
-
- {g.events.map((ev, i) => {
- const start = Math.round((ev.start_time - g.start) / gdur * 100)
- const end = Math.round((ev.end_time! - g.start) / gdur * 100)
- const clsid = `ev-${req.id}-${g.goid}-${i}`
-
- if (ev.type === "DBQuery") {
- const [color, highlightColor] = svcColor(ev.txid !== null ? ("tx:"+ev.txid) : ("query:"+ev.start_time))
- return
-
- setHover(e, ev)}
- onMouseLeave={(e) => setHover(e, null)}
- style={{
- borderRadius: "3px",
- top: "3px", bottom: "3px",
- left: start+"%", right: (100-end)+"%",
- minWidth: "1px" // so it at least renders if start === end
- }} />
-
- } else if (ev.type === "RPCCall") {
- const defLoc = props.trace.locations[ev.def_loc]
- let svcName = "unknown"
- if ("rpc_def" in defLoc) {
- svcName = defLoc.rpc_def.service_name
- }
- const [color, highlightColor] = svcColor(svcName)
- return
-
- setHover(e, ev)}
- onMouseLeave={(e) => setHover(e, null)}
- style={{
- borderRadius: "3px",
- top: "3px", bottom: "3px",
- left: start+"%", right: (100-end)+"%",
- minWidth: "1px" // so it at least renders if start === end
- }}
- />
-
- } else if (ev.type === "HTTPCall") {
- const [color, highlightColor] = svcColor(ev.url)
- return
-
- setHover(e, ev)}
- onMouseLeave={(e) => setHover(e, null)}
- style={{
- borderRadius: "3px",
- top: "3px", bottom: "3px",
- left: start+"%", right: (100-end)+"%",
- minWidth: "1px" // so it at least renders if start === end
- }}
- />
-
- }
- })}
-
-
-
- setTooltipOver(true)} onMouseLeave={() => setTooltipOver(false)}>
- {(barOver || tooltipOver) &&
-
- {hoverObj && "type" in hoverObj && (
- hoverObj.type === "DBQuery" ? :
- hoverObj.type === "RPCCall" ? :
- hoverObj.type === "HTTPCall" ? :
- null)}
-
- }
-
- >
-}
-
-const DBQueryTooltip: FunctionComponent<{q: DBQuery, trace: Trace}> = (props) => {
- const q = props.q
- return
-
- {icons.database("h-8 w-auto text-gray-400 mr-2")}
- DB Query
- {q.end_time ? latencyStr(q.end_time - q.start_time) : "Unknown"}
-
-
-
-
Query
- {q.html_query !== null ? (
-
- ) : (
-
- {decodeBase64(q.query)}
-
- )}
-
-
-
-
Error
- {q.err !== null ? (
-
- {decodeBase64(q.err)}
-
- ) : (
-
Completed successfully.
- )}
-
-
-
-}
-
-const RPCCallTooltip: FunctionComponent<{call: RPCCall, req: Request, trace: Trace}> = (props) => {
- const c = props.call
- const target = props.req.children.find(r => r.id === c.req_id)
- const defLoc = props.trace.locations[c.def_loc]
- let endpoint: string | null = null
- if ("rpc_def" in defLoc) {
- endpoint = `${defLoc.rpc_def.service_name}.${defLoc.rpc_def.rpc_name}`
- }
-
- return
-
- {icons.logout("h-8 w-auto text-gray-400 mr-2")}
- API Call
- {endpoint !== null ? (
- : {endpoint}
- ) : (
- Unknown Endpoint
- )}
- {c.end_time ? latencyStr(c.end_time - c.start_time) : "Unknown"}
-
-
-
-
Request
- {target !== undefined ? (
- target.inputs.length > 0 ? renderData(target.inputs) :
No request data.
- ) :
Not captured.
- }
-
-
-
-
Response
- {target !== undefined ? (
- target.outputs.length > 0 ? renderData(target.outputs) :
No response data.
- ) :
Not captured.
- }
-
-
-
-
Error
- {c.err !== null ? (
-
- {decodeBase64(c.err)}
-
- ) : (
-
Completed successfully.
- )}
-
-
-
-}
-
-const HTTPCallTooltip: FunctionComponent<{call: HTTPCall, req: Request, trace: Trace}> = ({call, req, trace}) => {
- const m = call.metrics
- return
-
- {icons.logout("h-8 w-auto text-gray-400 mr-2")}
- HTTP {call.method} {call.host}{call.path}
- {call.end_time ? latencyStr(call.end_time - call.start_time) : "Unknown"}
-
-
-
-
-
-
Response
- {call.end_time !== -1 ? (
-
HTTP {call.status_code}
- ) : (
-
No response recorded.
- )}
-
-
-
-
Error
- {call.err !== null ? (
-
- {decodeBase64(call.err)}
-
- ) : (
-
Completed successfully.
- )}
-
-
-
-
Timeline
-
- {m.conn_reused ? <>
- Reused Connection: Yes
- > : <>
- {m.dns_done && <>DNS Lookup: {latencyStr(m.dns_done - call.start_time)} >}
- {m.tls_handshake_done && <>TLS Handshake: {latencyStr(m.tls_handshake_done - (m.dns_done ?? call.start_time))} >}
- >}
- {m.wrote_request && <>Wrote Request: {latencyStr(m.wrote_request - (m.tls_handshake_done ?? m.got_conn ?? call.start_time))} >}
- {m.first_response && <>Response Start: {latencyStr(m.first_response - (m.wrote_headers ?? m.got_conn ?? call.start_time))} >}
-
-
-
-
-}
-
-const renderData = (data: Base64EncodedBytes[]) => {
- const json = JSON.parse(decodeBase64(data[0]))
- return {JSON.stringify(json, undefined, " ")}
-}
\ No newline at end of file
diff --git a/cli/daemon/dash/dashapp/src/components/trace/SpanList.tsx b/cli/daemon/dash/dashapp/src/components/trace/SpanList.tsx
deleted file mode 100644
index d2c43f3f4f..0000000000
--- a/cli/daemon/dash/dashapp/src/components/trace/SpanList.tsx
+++ /dev/null
@@ -1,104 +0,0 @@
-import { FunctionComponent, useState } from "react"
-import { Request, Trace } from "./model"
-import { svcColor } from "./util"
-import * as icons from "~c/icons"
-import React from "react"
-
-interface Props {
- trace: Trace;
- selected?: Request;
- onSelect?: (req: Request) => void;
-}
-
-const SpanList: FunctionComponent<Props> = (props) => {
- const traceDur = props.trace.end_time! - props.trace.start_time
- const [contracted, setContracted] = useState<Map<string, boolean>>(new Map())
-
- let spanCounter = 0
- const renderSpan: (req: Request, level: number, siblings: number[]) => JSX.Element = (req, level, siblings) => {
- const start = Math.round((req.start_time - props.trace.start_time) / traceDur * 100)
- const end = Math.round((req.end_time! - props.trace.start_time) / traceDur * 100)
- const defLoc = props.trace.locations[req.def_loc]
-
- let svcName = "unknown", rpcName = "Unknown"
- if ("rpc_def" in defLoc) {
- svcName = defLoc.rpc_def.service_name
- rpcName = defLoc.rpc_def.rpc_name
- } else if ("auth_handler_def" in defLoc) {
- svcName = defLoc.auth_handler_def.service_name
- rpcName = defLoc.auth_handler_def.name
- }
-
- const [color, highlightColor] = svcColor(svcName)
- const sel = props.selected?.id === req.id
-
- const select = () => {
- if (props.onSelect) props.onSelect(req)
- // Copy the map so React sees a new reference and re-renders.
- const next = new Map(contracted)
- next.set(req.id, !(next.get(req.id) ?? false))
- setContracted(next)
- }
-
- const isContracted = contracted.get(req.id) ?? false
- const showChildren = !isContracted && req.children.length > 0
- spanCounter++
- return
-
-
- <TreeHint up={level > 0} down={showChildren} siblings={siblings} level={level} />
-
- {(isContracted && req.children.length > 0) ?
- icons.chevronRight("h-4 w-auto ml-1 mr-0.5") :
- icons.chevronDown("h-4 w-auto ml-1 mr-0.5")
- }
-
-
-
-
- {svcName}.{rpcName}
-
-
-
-
-
-
- {showChildren && req.children.map((ch, i) =>
- renderSpan(ch, level+1, siblings.concat(i < (req.children.length-1) ? [level+1] : []))
- )}
-
- }
-
- return (
-
- {props.trace.auth && renderSpan(props.trace.auth, 0, [])}
- {renderSpan(props.trace.root, 0, [])}
-
- )
-}
-
-const TreeHint: FunctionComponent<{up: boolean, down: boolean, siblings: number[], level: number}> = (props) => {
- const lvl = props.level
- return
- {props.siblings.map(s =>
-
- )}
-
- {props.up &&
}
-
- {props.down &&
}
-
-}
-
-export default SpanList
\ No newline at end of file
diff --git a/cli/daemon/dash/dashapp/src/components/trace/TraceMap.tsx b/cli/daemon/dash/dashapp/src/components/trace/TraceMap.tsx
deleted file mode 100644
index ee772df3d5..0000000000
--- a/cli/daemon/dash/dashapp/src/components/trace/TraceMap.tsx
+++ /dev/null
@@ -1,115 +0,0 @@
-import { FunctionComponent, useEffect, useState } from "react"
-import { Request, Trace } from "./model"
-import { svcColor } from "./util"
-import React from "react"
-
-interface Props {
- trace: Trace;
- selected?: Request;
- onSelect?: (req: Request) => void;
-}
-
-const TraceMap: FunctionComponent<Props> = (props) => {
- const root = props.trace.root
- const traceStart = props.trace.start_time
- const traceDur = props.trace.end_time! - traceStart
- const lineHeight = 8
- const lineGap = 2
-
- const renderSpan = (req: Request, line: number) => {
- const start = Math.round((req.start_time - traceStart) / traceDur * 100)
- const end = Math.round((req.end_time! - traceStart) / traceDur * 100)
- const defLoc = props.trace.locations[req.def_loc]
- let svcName = "unknown"
- if ("rpc_def" in defLoc) {
- svcName = defLoc.rpc_def.service_name
- }
- const [color, highlightColor] = svcColor(svcName)
- const sel = props.selected?.id === req.id
- const select = () => props.onSelect && props.onSelect(req)
-
- return
-
-
-
- }
-
- let [lines, setLines] = useState<Request[][]>([])
- let roots = [root]
- if (props.trace.auth !== null) {
- roots.push(props.trace.auth)
- }
- useEffect(() => setLines(buildTraceMap(roots)), [props.trace])
-
- return (
-
- {lines.map((line, i) =>
- line.map((span, j) =>
- renderSpan(span, i)
- ))}
-
- )
-}
-
-export default TraceMap
-
-// buildTraceMap computes the layout for the trace map.
-// The result is a two-dimensional array, where the outer array consists of lines
-// and the inner array is a list of non-overlapping spans in that line.
-function buildTraceMap(roots: Request[]): Request[][] {
- // Lay out the spans on the trace map.
- // For a given span, look for available space by naively looping
- // over the spans in the lines with index > x, where x is the
- // parent's line index.
- const lines: Request[][] = []
- const queue = roots.map(r => { return {span: r, minLine: 0} })
- while (queue.length > 0) {
- const {span, minLine} = queue.shift()!
- let spanLine: number | undefined = undefined
- for (let i = minLine; i < lines.length; i++) {
- const line = lines[i]
- const nl = line.length
-
- // Find an available gap in the line, keeping the line sorted by time.
- for (let j = 0; j < nl; j++) {
- const start = line[j].start_time
- const end = line[j].end_time!
- if (j === 0 && span.end_time! <= start) {
- // Fits before the first span in the line.
- spanLine = i
- line.splice(0, 0, span)
- break
- } else if (span.start_time >= end && (j === nl-1 || span.end_time! <= line[j+1].start_time)) {
- // Fits after line[j]: after the last span, or in a gap before line[j+1].
- spanLine = i
- line.splice(j+1, 0, span)
- break
- }
- }
- if (spanLine !== undefined) {
- break
- }
- }
-
- if (spanLine === undefined) {
- // Add a new line to accommodate it
- lines.push([span])
- spanLine = lines.length - 1
- }
-
- // Add all child spans to the queue
- for (const child of span.children) {
- queue.push({span: child, minLine: spanLine+1})
- }
- }
-
- return lines
-}
\ No newline at end of file
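
To make the packing concrete, here is a sketch of what buildTraceMap produces for one root with two non-overlapping children. Only start_time, end_time, and children are read by the algorithm; the casts paper over the remaining Request fields, and the timestamps are made up for illustration:

// Hypothetical data: a 0-100 root span with two children.
const child1 = {start_time: 0, end_time: 40, children: []} as unknown as Request
const child2 = {start_time: 50, end_time: 90, children: []} as unknown as Request
const root = {start_time: 0, end_time: 100, children: [child1, child2]} as unknown as Request

const lines = buildTraceMap([root])
// lines[0] === [root]
// lines[1] === [child1, child2]: the children share a line because
// their time ranges do not overlap.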
diff --git a/cli/daemon/dash/dashapp/src/components/trace/model.ts b/cli/daemon/dash/dashapp/src/components/trace/model.ts
deleted file mode 100644
index eba8545a14..0000000000
--- a/cli/daemon/dash/dashapp/src/components/trace/model.ts
+++ /dev/null
@@ -1,153 +0,0 @@
-import { Base64EncodedBytes } from "~lib/base64"
-
-export interface Trace {
- id: string;
- app_version: string;
- date: string;
- start_time: number;
- end_time?: number;
- root: Request;
- auth: Request | null;
- locations: {[key: number]: TraceExpr};
-}
-
-export interface Request {
- type: "RPC" | "AUTH";
- id: string;
- parent_id: string | null;
- goid: number;
- caller_goid: number | null;
- def_loc: number;
- call_loc: number | null;
- start_time: number;
- end_time?: number;
- inputs: Base64EncodedBytes[];
- outputs: Base64EncodedBytes[];
- err: Base64EncodedBytes | null;
- events: Event[];
- children: Request[];
-}
-
-export interface DBTransaction {
- type: "DBTransaction";
- goid: number;
- txid: number;
- start_loc: number;
- end_loc: number;
- start_time: number;
- end_time?: number;
- completion_type: "COMMIT" | "ROLLBACK";
- err: Base64EncodedBytes | null;
- queries: DBQuery[];
-}
-
-export interface DBQuery {
- type: "DBQuery";
- goid: number;
- txid: number | null;
- call_loc: number;
- start_time: number;
- end_time?: number;
- query: Base64EncodedBytes;
- html_query: Base64EncodedBytes | null;
- err: Base64EncodedBytes | null;
-}
-
-export interface Goroutine {
- type: "Goroutine";
- goid: number;
- call_loc: number;
- start_time: number;
- end_time?: number;
-}
-
-export interface RPCCall {
- type: "RPCCall";
- goid: number;
- req_id: string;
- call_loc: number;
- def_loc: number;
- start_time: number;
- end_time?: number;
- err: Base64EncodedBytes | null;
-}
-
-export interface AuthCall {
- type: "AuthCall";
- goid: number;
- def_loc: number;
- start_time: number;
- end_time?: number;
- uid: string;
- auth_data: Base64EncodedBytes | null;
- err: Base64EncodedBytes | null;
-}
-
-export interface HTTPCall {
- type: "HTTPCall";
- goid: number;
- req_id: string;
- start_time: number;
- end_time?: number;
- method: string;
- host: string;
- path: string;
- url: string;
- status_code: number;
- err: Base64EncodedBytes | null;
- metrics: HTTPCallMetrics;
-}
-
-export interface HTTPCallMetrics {
- got_conn?: number;
- conn_reused: boolean;
- dns_done?: number;
- tls_handshake_done?: number;
- wrote_headers?: number;
- wrote_request?: number;
- first_response?: number;
- body_closed?: number;
-}
-
-export type Event = DBTransaction | DBQuery | RPCCall | HTTPCall | Goroutine;
-
-export type TraceExpr = RpcDefExpr | RpcCallExpr | StaticCallExpr | AuthHandlerDefExpr
-
-interface BaseExpr {
- filepath: string; // source file path
- src_line_start: number; // line number in source file
- src_line_end: number; // line number in source file
- src_col_start: number; // column start in source file
- src_col_end: number; // column end in source file (exclusive)
-}
-
-type RpcDefExpr = BaseExpr & {
- rpc_def: {
- service_name: string;
- rpc_name: string;
- context: string;
- }
-}
-
-type RpcCallExpr = BaseExpr & {
- rpc_call: {
- service_name: string;
- rpc_name: string;
- context: string;
- }
-}
-
-type StaticCallExpr = BaseExpr & {
- static_call: {
- package: "SQLDB" | "RLOG";
- func: string;
- }
-}
-
-type AuthHandlerDefExpr = BaseExpr & {
- auth_handler_def: {
- service_name: string;
- name: string;
- context: string;
- }
-}
\ No newline at end of file
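
TraceExpr is a union discriminated by which key is present, which is why the components above test membership with the `in` operator. A minimal sketch covering every variant:

// Resolve a human-readable name for any TraceExpr variant.
function exprName(expr: TraceExpr): string {
  if ("rpc_def" in expr) return `${expr.rpc_def.service_name}.${expr.rpc_def.rpc_name}`
  if ("rpc_call" in expr) return `${expr.rpc_call.service_name}.${expr.rpc_call.rpc_name}`
  if ("static_call" in expr) return `${expr.static_call.package}.${expr.static_call.func}`
  return `${expr.auth_handler_def.service_name}.${expr.auth_handler_def.name}`
}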
diff --git a/cli/daemon/dash/dashapp/src/components/trace/util.ts b/cli/daemon/dash/dashapp/src/components/trace/util.ts
deleted file mode 100644
index 78a0730eec..0000000000
--- a/cli/daemon/dash/dashapp/src/components/trace/util.ts
+++ /dev/null
@@ -1,43 +0,0 @@
-const unselected = ["#fde8e8", "#feecdc", "#fdf6b2", "#def7ec", "#d5f5f6", "#e1effe", "#e5edff", "#edebfe", "#fce8f3"]
-const selected = ["#fbd5d5", "#fcd9bd", "#fce96a", "#bcf0da", "#afecef", "#c3ddfd", "#cddbfe", "#dcd7fe", "#fad1e8"]
-
-export function svcColor(svc: string): [string, string] {
- const n = unselected.length
- let idx = strhash(svc) % n
- if (idx < 0) idx += n
- return [unselected[idx], selected[idx]]
-}
-
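-// strhash computes the classic 31-based rolling string hash
-// (the same scheme as Java's String.hashCode).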
-function strhash(s: string): number {
- let hash = 0;
- for (var i = 0; i < s.length; i++) {
- const c = s.charCodeAt(i)
- hash = ((hash<<5)-hash)+c
- hash &= hash // Convert to 32bit integer
- }
- return hash
-}
-
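-// latencyStr formats a duration given in microseconds using the largest sensible unit.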
-export function latencyStr(n: number): string {
- if (n < 1000) {
- return Math.round(n) + "µs"
- }
- n /= 1000
-
- if (n < 1000) {
- return Math.round(n) + "ms"
- }
- n /= 1000
-
- if (n < 10) {
- return (Math.round(n*10)/10) + "s"
- } else if (n < 3600) {
- return Math.round(n) + "s"
- }
-
- n /= 3600
- if (n < 10) {
- return (Math.round(n*10)/10) + "h"
- }
- return Math.round(n) + "h"
-}
\ No newline at end of file
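
A quick sketch of how these helpers behave (return values derived from the code above):

const [color, highlight] = svcColor("usersvc") // stable pastel pair for a service name
latencyStr(850)       // "850µs"
latencyStr(12_500)    // "13ms" (rounded)
latencyStr(2_400_000) // "2.4s"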
diff --git a/cli/daemon/dash/dashapp/src/index.css b/cli/daemon/dash/dashapp/src/index.css
deleted file mode 100644
index 30ec6696be..0000000000
--- a/cli/daemon/dash/dashapp/src/index.css
+++ /dev/null
@@ -1,27 +0,0 @@
-@tailwind base;
-@tailwind components;
-@tailwind utilities;
-
-html, body {
- height: 100%;
-}
-
-body {
- margin: 0;
- font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen",
- "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue",
- sans-serif;
- -webkit-font-smoothing: antialiased;
- -moz-osx-font-smoothing: grayscale;
-}
-
-code {
- font-family: source-code-pro, Menlo, Monaco, Consolas, "Courier New",
- monospace;
-}
-
-#root {
- height: 100%;
- display: flex;
- flex-direction: column;
-}
\ No newline at end of file
diff --git a/cli/daemon/dash/dashapp/src/lib/base64.ts b/cli/daemon/dash/dashapp/src/lib/base64.ts
deleted file mode 100644
index 44cae0fe1c..0000000000
--- a/cli/daemon/dash/dashapp/src/lib/base64.ts
+++ /dev/null
@@ -1,15 +0,0 @@
-export type Base64EncodedBytes = string;
-
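-// decodeBase64 decodes a base64 string into UTF-8 text;
-// plain atob alone would mangle multi-byte characters.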
-export function decodeBase64(str: string): string {
- const raw = atob(str)
- return decodeURIComponent(raw.split('').map(function(c) {
- return '%' + ('00' + c.charCodeAt(0).toString(16)).slice(-2)
- }).join(''))
-}
-
-export function encodeBase64(str: string): string {
- let encoded = encodeURIComponent(str).replace(/%([0-9A-F]{2})/g, function(match, p1) {
- return String.fromCharCode(('0x' + p1) as any)
- })
- return btoa(encoded)
-}
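
These wrappers exist because btoa/atob operate on Latin-1 code units; the percent-encoding detour makes the round trip safe for arbitrary UTF-8. A minimal sketch:

const payload = "héllo ✓"
const encoded = encodeBase64(payload)
decodeBase64(encoded) === payload // true, including the multi-byte characters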
diff --git a/cli/daemon/dash/dashapp/src/lib/client/base.ts b/cli/daemon/dash/dashapp/src/lib/client/base.ts
deleted file mode 100644
index ff7d9fa7b1..0000000000
--- a/cli/daemon/dash/dashapp/src/lib/client/base.ts
+++ /dev/null
@@ -1,69 +0,0 @@
-import JSONRPCConn from "./jsonrpc";
-import { ResponseError } from "./errs";
-
-export default class BaseClient {
- base: string;
-
- constructor() {
- this.base = window.location.host
- }
-
- async do<T>(path: string, data?: any): Promise<T> {
- try {
- let resp = await this._do(path, data);
- if (!resp.ok) {
- return Promise.reject(new ResponseError(path, resp.error.code, resp.error.detail, ""));
- }
- return Promise.resolve(resp.data);
- } catch (err) {
- return Promise.reject(new ResponseError(path, "network_error", null, err))
- }
- }
-
- ws(path: string): Promise<WebSocket> {
- const base = this.base
- return new Promise(function(resolve, reject) {
- let ws = new WebSocket(`ws://${base}${path}`);
- ws.onopen = function() {
- ws.onerror = null
- resolve(ws)
- };
- ws.onerror = function(err: any) {
- reject(new ResponseError(path, "network", null, err))
- }
- })
- }
-
- async jsonrpc(path: string): Promise<JSONRPCConn> {
- const ws = await this.ws(path)
- return new JSONRPCConn(ws)
- }
-
- async _do<T>(path: string, data?: any): Promise<APIResponse<T>> {
- let body = null
- if (data) {
- body = JSON.stringify(data)
- }
-
- let resp = await fetch(`http://${this.base}${path}`, {
- method: "POST",
- body: body,
- })
- return await resp.json()
- }
-}
-
-interface ErrorResponse {
- ok: false;
- error: {
- code: string;
- detail: any;
- }
-}
-
-interface SuccessResponse<T> {
- ok: true;
- data: T;
-}
-
-type APIResponse<T> = SuccessResponse<T> | ErrorResponse;
\ No newline at end of file
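
APIResponse is a discriminated union on the `ok` flag, which is what lets do() narrow the result safely. A minimal sketch of the same narrowing in caller code:

// Narrow on the `ok` discriminant; TypeScript picks the right branch type.
function unwrap<T>(resp: APIResponse<T>): T {
  if (resp.ok) {
    return resp.data // SuccessResponse<T>
  }
  throw new Error(resp.error.code) // ErrorResponse
}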
diff --git a/cli/daemon/dash/dashapp/src/lib/client/client.ts b/cli/daemon/dash/dashapp/src/lib/client/client.ts
deleted file mode 100644
index d881efcf2a..0000000000
--- a/cli/daemon/dash/dashapp/src/lib/client/client.ts
+++ /dev/null
@@ -1,34 +0,0 @@
-import BaseClient from "./base";
-import { APIMeta } from "~c/api/api";
-
-export default class Client {
- base: BaseClient;
-
- constructor() {
- this.base = new BaseClient()
- }
-}
-
-export interface ProcessStart {
- appID: string;
- pid: string
- meta: APIMeta;
- port: number;
-}
-
-export interface ProcessReload {
- appID: string;
- pid: string
- meta: APIMeta;
-}
-
-export interface ProcessStop {
- appID: string;
- pid: string
-}
-
-export interface ProcessOutput {
- appID: string;
- pid: string
- output: string;
-}
diff --git a/cli/daemon/dash/dashapp/src/lib/client/errs.ts b/cli/daemon/dash/dashapp/src/lib/client/errs.ts
deleted file mode 100644
index fb23ea0ba8..0000000000
--- a/cli/daemon/dash/dashapp/src/lib/client/errs.ts
+++ /dev/null
@@ -1,62 +0,0 @@
-export class ResponseError extends Error {
- path: string;
- code: string;
- detail: any | null;
-
- constructor(path: string, code: string, detail: any | null, message: string) {
- let d = detail ? JSON.stringify(detail) : "(no details)";
- if (message) {
- super(`API ${path} failed with code: ${code} - ${d}: ${message}`)
- } else {
- super(`API ${path} failed with code: ${code} - ${d}`)
- }
- this.path = path;
- this.code = code;
- this.detail = detail;
- }
-}
-
-export function errCode(err: any): string {
- if (!err || !err.code) {
- return "unknown"
- }
- return err.code ?? "unknown"
-}
-
-export function errDetail(err: any): any | null {
- if (!err || !err.detail) {
- return null
- }
- return err.detail || null
-}
-
-export function isErr(err: any, code?: string, detail?: any) {
- if (!err) {
- return false;
- } else if (err.code !== code) {
- return false;
- }
-
- if (!detail) {
- return true;
- } else if (!err.detail) {
- return false;
- }
-
- for (let key in detail) {
- if (detail[key] !== err.detail[key]) {
- return false;
- }
- }
- return true;
-}
-
-export function isValidationErr(err: any, field?: string, type?: string) {
- if (type) {
- return isErr(err, "validation", {"field": field, "type": type})
- } else if (field) {
- return isErr(err, "validation", {"field": field})
- } else {
- return isErr(err, "validation")
- }
-}
\ No newline at end of file
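
A sketch of how callers are expected to combine these guards; the endpoint path and field names here are hypothetical:

try {
  // assuming `client` is the BaseClient from base.ts
  await client.do("/create-app", {name: ""}) // hypothetical endpoint
} catch (err) {
  if (isValidationErr(err, "name", "required")) {
    // highlight the name field (hypothetical handling)
  } else if (errCode(err) === "network_error") {
    // surface a connectivity error instead
  }
}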
diff --git a/cli/daemon/dash/dashapp/src/lib/client/jsonrpc.ts b/cli/daemon/dash/dashapp/src/lib/client/jsonrpc.ts
deleted file mode 100644
index 493272503e..0000000000
--- a/cli/daemon/dash/dashapp/src/lib/client/jsonrpc.ts
+++ /dev/null
@@ -1,195 +0,0 @@
-import { EventEmitter } from 'events';
-import * as protocol from "json-rpc-protocol"
-
-function makeAsync<T>(fn: (msg: Message) => T): (msg: Message) => Promise<T> {
- return function(msg) {
- return new Promise<T>(function(resolve) {
- return resolve(fn(msg));
- });
- }
-}
-
-export interface RequestMsg {
- type: "request";
- id: number;
- method: string;
- params: any;
-}
-
-export interface ResponseMsg {
- type: "response";
- id: number;
- method: string;
- result: any;
-}
-
-export interface NotificationMsg {
- type: "notification";
- method: string;
- params: any;
-}
-
-export interface ErrorMsg {
- type: "error";
- id: number | null;
- error: {
- message: string;
- code: any;
- data: any;
- };
-}
-
-export type Message = RequestMsg | ResponseMsg | NotificationMsg | ErrorMsg;
-
-export default class JSONRPCConn extends EventEmitter {
- _peer: Peer;
- _ws: WebSocket;
-
- constructor(ws: WebSocket) {
- super();
- this._ws = ws
- this._peer = new Peer(
- (msg) => ws.send(msg),
- (msg) => this.emit("notification", msg),
- )
- ws.onmessage = (event) => this._peer.processMsg(event.data);
- }
-
- async request(method: string, params?: any): Promise<any> {
- return await this._peer.request(method, params);
- }
-
- async notify(method: string, params?: any): Promise {
- this._peer.notify(method, params)
- }
-
- close() {
- this._peer.failPendingRequests("closing connection")
- this._ws.close()
- }
-}
-
-const parseMessage = (message: string): Message => {
- try {
- return protocol.parse(message) as Message;
- } catch (error) {
- throw protocol.format.error(null, error);
- }
-};
-
-// Default onMessage implementation:
-//
-// - ignores notifications
-// - throws MethodNotFound for all requests
-function defaultOnMessage(message: Message) {
- if (message.type === "request") {
- throw new protocol.MethodNotFound(message.method);
- }
-}
-
-function noop() {}
-
-interface Deferred {
- resolve: (x: any) => void;
- reject: (x: any) => void;
-}
-
-// Start the autoincrementing id at JavaScript's minimum safe integer to have
-// more room before running out of integers (it's very far-fetched, but a very
-// long-running process with a LOT of messages could run out).
-let nextRequestId = -9007199254740991;
-
-// ===================================================================
-
-export class Peer {
- _deferreds: {[key: number]: Deferred};
- _handle: (msg: Message) => Promise<any>;
- _send: (msg: string) => void;
-
- constructor(send: (msg: string) => void, onMessage = defaultOnMessage) {
- this._send = send;
- this._handle = makeAsync(onMessage);
- this._deferreds = Object.create(null);
- }
-
- _getDeferred(id: number): Deferred {
- const deferred = this._deferreds[id];
- delete this._deferreds[id];
- return deferred;
- }
-
- async processMsg(message: string) {
- const msg = parseMessage(message);
-
- if (msg.type === "error") {
- // Some errors do not have an identifier; simply discard them.
- if (msg.id === null) {
- return;
- }
-
- const { error } = msg;
- this._getDeferred(msg.id).reject(
- // TODO: it would be great if we could return an error of
- // a more specific type (and custom types with registration).
- new (Error as any)(error.message, error.code, error.data)
- );
- } else if (msg.type === "response") {
- this._getDeferred(msg.id).resolve(msg.result);
- } else if (msg.type === "notification") {
- this._handle(msg).catch(noop);
- } else if (msg.type === "request") {
- return this._handle(msg)
- .then(result =>
- protocol.format.response(msg.id, result === undefined ? null : result)
- )
- .catch(error =>
- protocol.format.error(
- msg.id,
-
- // If the method name is not defined, default to the method passed
- // in the request.
- error instanceof protocol.MethodNotFound && !error.data
- ? new protocol.MethodNotFound(msg.method)
- : error
- )
- );
- }
- }
-
- // Fails all pending requests.
- failPendingRequests(reason: any) {
- Object.entries(this._deferreds).forEach(([id, deferred]) => {
- deferred.reject(reason);
- delete this._deferreds[(id as unknown) as number];
- });
- }
-
- /**
- * This function should be called to send a request to the other end.
- *
- * TODO: handle multi-requests.
- */
- request(method: string, params: any): Promise<any> {
- return new Promise((resolve, reject) => {
- const requestId = nextRequestId++;
-
- try {
- this._send(protocol.format.request(requestId, method, params));
- } catch(err) {
- reject(err);
- return;
- }
-
- this._deferreds[requestId] = { resolve, reject };
- });
- }
-
- /**
- * This function should be called to send a notification to the other end.
- *
- * TODO: handle multi-notifications.
- */
- notify(method: string, params: any) {
- this._send(protocol.format.notification(method, params));
- }
-}
\ No newline at end of file
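
Tying BaseClient and JSONRPCConn together, a hedged sketch of the request/notification flow the dashboard pages rely on (the WebSocket path is illustrative; use whatever endpoint the daemon actually serves):

// Sketch; assumes imports from ./base and ./jsonrpc.
const client = new BaseClient()
const conn = await client.jsonrpc("/__encore") // illustrative path
const traces = await conn.request("list-traces", {appID: "my-app"})
conn.on("notification", (msg: NotificationMsg) => {
  if (msg.method === "trace/new") {
    // prepend the new trace, as AppTraces does
  }
})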
diff --git a/cli/daemon/dash/dashapp/src/lib/clipboard.ts b/cli/daemon/dash/dashapp/src/lib/clipboard.ts
deleted file mode 100644
index ba9bc24e76..0000000000
--- a/cli/daemon/dash/dashapp/src/lib/clipboard.ts
+++ /dev/null
@@ -1,8 +0,0 @@
-export const copyToClipboard = (text: string) => {
- let textField = document.createElement('textarea')
- textField.innerText = text
- document.body.appendChild(textField)
- textField.select()
- document.execCommand('copy')
- textField.remove()
-}
\ No newline at end of file
diff --git a/cli/daemon/dash/dashapp/src/lib/ctx.ts b/cli/daemon/dash/dashapp/src/lib/ctx.ts
deleted file mode 100644
index 884b06ff74..0000000000
--- a/cli/daemon/dash/dashapp/src/lib/ctx.ts
+++ /dev/null
@@ -1,8 +0,0 @@
-import React, { useContext } from "react"
-import JSONRPCConn from "./client/jsonrpc"
-
-export const ConnContext = React.createContext<JSONRPCConn | undefined>(undefined)
-
-export function useConn(): JSONRPCConn {
- return useContext(ConnContext)!
-}
\ No newline at end of file
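
ConnContext must be provided above any component that calls useConn(). A sketch of one way to wire it (where the provider lives in the real app is not shown here, and the connection path is again illustrative):

const conn = await new BaseClient().jsonrpc("/__encore") // illustrative path
ReactDOM.render(
  <ConnContext.Provider value={conn}>
    <App />
  </ConnContext.Provider>,
  document.getElementById("root")
)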
diff --git a/cli/daemon/dash/dashapp/src/lib/parse-ansi.ts b/cli/daemon/dash/dashapp/src/lib/parse-ansi.ts
deleted file mode 100644
index ad541fe410..0000000000
--- a/cli/daemon/dash/dashapp/src/lib/parse-ansi.ts
+++ /dev/null
@@ -1,433 +0,0 @@
-export interface Chunk {
- type: "text" | "ansi" | "newline";
- value: string;
- style: Style;
-}
-
-export interface Style {
- foregroundColor?: string;
- backgroundColor?: string;
- dim?: boolean;
- bold?: boolean;
- italic?: boolean;
- underline?: boolean;
- strikethrough?: boolean;
- inverse?: boolean;
-}
-
-const ansiRegex = ({onlyFirst = false} = {}) => {
- const pattern = [
- '[\\u001B\\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[-a-zA-Z\\d\\/#&.:=?%@~_]*)*)?\\u0007)',
- '(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PR-TZcf-ntqry=><~]))'
- ].join('|');
-
- return new RegExp(pattern, onlyFirst ? undefined : 'g');
-};
-
-const stripAnsi = (string: any) => typeof string === 'string' ? string.replace(ansiRegex(), '') : string;
-
-
-const ansiTags: {[key: string]: string} = {
- '\u001B[30m': 'black',
- '\u001B[31m': 'red',
- '\u001B[32m': 'green',
- '\u001B[33m': 'yellow',
- '\u001B[34m': 'blue',
- '\u001B[35m': 'magenta',
- '\u001B[36m': 'cyan',
- '\u001B[37m': 'white',
-
- '\u001B[90m': 'gray',
- '\u001B[91m': 'redBright',
- '\u001B[92m': 'greenBright',
- '\u001B[93m': 'yellowBright',
- '\u001B[94m': 'blueBright',
- '\u001B[95m': 'magentaBright',
- '\u001B[96m': 'cyanBright',
- '\u001B[97m': 'whiteBright',
-
- '\u001B[39m': 'foregroundColorClose',
-
- '\u001B[40m': 'bgBlack',
- '\u001B[41m': 'bgRed',
- '\u001B[42m': 'bgGreen',
- '\u001B[43m': 'bgYellow',
- '\u001B[44m': 'bgBlue',
- '\u001B[45m': 'bgMagenta',
- '\u001B[46m': 'bgCyan',
- '\u001B[47m': 'bgWhite',
-
- '\u001B[100m': 'bgGray',
- '\u001B[101m': 'bgRedBright',
- '\u001B[102m': 'bgGreenBright',
- '\u001B[103m': 'bgYellowBright',
- '\u001B[104m': 'bgBlueBright',
- '\u001B[105m': 'bgMagentaBright',
- '\u001B[106m': 'bgCyanBright',
- '\u001B[107m': 'bgWhiteBright',
-
- '\u001B[49m': 'backgroundColorClose',
-
- '\u001B[1m': 'boldOpen',
- '\u001B[2m': 'dimOpen',
- '\u001B[3m': 'italicOpen',
- '\u001B[4m': 'underlineOpen',
- '\u001B[7m': 'inverseOpen',
- '\u001B[8m': 'hiddenOpen',
- '\u001B[9m': 'strikethroughOpen',
-
- '\u001B[22m': 'boldDimClose',
- '\u001B[23m': 'italicClose',
- '\u001B[24m': 'underlineClose',
- '\u001B[27m': 'inverseClose',
- '\u001B[28m': 'hiddenClose',
- '\u001B[29m': 'strikethroughClose',
-
- '\u001B[0m': 'reset'
-}
-
-const decorators: {[key: string]: string} = {
- black: 'foregroundColorOpen',
- red: 'foregroundColorOpen',
- green: 'foregroundColorOpen',
- yellow: 'foregroundColorOpen',
- blue: 'foregroundColorOpen',
- magenta: 'foregroundColorOpen',
- cyan: 'foregroundColorOpen',
- white: 'foregroundColorOpen',
-
- gray: 'foregroundColorOpen',
- redBright: 'foregroundColorOpen',
- greenBright: 'foregroundColorOpen',
- yellowBright: 'foregroundColorOpen',
- blueBright: 'foregroundColorOpen',
- magentaBright: 'foregroundColorOpen',
- cyanBright: 'foregroundColorOpen',
- whiteBright: 'foregroundColorOpen',
-
- bgBlack: 'backgroundColorOpen',
- bgRed: 'backgroundColorOpen',
- bgGreen: 'backgroundColorOpen',
- bgYellow: 'backgroundColorOpen',
- bgBlue: 'backgroundColorOpen',
- bgMagenta: 'backgroundColorOpen',
- bgCyan: 'backgroundColorOpen',
- bgWhite: 'backgroundColorOpen',
-
- bgGray: 'backgroundColorOpen',
- bgRedBright: 'backgroundColorOpen',
- bgGreenBright: 'backgroundColorOpen',
- bgYellowBright: 'backgroundColorOpen',
- bgBlueBright: 'backgroundColorOpen',
- bgMagentaBright: 'backgroundColorOpen',
- bgCyanBright: 'backgroundColorOpen',
- bgWhiteBright: 'backgroundColorOpen',
-
- foregroundColorClose: 'foregroundColorClose',
- backgroundColorClose: 'backgroundColorClose',
-
- boldOpen: 'boldOpen',
- dimOpen: 'dimOpen',
- italicOpen: 'italicOpen',
- underlineOpen: 'underlineOpen',
- inverseOpen: 'inverseOpen',
- hiddenOpen: 'hiddenOpen',
- strikethroughOpen: 'strikethroughOpen',
-
- boldDimClose: 'boldDimClose',
- italicClose: 'italicClose',
- underlineClose: 'underlineClose',
- inverseClose: 'inverseClose',
- hiddenClose: 'hiddenClose',
- strikethroughClose: 'strikethroughClose',
-
- reset: 'reset'
-}
-
-const arrayUniq = (array: any[]) => [...new Set(array)];
-
-// Atomize
-// Splits text into "words" by sticky delimiters [ANSI Escape Seq, \n]
-// E.g.: words = ['\u001b[37m', 'Line 1', '\n', 'Line 2', '\u001b[39m']
-const atomize = (text: string) => {
- const ansies = arrayUniq(text.match(ansiRegex()) as string[])
- const words = superSplit(text, ansies.concat(['\n']))
- return {ansies, words}
-}
-
-const parse = (ansi: string) => {
- const plainText = stripAnsi(ansi)
-
- const result: any = {
- raw: ansi,
- plainText,
- chunks: []
- }
-
- const {
- ansies,
- words
- } = atomize(ansi)
-
- const styleStack: any = {
- foregroundColor: [],
- backgroundColor: [],
- boldDim: []
- }
-
- const getForegroundColor = () => {
- if (styleStack.foregroundColor.length > 0) {
- return styleStack.foregroundColor[styleStack.foregroundColor.length - 1]
- }
- return false
- }
-
- const getBackgroundColor = () => {
- if (styleStack.backgroundColor.length > 0) {
- return styleStack.backgroundColor[styleStack.backgroundColor.length - 1]
- }
- return false
- }
-
- const getDim = () => {
- return styleStack.boldDim.includes('dim')
- }
-
- const getBold = () => {
- return styleStack.boldDim.includes('bold')
- }
-
- const styleState = {
- italic: false,
- underline: false,
- inverse: false,
- hidden: false,
- strikethrough: false
- }
-
- let x = 0
- let y = 0
- let nAnsi = 0
- let nPlain = 0
-
- const bundle = (type: any, value: any) => {
- const chunk: Chunk = {
- type,
- value,
- style: {},
- }
-
- if (type === 'text' || type === 'ansi') {
- const style = chunk.style
-
- const foregroundColor = getForegroundColor()
- const backgroundColor = getBackgroundColor()
- const dim = getDim()
- const bold = getBold()
-
- if (foregroundColor) {
- style.foregroundColor = foregroundColor
- }
-
- if (backgroundColor) {
- style.backgroundColor = backgroundColor
- }
-
- if (dim) {
- style.dim = dim
- }
-
- if (bold) {
- style.bold = bold
- }
-
- if (styleState.italic) {
- style.italic = true
- }
-
- if (styleState.underline) {
- style.underline = true
- }
-
- if (styleState.inverse) {
- style.inverse = true
- }
-
- if (styleState.strikethrough) {
- style.strikethrough = true
- }
- }
-
- return chunk
- }
-
- words.forEach((word: string) => {
- // Newline character
- if (word === '\n') {
- const chunk = bundle('newline', '\n')
- result.chunks.push(chunk)
-
- x = 0
- y += 1
- nAnsi += 1
- nPlain += 1
- return
- }
-
- // Text characters
- if (ansies.includes(word) === false) {
- const chunk = bundle('text', word)
- result.chunks.push(chunk)
-
- x += word.length
- nAnsi += word.length
- nPlain += word.length
- return
- }
-
- // ANSI Escape characters
- const ansiTag = ansiTags[word]
- const decorator = decorators[ansiTag]
- const color = ansiTag
-
- if (decorator === 'foregroundColorOpen') {
- styleStack.foregroundColor.push(color)
- }
-
- if (decorator === 'foregroundColorClose') {
- styleStack.foregroundColor.pop()
- }
-
- if (decorator === 'backgroundColorOpen') {
- styleStack.backgroundColor.push(color)
- }
-
- if (decorator === 'backgroundColorClose') {
- styleStack.backgroundColor.pop()
- }
-
- if (decorator === 'boldOpen') {
- styleStack.boldDim.push('bold')
- }
-
- if (decorator === 'dimOpen') {
- styleStack.boldDim.push('dim')
- }
-
- if (decorator === 'boldDimClose') {
- styleStack.boldDim.pop()
- }
-
- if (decorator === 'italicOpen') {
- styleState.italic = true
- }
-
- if (decorator === 'italicClose') {
- styleState.italic = false
- }
-
- if (decorator === 'underlineOpen') {
- styleState.underline = true
- }
-
- if (decorator === 'underlineClose') {
- styleState.underline = false
- }
-
- if (decorator === 'inverseOpen') {
- styleState.inverse = true
- }
-
- if (decorator === 'inverseClose') {
- styleState.inverse = false
- }
-
- if (decorator === 'strikethroughOpen') {
- styleState.strikethrough = true
- }
-
- if (decorator === 'strikethroughClose') {
- styleState.strikethrough = false
- }
-
- if (decorator === 'reset') {
- styleState.strikethrough = false
- styleState.inverse = false
- styleState.italic = false
- styleStack.boldDim = []
- styleStack.backgroundColor = []
- styleStack.foregroundColor = []
- }
-
- const chunk = bundle('ansi', {
- tag: ansiTag,
- ansi: word,
- decorator
- })
-
- result.chunks.push(chunk)
- nAnsi += word.length
- })
-
- return result
-}
-
-function splitString(str: any, delimiter: any): any {
- const result: any = []
-
- str.split(delimiter).forEach((part: any) => {
- result.push(part)
- result.push(delimiter)
- })
-
- result.pop()
-
- return result
-}
-
-const splitArray = (ary: any, delimiter: any) => {
- let result: any = []
-
- ary.forEach((part: any) => {
- let subRes: any = []
-
- part.split(delimiter).forEach((str: any) => {
- subRes.push(str)
- subRes.push(delimiter)
- })
-
- subRes.pop()
- subRes = subRes.filter((str: any) => {
- if (str) {
- return str
- }
- return undefined
- })
-
- result = result.concat(subRes)
- })
-
- return result
-}
-
-function superSplit(splittable: any, delimiters: any): any {
- if (delimiters.length === 0) {
- return splittable
- }
-
- if (typeof splittable === 'string') {
- const delimiter = delimiters[delimiters.length - 1]
- const split = splitString(splittable, delimiter)
- return superSplit(split, delimiters.slice(0, -1))
- }
-
- if (Array.isArray(splittable)) {
- const delimiter = delimiters[delimiters.length - 1]
- const split = splitArray(splittable, delimiter)
- return superSplit(split, delimiters.slice(0, -1))
- }
-
- return false
-}
-
-export default parse
\ No newline at end of file
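
A sketch of parse() on a small colored string, with the resulting chunk stream shown approximately in comments:

const result = parse("\u001B[31mboom\u001B[39m done\n")
// result.plainText === "boom done\n"
// result.chunks, approximately:
//   {type: "ansi", ...}                                             (red opens)
//   {type: "text", value: "boom", style: {foregroundColor: "red"}}
//   {type: "ansi", ...}                                             (color closes)
//   {type: "text", value: " done", style: {}}
//   {type: "newline", value: "\n", style: {}}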
diff --git a/cli/daemon/dash/dashapp/src/lib/time.ts b/cli/daemon/dash/dashapp/src/lib/time.ts
deleted file mode 100644
index ff4cfca336..0000000000
--- a/cli/daemon/dash/dashapp/src/lib/time.ts
+++ /dev/null
@@ -1,27 +0,0 @@
-import {DateTime, Duration} from 'luxon';
-
-export function timeToDate(timeStr: string): DateTime | null {
- let d = DateTime.fromISO(timeStr);
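- // Go's zero time marshals as year 1, so treat it as "no value".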
- if (d.year === 1) {
- return null;
- }
- return d;
-}
-
-export function durationStr(dur: Duration, format?: "long" | "short"): string {
- const short = format === "short"
- dur = dur.shiftTo("hours", "minutes", "seconds", "milliseconds")
- let parts: [number, string][]
- if (short) {
- parts = [[dur.hours, "h"], [dur.minutes, "m"], [dur.seconds, "s"]]
- } else {
- parts = [[dur.hours, "hour"], [dur.minutes, "minute"], [dur.seconds, "second"]]
- }
-
- for (var part of parts) {
- if (part[0] > 0) {
- return short ? (part[0] + part[1]) : (part[0] + " " + part[1] + (part[0] > 1 ? "s" : ""))
- }
- }
- return short ? "<1s" : "less than a second"
-}
\ No newline at end of file
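
For reference, durationStr keeps only the most significant non-zero unit:

import {Duration} from 'luxon'
durationStr(Duration.fromObject({minutes: 3, seconds: 20}))          // "3 minutes"
durationStr(Duration.fromObject({minutes: 3, seconds: 20}), "short") // "3m"
durationStr(Duration.fromObject({milliseconds: 400}), "short")       // "<1s"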
diff --git a/cli/daemon/dash/dashapp/src/logo.svg b/cli/daemon/dash/dashapp/src/logo.svg
deleted file mode 100644
index d0c4ed3660..0000000000
--- a/cli/daemon/dash/dashapp/src/logo.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/cli/daemon/dash/dashapp/src/main.tsx b/cli/daemon/dash/dashapp/src/main.tsx
deleted file mode 100644
index 606a3cf44e..0000000000
--- a/cli/daemon/dash/dashapp/src/main.tsx
+++ /dev/null
@@ -1,11 +0,0 @@
-import React from 'react'
-import ReactDOM from 'react-dom'
-import './index.css'
-import App from './App'
-
-ReactDOM.render(
-
-
- ,
- document.getElementById('root')
-)
diff --git a/cli/daemon/dash/dashapp/src/pages/AppAPI.tsx b/cli/daemon/dash/dashapp/src/pages/AppAPI.tsx
deleted file mode 100644
index ba6a10529e..0000000000
--- a/cli/daemon/dash/dashapp/src/pages/AppAPI.tsx
+++ /dev/null
@@ -1,24 +0,0 @@
-import React, { FunctionComponent } from 'react'
-import { useParams } from 'react-router-dom'
-import AppAPI from '~c/app/AppAPI'
-import Nav from '~c/Nav'
-import { useConn } from '~lib/ctx'
-
-const API: FunctionComponent = (props) => {
- const conn = useConn()
- const { appID } = useParams<{appID: string}>()
-
- return (
- <>
-
-
-
- >
- )
-}
-
-export default API
\ No newline at end of file
diff --git a/cli/daemon/dash/dashapp/src/pages/AppHome.tsx b/cli/daemon/dash/dashapp/src/pages/AppHome.tsx
deleted file mode 100644
index 696b767654..0000000000
--- a/cli/daemon/dash/dashapp/src/pages/AppHome.tsx
+++ /dev/null
@@ -1,38 +0,0 @@
-import React, { FunctionComponent } from 'react'
-import { useParams } from 'react-router-dom'
-import AppCaller from '~c/app/AppCaller'
-import AppTraces from '~c/app/AppTraces'
-import Nav from '~c/Nav'
-import { useConn } from '~lib/ctx'
-
-const AppHome: FunctionComponent = (props) => {
- const { appID } = useParams<{appID: string}>()
- const conn = useConn()
-
- return (
- <>
-
-
-
- >
- )
-}
-
-export default AppHome
\ No newline at end of file
diff --git a/cli/daemon/dash/dashapp/src/pages/AppList.tsx b/cli/daemon/dash/dashapp/src/pages/AppList.tsx
deleted file mode 100644
index ee3c0f5c56..0000000000
--- a/cli/daemon/dash/dashapp/src/pages/AppList.tsx
+++ /dev/null
@@ -1,35 +0,0 @@
-import React, { FunctionComponent, useEffect, useState } from 'react'
-import { Link } from 'react-router-dom'
-import { useConn } from '~lib/ctx'
-
-const AppList: FunctionComponent = (props) => {
- const conn = useConn()
- const [apps, setApps] = useState<{id: string; name: string}[] | undefined>(undefined)
- useEffect(() => {
- conn.request("list-apps").then(apps => setApps(apps as {id: string; name: string}[]))
- }, [])
-
- return (
- <>
-
-
-
-
Your Apps
-
-
- {apps !== undefined ? (
- apps.map((app) =>
-
- {app.name}
-
- )
- ) :
Loading...
- }
-
-
-
- >
- )
-}
-
-export default AppList
\ No newline at end of file
diff --git a/cli/daemon/dash/dashapp/tailwind.config.js b/cli/daemon/dash/dashapp/tailwind.config.js
deleted file mode 100644
index a535525cab..0000000000
--- a/cli/daemon/dash/dashapp/tailwind.config.js
+++ /dev/null
@@ -1,11 +0,0 @@
-module.exports = {
- purge: ['./index.html', './src/**/*.{js,ts,jsx,tsx}'],
- darkMode: false, // or 'media' or 'class'
- theme: {
- extend: {},
- },
- variants: {
- extend: {},
- },
- plugins: [require("@tailwindcss/forms")],
-}
diff --git a/cli/daemon/dash/dashapp/tsconfig.json b/cli/daemon/dash/dashapp/tsconfig.json
deleted file mode 100644
index 8acbbdf7a7..0000000000
--- a/cli/daemon/dash/dashapp/tsconfig.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
- "compilerOptions": {
- "target": "ESNext",
- "lib": ["DOM", "DOM.Iterable", "ESNext"],
- "types": ["vite/client"],
- "allowJs": false,
- "skipLibCheck": false,
- "esModuleInterop": true,
- "allowSyntheticDefaultImports": true,
- "strict": true,
- "forceConsistentCasingInFileNames": true,
- "module": "ESNext",
- "moduleResolution": "Node",
- "resolveJsonModule": true,
- "isolatedModules": true,
- "noEmit": true,
- "jsx": "react",
- "paths": {
- "~c/*": ["./src/components/*"],
- "~lib/*": ["./src/lib/*"],
- "~mod/*": ["./src/mod/*"],
- "~p/*": ["./src/pages/*"],
- }
- },
- "include": ["./src"]
-}
diff --git a/cli/daemon/dash/dashapp/vite.config.ts b/cli/daemon/dash/dashapp/vite.config.ts
deleted file mode 100644
index d8371d9d78..0000000000
--- a/cli/daemon/dash/dashapp/vite.config.ts
+++ /dev/null
@@ -1,16 +0,0 @@
-import reactRefresh from '@vitejs/plugin-react-refresh'
-import path from 'path';
-import { defineConfig } from 'vite'
-
-const projectRootDir = path.resolve(__dirname)
-
-// https://vitejs.dev/config/
-export default defineConfig({
- plugins: [reactRefresh()],
- alias: [
- {find: "~c", replacement: path.resolve(projectRootDir, "src", "components")},
- {find: "~p", replacement: path.resolve(projectRootDir, "src", "pages")},
- {find: "~mod", replacement: path.resolve(projectRootDir, "src", "mod")},
- {find: "~lib", replacement: path.resolve(projectRootDir, "src", "lib")},
- ],
-})
diff --git a/cli/daemon/dash/dashproxy/dashproxy.go b/cli/daemon/dash/dashproxy/dashproxy.go
new file mode 100644
index 0000000000..66ab4b6bc1
--- /dev/null
+++ b/cli/daemon/dash/dashproxy/dashproxy.go
@@ -0,0 +1,79 @@
+// Package dashproxy proxies requests to the dash server,
+// caching them locally for offline access.
+package dashproxy
+
+import (
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "os"
+ "path/filepath"
+
+ "github.com/cockroachdb/errors"
+ "github.com/peterbourgon/diskv"
+
+ "encr.dev/internal/conf"
+ "encr.dev/internal/httpcache"
+ "encr.dev/internal/httpcache/diskcache"
+ "encr.dev/internal/version"
+)
+
+func New(targetURL string) (*httputil.ReverseProxy, error) {
+ target, err := url.Parse(targetURL)
+ if err != nil {
+ return nil, errors.Wrap(err, "parse target url")
+ }
+
+ var transport http.RoundTripper = &versionAddingTransport{version: version.Version}
+ if conf.CacheDevDash {
+ cacheDir, err := os.UserCacheDir()
+ if err != nil {
+ return nil, errors.Wrap(err, "get user cache dir")
+ }
+
+ cache := diskcache.NewWithDiskv(diskv.New(diskv.Options{
+ BasePath: filepath.Join(cacheDir, "encore", "dashcache"),
+ CacheSizeMax: 1024 * 1024 * 1024, // 1GiB
+ Compression: diskv.NewGzipCompression(),
+ }))
+
+ // Wrap the transport with a caching transport.
+ cachingTransport := httpcache.NewTransport(cache)
+ cachingTransport.Transport = transport
+ transport = cachingTransport
+ }
+
+ proxy := &httputil.ReverseProxy{
+ Transport: transport,
+ Rewrite: func(r *httputil.ProxyRequest) {
+ r.SetURL(target)
+
+ // Configure cache headers so the cache behaves the way we want it to.
+ r.Out.Header.Del("Cookie")
+ r.Out.Header.Set("Cache-Control", "stale-if-error")
+ r.Out.Header.Del("Vary")
+ },
+ ModifyResponse: func(resp *http.Response) error {
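+ // Cache successful responses for a minute, and allow serving stale
+ // content for up to a day if the upstream is unavailable.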
+ if resp.StatusCode < 300 {
+ resp.Header.Del("Vary")
+ resp.Header.Set("Cache-Control", "max-age=60,stale-if-error=86400")
+ }
+ return nil
+ },
+ }
+
+ return proxy, nil
+}
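+
+// For example, the dash server creates the proxy like so:
+//
+//	proxy, err := dashproxy.New(conf.DevDashURL)
+//	if err != nil {
+//		log.Fatal().Err(err).Msg("could not create dash proxy")
+//	}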
+
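+// versionAddingTransport adds the CLI version as a "cli_version" query
+// parameter to outgoing requests.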
+type versionAddingTransport struct {
+ version string
+}
+
+func (t *versionAddingTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ if t.version != "" {
+ vals := req.URL.Query()
+ vals.Set("cli_version", t.version)
+ req.URL.RawQuery = vals.Encode()
+ }
+ return http.DefaultTransport.RoundTrip(req)
+}
diff --git a/cli/daemon/dash/dbbrowser.go b/cli/daemon/dash/dbbrowser.go
new file mode 100644
index 0000000000..a88de30b17
--- /dev/null
+++ b/cli/daemon/dash/dbbrowser.go
@@ -0,0 +1,187 @@
+package dash
+
+import (
+ "context"
+
+ "encr.dev/cli/daemon/sqldb"
+ "encr.dev/pkg/fns"
+ "github.com/cockroachdb/errors"
+ "github.com/jackc/pgx/v5"
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+// QueryRequest represents the request body for the /query endpoint
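+//
+// A request body is JSON, for example (illustrative values):
+//
+//	{"query": "SELECT id FROM users WHERE id = $1",
+//	 "params": [42], "arrayMode": false, "dbId": "mydb", "appId": "my-app"}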
+type QueryRequest struct {
+ Query string `json:"query"`
+ Params []any `json:"params"`
+ ArrayMode bool `json:"arrayMode"`
+ DbID string `json:"dbId"`
+ AppID string `json:"appId"`
+}
+
+// TransactionRequest represents the request body for the /transaction endpoint
+type TransactionRequest struct {
+ Queries []struct {
+ SQL string `json:"sql"`
+ Params []any `json:"params"`
+ } `json:"queries"`
+ DbID string `json:"dbId"`
+ AppID string `json:"appId"`
+}
+
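+// Query handles the /query endpoint, returning rows either as arrays
+// or as column-name keyed objects depending on req.ArrayMode.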
+func (h *handler) Query(ctx context.Context, req QueryRequest) ([]any, error) {
+ pgConn, err := h.browserConn(ctx, req.AppID, req.DbID)
+ if err != nil {
+ return nil, err
+ }
+
+ defer fns.CloseIgnoreCtx(ctx, pgConn.Close)
+
+ rows, err := pgConn.Query(ctx, req.Query, req.Params...)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ results := []any{}
+ if req.ArrayMode {
+ // Return results as arrays
+ for rows.Next() {
+ values, err := rows.Values()
+ if err != nil {
+ return nil, err
+ }
+ results = append(results, values)
+ }
+ } else {
+ // Return results as objects
+ fieldDescriptions := rows.FieldDescriptions()
+ for rows.Next() {
+ values, err := rows.Values()
+ if err != nil {
+ return nil, err
+ }
+
+ row := make(map[string]any)
+ for i, value := range values {
+ row[fieldDescriptions[i].Name] = value
+ }
+ results = append(results, row)
+ }
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+
+ return results, nil
+}
+
+// Transaction handles the /transaction endpoint.
+func (h *handler) Transaction(ctx context.Context, req TransactionRequest) ([]any, error) {
+ // Open a connection, then start a transaction.
+ conn, err := h.browserConn(ctx, req.AppID, req.DbID)
+ if err != nil {
+ return nil, err
+ }
+ defer fns.CloseIgnoreCtx(ctx, conn.Close)
+
+ tx, err := conn.Begin(ctx)
+ if err != nil {
+ return nil, err
+ }
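+ // Roll back with a fresh context so cleanup runs even if ctx is canceled.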
+ defer tx.Rollback(context.Background())
+
+ results := []any{}
+ for _, query := range req.Queries {
+ rows, err := tx.Query(ctx, query.SQL, query.Params...)
+ if err != nil {
+ return nil, err
+ }
+
+ var queryResults []map[string]any
+ fieldDescriptions := rows.FieldDescriptions()
+ for rows.Next() {
+ values, err := rows.Values()
+ if err != nil {
+ rows.Close()
+ return nil, err
+ }
+
+ row := make(map[string]any)
+ for i, value := range values {
+ row[fieldDescriptions[i].Name] = value
+ }
+ queryResults = append(queryResults, row)
+ }
+ rows.Close()
+
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+
+ results = append(results, queryResults)
+ }
+
+ // Commit the transaction
+ if err := tx.Commit(context.Background()); err != nil {
+ return nil, err
+ }
+
+ return results, nil
+}
+
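+// browserConn opens a pgx connection to the given database in the
+// app's local cluster, starting and setting up the cluster if needed.
+// The caller is responsible for closing the returned connection.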
+func (h *handler) browserConn(ctx context.Context, appID string, dbID string) (*pgx.Conn, error) {
+ // Find the latest app by platform ID or local ID.
+ app, err := h.apps.FindLatestByPlatformOrLocalID(appID)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to find latest app")
+ }
+
+ namespace, err := h.GetNamespace(ctx, appID)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to get namespace")
+ }
+
+ clusterType := sqldb.Run
+ cluster := h.run.ClusterMgr.Create(ctx, &sqldb.CreateParams{
+ ClusterID: sqldb.GetClusterID(app, clusterType, namespace),
+ Memfs: false,
+ })
+ appMeta, err := h.GetMeta(appID)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, err = cluster.Start(ctx, nil); err != nil {
+ return nil, errors.Wrap(err, "failed to start database cluster")
+ }
+ db, ok := cluster.GetDB(dbID)
+ if !ok {
+ if err := cluster.Setup(ctx, app.Root(), appMeta); err != nil {
+ return nil, errors.Wrap(err, "failed to setup database cluster")
+ }
+ db, ok = cluster.GetDB(dbID)
+ if !ok {
+ return nil, errors.Newf("failed to get database %s", dbID)
+ }
+ }
+
+ info, err := db.Cluster.Info(ctx)
+ if err != nil {
+ return nil, err
+ }
+ uri := info.ConnURI(db.EncoreName, info.Config.Superuser)
+ conn, err := pgx.Connect(ctx, uri)
+ if err != nil {
+ return nil, err
+ }
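+ // Register the Postgres "char" type (OID 18) with a text codec so
+ // its values decode as strings.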
+ conn.TypeMap().RegisterType(&pgtype.Type{
+ Name: "char",
+ OID: 18,
+ Codec: pgtype.TextCodec{},
+ })
+ return conn, nil
+}
diff --git a/cli/daemon/dash/server.go b/cli/daemon/dash/server.go
index 141f870afb..2b56a2627d 100644
--- a/cli/daemon/dash/server.go
+++ b/cli/daemon/dash/server.go
@@ -2,41 +2,56 @@ package dash
import (
"context"
- "embed"
"encoding/json"
"fmt"
- "io/fs"
"net/http"
- "strings"
+ "net/http/httputil"
"sync"
- "encr.dev/cli/daemon/run"
- "encr.dev/cli/daemon/runtime/trace"
- "encr.dev/cli/internal/jsonrpc2"
"github.com/gorilla/websocket"
"github.com/rs/zerolog/log"
+
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/cli/daemon/dash/ai"
+ "encr.dev/cli/daemon/dash/apiproxy"
+ "encr.dev/cli/daemon/dash/dashproxy"
+ "encr.dev/cli/daemon/engine/trace2"
+ "encr.dev/cli/daemon/namespace"
+ "encr.dev/cli/daemon/run"
+ "encr.dev/cli/internal/jsonrpc2"
+ "encr.dev/internal/conf"
+ "encr.dev/pkg/fns"
)
var upgrader = websocket.Upgrader{
CheckOrigin: func(*http.Request) bool { return true },
}
-//go:embed dashapp/dist/*
-var assets embed.FS
-
// NewServer starts a new server and returns it.
-func NewServer(runMgr *run.Manager, tr *trace.Store) *Server {
- assets, err := fs.Sub(assets, "dashapp/dist")
+func NewServer(appsMgr *apps.Manager, runMgr *run.Manager, nsMgr *namespace.Manager, tr trace2.Store, dashPort int) *Server {
+ proxy, err := dashproxy.New(conf.DevDashURL)
if err != nil {
- log.Fatal().Err(err).Msg("could not get dash assets")
+ log.Fatal().Err(err).Msg("could not create dash proxy")
}
+ apiProxy, err := apiproxy.New(conf.APIBaseURL + "/graphql")
+ if err != nil {
+ log.Fatal().Err(err).Msg("could not create graphql proxy")
+ }
+
+ aiMgr := ai.NewAIManager()
+
s := &Server{
- run: runMgr,
- tr: tr,
- assets: assets,
- traceCh: make(chan *trace.TraceMeta, 10),
- clients: make(map[chan<- *notification]struct{}),
+ proxy: proxy,
+ apiProxy: apiProxy,
+ apps: appsMgr,
+ run: runMgr,
+ ns: nsMgr,
+ tr: tr,
+ dashPort: dashPort,
+ traceCh: make(chan trace2.NewSpanEvent, 10),
+ clients: make(map[chan<- *notification]struct{}),
+ ai: aiMgr,
}
runMgr.AddListener(s)
@@ -47,27 +62,28 @@ func NewServer(runMgr *run.Manager, tr *trace.Store) *Server {
// Server is the http.Handler for serving the developer dashboard.
type Server struct {
- run *run.Manager
- tr *trace.Store
- traceCh chan *trace.TraceMeta
- assets fs.FS
+ proxy *httputil.ReverseProxy
+ apiProxy *httputil.ReverseProxy
+ apps *apps.Manager
+ run *run.Manager
+ ns *namespace.Manager
+ tr trace2.Store
+ dashPort int
+ traceCh chan trace2.NewSpanEvent
+ ai *ai.Manager
mu sync.Mutex
clients map[chan<- *notification]struct{}
}
func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- path := req.URL.Path
- fs := http.FileServer(http.FS(s.assets))
- switch {
- case path == "/__encore":
+ switch req.URL.Path {
+ case "/__encore":
s.WebSocket(w, req)
- case strings.HasPrefix(path, "/assets/") || path == "/favicon.ico":
- fs.ServeHTTP(w, req)
+ case "/__graphql":
+ s.apiProxy.ServeHTTP(w, req)
default:
- // Serve the index page for all other paths since we use client-side routing.
- req.URL.Path = "/"
- fs.ServeHTTP(w, req)
+ s.proxy.ServeHTTP(w, req)
}
}
@@ -78,17 +94,19 @@ func (s *Server) WebSocket(w http.ResponseWriter, req *http.Request) {
log.Error().Err(err).Msg("dash: could not upgrade websocket")
return
}
- defer c.Close()
+ defer fns.CloseIgnore(c)
log.Info().Msg("dash: websocket connection established")
stream := &wsStream{c: c}
conn := jsonrpc2.NewConn(stream)
- handler := &handler{rpc: conn, run: s.run, tr: s.tr}
+ handler := &handler{rpc: conn, apps: s.apps, run: s.run, ns: s.ns, tr: s.tr, ai: s.ai}
conn.Go(req.Context(), handler.Handle)
ch := make(chan *notification, 20)
s.addClient(ch)
defer s.removeClient(ch)
+
+ // nosemgrep: tools.semgrep-rules.semgrep-go.http-request-go-context
go handler.listenNotify(req.Context(), ch)
<-conn.Done()
@@ -113,11 +131,19 @@ func (s *Server) removeClient(ch chan *notification) {
delete(s.clients, ch)
}
+// hasClients reports whether there are any active clients.
+func (s *Server) hasClients() bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return len(s.clients) > 0
+}
+
type notification struct {
Method string
Params interface{}
}
+// notify notifies any active clients.
func (s *Server) notify(n *notification) {
var clients []chan<- *notification
s.mu.Lock()
diff --git a/cli/daemon/dash/trace.go b/cli/daemon/dash/trace.go
deleted file mode 100644
index 60c03fe8c5..0000000000
--- a/cli/daemon/dash/trace.go
+++ /dev/null
@@ -1,429 +0,0 @@
-package dash
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "net/url"
- "strconv"
- "time"
-
- "encr.dev/cli/daemon/runtime/trace"
- "encr.dev/cli/internal/dedent"
- tracepb "encr.dev/proto/encore/engine/trace"
- "github.com/alecthomas/chroma/formatters/html"
- "github.com/alecthomas/chroma/lexers"
- "github.com/alecthomas/chroma/styles"
- "github.com/gofrs/uuid"
- "github.com/golang/protobuf/jsonpb"
-)
-
-type Trace struct {
- ID uuid.UUID `json:"id"`
- Date time.Time `json:"date"`
- StartTime int64 `json:"start_time"`
- EndTime int64 `json:"end_time"`
- Root *Request `json:"root"`
- Auth *Request `json:"auth"`
- UID *string `json:"uid"`
- UserData []byte `json:"user_data"`
-
- Locations map[int32]json.RawMessage `json:"locations"`
-}
-
-type Request struct {
- ID string `json:"id"`
- Type string `json:"type"`
- ParentID *string `json:"parent_id"`
- Goid uint32 `json:"goid"`
- StartTime int64 `json:"start_time"`
- EndTime *int64 `json:"end_time,omitempty"`
- CallLoc *int32 `json:"call_loc"`
- DefLoc int32 `json:"def_loc"`
- Inputs [][]byte `json:"inputs"`
- Outputs [][]byte `json:"outputs"`
- Err []byte `json:"err"`
- Events []Event `json:"events"`
- Children []*Request `json:"children"`
-}
-
-type Goroutine struct {
- Type string `json:"type"`
- Goid uint32 `json:"goid"`
- CallLoc int32 `json:"call_loc"`
- StartTime int64 `json:"start_time"`
- EndTime *int64 `json:"end_time,omitempty"`
-}
-
-type DBTransaction struct {
- Type string `json:"type"`
- Goid uint32 `json:"goid"`
- Txid uint32 `json:"txid"`
- StartLoc int32 `json:"start_loc"`
- EndLoc int32 `json:"end_loc"`
- StartTime int64 `json:"start_time"`
- EndTime *int64 `json:"end_time,omitempty"`
- Err []byte `json:"err"`
- CompletionType string `json:"completion_type"`
- Queries []*DBQuery `json:"queries"`
-}
-
-type DBQuery struct {
- Type string `json:"type"`
- Goid uint32 `json:"goid"`
- Txid *uint32 `json:"txid"`
- CallLoc int32 `json:"call_loc"`
- StartTime int64 `json:"start_time"`
- EndTime *int64 `json:"end_time,omitempty"`
- Query []byte `json:"query"`
- HTMLQuery []byte `json:"html_query"`
- Err []byte `json:"err"`
-}
-
-type RPCCall struct {
- Type string `json:"type"`
- Goid uint32 `json:"goid"`
- ReqID string `json:"req_id"`
- CallLoc int32 `json:"call_loc"`
- DefLoc int32 `json:"def_loc"`
- StartTime int64 `json:"start_time"`
- EndTime *int64 `json:"end_time,omitempty"`
- Err []byte `json:"err"`
-}
-
-type HTTPCall struct {
- Type string `json:"type"`
- Goid uint32 `json:"goid"`
- ReqID string `json:"req_id"`
- StartTime int64 `json:"start_time"`
- EndTime *int64 `json:"end_time,omitempty"`
- Method string `json:"method"`
- Host string `json:"host"`
- Path string `json:"path"`
- URL string `json:"url"`
- StatusCode int `json:"status_code"`
- Err []byte `json:"err"`
- Metrics HTTPCallMetrics `json:"metrics"`
-}
-
-type HTTPCallMetrics struct {
- // Times are all 0 if not set
- GotConn *int64 `json:"got_conn,omitempty"`
- ConnReused bool `json:"conn_reused,omitempty"`
- DNSDone *int64 `json:"dns_done,omitempty"`
- TLSHandshakeDone *int64 `json:"tls_handshake_done,omitempty"`
- WroteHeaders *int64 `json:"wrote_headers,omitempty"`
- WroteRequest *int64 `json:"wrote_request,omitempty"`
- FirstResponseByte *int64 `json:"first_response,omitempty"`
- BodyClosed *int64 `json:"body_closed,omitempty"`
-}
-
-type Event interface {
- traceEvent()
-}
-
-func (Goroutine) traceEvent() {}
-func (DBTransaction) traceEvent() {}
-func (DBQuery) traceEvent() {}
-func (RPCCall) traceEvent() {}
-func (HTTPCall) traceEvent() {}
-
-func TransformTrace(ct *trace.TraceMeta) (*Trace, error) {
- traceID := traceUUID(ct.ID)
- tr := &Trace{
- ID: traceID,
- Date: ct.Date,
- }
-
- tp := &traceParser{}
- reqMap := make(map[string]*Request)
- for _, req := range ct.Reqs {
- if tp.startTime == 0 {
- tp.startTime = int64(req.StartTime / 1000)
- }
- r, err := tp.parseReq(req)
- if err != nil {
- return nil, fmt.Errorf("parsing request: %v", err)
- }
- reqMap[r.ID] = r
-
- switch {
- case req.Type == tracepb.Request_AUTH:
- if tr.Auth != nil {
- return nil, fmt.Errorf("got multiple auth calls in trace")
- }
- tr.Auth = r
- case r.ParentID == nil:
- if tr.Root != nil {
- return nil, fmt.Errorf("got multiple root requests (%v and %v)", tr.Root.ID, r.ID)
- }
- tr.Root = r
- default:
- parent, ok := reqMap[*r.ParentID]
- if !ok {
- return nil, fmt.Errorf("could not find parent request: %v", *r.ParentID)
- }
- parent.Children = append(parent.Children, r)
- }
- }
-
- if tr.Root == nil && tr.Auth == nil {
- return nil, fmt.Errorf("could not find a root request")
- }
-
- // Copy certain properties to the trace from the root request
- for _, req := range ct.Reqs {
- if t := tp.time(req.StartTime); t < tr.StartTime {
- tr.StartTime = t
- }
- if t := tp.time(req.EndTime); t > tr.EndTime {
- tr.EndTime = t
- }
- }
-
- locs := make(map[int32]json.RawMessage)
- m := &jsonpb.Marshaler{OrigName: true, EmitDefaults: true}
- for _, pkg := range ct.Meta.Pkgs {
- for _, e := range pkg.TraceNodes {
- s, err := m.MarshalToString(e)
- if err != nil {
- return nil, err
- }
- locs[e.Id] = json.RawMessage(s)
- }
- }
- tr.Locations = locs
- return tr, nil
-}
-
-type traceParser struct {
- startTime int64
- txCounter uint32
-}
-
-func (tp *traceParser) parseReq(req *tracepb.Request) (*Request, error) {
- // Prevent marshalling as null
- inputs, outputs := req.Inputs, req.Outputs
- if inputs == nil {
- inputs = [][]byte{}
- }
- if outputs == nil {
- outputs = [][]byte{}
- }
-
- r := &Request{
- Type: req.Type.String(),
- ID: strconv.FormatUint(req.SpanId, 10),
- ParentID: nullIntStr(req.ParentSpanId),
- Goid: req.Goid,
- StartTime: tp.time(req.StartTime),
- EndTime: tp.maybeTime(req.EndTime),
- CallLoc: nullInt32(req.CallLoc),
- DefLoc: req.DefLoc,
- Inputs: inputs,
- Outputs: outputs,
- Err: nullBytes(req.Err),
- Events: []Event{}, // prevent marshalling as null
- Children: []*Request{}, // prevent marshalling as null
- }
- for _, ev := range req.Events {
- switch e := ev.Data.(type) {
- case *tracepb.Event_Tx:
- tx, err := tp.parseTx(e.Tx)
- if err != nil {
- return nil, fmt.Errorf("parsing db transaction event: %v", err)
- }
- r.Events = append(r.Events, tx)
-
- case *tracepb.Event_Query:
- r.Events = append(r.Events, tp.parseQuery(e.Query, 0))
-
- case *tracepb.Event_Rpc:
- r.Events = append(r.Events, tp.parseCall(e.Rpc))
-
- case *tracepb.Event_Http:
- r.Events = append(r.Events, tp.parseHTTP(e.Http))
-
- case *tracepb.Event_Goroutine:
- r.Events = append(r.Events, tp.parseGoroutine(e.Goroutine))
- }
- }
-
- return r, nil
-}
-
-func (tp *traceParser) parseGoroutine(g *tracepb.Goroutine) *Goroutine {
- return &Goroutine{
- Type: "Goroutine",
- Goid: g.Goid,
- CallLoc: g.CallLoc,
- StartTime: tp.time(g.StartTime),
- EndTime: tp.maybeTime(g.EndTime),
- }
-}
-
-func (tp *traceParser) parseTx(tx *tracepb.DBTransaction) (*DBTransaction, error) {
- tp.txCounter++
- txid := tp.txCounter
- t := &DBTransaction{
- Type: "DBTransaction",
- Goid: tx.Goid,
- Txid: txid,
- StartLoc: tx.StartLoc,
- EndLoc: tx.EndLoc,
- StartTime: tp.time(tx.StartTime),
- EndTime: tp.maybeTime(tx.EndTime),
- Err: nullBytes(tx.Err),
- Queries: []*DBQuery{}, // prevent marshalling as null
- }
- switch tx.Completion {
- case tracepb.DBTransaction_COMMIT:
- t.CompletionType = "COMMIT"
- case tracepb.DBTransaction_ROLLBACK:
- t.CompletionType = "ROLLBACK"
- default:
- return nil, fmt.Errorf("unknown completion type %v", tx.Completion)
- }
- for _, q := range tx.Queries {
- t.Queries = append(t.Queries, tp.parseQuery(q, txid))
- }
- return t, nil
-}
-
-func (tp *traceParser) parseQuery(q *tracepb.DBQuery, txid uint32) *DBQuery {
- query := dedent.Bytes(q.Query)
- lexer := lexers.Get("postgres")
- iterator, err := lexer.Tokenise(nil, string(query))
- var htmlQuery []byte
- if err == nil {
- var buf bytes.Buffer
- formatter := html.New()
- style := styles.VisualStudio
- if err = formatter.Format(&buf, style, iterator); err == nil {
- htmlQuery = buf.Bytes()
- }
- }
-
- return &DBQuery{
- Type: "DBQuery",
- Goid: q.Goid,
- Txid: nullUint32(txid),
- CallLoc: q.CallLoc,
- StartTime: tp.time(q.StartTime),
- EndTime: tp.maybeTime(q.EndTime),
- Query: dedent.Bytes(q.Query),
- HTMLQuery: htmlQuery,
- Err: nullBytes(q.Err),
- }
-}
-
-func (tp *traceParser) parseCall(c *tracepb.RPCCall) *RPCCall {
- return &RPCCall{
- Type: "RPCCall",
- Goid: c.Goid,
- ReqID: strconv.FormatUint(c.SpanId, 10),
- CallLoc: c.CallLoc,
- DefLoc: c.DefLoc,
- StartTime: tp.time(c.StartTime),
- EndTime: tp.maybeTime(c.EndTime),
- Err: nullBytes(c.Err),
- }
-}
-
-func (tp *traceParser) parseHTTP(c *tracepb.HTTPCall) *HTTPCall {
- host := ""
- path := ""
- if u, err := url.Parse(c.Url); err == nil {
- host = u.Host
- path = u.Path
- }
-
- call := &HTTPCall{
- Type: "HTTPCall",
- Goid: c.Goid,
- ReqID: strconv.FormatUint(c.SpanId, 10),
- Method: c.Method,
- Host: host,
- Path: path,
- URL: c.Url,
- StatusCode: int(c.StatusCode),
- StartTime: tp.time(c.StartTime),
- EndTime: tp.maybeTime(c.EndTime),
- Err: nullBytes(c.Err),
- Metrics: HTTPCallMetrics{
- BodyClosed: tp.maybeTime(c.BodyClosedTime),
- },
- }
- m := &call.Metrics
- for _, ev := range c.Events {
- switch ev.Code {
- case tracepb.HTTPTraceEventCode_GOT_CONN:
- m.GotConn = tp.maybeTime(ev.Time)
- m.ConnReused = ev.GetGotConn().Reused
- case tracepb.HTTPTraceEventCode_DNS_DONE:
- m.DNSDone = tp.maybeTime(ev.Time)
- case tracepb.HTTPTraceEventCode_TLS_HANDSHAKE_DONE:
- m.TLSHandshakeDone = tp.maybeTime(ev.Time)
- case tracepb.HTTPTraceEventCode_WROTE_HEADERS:
- m.WroteHeaders = tp.maybeTime(ev.Time)
- case tracepb.HTTPTraceEventCode_WROTE_REQUEST:
- m.WroteRequest = tp.maybeTime(ev.Time)
- case tracepb.HTTPTraceEventCode_GOT_FIRST_RESPONSE_BYTE:
- m.FirstResponseByte = tp.maybeTime(ev.Time)
- }
- }
- return call
-}
-
-func (tp *traceParser) time(ns uint64) int64 {
- if ns == 0 {
- return -1
- }
- t := int64(ns/1000) - tp.startTime
- return t
-}
-
-func (tp *traceParser) maybeTime(ns uint64) *int64 {
- if ns == 0 {
- return nil
- }
- t := int64(ns/1000) - tp.startTime
- return &t
-}
-
-func nullIntStr(n uint64) *string {
- if n == 0 {
- return nil
- }
- s := strconv.FormatUint(n, 10)
- return &s
-}
-
-func nullInt32(n int32) *int32 {
- if n == 0 {
- return nil
- }
- return &n
-}
-
-func nullUint32(n uint32) *uint32 {
- if n == 0 {
- return nil
- }
- return &n
-}
-
-func nullBytes(b []byte) []byte {
- if len(b) == 0 {
- return nil
- }
- return b
-}
-
-func parseTime(ns uint64) time.Time {
- return time.Unix(0, int64(ns))
-}
-
-func traceUUID(traceID trace.ID) uuid.UUID {
- return uuid.UUID(traceID)
-}
diff --git a/cli/daemon/db.go b/cli/daemon/db.go
index e6dfbd1f9b..0719eb78fd 100644
--- a/cli/daemon/db.go
+++ b/cli/daemon/db.go
@@ -2,22 +2,42 @@ package daemon
import (
"context"
+ "errors"
"fmt"
"net"
"strconv"
- "sync"
"time"
- "encr.dev/cli/daemon/internal/appfile"
- "encr.dev/cli/daemon/internal/manifest"
- "encr.dev/cli/daemon/internal/runlog"
- "encr.dev/cli/daemon/sqldb"
- daemonpb "encr.dev/proto/encore/daemon"
"github.com/rs/zerolog/log"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
+
+ "encr.dev/cli/daemon/sqldb"
+ "encr.dev/cli/internal/platform"
+ "encr.dev/pkg/appfile"
+ "encr.dev/pkg/builder"
+ "encr.dev/pkg/builder/builderimpl"
+ "encr.dev/pkg/fns"
+ "encr.dev/pkg/pgproxy"
+ daemonpb "encr.dev/proto/encore/daemon"
)
+func toRoleType(role daemonpb.DBRole) sqldb.RoleType {
+ switch role {
+ case daemonpb.DBRole_DB_ROLE_READ:
+ return sqldb.RoleRead
+ case daemonpb.DBRole_DB_ROLE_WRITE:
+ return sqldb.RoleWrite
+ case daemonpb.DBRole_DB_ROLE_ADMIN:
+ return sqldb.RoleAdmin
+ case daemonpb.DBRole_DB_ROLE_SUPERUSER:
+ return sqldb.RoleSuperuser
+ default:
+ return sqldb.RoleRead
+ }
+}
+
// DBConnect starts the database and returns the DSN for connecting to it.
func (s *Server) DBConnect(ctx context.Context, req *daemonpb.DBConnectRequest) (*daemonpb.DBConnectResponse, error) {
if req.EnvName == "local" {
@@ -30,44 +50,109 @@ func (s *Server) DBConnect(ctx context.Context, req *daemonpb.DBConnectRequest)
} else if appID == "" {
return nil, errNotLinked
}
- port, passwd, err := sqldb.OneshotProxy(s.rc, appID, req.EnvName)
+ port, passwd, err := sqldb.OneshotProxy(appID, req.EnvName, toRoleType(req.Role))
if err != nil {
return nil, err
}
- dsn := fmt.Sprintf("postgresql://encore:%s@localhost:%d/%s?sslmode=disable", passwd, port, req.SvcName)
+ dsn := fmt.Sprintf("postgresql://encore:%s@127.0.0.1:%d/%s?sslmode=disable", passwd, port, req.DbName)
return &daemonpb.DBConnectResponse{Dsn: dsn}, nil
}
func (s *Server) dbConnectLocal(ctx context.Context, req *daemonpb.DBConnectRequest) (*daemonpb.DBConnectResponse, error) {
+ app, err := s.apps.Track(req.AppRoot)
+ if err != nil {
+ return nil, err
+ }
+
+ expSet, err := app.Experiments(nil)
+ if err != nil {
+ return nil, err
+ }
+
// Parse the app to figure out what infrastructure is needed.
- parse, err := s.parseApp(req.AppRoot, ".", false)
+ bld := builderimpl.Resolve(app.Lang(), expSet)
+ defer fns.CloseIgnore(bld)
+ parse, err := bld.Parse(ctx, builder.ParseParams{
+ Build: builder.DefaultBuildInfo(),
+ App: app,
+ Experiments: expSet,
+ WorkingDir: ".",
+ ParseTests: false,
+ })
if err != nil {
return nil, err
}
- man, err := manifest.ReadOrCreate(req.AppRoot)
+ // The Encore IDE plugins request a connection to the database "_any_"
+ // since they are unaware of database names ahead of time.
+ //
+ // In that case, use the first database name in the app's schema for
+ // the returned connection string.
+ if req.DbName == "_any_" {
+ req.DbName = ""
+ if len(parse.Meta.SqlDatabases) > 0 {
+ req.DbName = parse.Meta.SqlDatabases[0].Name
+ }
+
+ // If no database has been found, return an error
+ if req.DbName == "" {
+ return nil, errDatabaseNotFound
+ }
+ } else {
+ // Otherwise, check that the requested database exists.
+ databaseExists := false
+ for _, s := range parse.Meta.SqlDatabases {
+ if s.Name == req.DbName {
+ databaseExists = true
+ break
+ }
+ }
+ if !databaseExists {
+ return nil, errDatabaseNotFound
+ }
+ }
+
+ clusterNS, err := s.namespaceOrActive(ctx, app, req.Namespace)
if err != nil {
return nil, err
}
- clusterID := man.AppID
- log := log.With().Str("appID", man.AppID).Logger()
+ var passwd string
+ clusterType := getClusterType(req)
+ switch clusterType {
+ case sqldb.Run:
+ // If the user didn't specify a namespace, leave it out from the password
+ // so it uses the active namespace.
+ if req.Namespace != nil {
+ passwd = "local-" + string(clusterNS.ID)
+ } else {
+ passwd = "local"
+ }
+ default:
+ passwd = fmt.Sprintf("%s-%s", clusterType, clusterNS.ID)
+ }
+
+ clusterID := sqldb.GetClusterID(app, clusterType, clusterNS)
+ log := log.With().Interface("cluster", clusterID).Logger()
log.Info().Msg("setting up database cluster")
- cluster := s.cm.Init(ctx, &sqldb.InitParams{
+ cluster := s.cm.Create(ctx, &sqldb.CreateParams{
ClusterID: clusterID,
- Meta: parse.Meta,
- Memfs: false,
+ Memfs: clusterType.Memfs(),
})
+ if cluster.IsExternalDB(req.DbName) {
+ return nil, errors.New("connecting to an external database is disabled")
+ }
// TODO would be nice to stream this to the CLI
- if err := cluster.Start(runlog.OS()); err != nil {
+ if _, err := cluster.Start(ctx, nil); err != nil {
log.Error().Err(err).Msg("failed to start db cluster")
return nil, err
- } else if err := cluster.Create(ctx, req.AppRoot, parse.Meta); err != nil {
+ } else if err := cluster.Setup(ctx, req.AppRoot, parse.Meta); err != nil {
log.Error().Err(err).Msg("failed to create databases")
return nil, err
}
log.Info().Msg("created database cluster")
- dsn := fmt.Sprintf("postgresql://encore:%s@localhost:%d/%s?sslmode=disable", clusterID, s.mgr.DBProxyPort, req.SvcName)
+
+ dsn := fmt.Sprintf("postgresql://%s:%s@127.0.0.1:%d/%s?sslmode=disable",
+ app.PlatformOrLocalID(), passwd, s.mgr.DBProxyPort, req.DbName)
return &daemonpb.DBConnectResponse{Dsn: dsn}, nil
}
@@ -87,12 +172,11 @@ func (s *Server) DBProxy(params *daemonpb.DBProxyRequest, stream daemonpb.Daemon
if err != nil {
return status.Error(codes.FailedPrecondition, err.Error())
}
- defer func() {
- if err != nil {
- ln.Close()
- }
- }()
port := ln.Addr().(*net.TCPAddr).Port
+ go func() {
+ <-ctx.Done()
+ _ = ln.Close()
+ }()
log.Info().Msgf("dbproxy: listening on localhost:%d", port)
defer log.Info().Msg("dbproxy: proxy closed")
@@ -105,43 +189,82 @@ func (s *Server) DBProxy(params *daemonpb.DBProxyRequest, stream daemonpb.Daemon
return err
}
- var handler func(context.Context, net.Conn)
+ var runProxy func() error
if params.EnvName == "local" {
+ app, err := s.apps.Track(params.AppRoot)
+ if err != nil {
+ return err
+ }
+
+ expSet, err := app.Experiments(nil)
+ if err != nil {
+ return err
+ }
+
// Parse the app to figure out what infrastructure is needed.
- parse, err := s.parseApp(params.AppRoot, ".", false)
+ bld := builderimpl.Resolve(app.Lang(), expSet)
+ defer fns.CloseIgnore(bld)
+ parse, err := bld.Parse(ctx, builder.ParseParams{
+ Build: builder.DefaultBuildInfo(),
+ App: app,
+ Experiments: expSet,
+ WorkingDir: ".",
+ ParseTests: false,
+ })
if err != nil {
return err
}
- man, err := manifest.ReadOrCreate(params.AppRoot)
+ clusterType := getClusterType(params)
+
+ clusterNS, err := s.namespaceOrActive(stream.Context(), app, params.Namespace)
if err != nil {
return err
}
- clusterID := man.AppID
- cluster := s.cm.Init(ctx, &sqldb.InitParams{
+ clusterID := sqldb.GetClusterID(app, clusterType, clusterNS)
+ cluster := s.cm.Create(ctx, &sqldb.CreateParams{
ClusterID: clusterID,
- Meta: parse.Meta,
- Memfs: false,
+ Memfs: clusterType.Memfs(),
})
- if err := cluster.Start(streamLog{stream: stream}); err != nil {
+ if _, err := cluster.Start(ctx, nil); err != nil {
return err
- } else if err := cluster.Create(ctx, params.AppRoot, parse.Meta); err != nil {
+ } else if err := cluster.Setup(ctx, params.AppRoot, parse.Meta); err != nil {
return err
}
- handler = func(ctx context.Context, frontend net.Conn) {
- s.cm.PreauthProxyConn(frontend, clusterID)
+ runProxy = func() error {
+ return serveProxy(ctx, ln, func(ctx context.Context, client net.Conn) {
+ _ = s.cm.PreauthProxyConn(client, clusterID)
+ })
}
} else {
- handler = func(ctx context.Context, frontend net.Conn) {
- sqldb.ProxyRemoteConn(ctx, s.rc, frontend, "", appID, params.EnvName)
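+ // For remote environments, proxy each connection over a WebSocket to
+ // the platform, forwarding the client's raw Postgres startup message.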
+ proxy := &pgproxy.SingleBackendProxy{
+ Log: log.Logger,
+ RequirePassword: false,
+ FrontendTLS: nil,
+ DialBackend: func(ctx context.Context, startup *pgproxy.StartupData) (pgproxy.LogicalConn, error) {
+ startupData, err := startup.Raw.Encode(nil)
+ if err != nil {
+ return nil, err
+ }
+ ws, err := platform.DBConnect(ctx, appID, params.EnvName, startup.Database, toRoleType(params.Role).String(), startupData)
+ if err != nil {
+ return nil, err
+ }
+ return &sqldb.WebsocketLogicalConn{Conn: ws}, nil
+ },
+ }
+
+ runProxy = func() error {
+ return proxy.Serve(ctx, ln)
}
}
msgs := make(chan string, 10)
+ defer close(msgs)
go func() {
for msg := range msgs {
- stream.Send(&daemonpb.CommandMessage{Msg: &daemonpb.CommandMessage_Output{
+ _ = stream.Send(&daemonpb.CommandMessage{Msg: &daemonpb.CommandMessage_Output{
Output: &daemonpb.CommandOutput{
Stdout: []byte(msg),
},
@@ -149,68 +272,72 @@ func (s *Server) DBProxy(params *daemonpb.DBProxyRequest, stream daemonpb.Daemon
}
}()
- var wg sync.WaitGroup
- err = serveProxy(ctx, ln, func(ctx context.Context, frontend net.Conn) {
- wg.Add(1)
- defer wg.Done()
- msgs <- "dbproxy: connection opened\n"
- handler(ctx, frontend)
- msgs <- "dbproxy: connection closed\n"
- })
-
- go func() {
- // Close the msgs chan when all connections are closed
- wg.Wait()
- close(msgs)
- }()
-
- return err
+ return runProxy()
}
// DBReset resets the given databases, recreating them from scratch.
func (s *Server) DBReset(req *daemonpb.DBResetRequest, stream daemonpb.Daemon_DBResetServer) error {
sendErr := func(err error) {
- stream.Send(&daemonpb.CommandMessage{
+ _ = stream.Send(&daemonpb.CommandMessage{
Msg: &daemonpb.CommandMessage_Output{Output: &daemonpb.CommandOutput{
Stderr: []byte(err.Error() + "\n"),
}},
})
- stream.Send(&daemonpb.CommandMessage{
+ _ = stream.Send(&daemonpb.CommandMessage{
Msg: &daemonpb.CommandMessage_Exit{Exit: &daemonpb.CommandExit{
Code: 1,
}},
})
}
+ app, err := s.apps.Track(req.AppRoot)
+ if err != nil {
+ sendErr(err)
+ return nil
+ }
+
+ expSet, err := app.Experiments(nil)
+ if err != nil {
+ sendErr(err)
+ return nil
+ }
+
// Parse the app to figure out what infrastructure is needed.
- parse, err := s.parseApp(req.AppRoot, ".", false)
+ bld := builderimpl.Resolve(app.Lang(), expSet)
+ defer fns.CloseIgnore(bld)
+ parse, err := bld.Parse(stream.Context(), builder.ParseParams{
+ Build: builder.DefaultBuildInfo(),
+ App: app,
+ Experiments: expSet,
+ WorkingDir: ".",
+ ParseTests: false,
+ })
if err != nil {
sendErr(err)
return nil
}
- man, err := manifest.ReadOrCreate(req.AppRoot)
+ clusterNS, err := s.namespaceOrActive(stream.Context(), app, req.Namespace)
if err != nil {
sendErr(err)
return nil
}
- clusterID := man.AppID
+ clusterType := getClusterType(req)
+ clusterID := sqldb.GetClusterID(app, clusterType, clusterNS)
cluster, ok := s.cm.Get(clusterID)
if !ok {
- cluster = s.cm.Init(stream.Context(), &sqldb.InitParams{
+ cluster = s.cm.Create(stream.Context(), &sqldb.CreateParams{
ClusterID: clusterID,
- Memfs: false,
- Meta: parse.Meta,
+ Memfs: clusterType.Memfs(),
})
}
- if err := cluster.Start(streamLog{stream: stream}); err != nil {
+ if _, err := cluster.Start(stream.Context(), nil); err != nil {
sendErr(err)
return nil
}
-
- err = cluster.Recreate(stream.Context(), req.AppRoot, req.Services, parse.Meta)
+ err = cluster.Recreate(stream.Context(), req.AppRoot, req.DatabaseNames, parse.Meta)
if err != nil {
sendErr(err)
}
@@ -241,3 +368,16 @@ func serveProxy(ctx context.Context, ln net.Listener, handler func(context.Conte
go handler(ctx, frontend)
}
}
+
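+// getClusterType maps the requested cluster type to the corresponding
+// sqldb cluster type, defaulting to the run cluster.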
+func getClusterType(req interface{ GetClusterType() daemonpb.DBClusterType }) sqldb.ClusterType {
+ switch req.GetClusterType() {
+ case daemonpb.DBClusterType_DB_CLUSTER_TYPE_RUN:
+ return sqldb.Run
+ case daemonpb.DBClusterType_DB_CLUSTER_TYPE_TEST:
+ return sqldb.Test
+ case daemonpb.DBClusterType_DB_CLUSTER_TYPE_SHADOW:
+ return sqldb.Shadow
+ default:
+ return sqldb.Run
+ }
+}
diff --git a/cli/daemon/debug.go b/cli/daemon/debug.go
new file mode 100644
index 0000000000..840591882a
--- /dev/null
+++ b/cli/daemon/debug.go
@@ -0,0 +1,83 @@
+package daemon
+
+import (
+ "bytes"
+ "context"
+ "runtime"
+
+ "github.com/golang/protobuf/jsonpb"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+
+ "encr.dev/internal/version"
+ "encr.dev/pkg/builder"
+ "encr.dev/pkg/builder/builderimpl"
+ "encr.dev/pkg/fns"
+ "encr.dev/pkg/vcs"
+ daemonpb "encr.dev/proto/encore/daemon"
+)
+
+func (s *Server) DumpMeta(ctx context.Context, req *daemonpb.DumpMetaRequest) (*daemonpb.DumpMetaResponse, error) {
+ app, err := s.apps.Track(req.AppRoot)
+ if err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+
+ expSet, err := app.Experiments(req.Environ)
+ if err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+
+ // TODO: We should check that all secret keys are defined as well.
+
+ vcsRevision := vcs.GetRevision(app.Root())
+ buildInfo := builder.BuildInfo{
+ BuildTags: builder.LocalBuildTags,
+ CgoEnabled: true,
+ StaticLink: false,
+ DebugMode: builder.DebugModeDisabled,
+ Environ: req.Environ,
+ GOOS: runtime.GOOS,
+ GOARCH: runtime.GOARCH,
+ KeepOutput: false,
+ Revision: vcsRevision.Revision,
+ UncommittedChanges: vcsRevision.Uncommitted,
+
+ // Use the local JS runtime if this is a development build.
+ UseLocalJSRuntime: version.Channel == version.DevBuild,
+ }
+
+ bld := builderimpl.Resolve(app.Lang(), expSet)
+ defer fns.CloseIgnore(bld)
+ parse, err := bld.Parse(ctx, builder.ParseParams{
+ Build: buildInfo,
+ App: app,
+ Experiments: expSet,
+ WorkingDir: req.WorkingDir,
+ ParseTests: req.ParseTests,
+ })
+ if err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+
+ var out []byte
+ switch req.Format {
+ case daemonpb.DumpMetaRequest_FORMAT_PROTO:
+ out, err = proto.Marshal(parse.Meta)
+ if err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+ case daemonpb.DumpMetaRequest_FORMAT_JSON:
+ var buf bytes.Buffer
+ m := &jsonpb.Marshaler{OrigName: true, EmitDefaults: true, Indent: " "}
+ if err := m.Marshal(&buf, parse.Meta); err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+ out = buf.Bytes()
+ default:
+ return nil, status.Error(codes.InvalidArgument, "invalid format")
+ }
+
+ return &daemonpb.DumpMetaResponse{Meta: out}, nil
+}
diff --git a/cli/daemon/engine/runtime.go b/cli/daemon/engine/runtime.go
new file mode 100644
index 0000000000..2fe2aa28e0
--- /dev/null
+++ b/cli/daemon/engine/runtime.go
@@ -0,0 +1,89 @@
+package engine
+
+import (
+ "bufio"
+ "fmt"
+ "net/http"
+ "strconv"
+
+ "github.com/cockroachdb/errors"
+
+ tracemodel "encore.dev/appruntime/exported/trace2"
+ "encr.dev/cli/daemon/engine/trace2"
+ "encr.dev/cli/daemon/run"
+)
+
+type server struct {
+ runMgr *run.Manager
+ rec *trace2.Recorder
+}
+
+func NewServer(runMgr *run.Manager, rec *trace2.Recorder) http.Handler {
+ s := &server{runMgr: runMgr, rec: rec}
+ return s
+}
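+
+// Traces are recorded by POSTing the binary trace data to /trace with
+// metadata headers, for example (illustrative values):
+//
+//	X-Encore-Trace-Version: 13
+//	X-Encore-Env-ID: <running process ID, or "test">
+//	X-Encore-App-ID: <app ID; required when the env ID is "test">
+//	X-Encore-Trace-TimeAnchor: <time anchor in its text encoding>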
+
+// ServeHTTP implements http.Handler.
+func (s *server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ switch req.URL.Path {
+ case "/trace":
+ s.RecordTrace(w, req)
+ default:
+ http.Error(w, "Not Found", http.StatusNotFound)
+ }
+}
+
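+// RecordTrace parses trace metadata from the request headers and
+// passes the streamed trace body to the recorder.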
+func (s *server) RecordTrace(w http.ResponseWriter, req *http.Request) {
+ data, err := s.parseTraceData(req)
+ if err != nil {
+ http.Error(w, "unable to parse trace header: "+err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ err = s.rec.RecordTrace(data)
+ if err != nil {
+ http.Error(w, "unable to record trace: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+}
+
+func (s *server) parseTraceData(req *http.Request) (d trace2.RecordData, err error) {
+ // Parse trace version
+ traceVersion := req.Header.Get("X-Encore-Trace-Version")
+ version, err := strconv.Atoi(traceVersion)
+ if err != nil || version <= 0 {
+ return d, fmt.Errorf("bad trace protocol version %q", traceVersion)
+ }
+ d.TraceVersion = tracemodel.Version(version)
+
+ pid := req.Header.Get("X-Encore-Env-ID")
+ if pid == "test" {
+ appID := req.Header.Get("X-Encore-App-ID")
+ if appID == "" {
+ return d, errors.New("missing X-Encore-App-ID header")
+ }
+ d.Meta = &trace2.Meta{AppID: appID}
+ } else {
+ if pid == "" {
+ return d, errors.New("missing X-Encore-Env-ID header")
+ }
+ proc := s.runMgr.FindProc(pid)
+ if proc == nil {
+ return d, errors.Newf("process %q is not running", pid)
+ }
+ d.Meta = &trace2.Meta{AppID: proc.Run.App.PlatformOrLocalID()}
+ }
+
+ // Parse time anchor
+ timeAnchor := req.Header.Get("X-Encore-Trace-TimeAnchor")
+ if timeAnchor == "" {
+ return d, errors.New("missing X-Encore-Trace-TimeAnchor header")
+ }
+
+ if err := d.Anchor.UnmarshalText([]byte(timeAnchor)); err != nil {
+ return d, errors.Wrap(err, "unable to parse X-Encore-Trace-TimeAnchor header")
+ }
+
+ d.Buf = bufio.NewReader(req.Body)
+ return d, nil
+}
diff --git a/cli/daemon/engine/trace/parse_test.go b/cli/daemon/engine/trace/parse_test.go
new file mode 100644
index 0000000000..3b8d8ebb1c
--- /dev/null
+++ b/cli/daemon/engine/trace/parse_test.go
@@ -0,0 +1,110 @@
+package trace
+
+import (
+ "net/http"
+ "testing"
+ "time"
+
+ "github.com/rs/zerolog"
+
+ "encore.dev/appruntime/exported/model"
+ "encore.dev/appruntime/exported/trace"
+ "encore.dev/beta/errs"
+)
+
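+// parseTest is a generic test case: emit writes val into a trace log,
+// and Data returns the serialized bytes for the parser to consume.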
+type parseTest[T any] struct {
+ name string
+ val T
+ emit func(l *trace.Log, val T)
+}
+
+func (pt parseTest[T]) Name() string {
+ return pt.name
+}
+
+func (pt parseTest[T]) Data() []byte {
+ log := &trace.Log{}
+ pt.emit(log, pt.val)
+ return log.GetAndClear()
+}
+
+func TestParse(t *testing.T) {
+ type reqResp struct {
+ Req *model.Request
+ Resp *model.Response
+ }
+ tests := []interface {
+ Name() string
+ Data() []byte
+ }{
+ parseTest[*model.Request]{
+ name: "basic",
+ val: &model.Request{
+ Type: model.RPCCall,
+ SpanID: model.SpanID{0, 0, 0, 0, 0, 0, 0, 1},
+ ParentSpanID: model.SpanID{},
+ Start: time.Now(),
+ Traced: true,
+ RPCData: &model.RPCData{
+ Desc: &model.RPCDesc{
+ Service: "service",
+ Endpoint: "endpoint",
+ Raw: false,
+ },
+ HTTPMethod: "POST",
+ Path: "/path/hello",
+ PathParams: model.PathParams{{Name: "one", Value: "hello"}},
+ UserID: "",
+ AuthData: nil,
+ NonRawPayload: []byte(`{"Body":"foo"}`),
+ RequestHeaders: http.Header{"Content-Type": []string{"application/json"}},
+ },
+ },
+ emit: func(l *trace.Log, val *model.Request) { l.BeginRequest(val, 0) },
+ },
+ parseTest[reqResp]{
+ name: "raw_err",
+ val: reqResp{
+ Req: &model.Request{
+ Type: model.RPCCall,
+ SpanID: model.SpanID{0, 0, 0, 0, 0, 0, 0, 1},
+ ParentSpanID: model.SpanID{},
+ Start: time.Now(),
+ Traced: true,
+ RPCData: &model.RPCData{
+ Desc: &model.RPCDesc{
+ Service: "service",
+ Endpoint: "endpoint",
+ Raw: true,
+ },
+ HTTPMethod: "POST",
+ Path: "/path/hello",
+ PathParams: model.PathParams{{Name: "one", Value: "hello"}},
+ RequestHeaders: http.Header{"Content-Type": []string{"application/json"}},
+ },
+ },
+ Resp: &model.Response{
+ HTTPStatus: 500,
+ Err: &errs.Error{Code: errs.Unavailable},
+ RawRequestPayload: []byte("foo"),
+ RawResponsePayload: []byte("bar"),
+ },
+ },
+ emit: func(l *trace.Log, val reqResp) {
+ l.BeginRequest(val.Req, 0)
+ l.FinishRequest(val.Req, val.Resp)
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.Name(), func(t *testing.T) {
+ data := tt.Data()
+ logger := zerolog.New(zerolog.NewTestWriter(t))
+ _, err := Parse(&logger, ID{}, data, trace.CurrentVersion, nil)
+ if err != nil {
+ t.Fatalf("failed to parse trace: %v", err)
+ }
+ })
+ }
+}
diff --git a/cli/daemon/engine/trace/trace.go b/cli/daemon/engine/trace/trace.go
new file mode 100644
index 0000000000..7c8cbe0aa6
--- /dev/null
+++ b/cli/daemon/engine/trace/trace.go
@@ -0,0 +1,1398 @@
+package trace
+
+import (
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/rs/zerolog"
+ "github.com/rs/zerolog/log"
+ "google.golang.org/protobuf/types/known/timestamppb"
+
+ "encore.dev/appruntime/exported/trace"
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/cli/daemon/internal/sym"
+ "encr.dev/pkg/eerror"
+ tracepb "encr.dev/proto/encore/engine/trace"
+ metapb "encr.dev/proto/encore/parser/meta/v1"
+)
+
+type ID [16]byte
+
+type TraceMeta struct {
+ ID ID
+ Reqs []*tracepb.Request
+ App *apps.Instance
+ EnvID string
+ Date time.Time
+ Meta *metapb.Data
+}
+
+// A Store stores traces received from running applications.
+type Store struct {
+ trmu sync.Mutex
+ traces map[string][]*TraceMeta
+ requestIDMapping map[string]*tracepb.Request // Trace ID -> Request
+
+ lnmu sync.Mutex
+ ln map[chan<- *TraceMeta]struct{}
+}
+
+func NewStore() *Store {
+ return &Store{
+ traces: make(map[string][]*TraceMeta),
+ requestIDMapping: make(map[string]*tracepb.Request),
+ ln: make(map[chan<- *TraceMeta]struct{}),
+ }
+}
+
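+// Listen registers ch to be notified of newly stored traces.
+// Notifications are sent without blocking and are dropped if ch is full.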
+func (st *Store) Listen(ch chan<- *TraceMeta) {
+ st.lnmu.Lock()
+ st.ln[ch] = struct{}{}
+ st.lnmu.Unlock()
+}
+
+func (st *Store) Store(ctx context.Context, tr *TraceMeta) error {
+ appID := tr.App.PlatformOrLocalID()
+ st.trmu.Lock()
+ st.traces[appID] = append(st.traces[appID], tr)
+
+ const limit = 100
+ // Remove earlier traces if we exceed the limit.
+ if n := len(st.traces[appID]); n > limit {
+ st.traces[appID] = st.traces[appID][n-limit:]
+ }
+
+ for _, req := range tr.Reqs {
+ st.requestIDMapping[req.TraceId.String()] = req
+ }
+
+ st.trmu.Unlock()
+
+ st.lnmu.Lock()
+ defer st.lnmu.Unlock()
+ for ch := range st.ln {
+ // Don't block trying to send
+ select {
+ case ch <- tr:
+ default:
+ }
+ }
+ return nil
+}
+
+func (st *Store) GetRootTrace(traceID *tracepb.TraceID) (rtn *tracepb.Request) {
+ st.trmu.Lock()
+ defer st.trmu.Unlock()
+
+ next := st.requestIDMapping[traceID.String()]
+ for next != nil {
+ rtn = next
+ next = st.requestIDMapping[rtn.ParentTraceId.String()]
+ }
+
+ return rtn
+}
+
+func (st *Store) List(appID string) []*TraceMeta {
+ st.trmu.Lock()
+ tr := st.traces[appID]
+ st.trmu.Unlock()
+ return tr
+}
+
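+// Parse parses raw trace data into its constituent requests.
+// symTable may be nil if no symbol table is available.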
+func Parse(log *zerolog.Logger, traceID ID, data []byte, version trace.Version, symTable SymTabler) ([]*tracepb.Request, error) {
+ id := &tracepb.TraceID{
+ Low: bin.Uint64(traceID[:8]),
+ High: bin.Uint64(traceID[8:]),
+ }
+ tp := &traceParser{
+ log: log,
+ version: version,
+ traceReader: traceReader{buf: data},
+ symTable: symTable,
+ traceID: id,
+ reqMap: make(map[uint64]*tracepb.Request),
+ txMap: make(map[uint64]*tracepb.DBTransaction),
+ queryMap: make(map[uint64]*tracepb.DBQuery),
+ callMap: make(map[uint64]interface{}),
+ goMap: make(map[goKey]*tracepb.Goroutine),
+ httpMap: make(map[uint64]*tracepb.HTTPCall),
+ publishMap: make(map[uint64]*tracepb.PubsubMsgPublished),
+ serviceInits: make(map[uint64]*tracepb.ServiceInit),
+ cacheMap: make(map[uint64]*tracepb.CacheOp),
+ }
+ if err := tp.Parse(); err != nil {
+ return nil, err
+ }
+ return tp.reqs, nil
+}
+
+type goKey struct {
+ spanID uint64
+ goid uint32
+}
+
+type SymTabler interface {
+ SymTable(ctx context.Context) (*sym.Table, error)
+}
+
+type traceParser struct {
+ traceReader
+ log *zerolog.Logger
+ version trace.Version
+ symTable SymTabler
+ traceID *tracepb.TraceID
+ reqs []*tracepb.Request
+ reqMap map[uint64]*tracepb.Request
+ txMap map[uint64]*tracepb.DBTransaction
+ queryMap map[uint64]*tracepb.DBQuery
+ callMap map[uint64]interface{} // *RPCCall or *AuthCall
+ httpMap map[uint64]*tracepb.HTTPCall
+ goMap map[goKey]*tracepb.Goroutine
+ publishMap map[uint64]*tracepb.PubsubMsgPublished
+ serviceInits map[uint64]*tracepb.ServiceInit
+ cacheMap map[uint64]*tracepb.CacheOp
+}
+
+func (tp *traceParser) Parse() error {
+ for i := 0; !tp.Done(); i++ {
+ ev := trace.EventType(tp.Byte())
+ ts := tp.Uint64()
+ size := int(tp.Uint32())
+ startOff := tp.Offset()
+
+ var err error
+ if tp.version >= 3 {
+ err = tp.parseEventV3(ev, ts, size)
+ } else {
+ err = tp.parseEventV1(byte(ev), ts, size)
+ }
+
+ if errors.Is(err, errUnknownEvent) {
+ tp.log.Info().Msgf("trace: event #%d: unknown event type %s, skipping", i, ev.String())
+ tp.Skip(size)
+ err = nil
+ } else if err != nil {
+ return eerror.WithMeta(err, map[string]any{"event#": i, "event": ev.String()})
+ }
+
+ if tp.Overflow() {
+ return eerror.New("trace_parser", "invalid trace format: reader overflow parsing event", map[string]any{"event#": i, "event": ev})
+ } else if off, want := tp.Offset(), startOff+size; off < want {
+ tp.log.Warn().Msgf("trace: event #%d: parsing event=%s ended before end of frame, skipping ahead %d bytes", i, ev, want-off)
+ tp.Skip(want - off)
+ } else if off > want {
+ return eerror.New("trace_parser", "event exceed frame size", map[string]any{"event#": i, "event": ev.String(), "excess": off - want})
+ }
+ }
+
+ return nil
+}
+
+var errUnknownEvent = errors.New("unknown event")
+
+func (tp *traceParser) parseEventV3(ev trace.EventType, ts uint64, size int) error {
+ switch ev {
+ case trace.RequestStart:
+ return tp.requestStart(ts)
+ case trace.RequestEnd:
+ return tp.requestEnd(ts)
+ case trace.GoStart:
+ return tp.goroutineStart(ts)
+ case trace.GoEnd:
+ return tp.goroutineEnd(ts)
+ case trace.GoClear:
+ return tp.goroutineClear(ts)
+ case trace.TxStart:
+ return tp.transactionStart(ts)
+ case trace.TxEnd:
+ return tp.transactionEnd(ts)
+ case trace.QueryStart:
+ return tp.queryStart(ts)
+ case trace.QueryEnd:
+ return tp.queryEnd(ts)
+ case trace.CallStart:
+ return tp.callStart(ts, size)
+ case trace.CallEnd:
+ return tp.callEnd(ts)
+ case trace.AuthStart, trace.AuthEnd:
+ // Skip these events for now
+ tp.Skip(size)
+ return nil
+
+ case trace.HTTPCallStart:
+ return tp.httpStart(ts)
+ case trace.HTTPCallEnd:
+ return tp.httpEnd(ts)
+ case trace.HTTPCallBodyClosed:
+ return tp.httpBodyClosed(ts)
+ case trace.LogMessage:
+ return tp.logMessage(ts)
+ case trace.PublishStart:
+ return tp.publishStart(ts)
+ case trace.PublishEnd:
+ return tp.publishEnd(ts)
+ case trace.ServiceInitStart:
+ return tp.serviceInitStart(ts)
+ case trace.ServiceInitEnd:
+ return tp.serviceInitEnd(ts)
+ case trace.CacheOpStart:
+ return tp.cacheOpStart(ts)
+ case trace.CacheOpEnd:
+ return tp.cacheOpEnd(ts)
+ case trace.BodyStream:
+ return tp.bodyStream(ts)
+ default:
+ return errUnknownEvent
+ }
+}
+
+func (tp *traceParser) parseEventV1(ev byte, ts uint64, size int) error {
+ switch ev {
+ case 0x01:
+ return tp.requestStart(ts)
+ case 0x02:
+ return tp.requestEnd(ts)
+ case 0x03:
+ return tp.goroutineStart(ts)
+ case 0x04:
+ return tp.goroutineEnd(ts)
+ case 0x05:
+ return tp.goroutineClear(ts)
+ case 0x06:
+ return tp.transactionStart(ts)
+ case 0x07:
+ return tp.transactionEnd(ts)
+ case 0x08:
+ return tp.queryStart(ts)
+ case 0x09:
+ return tp.queryEnd(ts)
+ case 0x10:
+ return tp.callStart(ts, size)
+ case 0x11:
+ return tp.callEnd(ts)
+ case 0x12, 0x13:
+ // Skip these events for now
+ tp.Skip(size)
+ return nil
+
+ default:
+ return errUnknownEvent
+ }
+}
+
+func (tp *traceParser) requestStart(ts uint64) error {
+ typ, err := tp.parseRequestType()
+ if err != nil {
+ return err
+ }
+
+ // Determine the absolute start time.
+ var absStart time.Time
+ if tp.version >= 6 {
+ absStart = tp.Time()
+ } else {
+ // We don't have enough information to determine the exact start time,
+ // but approximate it from the monotonic clock reading
+ absStart = time.Unix(0, int64(ts))
+ }
+
+ // Set the trace ID
+ traceID := tp.traceID
+ if tp.version >= 11 {
+ parsedTraceID := tp.parseTraceID()
+ if parsedTraceID.Low != 0 || parsedTraceID.High != 0 {
+ traceID = parsedTraceID
+ }
+ }
+ var parentTraceID *tracepb.TraceID
+ if tp.version >= 12 {
+ parentTraceID = tp.parseTraceID()
+ }
+
+ spanID := tp.Uint64()
+ parentSpanID := tp.Uint64()
+
+ var service, endpoint string
+ if tp.version < 6 {
+ service, endpoint = "unknown", "Unknown"
+ } else if tp.version < 9 {
+ service = tp.String()
+ endpoint = tp.String()
+ }
+
+ goid := uint32(tp.UVarint())
+ if tp.version < 9 {
+ _ = tp.UVarint() // skip CallLoc: no longer used
+ }
+ defLoc := int32(tp.UVarint())
+
+ req := &tracepb.Request{
+ TraceId: traceID,
+ ParentTraceId: parentTraceID,
+ SpanId: spanID,
+ ParentSpanId: parentSpanID,
+ StartTime: ts,
+ ServiceName: service,
+ EndpointName: endpoint,
+ AbsStartTime: uint64(absStart.UnixNano()),
+ // EndTime not set yet
+ DefLoc: defLoc,
+ Goid: goid,
+ Type: typ,
+ }
+
+ if tp.version < 9 {
+ req.Uid = tp.String()
+
+ for n, i := tp.UVarint(), uint64(0); i < n; i++ {
+ size := tp.UVarint()
+ if size > (10 << 20) {
+ return eerror.New("trace_parser", "input too large", map[string]any{"size": size})
+ }
+ input := make([]byte, size)
+ tp.Bytes(input)
+ req.Inputs = append(req.Inputs, input)
+ }
+ }
+
+ switch typ {
+ case tracepb.Request_RPC:
+ if tp.version >= 9 {
+ isRaw := tp.Bool()
+ req.ServiceName = tp.String()
+ req.EndpointName = tp.String()
+ req.HttpMethod = tp.String()
+ req.Path = tp.String()
+
+ numParams := tp.UVarint()
+ req.PathParams = make([]string, numParams)
+ for i := uint64(0); i < numParams; i++ {
+ req.PathParams[i] = tp.String()
+ }
+
+ req.Uid = tp.String()
+
+ if tp.version >= 11 {
+ req.ExternalRequestId = tp.String()
+
+ if tp.version >= 12 {
+ req.ExternalCorrelationId = tp.String()
+ }
+ }
+
+ if isRaw {
+ req.RawRequestHeaders = tp.parseHTTPHeaders()
+ } else {
+ req.RequestPayload = tp.ByteString()
+ }
+ }
+
+ case tracepb.Request_AUTH:
+ if tp.version >= 9 {
+ req.ServiceName = tp.String()
+ req.EndpointName = tp.String()
+ req.RequestPayload = tp.ByteString()
+ }
+
+ case tracepb.Request_PUBSUB_MSG:
+ if tp.version >= 9 {
+ req.ServiceName = tp.String()
+ }
+
+ req.TopicName = tp.String()
+ req.SubscriptionName = tp.String()
+ req.MessageId = tp.String()
+ req.Attempt = tp.Uint32()
+ req.PublishTime = uint64(tp.Time().UnixMilli())
+
+ if tp.version >= 10 {
+ req.RequestPayload = tp.ByteString()
+ }
+ }
+
+ tp.reqs = append(tp.reqs, req)
+ tp.reqMap[req.SpanId] = req
+ return nil
+}
+
+func (tp *traceParser) bodyStream(ts uint64) error {
+ spanID := tp.Uint64()
+ req, ok := tp.reqMap[spanID]
+ if !ok {
+ return eerror.New("trace_parser", "unknown request span", map[string]any{"spanID": spanID})
+ }
+ flags := tp.Byte()
+ data := tp.ByteString()
+
+ isResponse := (flags & 1) == 1
+ overflowed := (flags & 2) == 2
+
+ req.Events = append(req.Events, &tracepb.Event{
+ Data: &tracepb.Event_BodyStream{
+ BodyStream: &tracepb.BodyStream{
+ IsResponse: isResponse,
+ Overflowed: overflowed,
+ Data: data,
+ },
+ },
+ })
+
+ return nil
+}
+
+func (tp *traceParser) requestEnd(ts uint64) error {
+ var typ tracepb.Request_Type
+ if tp.version >= 9 {
+ var err error
+ typ, err = tp.parseRequestType()
+ if err != nil {
+ return err
+ }
+ }
+
+ spanID := tp.Uint64()
+ req, ok := tp.reqMap[spanID]
+ if !ok {
+ return eerror.New("trace_parser", "unknown request span", map[string]any{"spanID": spanID})
+ }
+ if tp.version < 9 {
+ // Not captured by the protocol for old versions,
+ // so grab it from the request.
+ typ = req.Type
+ }
+
+ // dur := ts - rd.startTs
+ req.EndTime = ts
+
+ if tp.version >= 9 {
+ errMsg := tp.ByteString()
+ if len(errMsg) > 0 {
+ req.Err = errMsg
+
+ req.ErrStack = tp.stack(filterNone)
+ if tp.version >= 13 {
+ req.PanicStack = tp.formattedStack()
+ }
+ }
+
+ switch typ {
+ case tracepb.Request_RPC:
+ if isRaw := tp.Bool(); isRaw {
+ req.RawResponseHeaders = tp.parseHTTPHeaders()
+ } else {
+ req.ResponsePayload = tp.ByteString()
+ }
+ case tracepb.Request_AUTH:
+ req.Uid = tp.String()
+ req.ResponsePayload = tp.ByteString()
+ case tracepb.Request_PUBSUB_MSG:
+ req.ResponsePayload = tp.ByteString()
+ }
+ } else {
+ isErr := tp.Bool()
+ if isErr {
+ msg := tp.ByteString()
+ if len(msg) == 0 {
+ msg = []byte("unknown error")
+ }
+ req.Err = msg
+ if tp.version >= 5 {
+ req.ErrStack = tp.stack(filterNone)
+ }
+ } else {
+ for n, i := tp.UVarint(), uint64(0); i < n; i++ {
+ size := tp.UVarint()
+ if size > (10 << 20) {
+ return eerror.New("trace_parser", "input too large", map[string]any{"size": size})
+ }
+ output := make([]byte, size)
+ tp.Bytes(output)
+ req.Outputs = append(req.Outputs, output)
+ }
+ }
+ }
+
+ return nil
+}
+
+func (tp *traceParser) goroutineStart(ts uint64) error {
+ spanID := tp.Uint64()
+ req, ok := tp.reqMap[spanID]
+ if !ok {
+ // This is an expected error in certain situations like goroutines
+ // living past the request end that then spawn additional goroutines.
+ // Treat it as a warning but don't fail the parse.
+ tp.log.Warn().Uint64("span_id", spanID).Msg("unknown request span")
+ return nil
+ }
+ goid := tp.Uint32()
+ g := &tracepb.Goroutine{
+ Goid: goid,
+ StartTime: ts,
+ }
+ k := goKey{spanID: spanID, goid: goid}
+ req.Events = append(req.Events, &tracepb.Event{
+ Data: &tracepb.Event_Goroutine{Goroutine: g},
+ })
+ tp.goMap[k] = g
+ return nil
+}
+
+func (tp *traceParser) goroutineEnd(ts uint64) error {
+ spanID := tp.Uint64()
+ goid := tp.Uint32()
+ k := goKey{spanID: spanID, goid: goid}
+ g, ok := tp.goMap[k]
+ if !ok {
+ return eerror.New("trace_parser", "unknown goroutine id", map[string]any{"goid": goid})
+ }
+ g.EndTime = ts
+ delete(tp.goMap, k)
+ return nil
+}
+
+func (tp *traceParser) goroutineClear(ts uint64) error {
+ spanID := tp.Uint64()
+ goid := tp.Uint32()
+ k := goKey{spanID: spanID, goid: goid}
+ g, ok := tp.goMap[k]
+ if !ok {
+ return eerror.New("trace_parser", "unknown goroutine id", map[string]any{"spanID": spanID, "goid": goid})
+ }
+ g.EndTime = ts
+ delete(tp.goMap, k)
+ return nil
+}
+
+func (tp *traceParser) transactionStart(ts uint64) error {
+ txid := tp.UVarint()
+ spanID := tp.Uint64()
+ req, ok := tp.reqMap[spanID]
+ if !ok {
+ return eerror.New("trace_parser", "unknown request span", map[string]any{"spanID": spanID})
+ }
+ goid := uint32(tp.UVarint())
+
+ if tp.version < 4 {
+ _ = tp.UVarint() // StartLoc; no longer used
+ }
+
+ tx := &tracepb.DBTransaction{
+ Goid: goid,
+ StartTime: ts,
+ }
+ if tp.version >= 5 {
+ tx.BeginStack = tp.stack(filterDB)
+ }
+ tp.txMap[txid] = tx
+ req.Events = append(req.Events, &tracepb.Event{
+ Data: &tracepb.Event_Tx{Tx: tx},
+ })
+ return nil
+}
+
+func (tp *traceParser) transactionEnd(ts uint64) error {
+ txid := tp.UVarint()
+ _ = tp.Uint64() // spanID
+ tx, ok := tp.txMap[txid]
+ if !ok {
+ return eerror.New("trace_parser", "unknown transaction id", map[string]any{"txid": txid})
+ }
+ _ = uint32(tp.UVarint()) // goid
+ compl := tp.Byte()
+ if tp.version < 4 {
+ _ = int32(tp.UVarint()) // EndLoc; no longer used
+ }
+ errMsg := tp.ByteString()
+
+ var stack *tracepb.StackTrace
+ if tp.version >= 5 {
+ stack = tp.stack(filterDB)
+ }
+
+ // It's possible to get multiple transaction end events.
+ // Ignore them for now; we will expose this information later.
+ if tx.EndTime == 0 {
+ tx.EndTime = ts
+ tx.Err = errMsg
+ tx.EndStack = stack
+ switch compl {
+ case 0:
+ tx.Completion = tracepb.DBTransaction_ROLLBACK
+ case 1:
+ tx.Completion = tracepb.DBTransaction_COMMIT
+ default:
+ return eerror.New("trace_parser", "unknown completion type", map[string]any{"compl": compl})
+ }
+ }
+ return nil
+}
+
+func (tp *traceParser) queryStart(ts uint64) error {
+ qid := tp.UVarint()
+ spanID := tp.Uint64()
+ req, ok := tp.reqMap[spanID]
+ if !ok {
+ return eerror.New("trace_parser", "unknown request span", map[string]any{"spanID": spanID})
+ }
+ txid := tp.UVarint()
+ goid := uint32(tp.UVarint())
+
+ if tp.version < 4 {
+ _ = tp.UVarint() // CallLoc; no longer used
+ }
+ q := &tracepb.DBQuery{
+ Goid: goid,
+ StartTime: ts,
+ Query: tp.ByteString(),
+ }
+ if tp.version >= 5 {
+ q.Stack = tp.stack(filterDB)
+ }
+ tp.queryMap[qid] = q
+
+ if txid != 0 {
+ tx, ok := tp.txMap[txid]
+ if !ok {
+ return eerror.New("trace_parser", "unknown transaction id", map[string]any{"txid": txid})
+ }
+ tx.Queries = append(tx.Queries, q)
+ } else {
+ req.Events = append(req.Events, &tracepb.Event{
+ Data: &tracepb.Event_Query{Query: q},
+ })
+ }
+
+ return nil
+}
+
+func (tp *traceParser) queryEnd(ts uint64) error {
+ qid := tp.UVarint()
+ q, ok := tp.queryMap[qid]
+ if !ok {
+ return eerror.New("trace_parser", "unknown query id", map[string]any{"qid": qid})
+ }
+ q.EndTime = ts
+ q.Err = tp.ByteString()
+ return nil
+}
+
+func (tp *traceParser) callStart(ts uint64, size int) error {
+ callID := tp.UVarint()
+ spanID := tp.Uint64()
+ // TODO(eandre) We currently (Dec 2, 2020) have an old format
+ // that leaves out the child span id. Detect this based on the size
+ // and provide a workaround that doesn't crash.
+ var childSpanID uint64
+ if size == 12 {
+ childSpanID = spanID
+ } else {
+ childSpanID = tp.Uint64()
+ }
+ req, ok := tp.reqMap[spanID]
+ if !ok {
+ return eerror.New("trace_parser", "unknown request span", map[string]any{"spanID": spanID})
+ }
+
+ goid := uint32(tp.UVarint())
+ _ = tp.UVarint() // CallLoc; no longer used
+ defLoc := int32(tp.UVarint())
+
+ c := &tracepb.RPCCall{
+ SpanId: childSpanID,
+ Goid: goid,
+ DefLoc: defLoc,
+ StartTime: ts,
+ }
+ if tp.version >= 5 {
+ c.Stack = tp.stack(filterNone)
+ }
+ tp.callMap[callID] = c
+ req.Events = append(req.Events, &tracepb.Event{
+ Data: &tracepb.Event_Rpc{Rpc: c},
+ })
+ return nil
+}
+
+func (tp *traceParser) callEnd(ts uint64) error {
+ callID := tp.UVarint()
+ errMsg := tp.ByteString()
+ c, ok := tp.callMap[callID].(*tracepb.RPCCall)
+ if !ok {
+ return eerror.New("trace_parser", "unknown call", map[string]any{"callID": callID})
+ }
+ c.EndTime = ts
+ c.Err = errMsg
+ delete(tp.callMap, callID)
+ return nil
+}
+
+func (tp *traceParser) httpStart(ts uint64) error {
+ callID := tp.UVarint()
+ spanID := tp.Uint64()
+ childSpanID := tp.Uint64()
+ req, ok := tp.reqMap[spanID]
+ if !ok {
+ return eerror.New("trace_parser", "unknown request span", map[string]any{"spanID": spanID})
+ }
+ c := &tracepb.HTTPCall{
+ SpanId: childSpanID,
+ Goid: uint32(tp.UVarint()),
+ Method: tp.String(),
+ Url: tp.String(),
+ StartTime: ts,
+ }
+ tp.httpMap[callID] = c
+ req.Events = append(req.Events, &tracepb.Event{
+ Data: &tracepb.Event_Http{Http: c},
+ })
+ return nil
+}
+
+func (tp *traceParser) httpEnd(ts uint64) error {
+ callID := tp.UVarint()
+ errMsg := tp.ByteString()
+ status := tp.UVarint()
+ c, ok := tp.httpMap[callID]
+ if !ok {
+ return eerror.New("trace_parser", "unknown call", map[string]any{"callID": callID})
+ }
+ c.EndTime = ts
+ c.Err = errMsg
+ c.StatusCode = uint32(status)
+
+ numEvents := tp.UVarint()
+ c.Events = make([]*tracepb.HTTPTraceEvent, 0, numEvents)
+ for i := 0; i < int(numEvents); i++ {
+ ev, err := tp.httpEvent()
+ if err != nil {
+ return err
+ }
+ c.Events = append(c.Events, ev)
+ }
+
+ return nil
+}
+
+func (tp *traceParser) httpBodyClosed(ts uint64) error {
+ callID := tp.UVarint()
+ _ = tp.ByteString() // close error
+ c, ok := tp.httpMap[callID]
+ if !ok {
+ return eerror.New("trace_parser", "unknown call", map[string]any{"callID": callID})
+ }
+ c.BodyClosedTime = ts
+ delete(tp.httpMap, callID)
+ return nil
+}
+
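+// httpEvent parses a single httptrace event from the stream; the payload
+// layout depends on the event code read in the first byte.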
+func (tp *traceParser) httpEvent() (*tracepb.HTTPTraceEvent, error) {
+ code := tracepb.HTTPTraceEventCode(tp.Byte())
+ ts := tp.Int64()
+ ev := &tracepb.HTTPTraceEvent{
+ Code: code,
+ Time: uint64(ts),
+ }
+
+ switch code {
+ case tracepb.HTTPTraceEventCode_GET_CONN:
+ ev.Data = &tracepb.HTTPTraceEvent_GetConn{
+ GetConn: &tracepb.HTTPGetConnData{
+ HostPort: tp.String(),
+ },
+ }
+
+ case tracepb.HTTPTraceEventCode_GOT_CONN:
+ ev.Data = &tracepb.HTTPTraceEvent_GotConn{
+ GotConn: &tracepb.HTTPGotConnData{
+ Reused: tp.Bool(),
+ WasIdle: tp.Bool(),
+ IdleDurationNs: tp.Int64(),
+ },
+ }
+
+ case tracepb.HTTPTraceEventCode_GOT_FIRST_RESPONSE_BYTE:
+ // no data
+
+ case tracepb.HTTPTraceEventCode_GOT_1XX_RESPONSE:
+ ev.Data = &tracepb.HTTPTraceEvent_Got_1XxResponse{
+ Got_1XxResponse: &tracepb.HTTPGot1XxResponseData{
+ Code: int32(tp.Varint()),
+ },
+ }
+
+ case tracepb.HTTPTraceEventCode_DNS_START:
+ ev.Data = &tracepb.HTTPTraceEvent_DnsStart{
+ DnsStart: &tracepb.HTTPDNSStartData{
+ Host: tp.String(),
+ },
+ }
+
+ case tracepb.HTTPTraceEventCode_DNS_DONE:
+ data := &tracepb.HTTPDNSDoneData{
+ Err: tp.ByteString(),
+ }
+ addrs := int(tp.UVarint())
+ for j := 0; j < addrs; j++ {
+ data.Addrs = append(data.Addrs, &tracepb.DNSAddr{
+ Ip: tp.ByteString(),
+ })
+ }
+ ev.Data = &tracepb.HTTPTraceEvent_DnsDone{DnsDone: data}
+
+ case tracepb.HTTPTraceEventCode_CONNECT_START:
+ ev.Data = &tracepb.HTTPTraceEvent_ConnectStart{
+ ConnectStart: &tracepb.HTTPConnectStartData{
+ Network: tp.String(),
+ Addr: tp.String(),
+ },
+ }
+
+ case tracepb.HTTPTraceEventCode_CONNECT_DONE:
+ ev.Data = &tracepb.HTTPTraceEvent_ConnectDone{
+ ConnectDone: &tracepb.HTTPConnectDoneData{
+ Network: tp.String(),
+ Addr: tp.String(),
+ Err: tp.ByteString(),
+ },
+ }
+
+ case tracepb.HTTPTraceEventCode_TLS_HANDSHAKE_START:
+ // no data
+
+ case tracepb.HTTPTraceEventCode_TLS_HANDSHAKE_DONE:
+ ev.Data = &tracepb.HTTPTraceEvent_TlsHandshakeDone{
+ TlsHandshakeDone: &tracepb.HTTPTLSHandshakeDoneData{
+ Err: tp.ByteString(),
+ TlsVersion: tp.Uint32(),
+ CipherSuite: tp.Uint32(),
+ ServerName: tp.String(),
+ NegotiatedProtocol: tp.String(),
+ },
+ }
+
+ case tracepb.HTTPTraceEventCode_WROTE_HEADERS:
+ // no data
+
+ case tracepb.HTTPTraceEventCode_WROTE_REQUEST:
+ ev.Data = &tracepb.HTTPTraceEvent_WroteRequest{
+ WroteRequest: &tracepb.HTTPWroteRequestData{
+ Err: tp.ByteString(),
+ },
+ }
+
+ case tracepb.HTTPTraceEventCode_WAIT_100_CONTINUE:
+ // no data
+
+ default:
+ return nil, eerror.New("trace_parser", "unknown http event", map[string]any{"code": code})
+ }
+ return ev, nil
+}
+
+func (tp *traceParser) logMessage(ts uint64) error {
+ spanID := tp.Uint64()
+ goid := uint32(tp.UVarint())
+ level := tp.Byte()
+ msg := tp.String()
+ fields := int(tp.UVarint())
+
+ req, ok := tp.reqMap[spanID]
+ if !ok {
+ return eerror.New("trace_parser", "unknown request", map[string]any{"spanID": spanID})
+ } else if fields > 64 {
+ return eerror.New("trace_parser", "too many fields", map[string]any{"fields": fields})
+ }
+
+ log := &tracepb.LogMessage{
+ SpanId: spanID,
+ Goid: goid,
+ Time: ts,
+ Msg: msg,
+ }
+
+ // We introduced more log levels in trace version 8.
+ if tp.version >= 8 {
+ switch level {
+ case 0:
+ log.Level = tracepb.LogMessage_TRACE
+ case 1:
+ log.Level = tracepb.LogMessage_DEBUG
+ case 2:
+ log.Level = tracepb.LogMessage_INFO
+ case 3:
+ log.Level = tracepb.LogMessage_WARN
+ case 4:
+ log.Level = tracepb.LogMessage_ERROR
+ default:
+ return eerror.New("trace_parser", "unknown log message level", map[string]any{"level": level})
+ }
+ } else {
+ switch level {
+ case 0:
+ log.Level = tracepb.LogMessage_DEBUG
+ case 1:
+ log.Level = tracepb.LogMessage_INFO
+ case 2:
+ log.Level = tracepb.LogMessage_ERROR
+ default:
+ return eerror.New("trace_parser", "unknown log message level", map[string]any{"level": level})
+ }
+ }
+
+ for i := 0; i < fields; i++ {
+ f, err := tp.logField()
+ if err != nil {
+ return eerror.Wrap(err, "trace_parser", "error parsing field", map[string]any{"field#": i})
+ }
+ log.Fields = append(log.Fields, f)
+ }
+ if tp.version >= 5 {
+ log.Stack = tp.stack(filterNone)
+ }
+
+ req.Events = append(req.Events, &tracepb.Event{
+ Data: &tracepb.Event_Log{Log: log},
+ })
+ return nil
+}
+
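+// logField parses a single structured log field. The leading type byte
+// selects the wire encoding of the value.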
+func (tp *traceParser) logField() (*tracepb.LogField, error) {
+ typ := tp.Byte()
+ key := tp.String()
+ f := &tracepb.LogField{
+ Key: key,
+ }
+ switch typ {
+ case 1:
+ if tp.version >= 7 { // Stacks were only added to error log fields in version 7 (they were missing from the internal runtime before that)
+ f.Value = &tracepb.LogField_ErrorWithStack{ErrorWithStack: &tracepb.ErrWithStack{
+ Error: tp.String(),
+ Stack: tp.stack(filterNone),
+ }}
+ } else {
+ f.Value = &tracepb.LogField_ErrorWithoutStack{ErrorWithoutStack: tp.String()}
+ }
+ case 2:
+ f.Value = &tracepb.LogField_Str{Str: tp.String()}
+ case 3:
+ f.Value = &tracepb.LogField_Bool{Bool: tp.Bool()}
+ case 4:
+ f.Value = &tracepb.LogField_Time{Time: timestamppb.New(tp.Time())}
+ case 5:
+ f.Value = &tracepb.LogField_Dur{Dur: tp.Int64()}
+ case 6:
+ b := make([]byte, 16)
+ tp.Bytes(b)
+ f.Value = &tracepb.LogField_Uuid{Uuid: b}
+ case 7:
+ val := tp.ByteString()
+ err := tp.String()
+ if err != "" {
+ f.Value = &tracepb.LogField_ErrorWithoutStack{ErrorWithoutStack: err}
+ } else {
+ f.Value = &tracepb.LogField_Json{Json: val}
+ }
+ case 8:
+ f.Value = &tracepb.LogField_Int{Int: tp.Varint()}
+ case 9:
+ f.Value = &tracepb.LogField_Uint{Uint: tp.UVarint()}
+ case 10:
+ f.Value = &tracepb.LogField_Float32{Float32: tp.Float32()}
+ case 11:
+ f.Value = &tracepb.LogField_Float64{Float64: tp.Float64()}
+ default:
+ return nil, eerror.New("trace_parser", "unknown field type", map[string]any{"typ": typ})
+ }
+ return f, nil
+}
+
+func (tp *traceParser) publishStart(ts uint64) error {
+ publishID := tp.UVarint()
+ spanID := tp.Uint64()
+ req, ok := tp.reqMap[spanID]
+ if !ok {
+ return eerror.New("trace_parser", "unknown request span", map[string]any{"spanID": spanID})
+ }
+
+ publish := &tracepb.PubsubMsgPublished{
+ Goid: tp.UVarint(),
+ StartTime: ts,
+ Topic: tp.String(),
+ Message: tp.ByteString(),
+ Stack: tp.stack(filterNone),
+ }
+ tp.publishMap[publishID] = publish
+
+ req.Events = append(req.Events, &tracepb.Event{
+ Data: &tracepb.Event_PublishedMsg{PublishedMsg: publish},
+ })
+ return nil
+}
+
+func (tp *traceParser) publishEnd(ts uint64) error {
+ publishID := tp.UVarint()
+ publish, ok := tp.publishMap[publishID]
+ if !ok {
+ return eerror.New("trace_parser", "unknown publish", map[string]any{"publishID": publishID})
+ }
+ publish.EndTime = ts
+ publish.MessageId = tp.String()
+ publish.Err = tp.ByteString()
+ delete(tp.publishMap, publishID)
+ return nil
+}
+
+func (tp *traceParser) serviceInitStart(ts uint64) error {
+ spanID := tp.Uint64()
+ req, ok := tp.reqMap[spanID]
+ if !ok {
+ return eerror.New("trace_parser", "unknown request span", map[string]any{"spanID": spanID})
+ }
+
+ initID := tp.UVarint()
+ svcInit := &tracepb.ServiceInit{
+ Goid: tp.UVarint(),
+ DefLoc: int32(tp.UVarint()),
+ StartTime: ts,
+ Service: tp.String(),
+ }
+ tp.serviceInits[initID] = svcInit
+
+ req.Events = append(req.Events, &tracepb.Event{
+ Data: &tracepb.Event_ServiceInit{ServiceInit: svcInit},
+ })
+ return nil
+}
+
+func (tp *traceParser) serviceInitEnd(ts uint64) error {
+ initID := tp.UVarint()
+ svcInit, ok := tp.serviceInits[initID]
+ if !ok {
+ return eerror.New("trace_parser", "unknown service init", map[string]any{"initID": initID})
+ }
+ svcInit.EndTime = ts
+ svcInit.Err = tp.ByteString()
+ if len(svcInit.Err) > 0 {
+ svcInit.ErrStack = tp.stack(filterNone)
+ }
+ delete(tp.serviceInits, initID)
+ return nil
+}
+
+func (tp *traceParser) cacheOpStart(ts uint64) error {
+ opID := tp.UVarint()
+ spanID := tp.Uint64()
+ req, ok := tp.reqMap[spanID]
+ if !ok {
+ return eerror.New("trace_parser", "unknown request span", map[string]any{"spanID": spanID})
+ }
+
+ op := &tracepb.CacheOp{
+ Goid: uint32(tp.UVarint()),
+ DefLoc: int32(tp.UVarint()),
+ StartTime: ts,
+ Operation: tp.String(),
+ Write: tp.Bool(),
+ Result: tracepb.CacheOp_UNKNOWN,
+ Stack: tp.stack(filterNone),
+ }
+
+ numKeys := tp.UVarint()
+ op.Keys = make([]string, numKeys)
+ for i := 0; i < int(numKeys); i++ {
+ op.Keys[i] = tp.String()
+ }
+
+ numInputs := tp.UVarint()
+ op.Inputs = make([][]byte, numInputs)
+ for i := 0; i < int(numInputs); i++ {
+ op.Inputs[i] = tp.ByteString()
+ }
+ tp.cacheMap[opID] = op
+
+ req.Events = append(req.Events, &tracepb.Event{
+ Data: &tracepb.Event_Cache{Cache: op},
+ })
+ return nil
+}
+
+func (tp *traceParser) cacheOpEnd(ts uint64) error {
+ opID := tp.UVarint()
+ op, ok := tp.cacheMap[opID]
+ if !ok {
+ return eerror.New("trace_parser", "unknown cache", map[string]any{"opID": opID})
+ }
+ op.EndTime = ts
+
+ res := trace.CacheOpResult(tp.Byte())
+ switch res {
+ case trace.CacheOK:
+ op.Result = tracepb.CacheOp_OK
+ case trace.CacheNoSuchKey:
+ op.Result = tracepb.CacheOp_NO_SUCH_KEY
+ case trace.CacheConflict:
+ op.Result = tracepb.CacheOp_CONFLICT
+ case trace.CacheErr:
+ op.Result = tracepb.CacheOp_ERR
+ op.Err = tp.ByteString()
+ }
+
+ numOutputs := tp.UVarint()
+ op.Outputs = make([][]byte, numOutputs)
+ for i := 0; i < int(numOutputs); i++ {
+ op.Outputs[i] = tp.ByteString()
+ }
+
+ delete(tp.cacheMap, opID)
+ return nil
+}
+
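+// stackFilter controls which stack frames are dropped when symbolizing a
+// stack trace. filterDB drops frames from the standard library's
+// database/sql package, which only add noise to DB query traces.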
+type stackFilter int
+
+const (
+ filterNone stackFilter = iota
+ filterDB
+)
+
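+// stack parses a stack trace from the stream. Program counters are
+// delta-encoded: each varint is the difference from the previous PC,
+// which keeps the encoding compact since adjacent frames tend to be
+// close together in memory. If a symbol table is available the PCs are
+// additionally resolved into file/line/function frames.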
+func (tp *traceParser) stack(filterMode stackFilter) *tracepb.StackTrace {
+ n := int(tp.Byte())
+ tr := &tracepb.StackTrace{}
+ if n == 0 {
+ return tr
+ }
+
+ diffs := make([]int64, n)
+ for i := 0; i < n; i++ {
+ diff := tp.Varint()
+ diffs[i] = diff
+ }
+ tr.Pcs = diffs
+
+ if tp.symTable == nil {
+ return tr
+ }
+
+ // If we have a symTable, we can extract the full set of frames from the trace
+ sym, err := tp.symTable.SymTable(context.Background())
+ if err != nil {
+ log.Error().Err(err).Msg("could not parse sym table")
+ return tr
+ }
+
+ prev := int64(0)
+ pcs := make([]uint64, n)
+ for i := 0; i < n; i++ {
+ x := prev + diffs[i]
+ prev = x
+ pcs[i] = uint64(x) + sym.BaseOffset
+ }
+
+ tr.Frames = make([]*tracepb.StackFrame, 0, n)
+PCLoop:
+ for _, pc := range pcs {
+ file, line, fn := sym.PCToLine(pc)
+ if fn != nil {
+ if filterMode == filterDB && strings.Contains(filepath.ToSlash(file), "/src/database/sql/") {
+ continue PCLoop
+ }
+ tr.Frames = append(tr.Frames, &tracepb.StackFrame{
+ Func: fn.Name,
+ Filename: file,
+ Line: int32(line),
+ })
+ }
+ }
+ return tr
+}
+
+func (tp *traceParser) formattedStack() *tracepb.StackTrace {
+ n := int(tp.Byte())
+ tr := &tracepb.StackTrace{}
+ if n == 0 {
+ return tr
+ }
+
+ tr.Frames = make([]*tracepb.StackFrame, 0, n)
+ for i := 0; i < n; i++ {
+ tr.Frames = append(tr.Frames, &tracepb.StackFrame{
+ Filename: tp.String(),
+ Line: int32(tp.UVarint()),
+ Func: tp.String(),
+ })
+ }
+
+ return tr
+}
+
+func (tp *traceParser) parseRequestType() (tracepb.Request_Type, error) {
+ switch b := tp.Byte(); b {
+ case 0x01:
+ return tracepb.Request_RPC, nil
+ case 0x02:
+ return tracepb.Request_AUTH, nil
+ case 0x03:
+ return tracepb.Request_PUBSUB_MSG, nil
+ default:
+ return -1, eerror.New("trace_parser", "unknown request type", map[string]any{"type": fmt.Sprintf("%x", b)})
+ }
+}
+
+func (tp *traceParser) parseTraceID() *tracepb.TraceID {
+ var traceID [16]byte
+ tp.Bytes(traceID[:])
+ return &tracepb.TraceID{
+ Low: bin.Uint64(traceID[:8]),
+ High: bin.Uint64(traceID[8:]),
+ }
+}
+
+func (tp *traceParser) parseHTTPHeaders() map[string]string {
+ numHeaders := tp.UVarint()
+ h := make(map[string]string, numHeaders)
+ for i := uint64(0); i < numHeaders; i++ {
+ h[tp.String()] = tp.String()
+ }
+ return h
+}
+
+var bin = binary.LittleEndian
+
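+// traceReader reads binary trace data from an in-memory buffer.
+// Reads past the end of the buffer set the err flag instead of
+// panicking; callers can check for truncated input via Overflow().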
+type traceReader struct {
+ buf []byte
+ off int
+ err bool
+}
+
+func (tr *traceReader) Offset() int {
+ return tr.off
+}
+
+func (tr *traceReader) Done() bool {
+ return tr.off >= len(tr.buf)
+}
+
+func (tr *traceReader) Overflow() bool {
+ return tr.err
+}
+
+func (tr *traceReader) Bytes(b []byte) {
+ n := copy(b, tr.buf[tr.off:])
+ tr.off += n
+ if len(b) > n {
+ tr.err = true
+ }
+}
+
+func (tr *traceReader) Skip(n int) {
+ tr.off += n
+ if tr.off > len(tr.buf) {
+ tr.off = len(tr.buf)
+ tr.err = true
+ }
+}
+
+func (tr *traceReader) Byte() byte {
+ var buf [1]byte
+ tr.Bytes(buf[:])
+ return buf[0]
+}
+
+func (tr *traceReader) Bool() bool {
+ return tr.Byte() != 0
+}
+
+func (tr *traceReader) String() string {
+ return string(tr.ByteString())
+}
+
+func (tr *traceReader) ByteString() []byte {
+ size := tr.UVarint()
+ if size == 0 {
+ return nil
+ }
+ b := make([]byte, int(size))
+ tr.Bytes(b)
+ return b
+}
+
+func (tr *traceReader) Time() time.Time {
+ sec := tr.Int64()
+ nsec := tr.Int32()
+ return time.Unix(sec, int64(nsec)).UTC()
+}
+
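+// Int32 reads a fixed-width 4-byte value and zig-zag decodes it into a
+// signed integer; Int64 below does the same for 8-byte values.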
+func (tr *traceReader) Int32() int32 {
+ u := tr.Uint32()
+ var v int32
+ if u&1 == 0 {
+ v = int32(u >> 1)
+ } else {
+ v = ^int32(u >> 1)
+ }
+ return v
+}
+
+func (tr *traceReader) Uint32() uint32 {
+ var buf [4]byte
+ tr.Bytes(buf[:])
+ return bin.Uint32(buf[:])
+}
+
+func (tr *traceReader) Int64() int64 {
+ u := tr.Uint64()
+ var v int64
+ if u&1 == 0 {
+ v = int64(u >> 1)
+ } else {
+ v = ^int64(u >> 1)
+ }
+ return v
+}
+
+func (tr *traceReader) Uint64() uint64 {
+ var buf [8]byte
+ tr.Bytes(buf[:])
+ return bin.Uint64(buf[:])
+}
+
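+// Varint reads a zig-zag encoded signed integer. Zig-zag encoding maps
+// signed values onto unsigned ones (0→0, -1→1, 1→2, -2→3, ...) so that
+// small magnitudes, positive or negative, encode to few bytes.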
+func (tr *traceReader) Varint() int64 {
+ u := tr.UVarint()
+ var v int64
+ if u&1 == 0 {
+ v = int64(u >> 1)
+ } else {
+ v = ^int64(u >> 1)
+ }
+ return v
+}
+
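+// UVarint reads an unsigned LEB128-style varint: each byte contributes
+// seven bits of payload, and the high bit signals that another byte follows.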
+func (tr *traceReader) UVarint() uint64 {
+ var u uint64
+ for i := 0; tr.off < len(tr.buf); i += 7 {
+ b := tr.buf[tr.off]
+ u |= uint64(b&^0x80) << i
+ tr.off++
+ if b&0x80 == 0 {
+ break
+ }
+ }
+ return u
+}
+
+func (tr *traceReader) Float32() float32 {
+ b := tr.Uint32()
+ return math.Float32frombits(b)
+}
+
+func (tr *traceReader) Float64() float64 {
+ b := tr.Uint64()
+ return math.Float64frombits(b)
+}
diff --git a/cli/daemon/engine/trace2/recorder.go b/cli/daemon/engine/trace2/recorder.go
new file mode 100644
index 0000000000..680e7a21c6
--- /dev/null
+++ b/cli/daemon/engine/trace2/recorder.go
@@ -0,0 +1,104 @@
+package trace2
+
+import (
+ "bufio"
+ "context"
+ "io"
+ "time"
+
+ "github.com/cockroachdb/errors"
+ "github.com/rs/zerolog/log"
+
+ "encore.dev/appruntime/exported/trace2"
+ "encr.dev/pkg/traceparser"
+ tracepb2 "encr.dev/proto/encore/engine/trace2"
+)
+
+type Recorder struct {
+ s Store
+}
+
+func NewRecorder(s Store) *Recorder {
+ return &Recorder{s}
+}
+
+type RecordData struct {
+ Meta *Meta
+ TraceVersion trace2.Version
+ Buf *bufio.Reader
+ Anchor trace2.TimeAnchor
+}
+
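+// RecordTrace parses trace events from data.Buf and persists them to the
+// store. Parsing runs on a separate goroutine; writes are batched and
+// flushed once 100 events have accumulated or after 500ms of inactivity.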
+func (h *Recorder) RecordTrace(data RecordData) error {
+ eventCh := make(chan *tracepb2.TraceEvent, 100)
+ go func() {
+ defer close(eventCh)
+ for {
+ ev, err := traceparser.ParseEvent(data.Buf, data.Anchor, data.TraceVersion)
+ if ev != nil {
+ eventCh <- ev
+ }
+ if err == nil {
+ continue
+ }
+
+ // We have an error.
+ if !errors.Is(err, io.EOF) {
+ log.Error().Err(err).Msg("unable to parse trace")
+ }
+ return
+ }
+ }()
+
+ writeEvents := func(ctx context.Context, ev []*tracepb2.TraceEvent) error {
+ if len(ev) == 0 {
+ return nil
+ }
+ return h.s.WriteEvents(ctx, data.Meta, ev)
+ }
+
+ // pendingWrites are the accumulated events that we have parsed so far
+ // that have not yet been written to the store.
+ pendingWrites := make([]*tracepb2.TraceEvent, 0, 100)
+
+ flushWrites := func() {
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ if err := writeEvents(ctx, pendingWrites); err != nil {
+ log.Error().Err(err).Msg("unable to write trace events")
+ return
+ }
+
+ // Garbage collect the slice if it's too big.
+ if cap(pendingWrites) > 1000 {
+ pendingWrites = make([]*tracepb2.TraceEvent, 0, 100)
+ } else {
+ pendingWrites = pendingWrites[:0]
+ }
+ }
+
+ debounce := time.NewTicker(500 * time.Millisecond)
+ defer debounce.Stop()
+
+ for {
+ select {
+ case ev, ok := <-eventCh:
+ if !ok {
+ // No more events.
+ flushWrites()
+ return nil
+ }
+ debounce.Reset(500 * time.Millisecond)
+ pendingWrites = append(pendingWrites, ev)
+
+ // Flush immediately if we've accumulated a bunch of events
+ // since the debounce may never run in a high throughput scenario.
+ if len(pendingWrites) >= 100 {
+ flushWrites()
+ }
+
+ case <-debounce.C:
+ flushWrites()
+ }
+ }
+}
diff --git a/cli/daemon/engine/trace2/sqlite/read.go b/cli/daemon/engine/trace2/sqlite/read.go
new file mode 100644
index 0000000000..799a7e1fa6
--- /dev/null
+++ b/cli/daemon/engine/trace2/sqlite/read.go
@@ -0,0 +1,147 @@
+package sqlite
+
+import (
+ "context"
+ "database/sql"
+ "strconv"
+ "time"
+
+ "github.com/cockroachdb/errors"
+ "github.com/rs/zerolog/log"
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/types/known/timestamppb"
+
+ "encr.dev/cli/daemon/engine/trace2"
+ "encr.dev/pkg/fns"
+ tracepb2 "encr.dev/proto/encore/engine/trace2"
+)
+
+func (s *Store) List(ctx context.Context, q *trace2.Query, iter trace2.ListEntryIterator) error {
+ limit := q.Limit
+ if limit <= 0 {
+ limit = 100
+ }
+
+ args := []any{
+ q.AppID, tracepb2.SpanSummary_AUTH, /* ignore auth spans */
+ }
+
+ extraWhereClause := ""
+
+ if q.MessageID != "" {
+ args = append(args, q.MessageID)
+ extraWhereClause += " AND message_id = $" + strconv.Itoa(len(args))
+ }
+
+ // If we're filtering for tests / non-tests, add the extra where clause.
+ if q.TestFilter != nil {
+ args = append(args, tracepb2.SpanSummary_TEST)
+ if *q.TestFilter {
+ extraWhereClause += " AND span_type = $" + strconv.Itoa(len(args))
+ } else {
+ extraWhereClause += " AND span_type != $" + strconv.Itoa(len(args))
+ }
+ }
+
+ rows, err := s.db.QueryContext(ctx, `
+ SELECT
+ trace_id, span_id, started_at, span_type, is_root, service_name, endpoint_name,
+ topic_name, subscription_name, message_id, is_error, test_skipped, duration_nanos, src_file, src_line
+ FROM trace_span_index
+ WHERE app_id = $1 AND has_response AND is_root AND span_type != $2 `+extraWhereClause+`
+ ORDER BY started_at DESC
+ LIMIT `+strconv.Itoa(limit)+`
+ `, args...)
+ if err != nil {
+ return errors.Wrap(err, "query traces")
+ }
+
+ defer fns.CloseIgnore(rows)
+ n := 0
+ for rows.Next() {
+ if n >= limit {
+ break
+ }
+ n++
+
+ var t tracepb2.SpanSummary
+ var startedAt int64
+ err := rows.Scan(
+ &t.TraceId, &t.SpanId, &startedAt, &t.Type, &t.IsRoot, &t.ServiceName, &t.EndpointName,
+ &t.TopicName, &t.SubscriptionName, &t.MessageId, &t.IsError, &t.TestSkipped, &t.DurationNanos, &t.SrcFile, &t.SrcLine)
+ if err != nil {
+ return errors.Wrap(err, "scan trace")
+ }
+ ts := time.Unix(0, startedAt)
+ t.StartedAt = timestamppb.New(ts)
+
+ if !iter(&t) {
+ return nil
+ }
+ }
+
+ return errors.Wrap(rows.Err(), "iterate traces")
+}
+
+// emitCompleteSpanToListeners emits the given trace/span to all listeners
+// if it's a complete root span (meaning it has a response and is not an auth span).
+func (s *Store) emitCompleteSpanToListeners(ctx context.Context, appID, traceID, spanID string) {
+ var t tracepb2.SpanSummary
+ var startedAt int64
+ err := s.db.QueryRowContext(ctx, `
+ SELECT
+ trace_id, span_id, started_at, span_type, is_root, service_name, endpoint_name,
+ topic_name, subscription_name, message_id, is_error, test_skipped, duration_nanos, src_file, src_line
+ FROM trace_span_index
+ WHERE app_id = ? AND trace_id = ? AND span_id = ? AND has_response AND is_root AND span_type != ?
+ ORDER BY started_at DESC
+ `, appID, traceID, spanID, tracepb2.SpanSummary_AUTH).Scan(
+ &t.TraceId, &t.SpanId, &startedAt, &t.Type, &t.IsRoot, &t.ServiceName, &t.EndpointName,
+ &t.TopicName, &t.SubscriptionName, &t.MessageId, &t.IsError, &t.TestSkipped, &t.DurationNanos, &t.SrcFile, &t.SrcLine)
+ if errors.Is(err, sql.ErrNoRows) {
+ return
+ } else if err != nil {
+ log.Error().Err(err).Msg("unable to query trace span")
+ return
+ }
+
+ ts := time.Unix(0, startedAt)
+ t.StartedAt = timestamppb.New(ts)
+ for _, ln := range s.listeners {
+ ln <- trace2.NewSpanEvent{
+ AppID: appID,
+ TestTrace: t.Type == tracepb2.SpanSummary_TEST,
+ Span: &t,
+ }
+ }
+}
+
+func (s *Store) Get(ctx context.Context, appID, traceID string, iter trace2.EventIterator) error {
+ rows, err := s.db.QueryContext(ctx, `
+ SELECT event_data
+ FROM trace_event
+ WHERE app_id = ? AND trace_id = ?
+ `, appID, traceID)
+ if err != nil {
+ return errors.Wrap(err, "get trace")
+ }
+
+ defer fns.CloseIgnore(rows)
+ for rows.Next() {
+ var data []byte
+ err := rows.Scan(&data)
+ if err != nil {
+ return errors.Wrap(err, "scan trace data")
+ }
+
+ var ev tracepb2.TraceEvent
+ if err := protojson.Unmarshal(data, &ev); err != nil {
+ return errors.Wrap(err, "unmarshal trace event")
+ }
+ if !iter(&ev) {
+ return nil
+ }
+ }
+
+ return errors.Wrap(rows.Err(), "iterate events")
+}
diff --git a/cli/daemon/engine/trace2/sqlite/write.go b/cli/daemon/engine/trace2/sqlite/write.go
new file mode 100644
index 0000000000..036fc424a4
--- /dev/null
+++ b/cli/daemon/engine/trace2/sqlite/write.go
@@ -0,0 +1,372 @@
+package sqlite
+
+import (
+ "context"
+ "database/sql"
+ "encoding/base32"
+ "encoding/binary"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/cockroachdb/errors"
+ "github.com/lib/pq"
+ "github.com/rs/zerolog/log"
+ "google.golang.org/protobuf/encoding/protojson"
+
+ "encr.dev/cli/daemon/engine/trace2"
+ "encr.dev/pkg/fns"
+ tracepbcli "encr.dev/proto/encore/engine/trace2"
+)
+
+// New creates a new store backed by the given db.
+func New(db *sql.DB) *Store {
+ return &Store{
+ db: db,
+ }
+}
+
+type Store struct {
+ db *sql.DB
+ listeners []chan<- trace2.NewSpanEvent
+}
+
+var _ trace2.Store = (*Store)(nil)
+
+func scanRows[T any](rows *sql.Rows) ([]T, error) {
+ defer rows.Close()
+ var out []T
+ for rows.Next() {
+ var v T
+ err := rows.Scan(&v)
+ if err != nil {
+ return nil, err
+ }
+ out = append(out, v)
+ }
+ return out, nil
+}
+
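+// CleanEvery runs DoClean with the given parameters every freq until
+// ctx is canceled.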
+func (s *Store) CleanEvery(ctx context.Context, freq time.Duration, triggerAt, eventsToKeep, batchSize int) {
+ for {
+ timer := time.NewTimer(freq)
+ select {
+ case <-ctx.Done():
+ return
+ case <-timer.C:
+ if err := s.DoClean(ctx, triggerAt, eventsToKeep, batchSize); err != nil {
+ log.Error().Err(err).Msg("trace cleanup failed")
+ }
+ }
+ }
+}
+
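+// DoClean deletes old trace data for every app that stores more than
+// triggerAt traces: the most recent traces (up to eventsToKeep) are kept,
+// and up to batchSize older traces, along with their span index rows,
+// are deleted per app and sweep.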
+func (s *Store) DoClean(ctx context.Context, triggerAt, eventsToKeep, batchSize int) error {
+ log.Info().Msg("initiating trace event cleanup sweep")
+ rows, err := s.db.QueryContext(ctx, "SELECT app_id FROM trace_event GROUP BY app_id HAVING COUNT(distinct trace_id) > ?", triggerAt)
+ if err != nil {
+ return errors.Wrap(err, "query app ids")
+ }
+ appIDs, err := scanRows[string](rows)
+ if err != nil {
+ return errors.Wrap(err, "scan app ids")
+ }
+
+ for _, appID := range appIDs {
+ row := s.db.QueryRowContext(ctx, `
+ WITH latest_events AS (
+ SELECT trace_id, min(id) as id FROM trace_event WHERE app_id = ? GROUP BY 1 ORDER BY 2 DESC LIMIT ?
+ ) SELECT min(id) FROM latest_events;
+ `, appID, eventsToKeep)
+ var traceID int64
+ err := row.Scan(&traceID)
+ if err != nil {
+ log.Error().Err(err).Msg("failed to get trace id")
+ continue
+ }
+ rows, err := s.db.QueryContext(ctx, "SELECT DISTINCT trace_id FROM trace_event WHERE app_id = ? AND id < ? ORDER BY id DESC LIMIT ?", appID, traceID, batchSize)
+ if err != nil {
+ log.Error().Err(err).Msg("failed to get old trace ids")
+ continue
+ }
+ traceIDs, err := scanRows[string](rows)
+ if err != nil {
+ log.Error().Err(err).Msg("failed to scan old trace ids")
+ continue
+ }
+ if len(traceIDs) == 0 {
+ continue
+ }
+ idArgs := strings.Join(fns.Map(traceIDs, pq.QuoteLiteral), ",")
+ res, err := s.db.ExecContext(ctx, "DELETE FROM trace_event WHERE app_id = ? AND trace_id IN ("+idArgs+")", appID)
+ if err != nil {
+ log.Error().Err(err).Msg("failed to delete old trace events")
+ continue
+ }
+ rowCount, err := res.RowsAffected()
+ if err != nil {
+ log.Error().Err(err).Msg("failed to get rows affected")
+ continue
+ }
+ log.Info().Str("app_id", appID).Int64("deleted", rowCount).Msg("cleaned up old trace events")
+ res, err = s.db.ExecContext(ctx, "DELETE FROM trace_span_index WHERE app_id = ? AND trace_id IN ("+idArgs+")", appID)
+ if err != nil {
+ log.Error().Err(err).Msg("failed to delete old trace spans")
+ continue
+ }
+ rowCount, err = res.RowsAffected()
+ if err != nil {
+ log.Error().Err(err).Msg("failed to get rows affected")
+ continue
+ }
+ log.Info().Str("app_id", appID).Int64("deleted", rowCount).Msg("cleaned up old trace spans")
+ }
+
+ return nil
+}
+
+func (s *Store) Listen(ch chan<- trace2.NewSpanEvent) {
+ s.listeners = append(s.listeners, ch)
+}
+
+func (s *Store) Clear(ctx context.Context, appID string) error {
+ _, err := s.db.ExecContext(ctx, "DELETE FROM trace_event WHERE app_id = ?", appID)
+ if err != nil {
+ return errors.Wrap(err, "failed to clear trace events")
+ }
+ _, err = s.db.ExecContext(ctx, "DELETE FROM trace_span_index WHERE app_id = ?", appID)
+ return errors.Wrap(err, "failed to clear trace spans")
+}
+
+func (s *Store) WriteEvents(ctx context.Context, meta *trace2.Meta, events []*tracepbcli.TraceEvent) error {
+ for _, ev := range events {
+ if err := s.insertEvent(ctx, meta, ev); err != nil {
+ log.Error().Err(err).Msg("unable to insert trace span event")
+ continue
+ }
+ }
+
+ return nil
+}
+
+func (s *Store) insertEvent(ctx context.Context, meta *trace2.Meta, ev *tracepbcli.TraceEvent) error {
+ data, err := protojson.Marshal(ev)
+ if err != nil {
+ return errors.Wrap(err, "marshal trace event")
+ }
+
+ _, err = s.db.ExecContext(ctx, `
+ INSERT INTO trace_event (
+ app_id, trace_id, span_id, event_data)
+ VALUES (?, ?, ?, ?)
+ `, meta.AppID, encodeTraceID(ev.TraceId), encodeSpanID(ev.SpanId), data)
+ if err != nil {
+ return errors.Wrap(err, "insert trace span event")
+ }
+
+ if start := ev.GetSpanStart(); start != nil {
+ if err := s.updateSpanStartIndex(ctx, meta, ev, start); err != nil {
+ return errors.Wrap(err, "update span start index")
+ }
+ } else if end := ev.GetSpanEnd(); end != nil {
+ if err := s.updateSpanEndIndex(ctx, meta, ev, end); err != nil {
+ return errors.Wrap(err, "update span end index")
+ }
+ }
+
+ return nil
+}
+
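+// updateSpanStartIndex upserts the metadata captured at span start
+// (service, endpoint, topic, message id, and so on) into the
+// trace_span_index table, keyed on (trace_id, span_id).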
+func (s *Store) updateSpanStartIndex(ctx context.Context, meta *trace2.Meta, ev *tracepbcli.TraceEvent, start *tracepbcli.SpanStart) error {
+ isRoot := start.ParentSpanId == nil
+ if req := start.GetRequest(); req != nil {
+ extRequestID := req.RequestHeaders[http.CanonicalHeaderKey("X-Request-ID")]
+ _, err := s.db.ExecContext(ctx, `
+ INSERT INTO trace_span_index (
+ app_id, trace_id, span_id, span_type, started_at, is_root, service_name, endpoint_name, external_request_id, has_response, test_skipped
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, false, false)
+ ON CONFLICT (trace_id, span_id) DO UPDATE SET
+ is_root = excluded.is_root,
+ service_name = excluded.service_name,
+ endpoint_name = excluded.endpoint_name,
+ external_request_id = excluded.external_request_id
+ `, meta.AppID, encodeTraceID(ev.TraceId), encodeSpanID(ev.SpanId),
+ tracepbcli.SpanSummary_REQUEST, ev.EventTime.AsTime().UnixNano(),
+ isRoot, req.ServiceName, req.EndpointName, extRequestID)
+ if err != nil {
+ return errors.Wrap(err, "insert trace span event")
+ }
+ return nil
+ }
+
+ if auth := start.GetAuth(); auth != nil {
+ _, err := s.db.ExecContext(ctx, `
+ INSERT INTO trace_span_index (
+ app_id, trace_id, span_id, span_type, started_at, is_root, service_name,
+ endpoint_name, has_response, test_skipped
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, false, false)
+ ON CONFLICT (trace_id, span_id) DO UPDATE SET
+ is_root = excluded.is_root,
+ service_name = excluded.service_name,
+ endpoint_name = excluded.endpoint_name
+ `, meta.AppID, encodeTraceID(ev.TraceId), encodeSpanID(ev.SpanId),
+ tracepbcli.SpanSummary_AUTH, ev.EventTime.AsTime().UnixNano(),
+ isRoot, auth.ServiceName, auth.EndpointName)
+ if err != nil {
+ return errors.Wrap(err, "insert trace span event")
+ }
+ return nil
+ }
+
+ if msg := start.GetPubsubMessage(); msg != nil {
+ _, err := s.db.ExecContext(ctx, `
+ INSERT INTO trace_span_index (
+ app_id, trace_id, span_id, span_type, started_at, is_root, service_name,
+ topic_name, subscription_name, message_id, has_response, test_skipped
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, false, false)
+ ON CONFLICT (trace_id, span_id) DO UPDATE SET
+ is_root = excluded.is_root,
+ service_name = excluded.service_name,
+ topic_name = excluded.topic_name,
+ subscription_name = excluded.subscription_name,
+ message_id = excluded.message_id
+ `, meta.AppID, encodeTraceID(ev.TraceId), encodeSpanID(ev.SpanId),
+ tracepbcli.SpanSummary_PUBSUB_MESSAGE, ev.EventTime.AsTime().UnixNano(),
+ isRoot, msg.ServiceName, msg.TopicName, msg.SubscriptionName, msg.MessageId)
+ if err != nil {
+ return errors.Wrap(err, "insert trace span event")
+ }
+ return nil
+ }
+
+ if msg := start.GetTest(); msg != nil {
+ _, err := s.db.ExecContext(ctx, `
+ INSERT INTO trace_span_index (
+ app_id, trace_id, span_id, span_type, started_at, is_root, service_name,
+ endpoint_name, user_id, src_file, src_line, has_response, test_skipped
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, false, false)
+ ON CONFLICT (trace_id, span_id) DO UPDATE SET
+ is_root = excluded.is_root,
+ service_name = excluded.service_name,
+ endpoint_name = excluded.endpoint_name
+ `, meta.AppID, encodeTraceID(ev.TraceId), encodeSpanID(ev.SpanId),
+ tracepbcli.SpanSummary_TEST, ev.EventTime.AsTime().UnixNano(),
+ isRoot, msg.ServiceName, msg.TestName, msg.Uid, msg.TestFile, msg.TestLine)
+ if err != nil {
+ return errors.Wrap(err, "insert trace span event")
+ }
+ return nil
+ }
+
+ return nil
+}
+
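+// updateSpanEndIndex upserts the completion data for a span (error
+// status and duration) and, on success, notifies listeners that the
+// span is complete.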
+func (s *Store) updateSpanEndIndex(ctx context.Context, meta *trace2.Meta, ev *tracepbcli.TraceEvent, end *tracepbcli.SpanEnd) (err error) {
+ traceID := encodeTraceID(ev.TraceId)
+ spanID := encodeSpanID(ev.SpanId)
+
+ defer func() {
+ if err == nil {
+ // If the span is complete, emit it to listeners.
+ s.emitCompleteSpanToListeners(ctx, meta.AppID, traceID, spanID)
+ }
+ }()
+
+ if req := end.GetRequest(); req != nil {
+ _, err := s.db.ExecContext(ctx, `
+ INSERT INTO trace_span_index (
+ app_id, trace_id, span_id, span_type, has_response, is_error, duration_nanos
+ ) VALUES (?, ?, ?, ?, ?, ?, ?)
+ ON CONFLICT (trace_id, span_id) DO UPDATE SET
+ has_response = excluded.has_response,
+ is_error = excluded.is_error,
+ duration_nanos = excluded.duration_nanos
+ `, meta.AppID, traceID, spanID,
+ tracepbcli.SpanSummary_REQUEST, true,
+ end.Error != nil, end.DurationNanos)
+ if err != nil {
+ return errors.Wrap(err, "insert trace span event")
+ }
+ return nil
+ }
+
+ if auth := end.GetAuth(); auth != nil {
+ _, err := s.db.ExecContext(ctx, `
+ INSERT INTO trace_span_index (
+ app_id, trace_id, span_id, span_type, has_response, is_error, duration_nanos, user_id
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
+ ON CONFLICT (trace_id, span_id) DO UPDATE SET
+ has_response = excluded.has_response,
+ is_error = excluded.is_error,
+ duration_nanos = excluded.duration_nanos,
+ user_id = excluded.user_id
+ `, meta.AppID, traceID, spanID,
+ tracepbcli.SpanSummary_AUTH, true,
+ end.Error != nil, end.DurationNanos, auth.Uid)
+ if err != nil {
+ return errors.Wrap(err, "insert trace span event")
+ }
+ return nil
+ }
+
+ if msg := end.GetPubsubMessage(); msg != nil {
+ _, err := s.db.ExecContext(ctx, `
+ INSERT INTO trace_span_index (
+ app_id, trace_id, span_id, span_type, has_response, is_error, duration_nanos
+ ) VALUES (?, ?, ?, ?, ?, ?, ?)
+ ON CONFLICT (trace_id, span_id) DO UPDATE SET
+ has_response = excluded.has_response,
+ is_error = excluded.is_error,
+ duration_nanos = excluded.duration_nanos
+ `, meta.AppID, traceID, spanID,
+ tracepbcli.SpanSummary_PUBSUB_MESSAGE, true,
+ end.Error != nil, end.DurationNanos)
+ if err != nil {
+ return errors.Wrap(err, "insert trace span event")
+ }
+ return nil
+ }
+
+ if msg := end.GetTest(); msg != nil {
+ _, err := s.db.ExecContext(ctx, `
+ INSERT INTO trace_span_index (
+ app_id, trace_id, span_id, span_type, has_response, is_error, test_skipped, duration_nanos
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
+ ON CONFLICT (trace_id, span_id) DO UPDATE SET
+ has_response = excluded.has_response,
+ is_error = excluded.is_error,
+ test_skipped = excluded.test_skipped,
+ duration_nanos = excluded.duration_nanos
+ `, meta.AppID, traceID, spanID,
+ tracepbcli.SpanSummary_TEST, true,
+ msg.Failed, msg.Skipped, end.DurationNanos)
+ if err != nil {
+ return errors.Wrap(err, "insert trace span event")
+ }
+ return nil
+ }
+
+ return nil
+}
+
+var (
+ binBE = binary.BigEndian
+ binLE = binary.LittleEndian
+)
+
+// encodeTraceID encodes the trace id as a human-readable string.
+func encodeTraceID(id *tracepbcli.TraceID) string {
+ var b [16]byte
+ binLE.PutUint64(b[0:8], id.Low)
+ binLE.PutUint64(b[8:16], id.High)
+ return base32hex.EncodeToString(b[:])
+}
+
+// encodeSpanID encodes the span id as a human-readable string.
+func encodeSpanID(id uint64) string {
+ var b [8]byte
+ binLE.PutUint64(b[:], id)
+ return base32hex.EncodeToString(b[:])
+}
+
+var (
+ // base32hex is a lowercase base32 hex encoding without padding
+ // that preserves lexicographic sort order.
+ base32hex = base32.NewEncoding("0123456789abcdefghijklmnopqrstuv").WithPadding(base32.NoPadding)
+)
diff --git a/cli/daemon/engine/trace2/store.go b/cli/daemon/engine/trace2/store.go
new file mode 100644
index 0000000000..391f7b9ffb
--- /dev/null
+++ b/cli/daemon/engine/trace2/store.go
@@ -0,0 +1,86 @@
+package trace2
+
+import (
+ "context"
+ "errors"
+ "time"
+
+ tracepb2 "encr.dev/proto/encore/engine/trace2"
+)
+
+type Meta struct {
+ AppID string
+}
+
+type Query struct {
+ AppID string
+ Service string
+ Endpoint string
+ Topic string
+ Subscription string
+ TraceID string
+ MessageID string
+ TestFilter *bool // nil means both test and non-test traces are returned
+ Tags []Tag
+
+ // StartTime and EndTime specify the time range to query.
+ // Zero values leave the corresponding bound open.
+ StartTime, EndTime time.Time
+
+ IsError *bool // nil means both successes and failures are returned
+
+ // Minimum and maximum duration (in nanoseconds) to filter requests for.
+ // If MaxDurNanos is 0 there is no upper limit.
+ MinDurNanos, MaxDurNanos uint64
+
+ Limit int // if 0 defaults to 100.
+}
+
+type Tag struct {
+ Key string
+ Value string
+}
+
+// ErrNotFound is reported by Store.Get when a trace is not found.
+var ErrNotFound = errors.New("trace not found")
+
+// A ListEntryIterator is called once for each trace matching the query,
+// sequentially and in streaming fashion as traces are read from the store.
+//
+// If it returns false the listing operation is stopped and the function is
+// not called again.
+type ListEntryIterator func(*tracepb2.SpanSummary) bool
+
+// An EventIterator is called once for each event in a trace,
+// sequentially and in streaming fashion as events are read from the store.
+//
+// If it returns false the stream is aborted and the function is
+// not called again.
+type EventIterator func(*tracepb2.TraceEvent) bool
+
+// Store is the interface for storing and retrieving traces.
+type Store interface {
+ // WriteEvents persists requests in the store.
+ WriteEvents(ctx context.Context, meta *Meta, events []*tracepb2.TraceEvent) error
+
+ // List lists traces that match the query.
+ // It calls iter for each trace read; see ListEntryIterator.
+ List(ctx context.Context, q *Query, iter ListEntryIterator) error
+
+ // Get streams events matching the given trace id.
+ // iter may be called with events out of order.
+ // If the trace is not found it reports an error matching ErrNotFound.
+ Get(ctx context.Context, appID, traceID string, iter EventIterator) error
+
+ // Listen listens for new spans.
+ Listen(ch chan<- NewSpanEvent)
+
+ // Clear removes all traces for an app.
+ Clear(ctx context.Context, appID string) error
+}
+
+type NewSpanEvent struct {
+ AppID string
+ TestTrace bool
+ Span *tracepb2.SpanSummary
+}
diff --git a/cli/daemon/exec_script.go b/cli/daemon/exec_script.go
new file mode 100644
index 0000000000..e2191aff27
--- /dev/null
+++ b/cli/daemon/exec_script.go
@@ -0,0 +1,129 @@
+package daemon
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/rs/zerolog/log"
+ "golang.org/x/mod/modfile"
+
+ "encr.dev/cli/daemon/run"
+ "encr.dev/internal/optracker"
+ "encr.dev/pkg/appfile"
+ "encr.dev/pkg/paths"
+ daemonpb "encr.dev/proto/encore/daemon"
+)
+
+// ExecScript executes a one-off script.
+func (s *Server) ExecScript(req *daemonpb.ExecScriptRequest, stream daemonpb.Daemon_ExecScriptServer) error {
+ ctx := stream.Context()
+ slog := &streamLog{stream: stream, buffered: true}
+ stderr := slog.Stderr(false)
+ sendErr := func(err error) {
+ if list := run.AsErrorList(err); list != nil {
+ _ = list.SendToStream(stream)
+ } else {
+ errStr := err.Error()
+ if !strings.HasSuffix(errStr, "\n") {
+ errStr += "\n"
+ }
+ slog.Stderr(false).Write([]byte(errStr))
+ }
+ streamExit(stream, 1)
+ }
+
+ ctx, tracer, err := s.beginTracing(ctx, req.AppRoot, req.WorkingDir, req.TraceFile)
+ if err != nil {
+ sendErr(err)
+ return nil
+ }
+ defer tracer.Close()
+
+ app, err := s.apps.Track(req.AppRoot)
+ if err != nil {
+ sendErr(err)
+ return nil
+ }
+
+ ns, err := s.namespaceOrActive(ctx, app, req.Namespace)
+ if err != nil {
+ sendErr(err)
+ return nil
+ }
+
+ ops := optracker.New(stderr, stream)
+ defer ops.AllDone() // Kill the tracker when we exit this function
+
+ testResults := make(chan error, 1)
+ defer func() {
+ if recovered := recover(); recovered != nil {
+ var err error
+ switch recovered := recovered.(type) {
+ case error:
+ err = recovered
+ default:
+ err = fmt.Errorf("%v", recovered)
+ }
+ log.Err(err).Msg("panic during script execution")
+ testResults <- fmt.Errorf("panic occurred within Encore during script execution: %v\n", recovered)
+ }
+ }()
+
+ switch app.Lang() {
+ case appfile.LangGo:
+ modPath := filepath.Join(app.Root(), "go.mod")
+ modData, err := os.ReadFile(modPath)
+ if err != nil {
+ sendErr(err)
+ return nil
+ }
+ mod, err := modfile.Parse(modPath, modData, nil)
+ if err != nil {
+ sendErr(err)
+ return nil
+ }
+
+ commandRelPath := filepath.ToSlash(filepath.Join(req.WorkingDir, req.ScriptArgs[0]))
+ scriptArgs := req.ScriptArgs[1:]
+ commandPkg := paths.Pkg(mod.Module.Mod.Path).JoinSlash(paths.RelSlash(commandRelPath))
+
+ p := run.ExecScriptParams{
+ App: app,
+ NS: ns,
+ WorkingDir: req.WorkingDir,
+ Environ: req.Environ,
+ MainPkg: commandPkg,
+ ScriptArgs: scriptArgs,
+ Stdout: slog.Stdout(false),
+ Stderr: slog.Stderr(false),
+ OpTracker: ops,
+ }
+ if err := s.mgr.ExecScript(stream.Context(), p); err != nil {
+ sendErr(err)
+ } else {
+ streamExit(stream, 0)
+ }
+ case appfile.LangTS:
+ p := run.ExecCommandParams{
+ App: app,
+ NS: ns,
+ WorkingDir: req.WorkingDir,
+ Environ: req.Environ,
+ Command: req.ScriptArgs[0],
+ ScriptArgs: req.ScriptArgs[1:],
+ Stdout: slog.Stdout(false),
+ Stderr: slog.Stderr(false),
+ OpTracker: ops,
+ }
+
+ if err := s.mgr.ExecCommand(stream.Context(), p); err != nil {
+ sendErr(err)
+ } else {
+ streamExit(stream, 0)
+ }
+ }
+
+ return nil
+}
diff --git a/cli/daemon/export.go b/cli/daemon/export.go
new file mode 100644
index 0000000000..71108ab418
--- /dev/null
+++ b/cli/daemon/export.go
@@ -0,0 +1,39 @@
+package daemon
+
+import (
+ "go/scanner"
+
+ "encr.dev/cli/daemon/export"
+ daemonpb "encr.dev/proto/encore/daemon"
+)
+
+// Export exports the app.
+func (s *Server) Export(req *daemonpb.ExportRequest, stream daemonpb.Daemon_ExportServer) error {
+ slog := &streamLog{stream: stream, buffered: false}
+ log := newStreamLogger(slog)
+
+ app, err := s.apps.Track(req.AppRoot)
+ if err != nil {
+ log.Error().Err(err).Msg("failed to resolve app")
+ streamExit(stream, 1)
+ return nil
+ }
+
+ exitCode := 0
+ success, err := export.Docker(stream.Context(), app, req, log)
+ if err != nil {
+ exitCode = 1
+ if list, ok := err.(scanner.ErrorList); ok {
+ for _, e := range list {
+ log.Error().Msg(e.Error())
+ }
+ } else {
+ log.Error().Msg(err.Error())
+ }
+ } else if !success {
+ exitCode = 1
+ }
+
+ streamExit(stream, exitCode)
+ return nil
+}
diff --git a/cli/daemon/export/download.go b/cli/daemon/export/download.go
new file mode 100644
index 0000000000..fe0ff5cbcb
--- /dev/null
+++ b/cli/daemon/export/download.go
@@ -0,0 +1,150 @@
+package export
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "path/filepath"
+ "runtime"
+
+ "github.com/rs/zerolog"
+ "github.com/rs/zerolog/log"
+
+ "encr.dev/internal/conf"
+ "encr.dev/internal/env"
+ "encr.dev/internal/version"
+ "encr.dev/pkg/dockerbuild"
+)
+
+const (
+ DOWNLOAD_BASE_URL = "https://storage.googleapis.com/encore-optional/encore"
+)
+
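+// downloadBinary resolves the named prebuilt binary for the given
+// platform/arch. Development builds are loaded from the local runtimes
+// path; release builds are downloaded into a version-scoped cache
+// directory and verified against a published SHA-256 checksum.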
+func downloadBinary(platform, arch string, binary string, log zerolog.Logger) (dockerbuild.HostPath, error) {
+ if version.Channel == version.DevBuild {
+ suffix := ""
+ if platform != runtime.GOOS || arch != runtime.GOARCH {
+ suffix = "-" + platform + "-" + arch
+ }
+ if binary == "encore-runtime.node" {
+ binary = "js/" + binary
+ }
+ path := filepath.Join(env.EncoreRuntimesPath(), binary+suffix)
+ if _, err := os.Stat(path); err == nil {
+ return dockerbuild.HostPath(path), nil
+ }
+ return "", fmt.Errorf("development build of %s/%s %s not found at %s. Build it with `go run ./pkg/encorebuild/cmd/build-local-binary %[3]s --os=%[1]s --arch=%[2]s`", platform, arch, binary, path)
+ }
+ cacheDir, err := conf.CacheDir()
+ if err != nil {
+ return "", err
+ }
+ binDir := dockerbuild.HostPath(cacheDir).Join("bin")
+ archDir := binDir.Join(version.Version, platform, arch)
+ binaryPath := archDir.Join(binary)
+ if _, err := os.Stat(binaryPath.String()); err == nil {
+ return binaryPath, nil
+ }
+ if err := os.MkdirAll(archDir.String(), 0755); err != nil {
+ return "", err
+ }
+ // Download the binaries
+ archURL := fmt.Sprintf("%s/%s/%s-%s", DOWNLOAD_BASE_URL, version.Version, platform, arch)
+ url := fmt.Sprintf("%s/%s", archURL, binary)
+ log.Info().Msgf("Downloading %s/%s %s", platform, arch, binary)
+ if err := downloadFile(url, binaryPath.String()); err != nil {
+ return "", err
+ }
+ tryCleanupPreviousVersions(binDir)
+ return binaryPath, nil
+}
+
+func tryCleanupPreviousVersions(binDir dockerbuild.HostPath) {
+ // Clean up binaries for other versions
+ entries, err := os.ReadDir(binDir.String())
+ if err != nil {
+ log.Warn().Msgf("failed to read directory %s: %v", binDir, err)
+ return
+ }
+ for _, entry := range entries {
+ if entry.IsDir() && entry.Name() != version.Version {
+ oldVersionPath := filepath.Join(binDir.String(), entry.Name())
+ if err := os.RemoveAll(oldVersionPath); err != nil {
+ log.Warn().Msgf("failed to remove old version directory %s: %v", oldVersionPath, err)
+ }
+ }
+ }
+}
+
+func downloadFile(url, dest string) error {
+ // Download the file to a temporary destination
+ resp, err := http.Get(url)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("failed to download %s: %s", url, resp.Status)
+ }
+
+ tmpDest := dest + ".tmp"
+ out, err := os.OpenFile(tmpDest, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755)
+ if err != nil {
+ return err
+ }
+ defer out.Close()
+
+ _, err = io.Copy(out, resp.Body)
+ if err != nil {
+ return err
+ }
+ out.Close()
+
+ // Download the checksum
+ sha256url := url + ".sha256"
+ resp, err = http.Get(sha256url)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("failed to download %s: %s", sha256url, resp.Status)
+ }
+ hash, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+
+ // Validate the checksum
+ if err := validateHash(tmpDest, string(hash)); err != nil {
+ return err
+ }
+
+ // Move the file
+ if err := os.Rename(tmpDest, dest); err != nil {
+ return err
+ }
+ return nil
+}
+
+func validateHash(file, hash string) error {
+ f, err := os.Open(file)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ h := sha256.New()
+ if _, err := io.Copy(h, f); err != nil {
+ return err
+ }
+ if fileHash := hex.EncodeToString(h.Sum(nil)); hash != fileHash {
+ return fmt.Errorf("file checksum mismatch: expected %s, got %s", hash, fileHash)
+ }
+ return nil
+}
diff --git a/cli/daemon/export/export.go b/cli/daemon/export/export.go
new file mode 100644
index 0000000000..9be8a4b476
--- /dev/null
+++ b/cli/daemon/export/export.go
@@ -0,0 +1,311 @@
+package export
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "path/filepath"
+ "time"
+
+ "github.com/cockroachdb/errors"
+ "github.com/google/go-containerregistry/pkg/authn"
+ "github.com/google/go-containerregistry/pkg/name"
+ v1 "github.com/google/go-containerregistry/pkg/v1"
+ "github.com/google/go-containerregistry/pkg/v1/daemon"
+ "github.com/google/go-containerregistry/pkg/v1/empty"
+ "github.com/google/go-containerregistry/pkg/v1/remote"
+ "github.com/rs/zerolog"
+
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/internal/env"
+ "encr.dev/internal/version"
+ "encr.dev/pkg/appfile"
+ "encr.dev/pkg/builder"
+ "encr.dev/pkg/builder/builderimpl"
+ "encr.dev/pkg/cueutil"
+ "encr.dev/pkg/dockerbuild"
+ "encr.dev/pkg/fns"
+ "encr.dev/pkg/option"
+ "encr.dev/pkg/vcs"
+ daemonpb "encr.dev/proto/encore/daemon"
+)
+
+// Docker exports the app as a docker image.
+func Docker(ctx context.Context, app *apps.Instance, req *daemonpb.ExportRequest, log zerolog.Logger) (success bool, err error) {
+ params := req.GetDocker()
+ if params == nil {
+ return false, errors.Newf("unsupported format: %T", req.Format)
+ }
+
+ expSet, err := app.Experiments(req.Environ)
+ if err != nil {
+ return false, errors.Wrap(err, "get experimental features")
+ }
+
+ vcsRevision := vcs.GetRevision(app.Root())
+ buildInfo := builder.BuildInfo{
+ BuildTags: []string{"timetzdata"},
+ CgoEnabled: req.CgoEnabled,
+ StaticLink: true,
+ DebugMode: builder.DebugModeDisabled,
+ Environ: req.Environ,
+ GOOS: req.Goos,
+ GOARCH: req.Goarch,
+ KeepOutput: false,
+ Revision: vcsRevision.Revision,
+ UncommittedChanges: vcsRevision.Uncommitted,
+
+ // Use the local JS runtime if this is a development build.
+ UseLocalJSRuntime: version.Channel == version.DevBuild,
+ }
+ appLang := app.Lang()
+ bld := builderimpl.Resolve(appLang, expSet)
+ defer fns.CloseIgnore(bld)
+ parse, err := bld.Parse(ctx, builder.ParseParams{
+ Build: buildInfo,
+ App: app,
+ Experiments: expSet,
+ WorkingDir: ".",
+ ParseTests: false,
+ })
+ if err != nil {
+ return false, err
+ }
+ if err := app.CacheMetadata(parse.Meta); err != nil {
+ log.Info().Err(err).Msg("failed to cache metadata")
+ return false, errors.Wrap(err, "cache metadata")
+ }
+
+ // Validate the service configs.
+ _, err = bld.ServiceConfigs(ctx, builder.ServiceConfigsParams{
+ Parse: parse,
+ CueMeta: &cueutil.Meta{
+ // Dummy data to satisfy config validation.
+ APIBaseURL: "http://localhost:0",
+ EnvName: "encore-eject",
+ EnvType: cueutil.EnvType_Development,
+ CloudType: cueutil.CloudType_Local,
+ },
+ })
+ if err != nil {
+ return false, err
+ }
+
+ log.Info().Msgf("compiling Encore application for %s/%s", req.Goos, req.Goarch)
+ result, err := bld.Compile(ctx, builder.CompileParams{
+ Build: buildInfo,
+ App: app,
+ Parse: parse,
+ OpTracker: nil, // TODO
+ Experiments: expSet,
+ WorkingDir: ".",
+ })
+
+ if err != nil {
+ log.Info().Err(err).Msg("compilation failed")
+ return false, errors.Wrap(err, "compilation failed")
+ }
+
+ var crossNodeRuntime option.Option[dockerbuild.HostPath]
+ if appLang == appfile.LangTS && buildInfo.IsCrossBuild() {
+ binary, err := downloadBinary(req.Goos, req.Goarch, "encore-runtime.node", log)
+ if err != nil {
+ return false, errors.Wrap(err, "download runtime binaries")
+ }
+ crossNodeRuntime = option.Some(binary)
+ }
+
+ buildSettings, err := app.BuildSettings()
+ if err != nil {
+ return false, errors.Wrap(err, "get build settings")
+ }
+
+ describeCfg := dockerbuild.DescribeConfig{
+ Meta: parse.Meta,
+ Compile: result,
+ BundleSource: option.Option[dockerbuild.BundleSourceSpec]{},
+ DockerBaseImage: option.AsOptional(params.BaseImageTag),
+ Runtimes: dockerbuild.HostPath(env.EncoreRuntimesPath()),
+ NodeRuntime: crossNodeRuntime,
+ ProcessPerService: buildSettings.Docker.ProcessPerService,
+ }
+
+ if buildSettings.Docker.BundleSource || appLang == appfile.LangTS {
+ workspaceRoot := req.WorkspaceRoot
+ appRoot := app.Root()
+
+ relPath, err := filepath.Rel(workspaceRoot, appRoot)
+ if err != nil {
+ return false, errors.Wrap(err, "relative path from workspace root to app root")
+ }
+
+ relPath = filepath.ToSlash(relPath)
+
+ includedPaths, err := dockerbuild.DetermineIncludes(appLang, buildSettings.Docker.BundleSource, workspaceRoot, appRoot)
+ if err != nil {
+ return false, errors.Wrap(err, "determine extra includes")
+ }
+
+ imageAppRoot := dockerbuild.ImagePath("/workspace").Join(relPath)
+
+ describeCfg.BundleSource = option.Some(dockerbuild.BundleSourceSpec{
+ Source: dockerbuild.HostPath(workspaceRoot),
+ Dest: "/workspace",
+ AppRootRelpath: dockerbuild.RelPath(relPath),
+ IncludeSource: includedPaths,
+ ExcludeSource: []dockerbuild.RelPath{
+ ".git",
+ },
+ })
+
+ if describeCfg.WorkingDir.Empty() {
+ // Set the working directory to app root by default.
+ describeCfg.WorkingDir = option.Some(imageAppRoot)
+ }
+ }
+
+ spec, err := dockerbuild.Describe(describeCfg)
+ if err != nil {
+ return false, errors.Wrap(err, "describe docker image")
+ }
+
+ cors, err := app.GlobalCORS()
+ if err != nil {
+ return false, errors.Wrap(err, "get global CORS")
+ }
+ var logResponse string
+ if !req.SkipInfraConf {
+ cfg, infraCfgOutput, err := buildAndValidateInfraConfig(EmbeddedInfraConfigParams{
+ File: dockerbuild.HostPath(req.InfraConfPath),
+ Services: req.Services,
+ Gateways: req.Gateways,
+ GlobalCORS: cors,
+ Meta: parse.Meta,
+ })
+ logResponse = infraCfgOutput
+ if err != nil {
+ return false, errors.Wrap(err, "build infra config")
+ }
+ data, err := json.Marshal(cfg)
+ if err != nil {
+ return false, errors.Wrap(err, "marshal infra config")
+ }
+ spec.WriteFiles[defaultInfraConfigPath] = data
+ spec.Env = append(spec.Env, fmt.Sprintf("ENCORE_INFRA_CONFIG_PATH=%s", defaultInfraConfigPath))
+ }
+ var baseImgOverride option.Option[v1.Image]
+ if params.BaseImageTag != "" {
+ baseImg, err := resolveBaseImage(ctx, log, params, spec)
+ if err != nil {
+ return false, errors.Wrap(err, "resolve base image")
+ }
+ baseImgOverride = option.Some(baseImg)
+ }
+
+ var supervisorPath option.Option[dockerbuild.HostPath]
+ if spec.Supervisor.Present() {
+ binary, err := downloadBinary(req.Goos, req.Goarch, "supervisor-encore", log)
+ if err != nil {
+ return false, errors.Wrap(err, "download supervisor binaries")
+ }
+ supervisorPath = option.Some(binary)
+ }
+ img, err := dockerbuild.BuildImage(ctx, spec, dockerbuild.ImageBuildConfig{
+ BuildTime: time.Now(),
+ BaseImageOverride: baseImgOverride,
+ AddCACerts: option.Some[dockerbuild.ImagePath](""),
+ SupervisorPath: supervisorPath,
+ })
+ if err != nil {
+ return false, errors.Wrap(err, "build docker image")
+ }
+
+ if params.LocalDaemonTag != "" {
+ tag, err := name.NewTag(params.LocalDaemonTag, name.WeakValidation)
+ if err != nil {
+ log.Error().Err(err).Msg("invalid image tag")
+ return false, nil
+ }
+ log.Info().Msg("saving image to local docker daemon")
+
+ _, err = daemon.Write(tag, img, daemon.WithUnbufferedOpener())
+ if err != nil {
+ log.Error().Err(err).Msg("unable to save docker image")
+ return false, nil
+ }
+ log.Info().Msg("successfully saved local docker image")
+ }
+
+ if params.PushDestinationTag != "" {
+ tag, err := name.NewTag(params.PushDestinationTag, name.WeakValidation)
+ if err != nil {
+ log.Error().Err(err).Msg("invalid image tag")
+ return false, nil
+ }
+ log.Info().Msg("pushing image to docker registry")
+ if err := pushDockerImage(ctx, log, img, tag); err != nil {
+ log.Error().Err(err).Msg("unable to push docker image")
+ return false, nil
+ }
+ }
+
+ log.Info().Msgf("successfully exported app as docker image\n%s", logResponse)
+ return true, nil
+}
+
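+// resolveBaseImage resolves the configured base image tag to an image,
+// preferring a matching image in the local Docker daemon and falling back to
+// fetching it from the remote registry for the target OS/architecture.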
+func resolveBaseImage(ctx context.Context, log zerolog.Logger, p *daemonpb.DockerExportParams, spec *dockerbuild.ImageSpec) (v1.Image, error) {
+ baseImgTag := p.BaseImageTag
+ if baseImgTag == "" || baseImgTag == "scratch" {
+ return empty.Image, nil
+ }
+
+ // Try to get it from the daemon if it exists.
+ log.Info().Msgf("resolving base image %s", baseImgTag)
+ baseImgRef, err := name.ParseReference(baseImgTag)
+ if err != nil {
+ return nil, errors.Wrap(err, "parse base image")
+ }
+
+ fetchRemote := true
+ img, err := daemon.Image(baseImgRef, daemon.WithUnbufferedOpener())
+ if err == nil {
+ file, err := img.ConfigFile()
+ if err == nil {
+ fetchRemote = file.OS != spec.OS || file.Architecture != spec.Arch
+ }
+ }
+ if fetchRemote {
+ log.Info().Msg("could not get image from local daemon, fetching it remotely")
+ keychain := authn.DefaultKeychain
+ img, err = remote.Image(baseImgRef, remote.WithAuthFromKeychain(keychain), remote.WithContext(ctx), remote.WithPlatform(v1.Platform{
+ OS: spec.OS,
+ Architecture: spec.Arch,
+ }))
+ if err != nil {
+ return nil, errors.Wrap(err, "unable to fetch image")
+ }
+ // If the user requested to save the image to the local daemon, save the remote base image there as well.
+ if p.LocalDaemonTag != "" {
+ if tag, err := name.NewTag(baseImgTag, name.WeakValidation); err == nil {
+ log.Info().Msgf("saving remote image %s to local docker daemon", baseImgTag)
+ if _, err = daemon.Write(tag, img); err != nil {
+ log.Warn().Err(err).Msg("unable to save remote image to local docker daemon, skipping")
+ } else {
+ log.Info().Msgf("saved remote image to local docker daemon")
+ }
+ }
+ }
+ }
+
+ return img, nil
+}
+
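+// pushDockerImage pushes the built image to the registry referenced by the
+// destination tag, authenticating with the default keychain.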
+func pushDockerImage(ctx context.Context, log zerolog.Logger, img v1.Image, destination name.Tag) error {
+ log.Info().Msg("pushing docker image to container registry")
+ keychain := authn.DefaultKeychain
+ if err := remote.Write(destination, img, remote.WithAuthFromKeychain(keychain), remote.WithContext(ctx)); err != nil {
+ return errors.WithStack(err)
+ }
+ log.Info().Msg("successfully pushed docker image")
+ return nil
+}
diff --git a/cli/daemon/export/infra_config.go b/cli/daemon/export/infra_config.go
new file mode 100644
index 0000000000..238adc3945
--- /dev/null
+++ b/cli/daemon/export/infra_config.go
@@ -0,0 +1,492 @@
+package export
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "slices"
+ "strings"
+
+ "github.com/cockroachdb/errors"
+ "github.com/logrusorgru/aurora"
+ "github.com/tailscale/hujson"
+ "golang.org/x/exp/maps"
+
+ "encore.dev/appruntime/exported/config/infra"
+ "encr.dev/pkg/appfile"
+ "encr.dev/pkg/dockerbuild"
+ "encr.dev/pkg/fns"
+ meta "encr.dev/proto/encore/parser/meta/v1"
+)
+
+var (
+ LEARN_MORE = aurora.Italic("Learn More: https://encore.dev/docs/how-to/self-host").String()
+)
+
+// defaultInfraConfigPath is the path in the image where the environment configuration is mounted.
+const defaultInfraConfigPath dockerbuild.ImagePath = "/encore/infra.config.json"
+
+type EmbeddedInfraConfigParams struct {
+ // The path to the infra config file.
+ File dockerbuild.HostPath
+
+ // Services to include in the image.
+ Services []string
+
+ // Gateways to include in the image.
+ Gateways []string
+
+ // CORS config to include in the image.
+ GlobalCORS appfile.CORS
+
+ Meta *meta.Data
+}
+
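+// buildAndValidateInfraConfig loads the infra config file (if provided),
+// prunes it down to the resources required by the hosted services and
+// gateways, and reports any required resources missing from the config.
+// On success it returns the pruned config together with human-readable
+// deployment instructions.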
+func buildAndValidateInfraConfig(params EmbeddedInfraConfigParams) (*infra.InfraConfig, string, error) {
+ missing := map[string][]string{}
+ md := params.Meta
+ services := params.Services
+ gateways := params.Gateways
+ if len(services)+len(gateways) == 0 {
+ services = fns.Map(md.Svcs, (*meta.Service).GetName)
+ gateways = fns.Map(md.Gateways, (*meta.Gateway).GetEncoreName)
+ }
+
+ unknownServices := fns.Filter(services, func(s string) bool {
+ return !fns.Any(md.Svcs, func(svc *meta.Service) bool {
+ return svc.Name == s
+ })
+ })
+ if len(unknownServices) > 0 {
+ return nil, "", errors.Newf("unknown services: %v", unknownServices)
+ }
+
+ unknownGateways := fns.Filter(gateways, func(s string) bool {
+ return !fns.Any(md.Gateways, func(gw *meta.Gateway) bool {
+ return gw.EncoreName == s
+ })
+ })
+ if len(unknownGateways) > 0 {
+ return nil, "", errors.Newf("unknown gateways: %v", unknownGateways)
+ }
+
+ var infraCfg infra.InfraConfig
+ if params.File != "" {
+ data, err := os.ReadFile(params.File.String())
+ if err != nil {
+ return nil, "", errors.Wrap(err, "infra config not found")
+ }
+ data, err = hujson.Standardize(data)
+ if err != nil {
+ return nil, "", errors.Wrap(err, "could not standardize infra config")
+ }
+ err = json.Unmarshal(data, &infraCfg)
+ if err != nil {
+ return nil, "", errors.Wrap(err, "could not decode infra config")
+ }
+ }
+ infraCfg.HostedGateways = gateways
+ infraCfg.HostedServices = services
+ envVars, validationErrors := infra.Validate(&infraCfg)
+
+ hostedSvcs := fns.ToMap(fns.Filter(md.Svcs, func(svc *meta.Service) bool {
+ return fns.Any(services, func(s string) bool {
+ return svc.Name == s
+ })
+ }), (*meta.Service).GetName)
+
+ var secrets []string
+ // Find all service dependencies for our hosted services.
+ var svcDeps = map[string]struct{}{}
+ pkgs := fns.ToMap(md.Pkgs, (*meta.Package).GetRelPath)
+
+ // Add dependencies for all outbound RPCs for our hosted services
+ // and collect all required secrets.
+ for _, p := range md.Pkgs {
+ if p.ServiceName == "" {
+ secrets = append(secrets, p.Secrets...)
+ continue
+ } else if _, ok := hostedSvcs[p.ServiceName]; !ok {
+ continue
+ }
+ secrets = append(secrets, p.Secrets...)
+ for _, r := range p.RpcCalls {
+ svcDeps[pkgs[r.Pkg].ServiceName] = struct{}{}
+ }
+ }
+
+ // Add the auth handler's service to service discovery if any service exposes auth-protected RPCs.
+ if md.AuthHandler != nil {
+ requiresAuth := fns.Any(md.Svcs, func(svc *meta.Service) bool {
+ return fns.Any(svc.Rpcs, func(rpc *meta.RPC) bool {
+ return rpc.AccessType == meta.RPC_AUTH
+ })
+ })
+ if requiresAuth {
+ svcDeps[md.AuthHandler.ServiceName] = struct{}{}
+ }
+ }
+
+ // Make sure we have service discovery for all services that are not private
+ // if we are hosting gateways.
+ if len(gateways) > 0 {
+ for _, svc := range md.Svcs {
+ if _, ok := hostedSvcs[svc.Name]; ok {
+ continue
+ }
+ for _, rpc := range svc.Rpcs {
+ if rpc.AccessType != meta.RPC_PRIVATE {
+ svcDeps[svc.Name] = struct{}{}
+ break
+ }
+ }
+ }
+ }
+
+ // Remove any services that we host from our service dependencies.
+ for _, svc := range hostedSvcs {
+ delete(svcDeps, svc.Name)
+ }
+
+ // Remove any service discovery entries for services that we don't host.
+ for svc := range infraCfg.ServiceDiscovery {
+ if _, ok := svcDeps[svc]; !ok {
+ delete(infraCfg.ServiceDiscovery, svc)
+ } else {
+ delete(svcDeps, svc)
+ }
+ }
+
+ // Make sure all our service dependencies are accounted for.
+ if len(svcDeps) > 0 {
+ missing["Service Discovery"] = maps.Keys(svcDeps)
+ }
+
+ // Remove secrets we don't need for our hosted services.
+ slices.Sort(secrets)
+ secrets = slices.Compact(secrets)
+ var ok bool
+ if infraCfg.Secrets.EnvRef == nil {
+ for secret := range infraCfg.Secrets.SecretsMap {
+ secrets, ok = fns.Delete(secrets, secret)
+ if !ok {
+ delete(infraCfg.Secrets.SecretsMap, secret)
+ }
+ }
+
+ // Make sure all our secrets are accounted for.
+ if len(secrets) > 0 {
+ missing["Secrets"] = secrets
+ }
+ } else {
+ // TODO: Inform the user that a secrets map must be defined in the infra config.
+ }
+
+ // Find all databases for our hosted services.
+ databases := fns.FlatMap(maps.Values(hostedSvcs), func(db *meta.Service) []string {
+ return db.Databases
+ })
+ slices.Sort(databases)
+ databases = slices.Compact(databases)
+
+ // Remove databases we don't host and drop any SQL servers left empty.
+ // Filter in place to avoid index-shifting bugs when removing multiple servers.
+ keptServers := infraCfg.SQLServers[:0]
+ for _, sqlServer := range infraCfg.SQLServers {
+ for name := range sqlServer.Databases {
+ databases, ok = fns.Delete(databases, name)
+ if !ok {
+ delete(sqlServer.Databases, name)
+ }
+ }
+ if len(sqlServer.Databases) > 0 {
+ keptServers = append(keptServers, sqlServer)
+ }
+ }
+ infraCfg.SQLServers = keptServers
+
+ if len(databases) > 0 {
+ missing["Databases"] = databases
+ }
+
+ caches := fns.MapAndFilter(md.CacheClusters, func(cache *meta.CacheCluster) (string, bool) {
+ return cache.Name, fns.Any(cache.Keyspaces, func(ks *meta.CacheCluster_Keyspace) bool {
+ return fns.Any(services, func(s string) bool {
+ return ks.Service == s
+ })
+ })
+ })
+
+ for name := range infraCfg.Redis {
+ caches, ok = fns.Delete(caches, name)
+ if !ok {
+ delete(infraCfg.Redis, name)
+ }
+ }
+
+ if len(caches) > 0 {
+ missing["Redis"] = caches
+ }
+
+ subscriptions := fns.FlatMap(md.PubsubTopics, func(topic *meta.PubSubTopic) [][2]string {
+ return fns.MapAndFilter(topic.Subscriptions, func(s *meta.PubSubTopic_Subscription) ([2]string, bool) {
+ return [2]string{topic.Name, s.Name}, fns.Any(services, func(svc string) bool {
+ return s.ServiceName == svc
+ })
+ })
+ })
+
+ for _, pubsub := range infraCfg.PubSub {
+ for topicName, topic := range pubsub.GetTopics() {
+ for subName := range topic.GetSubscriptions() {
+ found := false
+ for i, sub := range subscriptions {
+ if sub[0] == topicName && sub[1] == subName {
+ subscriptions = append(subscriptions[:i], subscriptions[i+1:]...)
+ found = true
+ break
+ }
+ }
+ if !found {
+ topic.DeleteSubscription(subName)
+ }
+ }
+ }
+ }
+
+ if len(subscriptions) > 0 {
+ missing["Subscriptions"] = fns.Map(subscriptions, func(sub [2]string) string {
+ return sub[0] + "/" + sub[1]
+ })
+ }
+
+ topics := fns.MapAndFilter(md.PubsubTopics, func(topic *meta.PubSubTopic) (string, bool) {
+ return topic.Name, fns.Any(topic.Publishers, func(p *meta.PubSubTopic_Publisher) bool {
+ return fns.Any(services, func(s string) bool {
+ return p.ServiceName == s
+ })
+ })
+ })
+
+ // Remove topics we don't publish to and drop any PubSub providers left
+ // empty. Filter in place to avoid index-shifting bugs when removing
+ // multiple providers.
+ keptPubSub := infraCfg.PubSub[:0]
+ for _, pubsub := range infraCfg.PubSub {
+ for topicName, topic := range pubsub.GetTopics() {
+ idx := slices.Index(topics, topicName)
+ if idx != -1 {
+ topics = append(topics[:idx], topics[idx+1:]...)
+ } else if len(topic.GetSubscriptions()) == 0 {
+ pubsub.DeleteTopic(topicName)
+ }
+ }
+ if len(pubsub.GetTopics()) > 0 {
+ keptPubSub = append(keptPubSub, pubsub)
+ }
+ }
+ infraCfg.PubSub = keptPubSub
+ if len(topics) > 0 {
+ missing["Topics"] = topics
+ }
+
+ // Validate bucket config
+ buckets := fns.FlatMap(maps.Values(hostedSvcs), func(svc *meta.Service) []string {
+ return fns.Map(svc.Buckets, (*meta.BucketUsage).GetBucket)
+ })
+ slices.Sort(buckets)
+ buckets = slices.Compact(buckets)
+
+ for _, storage := range infraCfg.ObjectStorage {
+ for name, bktCfg := range storage.GetBuckets() {
+ metaBkt, ok := fns.Find(md.Buckets, func(b *meta.Bucket) bool {
+ return b.Name == name
+ })
+ if ok {
+ if metaBkt.Public && bktCfg.PublicBaseURL == "" {
+ path := infra.JSONPath("buckets").Append(infra.JSONPath(name)).Append("public_base_url")
+ validationErrors[path] = errors.New("bucket is public but no public base URL is set")
+ return nil, "", configError(missing, validationErrors)
+ }
+ }
+
+ buckets, ok = fns.Delete(buckets, name)
+ if !ok {
+ storage.DeleteBucket(name)
+ }
+ }
+ }
+ if len(buckets) > 0 {
+ missing["Buckets"] = buckets
+ }
+
+ // Copy CORS config
+ cors := infra.CORS(params.GlobalCORS)
+ infraCfg.CORS = &cors
+
+ if len(missing) > 0 || len(validationErrors) > 0 {
+ return nil, "", configError(missing, validationErrors)
+ }
+
+ cronJobStr, err := formatCronJobInstructions(services, md)
+ if err != nil {
+ return nil, "", err
+ }
+ envStr := formatEnvVars(envVars)
+ var resp strings.Builder
+ if len(cronJobStr)+len(envStr) > 0 {
+ resp.WriteString(aurora.Bold("Before you deploy, you may need to configure the following:\n").String())
+ resp.WriteString(cronJobStr)
+ resp.WriteString(envStr)
+ }
+ resp.WriteString(LEARN_MORE)
+
+ return &infraCfg, resp.String(), nil
+}
+
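+// formatCronJobInstructions renders a table of the cron jobs belonging to
+// the hosted services, so they can be scheduled as part of the deployment.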
+func formatCronJobInstructions(services []string, md *meta.Data) (string, error) {
+ if len(md.CronJobs) == 0 {
+ return "", nil
+ }
+ svcByRelPath := fns.ToMap(md.Svcs, func(p *meta.Service) string {
+ return p.RelPath
+ })
+ cronsTable := [][]string{
+ {"ID", "Endpoint Path", "Schedule"},
+ }
+ for _, cronJob := range md.CronJobs {
+ svc, ok := svcByRelPath[cronJob.Endpoint.Pkg]
+ if !ok {
+ return "", errors.Newf("could not find service for cron job %s", cronJob.Id)
+ }
+ if !slices.Contains(services, svc.Name) {
+ continue
+ }
+ rpc, ok := fns.Find(svc.Rpcs, func(r *meta.RPC) bool {
+ return r.Name == cronJob.Endpoint.Name
+ })
+ if !ok {
+ return "", errors.Newf("could not find rpc for cron job %s", cronJob.Id)
+ }
+ cronsTable = append(cronsTable, []string{cronJob.Id, pathToString(rpc.Path), cronJob.Schedule})
+ }
+ if len(cronsTable) == 1 {
+ return "", nil
+ }
+
+ return aurora.Sprintf("\n%s\n%s\n", aurora.Bold("Cron Jobs:"), generateTable(cronsTable)), nil
+}
+
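+// generateTable renders rows as an ASCII table with cyan borders; rows[0] is
+// treated as the (bold) header row.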
+func generateTable(rows [][]string) string {
+ au := aurora.NewAurora(true)
+ var sb strings.Builder
+
+ // Calculate column widths
+ colWidths := make([]int, len(rows[0]))
+ for _, row := range rows {
+ for i, cell := range row {
+ colWidths[i] = max(colWidths[i], len(cell))
+ }
+ }
+
+ // Helper function to create a horizontal line
+ createLine := func() string {
+ line := "+"
+ for _, width := range colWidths {
+ line += strings.Repeat("-", width+2) + "+"
+ }
+ return line + "\n"
+ }
+
+ // Write top border
+ sb.WriteString(au.Cyan(createLine()).String())
+
+ // Write header
+ sb.WriteString(au.Cyan("| ").String())
+ for i, header := range rows[0] {
+ sb.WriteString(au.Bold(fmt.Sprintf("%-*s", colWidths[i], header)).String())
+ sb.WriteString(au.Cyan(" | ").String())
+ }
+ sb.WriteString("\n")
+
+ // Write header-content separator
+ sb.WriteString(au.Cyan(createLine()).String())
+
+ // Write content rows
+ for _, row := range rows[1:] {
+ sb.WriteString(au.Cyan("| ").String())
+ for i, cell := range row {
+ sb.WriteString(fmt.Sprintf("%-*s", colWidths[i], cell))
+ sb.WriteString(au.Cyan(" | ").String())
+ }
+ sb.WriteString("\n")
+ }
+
+ // Write bottom border
+ sb.WriteString(au.Cyan(createLine()).String())
+
+ return sb.String()
+}
+
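+// pathToString renders an endpoint path, prefixing parameter segments with
+// ':', wildcard segments with '*', and fallback segments with '!'.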
+func pathToString(path *meta.Path) string {
+ b := strings.Builder{}
+ for _, s := range path.Segments {
+ b.WriteByte('/')
+ switch s.Type {
+ case meta.PathSegment_PARAM:
+ b.WriteByte(':')
+ case meta.PathSegment_WILDCARD:
+ b.WriteByte('*')
+ case meta.PathSegment_FALLBACK:
+ b.WriteByte('!')
+ }
+ b.WriteString(s.Value)
+ }
+ return b.String()
+}
+
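+// formatEnvVars renders a table of the environment variables that must be
+// set for the deployment, deduplicated by name.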
+func formatEnvVars(envVars map[infra.JSONPath]infra.EnvDesc) string {
+ if len(envVars) == 0 {
+ return ""
+ }
+
+ envByName := map[string]infra.EnvDesc{}
+ for _, envVar := range envVars {
+ envByName[envVar.Name] = envVar
+ }
+ envTable := [][]string{
+ {"Name", "Description"},
+ }
+ // Sort by name so the table output is deterministic.
+ names := maps.Keys(envByName)
+ slices.Sort(names)
+ for _, name := range names {
+ envVar := envByName[name]
+ envTable = append(envTable, []string{envVar.Name, envVar.Description})
+ }
+ return aurora.Sprintf("%s\n%s\n", aurora.Bold("Environment Variables:"), generateTable(envTable))
+}
+
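+// configError builds a user-facing error describing missing resource
+// configurations and validation failures in the infra config.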
+func configError(missing map[string][]string, validation map[infra.JSONPath]error) error {
+ au := aurora.NewAurora(true)
+ var errorMsg strings.Builder
+
+ errorMsg.WriteString("\n")
+ errorMsg.WriteString(au.Red("\nYour infra configuration is incomplete\n").String())
+ errorMsg.WriteString("\n")
+
+ if len(missing) > 0 {
+ errorMsg.WriteString(au.Red("Missing Resource Configurations:\n").String())
+ maxTypeLen := 0
+ for dataType := range missing {
+ if len(dataType) > maxTypeLen {
+ maxTypeLen = len(dataType)
+ }
+ }
+
+ for dataType, values := range missing {
+ paddedType := fmt.Sprintf("%-*s", maxTypeLen, dataType)
+ errorMsg.WriteString(fmt.Sprintf(" %s: %s\n",
+ au.Bold(paddedType),
+ strings.Join(values, ", ")))
+ }
+ errorMsg.WriteString(" \n ")
+ }
+ if len(validation) > 0 {
+ errorMsg.WriteString(au.Red("Validation Errors:\n").String())
+ for dataType, err := range validation {
+ errorMsg.WriteString(fmt.Sprintf(" %s: %s\n", au.Bold(dataType), err.Error()))
+ }
+ errorMsg.WriteString(" \n ")
+ }
+ errorMsg.WriteString(LEARN_MORE)
+ return errors.New(errorMsg.String())
+}
diff --git a/cli/daemon/internal/appfile/appfile.go b/cli/daemon/internal/appfile/appfile.go
deleted file mode 100644
index 786dda0eca..0000000000
--- a/cli/daemon/internal/appfile/appfile.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Package appfile reads and writes encore.app files.
-package appfile
-
-import (
- "errors"
- "fmt"
- "io/fs"
- "os"
- "path/filepath"
-
- "github.com/tailscale/hujson"
-)
-
-// Name is the name of the Encore app file.
-// It is expected to be located in the root of the Encore app
-// (which is usually the Git repository root).
-const Name = "encore.app"
-
-// File is a parsed encore.app file.
-type File struct {
- // ID is the encore.dev app id for the app.
- // It is empty if the app is not linked to encore.dev.
- ID string `json:"id"` // can be empty
-}
-
-// Parse parses the app file data into a File.
-func Parse(data []byte) (*File, error) {
- var f File
- if err := hujson.Unmarshal(data, &f); err != nil {
- return nil, fmt.Errorf("appfile.Parse: %v", err)
- }
- return &f, nil
-}
-
-// ParseFile parses the app file located at path.
-func ParseFile(path string) (*File, error) {
- data, err := os.ReadFile(path)
- if errors.Is(err, fs.ErrNotExist) {
- return &File{}, nil
- } else if err != nil {
- return nil, fmt.Errorf("appfile.ParseFile: %w", err)
- }
- return Parse(data)
-}
-
-// Slug parses the app slug for the encore.app file located at path.
-// The slug can be empty if the app is not linked to encore.dev.
-func Slug(appRoot string) (string, error) {
- f, err := ParseFile(filepath.Join(appRoot, Name))
- if err != nil {
- return "", err
- }
- return f.ID, nil
-}
diff --git a/cli/daemon/internal/manifest/manifest.go b/cli/daemon/internal/manifest/manifest.go
deleted file mode 100644
index 623bc4533b..0000000000
--- a/cli/daemon/internal/manifest/manifest.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Package manifest reads and writes Encore app manifests.
-package manifest
-
-import (
- "crypto/rand"
- "encoding/base32"
- "encoding/json"
- "errors"
- "fmt"
- "io/fs"
- "io/ioutil"
- "os"
- "path/filepath"
-
- "encr.dev/cli/daemon/internal/appfile"
-)
-
-// Manifest represents the persisted manifest for
-// an Encore application. It is not intended to be committed to
-// source control.
-type Manifest struct {
- // AppID is a unique identifier for the app.
- // It uses the encore.dev app slug if the app
- // is linked, and is otherwise a randomly generated id.
- AppID string `json:"appID"`
-}
-
-// ReadOrCreate reads the manifest for the app rooted at appRoot.
-// If it doesn't exist it creates it first.
-func ReadOrCreate(appRoot string) (mf *Manifest, err error) {
- defer func() {
- if err != nil {
- err = fmt.Errorf("read/create manifest: %v", err)
- }
- }()
-
- var man Manifest
-
- // Use the existing manifest if we have one.
- cfgPath := filepath.Join(appRoot, ".encore", "manifest.json")
- if data, err := os.ReadFile(cfgPath); err != nil && !errors.Is(err, fs.ErrNotExist) {
- return nil, err
- } else if err == nil {
- err = json.Unmarshal(data, &man)
- if err != nil {
- return nil, err
- } else if man.AppID != "" {
- return &Manifest{AppID: man.AppID}, nil
- }
- }
-
- // Otherwise create it. Default to the App ID in the encore.app file,
- // and fall back to randomly generating an ID if the app is not linked.
- if f, err := appfile.ParseFile(filepath.Join(appRoot, appfile.Name)); err == nil && f.ID != "" {
- man.AppID = f.ID
- }
- if man.AppID == "" {
- id, err := genID()
- if err != nil {
- return nil, err
- }
- man.AppID = id
- }
-
- // Write it back.
- out, _ := json.Marshal(&man)
- if err := os.MkdirAll(filepath.Dir(cfgPath), 0755); err != nil {
- return nil, err
- } else if err := ioutil.WriteFile(cfgPath, out, 0644); err != nil {
- return nil, err
- }
- return &man, nil
-}
-
-const encodeStr = "23456789abcdefghikmnopqrstuvwxyz"
-
-var encoding = base32.NewEncoding(encodeStr).WithPadding(base32.NoPadding)
-
-// genID generates a
-func genID() (string, error) {
- var data [3]byte
- if _, err := rand.Read(data[:]); err != nil {
- return "", err
- }
- return encoding.EncodeToString(data[:]), nil
-}
diff --git a/cli/daemon/internal/runlog/runlog.go b/cli/daemon/internal/runlog/runlog.go
index 866cefaf5c..be1432bb74 100644
--- a/cli/daemon/internal/runlog/runlog.go
+++ b/cli/daemon/internal/runlog/runlog.go
@@ -6,14 +6,14 @@ import (
)
type Log interface {
- Stdout() io.Writer
- Stderr() io.Writer
+ Stdout(buffered bool) io.Writer
+ Stderr(buffered bool) io.Writer
}
type oslog struct{}
-func (oslog) Stdout() io.Writer { return os.Stdout }
-func (oslog) Stderr() io.Writer { return os.Stderr }
+func (oslog) Stdout(buffered bool) io.Writer { return os.Stdout }
+func (oslog) Stderr(buffered bool) io.Writer { return os.Stderr }
func OS() Log {
return oslog{}
diff --git a/cli/daemon/internal/sym/sym.go b/cli/daemon/internal/sym/sym.go
new file mode 100644
index 0000000000..f21653a958
--- /dev/null
+++ b/cli/daemon/internal/sym/sym.go
@@ -0,0 +1,22 @@
+// Package sym parses symbol tables from Go binaries.
+package sym
+
+import (
+ "fmt"
+ "io"
+
+ "encr.dev/cli/internal/gosym"
+)
+
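+// Table is a parsed Go symbol table, along with the base address of the
+// binary's text section.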
+type Table struct {
+ *gosym.Table
+ BaseOffset uint64
+}
+
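+// Load parses the symbol table from the Go binary read from r.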
+func Load(r io.ReaderAt) (*Table, error) {
+ tbl, err := load(r)
+ if err != nil {
+ return nil, fmt.Errorf("sym.Load: %v", err)
+ }
+ return tbl, nil
+}
diff --git a/cli/daemon/internal/sym/sym_darwin.go b/cli/daemon/internal/sym/sym_darwin.go
new file mode 100644
index 0000000000..4bf7c2c0c6
--- /dev/null
+++ b/cli/daemon/internal/sym/sym_darwin.go
@@ -0,0 +1,48 @@
+package sym
+
+import (
+ "debug/macho"
+ "fmt"
+ "io"
+
+ "encr.dev/cli/internal/gosym"
+)
+
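+// load reads the Go symbol table from a Mach-O binary using the
+// __gopclntab and __gosymtab sections.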
+func load(r io.ReaderAt) (*Table, error) {
+ exe, err := macho.NewFile(r)
+ if err != nil {
+ return nil, err
+ }
+ defer exe.Close()
+
+ text := exe.Section("__text")
+ if text == nil {
+ return nil, fmt.Errorf("cannot find __text section")
+ }
+ textAddr := text.Addr
+
+ pctbl := exe.Section("__gopclntab")
+ if pctbl == nil {
+ return nil, fmt.Errorf("cannot find __gopclntab section")
+ }
+ pctblData, err := pctbl.Data()
+ if err != nil {
+ return nil, fmt.Errorf("cannot read __gopclntab: %v", err)
+ }
+
+ symtab := exe.Section("__gosymtab")
+ if symtab == nil {
+ return nil, fmt.Errorf("cannot find __gosymtab section")
+ }
+ symtabData, err := symtab.Data()
+ if err != nil {
+ return nil, fmt.Errorf("cannot read __gosymtab: %v", err)
+ }
+
+ lntbl := gosym.NewLineTable(pctblData, textAddr)
+ tbl, err := gosym.NewTable(symtabData, lntbl)
+ if err != nil {
+ return nil, err
+ }
+ return &Table{Table: tbl, BaseOffset: textAddr}, nil
+}
diff --git a/cli/daemon/internal/sym/sym_elf.go b/cli/daemon/internal/sym/sym_elf.go
new file mode 100644
index 0000000000..082605ee5f
--- /dev/null
+++ b/cli/daemon/internal/sym/sym_elf.go
@@ -0,0 +1,51 @@
+//go:build !windows && !darwin
+// +build !windows,!darwin
+
+package sym
+
+import (
+ "debug/elf"
+ "fmt"
+ "io"
+
+ "encr.dev/cli/internal/gosym"
+)
+
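+// load reads the Go symbol table from an ELF binary using the
+// .gopclntab and .gosymtab sections.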
+func load(r io.ReaderAt) (*Table, error) {
+ exe, err := elf.NewFile(r)
+ if err != nil {
+ return nil, err
+ }
+ defer exe.Close()
+
+ text := exe.Section(".text")
+ if text == nil {
+ return nil, fmt.Errorf("cannot find .text section")
+ }
+ textAddr := text.Addr
+
+ pctbl := exe.Section(".gopclntab")
+ if pctbl == nil {
+ return nil, fmt.Errorf("cannot find .gopclntab section")
+ }
+ pctblData, err := pctbl.Data()
+ if err != nil {
+ return nil, fmt.Errorf("cannot read .gopclntab: %v", err)
+ }
+
+ symtab := exe.Section(".gosymtab")
+ if symtab == nil {
+ return nil, fmt.Errorf("cannot find .gosymtab section")
+ }
+ symtabData, err := symtab.Data()
+ if err != nil {
+ return nil, fmt.Errorf("cannot read .gosymtab: %v", err)
+ }
+
+ lntbl := gosym.NewLineTable(pctblData, textAddr)
+ tbl, err := gosym.NewTable(symtabData, lntbl)
+ if err != nil {
+ return nil, err
+ }
+ return &Table{Table: tbl, BaseOffset: textAddr}, nil
+}
diff --git a/cli/daemon/internal/sym/sym_windows.go b/cli/daemon/internal/sym/sym_windows.go
new file mode 100644
index 0000000000..487c9c122b
--- /dev/null
+++ b/cli/daemon/internal/sym/sym_windows.go
@@ -0,0 +1,83 @@
+package sym
+
+import (
+ "debug/pe"
+ "fmt"
+ "io"
+
+ "encr.dev/cli/internal/gosym"
+)
+
+// This code is a simplified version of $GOROOT/src/cmd/internal/objfile/pe.go.
+
+func load(r io.ReaderAt) (*Table, error) {
+ exe, err := pe.NewFile(r)
+ if err != nil {
+ return nil, err
+ }
+ defer exe.Close()
+
+ var imageBase, textStart uint64
+ switch oh := exe.OptionalHeader.(type) {
+ case *pe.OptionalHeader32:
+ imageBase = uint64(oh.ImageBase)
+ case *pe.OptionalHeader64:
+ imageBase = oh.ImageBase
+ default:
+ return nil, fmt.Errorf("pe file format not recognized")
+ }
+ if sect := exe.Section(".text"); sect != nil {
+ textStart = imageBase + uint64(sect.VirtualAddress)
+ }
+ pclntab, err := loadPETable(exe, "runtime.pclntab", "runtime.epclntab")
+ if err != nil {
+ return nil, err
+ }
+ symtab, err := loadPETable(exe, "runtime.symtab", "runtime.esymtab")
+ if err != nil {
+ return nil, err
+ }
+
+ lntbl := gosym.NewLineTable(pclntab, textStart)
+ tbl, err := gosym.NewTable(symtab, lntbl)
+ if err != nil {
+ return nil, err
+ }
+ return &Table{Table: tbl, BaseOffset: textStart}, nil
+}
+
+func findPESymbol(f *pe.File, name string) (*pe.Symbol, error) {
+ for _, s := range f.Symbols {
+ if s.Name != name {
+ continue
+ }
+ if s.SectionNumber <= 0 {
+ return nil, fmt.Errorf("symbol %s: invalid section number %d", name, s.SectionNumber)
+ }
+ if len(f.Sections) < int(s.SectionNumber) {
+ return nil, fmt.Errorf("symbol %s: section number %d is larger than max %d", name, s.SectionNumber, len(f.Sections))
+ }
+ return s, nil
+ }
+ return nil, fmt.Errorf("no %s symbol found", name)
+}
+
+func loadPETable(f *pe.File, sname, ename string) ([]byte, error) {
+ ssym, err := findPESymbol(f, sname)
+ if err != nil {
+ return nil, err
+ }
+ esym, err := findPESymbol(f, ename)
+ if err != nil {
+ return nil, err
+ }
+ if ssym.SectionNumber != esym.SectionNumber {
+ return nil, fmt.Errorf("%s and %s symbols must be in the same section", sname, ename)
+ }
+ sect := f.Sections[ssym.SectionNumber-1]
+ data, err := sect.Data()
+ if err != nil {
+ return nil, err
+ }
+ return data[ssym.Value:esym.Value], nil
+}
diff --git a/cli/daemon/mcp/api_tools.go b/cli/daemon/mcp/api_tools.go
new file mode 100644
index 0000000000..074c3f45c5
--- /dev/null
+++ b/cli/daemon/mcp/api_tools.go
@@ -0,0 +1,703 @@
+package mcp
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net"
+ "net/http"
+ "os"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/mark3labs/mcp-go/mcp"
+ "google.golang.org/protobuf/encoding/protojson"
+
+ "encr.dev/cli/daemon/run"
+ "encr.dev/pkg/builder"
+ metav1 "encr.dev/proto/encore/parser/meta/v1"
+ schema "encr.dev/proto/encore/parser/schema/v1"
+)
+
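+// registerAPITools registers the MCP tools for calling API endpoints and
+// inspecting services, middleware, and auth handlers.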
+func (m *Manager) registerAPITools() {
+ // Add tool for calling an API endpoint
+ m.server.AddTool(mcp.NewTool("call_endpoint",
+ mcp.WithDescription("Make HTTP requests to any API endpoint in the currently open Encore. Always use this tool to make API calls and do not use curl. This tool will automatically start the application if it's not already running. This tool allows testing and interacting with the application's API endpoints, including authentication and custom payloads."),
+ mcp.WithString("service", mcp.Description("The name of the service containing the endpoint to call. This must match a service defined in the currently open Encore.")),
+ mcp.WithString("endpoint", mcp.Description("The name of the endpoint to call within the specified service. This must match an endpoint defined in the service.")),
+ mcp.WithString("method", mcp.Description("The HTTP method to use for the request (GET, POST, PUT, DELETE, etc.). Must be a valid HTTP method.")),
+ mcp.WithString("path", mcp.Description("The API request path, including any path parameters. This should match the endpoint's defined path pattern.")),
+ mcp.WithString("payload", mcp.Description("JSON payload for the request containing all endpoint parameters. This includes path parameters, query parameters, headers, and request body as key-value pairs.")),
+ mcp.WithString("auth_token", mcp.Description("Optional authentication token to include in the request. This is used for endpoints that require authentication.")),
+ mcp.WithString("auth_payload", mcp.Description("Optional authentication payload in JSON format. This is used for custom authentication schemes.")),
+ mcp.WithString("correlation_id", mcp.Description("Optional correlation ID to track the request through the system. Useful for debugging and tracing.")),
+ ), m.callEndpoint)
+
+ // Add tool for getting all services and endpoints
+ m.server.AddTool(mcp.NewTool("get_services",
+ mcp.WithDescription("Retrieve comprehensive information about all services and their endpoints in the currently open Encore. This includes endpoint schemas, documentation, and service-level metadata."),
+ mcp.WithArray("services",
+ mcp.Items(map[string]any{
+ "type": "string",
+ "description": "Optional list of specific service names to retrieve information for. If not provided, returns information for all services in the currently open Encore.",
+ })),
+ mcp.WithArray("endpoints",
+ mcp.Items(map[string]any{
+ "type": "string",
+ "description": "Optional list of specific endpoint names to filter by. If not provided, returns all endpoints for the specified services.",
+ })),
+ mcp.WithBoolean("include_schemas", mcp.Description("When true, includes detailed request and response schemas for each endpoint. This is useful for understanding the data structures used by the API.")),
+ mcp.WithBoolean("include_service_details", mcp.Description("When true, includes additional service-level information such as middleware, dependencies, and configuration.")),
+ mcp.WithBoolean("include_endpoints", mcp.Description("When true, includes endpoint information in the response. Set to false to get only service-level information.")),
+ ), m.getEndpoints)
+
+ // Add tool for getting middleware metadata
+ m.server.AddTool(mcp.NewTool("get_middleware",
+ mcp.WithDescription("Retrieve detailed information about all middleware components in the currently open Encore, including their configuration, order of execution, and which services/endpoints they affect."),
+ ), m.getMiddleware)
+
+ // Add tool for getting auth handler metadata
+ m.server.AddTool(mcp.NewTool("get_auth_handlers",
+ mcp.WithDescription("Retrieve information about all authentication handlers in the currently open Encore, including their configuration, supported authentication methods, and which services/endpoints they protect."),
+ ), m.getAuthHandlers)
+}
+
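+// callEndpoint implements the call_endpoint tool. It starts the app if it is
+// not already running, waits for it to become healthy, and then forwards the
+// request to the target endpoint.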
+func (m *Manager) callEndpoint(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
+ inst, err := m.getApp(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get app: %w", err)
+ }
+
+ // Extract and validate required arguments
+ serviceStr, ok := request.Params.Arguments["service"].(string)
+ if !ok {
+ return nil, fmt.Errorf("missing or invalid service argument")
+ }
+
+ endpointStr, ok := request.Params.Arguments["endpoint"].(string)
+ if !ok {
+ return nil, fmt.Errorf("missing or invalid endpoint argument")
+ }
+
+ methodStr, ok := request.Params.Arguments["method"].(string)
+ if !ok {
+ return nil, fmt.Errorf("missing or invalid method argument")
+ }
+
+ pathStr, ok := request.Params.Arguments["path"].(string)
+ if !ok {
+ return nil, fmt.Errorf("missing or invalid path argument")
+ }
+
+ // Build API call parameters
+ params := &run.ApiCallParams{
+ AppID: inst.PlatformOrLocalID(),
+ Service: serviceStr,
+ Endpoint: endpointStr,
+ Path: pathStr,
+ Method: methodStr,
+ CorrelationID: "",
+ }
+
+ if !strings.HasPrefix(params.Path, "/") {
+ params.Path = "/" + params.Path
+ }
+
+ // Add optional parameters
+ if payload, ok := request.Params.Arguments["payload"].(string); ok && payload != "" {
+ params.Payload = []byte(payload)
+ }
+ if authToken, ok := request.Params.Arguments["auth_token"].(string); ok && authToken != "" {
+ params.AuthToken = authToken
+ }
+ if authPayload, ok := request.Params.Arguments["auth_payload"].(string); ok && authPayload != "" {
+ params.AuthPayload = []byte(authPayload)
+ }
+ if correlationID, ok := request.Params.Arguments["correlation_id"].(string); ok && correlationID != "" {
+ params.CorrelationID = correlationID
+ }
+ ns, err := m.ns.GetActive(ctx, inst)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get active namespace: %w", err)
+ }
+
+ // Get the app's run instance
+ appRun := m.run.FindRunByAppID(inst.PlatformOrLocalID())
+ if appRun == nil {
+ ln, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return nil, fmt.Errorf("failed to create listener: %w", err)
+ }
+ port := ln.Addr().(*net.TCPAddr).Port
+ appRun, err = m.run.Start(ctx, run.StartParams{
+ App: inst,
+ NS: ns,
+ WorkingDir: "/",
+ Watch: true,
+ Listener: ln,
+ ListenAddr: "127.0.0.1:" + fmt.Sprint(port),
+ Environ: os.Environ(),
+ OpsTracker: nil,
+ Browser: run.BrowserModeNever,
+ Debug: builder.DebugModeDisabled,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to start app run: %w", err)
+ }
+ }
+
+ started := false
+ for !started {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case <-appRun.Done():
+ return nil, fmt.Errorf("app run failed to start")
+ case <-time.After(100 * time.Millisecond):
+ // Check if the app is ready by polling the health endpoint.
+ resp, err := http.Get("http://" + appRun.ListenAddr + "/__encore/healthz")
+ if err != nil {
+ continue
+ }
+ resp.Body.Close()
+ started = resp.StatusCode == http.StatusOK
+ }
+ }
+
+ // Call the API
+ result, err := run.CallAPI(ctx, appRun, params)
+
+ if err != nil {
+ return nil, fmt.Errorf("API call failed: %w", err)
+ }
+
+ // Serialize the response
+ jsonData, err := json.Marshal(result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal response: %w", err)
+ }
+
+ return mcp.NewToolResultText(string(jsonData)), nil
+}
+
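+// getEndpoints implements the get_services tool, returning service and
+// endpoint metadata with optional request/response schemas and service-level
+// details.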
+func (m *Manager) getEndpoints(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
+ inst, err := m.getApp(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get app: %w", err)
+ }
+
+ md, err := inst.CachedMetadata()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get metadata: %w", err)
+ }
+
+ // Get the list of services to retrieve endpoints for
+ var serviceNames []string
+ if services, ok := request.Params.Arguments["services"].([]interface{}); ok {
+ for _, svc := range services {
+ if svcName, ok := svc.(string); ok && svcName != "" {
+ serviceNames = append(serviceNames, svcName)
+ }
+ }
+ }
+
+ // If no services specified, get all services
+ if len(serviceNames) == 0 {
+ for _, svc := range md.Svcs {
+ serviceNames = append(serviceNames, svc.Name)
+ }
+ }
+
+ // Parse request parameters
+ includeEndpoints := true
+ if include, ok := request.Params.Arguments["include_endpoints"].(bool); ok {
+ includeEndpoints = include
+ }
+
+ var endpointNames []string
+ var endpointFilter map[string]bool
+ var hasEndpointFilter bool
+
+ // Only process endpoint filters if we're including endpoints
+ if includeEndpoints {
+ // Get the list of endpoint names to filter by
+ if endpoints, ok := request.Params.Arguments["endpoints"].([]interface{}); ok {
+ for _, ep := range endpoints {
+ if epName, ok := ep.(string); ok && epName != "" {
+ endpointNames = append(endpointNames, epName)
+ }
+ }
+ }
+ // Create a map for faster lookups when filtering endpoints
+ endpointFilter = make(map[string]bool)
+ for _, name := range endpointNames {
+ endpointFilter[name] = true
+ }
+ hasEndpointFilter = len(endpointFilter) > 0
+ }
+
+ includeSchemas := false
+ if include, ok := request.Params.Arguments["include_schemas"].(bool); ok {
+ includeSchemas = include
+ }
+
+ includeServiceDetails := false
+ if include, ok := request.Params.Arguments["include_service_details"].(bool); ok {
+ includeServiceDetails = include
+ }
+
+ // Set up decl map for schema info if needed
+ var declByID map[uint32]*schema.Decl
+ if includeEndpoints && includeSchemas {
+ declByID = map[uint32]*schema.Decl{}
+ for _, decl := range md.Decls {
+ declByID[decl.Id] = decl
+ }
+ }
+
+ // Create a map to store services with their endpoints
+ serviceMap := make(map[string]map[string]interface{})
+
+ // Process each requested service
+ for _, serviceName := range serviceNames {
+ // Find the service in metadata
+ var targetService *metav1.Service
+ for _, svc := range md.Svcs {
+ if svc.Name == serviceName {
+ targetService = svc
+ break
+ }
+ }
+
+ if targetService == nil {
+ // Skip services that don't exist instead of returning an error
+ continue
+ }
+
+ // Initialize service data
+ serviceData := map[string]interface{}{}
+
+ // Add service details if requested
+ if includeServiceDetails {
+ serviceData["name"] = targetService.Name
+ serviceData["rel_path"] = targetService.RelPath
+ serviceData["has_config"] = targetService.HasConfig
+ serviceData["databases"] = targetService.Databases
+ serviceData["rpc_count"] = len(targetService.Rpcs)
+ }
+
+ // Process endpoints if requested
+ if includeEndpoints {
+ // Initialize an empty array for this service's endpoints
+ endpoints := make([]map[string]interface{}, 0)
+
+ // Process all RPCs for this service
+ for _, rpc := range targetService.Rpcs {
+ // Skip this endpoint if it's not in the filter list (when filter is provided)
+ if hasEndpointFilter && !endpointFilter[rpc.Name] {
+ continue
+ }
+
+ endpoint := map[string]interface{}{
+ "name": rpc.Name,
+ "access_type": rpc.AccessType.String(),
+ "http_methods": rpc.HttpMethods,
+ }
+
+ // Add path if available
+ if rpc.Path != nil {
+ pathSegments := make([]string, 0)
+ for _, segment := range rpc.Path.Segments {
+ pathSegments = append(pathSegments, segment.Value)
+ }
+ endpoint["path"] = strings.Join(pathSegments, "/")
+ }
+
+ // Add documentation if available
+ if rpc.Doc != nil {
+ endpoint["doc"] = *rpc.Doc
+ }
+
+ // Include schema information if requested
+ if includeSchemas {
+ schemas := map[string]interface{}{}
+
+ // For request and response schemas
+ if rpc.RequestSchema != nil {
+ str, _ := NamedOrInlineStruct(declByID, rpc.RequestSchema)
+ qry, headers, cookies, body := StructBits(str, rpc.HttpMethods[0], false, false, true)
+ schemas["request_schema"] = strings.Join([]string{"{", qry, headers, cookies, body, "}"}, "")
+ }
+
+ if rpc.ResponseSchema != nil {
+ str, _ := NamedOrInlineStruct(declByID, rpc.ResponseSchema)
+ qry, headers, cookies, body := StructBits(str, rpc.HttpMethods[0], true, false, true)
+ schemas["response_schema"] = strings.Join([]string{"{", qry, headers, cookies, body, "}"}, "")
+ }
+
+ if len(schemas) > 0 {
+ endpoint["schemas"] = schemas
+ }
+ }
+
+ endpoints = append(endpoints, endpoint)
+ }
+
+ // Add endpoints to the service data if any were found
+ if len(endpoints) > 0 {
+ serviceData["endpoints"] = endpoints
+ }
+ }
+
+ // Add service to the result map if it has data or endpoints
+ if len(serviceData) > 0 {
+ serviceMap[serviceName] = serviceData
+ }
+ }
+
+ // Create the result object with services and summary
+ result := map[string]interface{}{
+ "services": serviceMap,
+ "summary": map[string]interface{}{
+ "total_services": len(serviceMap),
+ },
+ }
+
+ // Add endpoint count to summary if we're including endpoints
+ if includeEndpoints {
+ totalEndpoints := 0
+ for _, serviceData := range serviceMap {
+ if endpoints, ok := serviceData["endpoints"].([]map[string]interface{}); ok {
+ totalEndpoints += len(endpoints)
+ }
+ }
+ result["summary"].(map[string]interface{})["total_endpoints"] = totalEndpoints
+ }
+
+ // Add filter information to summary if filters were applied
+ if len(serviceNames) < len(md.Svcs) || (includeEndpoints && hasEndpointFilter) {
+ filters := map[string]interface{}{}
+ if len(serviceNames) < len(md.Svcs) {
+ filters["services"] = serviceNames
+ }
+ if includeEndpoints && hasEndpointFilter {
+ filters["endpoints"] = endpointNames
+ }
+ result["summary"].(map[string]interface{})["filters_applied"] = filters
+ }
+
+ jsonData, err := json.Marshal(result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal services and endpoints: %w", err)
+ }
+
+ return mcp.NewToolResultText(string(jsonData)), nil
+}
+
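+// getMiddleware implements the get_middleware tool, grouping middleware into
+// global and service-specific sets together with their source locations.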
+func (m *Manager) getMiddleware(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
+ inst, err := m.getApp(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get app: %w", err)
+ }
+
+ md, err := inst.CachedMetadata()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get metadata: %w", err)
+ }
+
+ // Find middleware definition locations from trace nodes
+ middlewareDefLocations := make(map[string]map[string]interface{})
+
+ // Scan through all packages to find trace nodes related to middleware
+ for _, pkg := range md.Pkgs {
+ for _, node := range pkg.TraceNodes {
+ // Check for middleware definitions
+ if node.GetMiddlewareDef() != nil {
+ middlewareDef := node.GetMiddlewareDef()
+ middlewareName := middlewareDef.Name
+
+ // Use package path + name as a unique identifier
+ middlewareID := fmt.Sprintf("%s/%s", middlewareDef.PkgRelPath, middlewareName)
+
+ middlewareDefLocations[middlewareID] = map[string]interface{}{
+ "filepath": node.Filepath,
+ "line_start": node.SrcLineStart,
+ "line_end": node.SrcLineEnd,
+ "column_start": node.SrcColStart,
+ "column_end": node.SrcColEnd,
+ "package_path": middlewareDef.PkgRelPath,
+ }
+ }
+ }
+ }
+
+ // Group middleware by type (global vs service-specific)
+ globalMiddleware := make([]map[string]interface{}, 0)
+ serviceMiddleware := make(map[string][]map[string]interface{})
+
+ // Process all middleware
+ for _, middleware := range md.Middleware {
+ middlewareInfo := map[string]interface{}{
+ "doc": middleware.Doc,
+ "global": middleware.Global,
+ }
+
+ // Add qualified name information if available
+ if middleware.Name != nil {
+ name := map[string]interface{}{
+ "package": middleware.Name.Pkg,
+ "name": middleware.Name.Name,
+ }
+ middlewareInfo["name"] = name
+
+ // Add definition location if available
+ middlewareID := fmt.Sprintf("%s/%s", middleware.Name.Pkg, middleware.Name.Name)
+ if location, exists := middlewareDefLocations[middlewareID]; exists {
+ middlewareInfo["definition"] = map[string]interface{}{
+ "filepath": location["filepath"],
+ "line_start": location["line_start"],
+ "line_end": location["line_end"],
+ "column_start": location["column_start"],
+ "column_end": location["column_end"],
+ }
+ }
+ }
+
+ // Add target information if available
+ if len(middleware.Target) > 0 {
+ targets := make([]map[string]interface{}, 0, len(middleware.Target))
+ for _, target := range middleware.Target {
+ targetInfo := map[string]interface{}{
+ "type": target.Type.String(),
+ "value": target.Value,
+ }
+ targets = append(targets, targetInfo)
+ }
+ middlewareInfo["targets"] = targets
+ }
+
+ // Add to the appropriate group
+ if middleware.Global {
+ globalMiddleware = append(globalMiddleware, middlewareInfo)
+ } else if middleware.ServiceName != nil {
+ serviceName := *middleware.ServiceName
+ if _, exists := serviceMiddleware[serviceName]; !exists {
+ serviceMiddleware[serviceName] = make([]map[string]interface{}, 0)
+ }
+ serviceMiddleware[serviceName] = append(serviceMiddleware[serviceName], middlewareInfo)
+ }
+ }
+
+ // Build the final result
+ result := map[string]interface{}{
+ "global": globalMiddleware,
+ "services": serviceMiddleware,
+ "summary": map[string]interface{}{
+ "total_middleware": len(md.Middleware),
+ "global_middleware": len(globalMiddleware),
+ "service_middleware": make(map[string]int),
+ "service_count": len(serviceMiddleware),
+ },
+ }
+
+ // Add counts by service
+ summary := result["summary"].(map[string]interface{})
+ for service, middleware := range serviceMiddleware {
+ summary["service_middleware"].(map[string]int)[service] = len(middleware)
+ }
+
+ // Sort middleware by name for consistent output
+ sort.Slice(globalMiddleware, func(i, j int) bool {
+ nameI := ""
+ nameJ := ""
+ if name, ok := globalMiddleware[i]["name"].(map[string]interface{}); ok {
+ nameI = name["name"].(string)
+ }
+ if name, ok := globalMiddleware[j]["name"].(map[string]interface{}); ok {
+ nameJ = name["name"].(string)
+ }
+ return nameI < nameJ
+ })
+
+ // Sort service middleware as well
+ for service, middleware := range serviceMiddleware {
+ sort.Slice(middleware, func(i, j int) bool {
+ nameI := ""
+ nameJ := ""
+ if name, ok := middleware[i]["name"].(map[string]interface{}); ok {
+ nameI = name["name"].(string)
+ }
+ if name, ok := middleware[j]["name"].(map[string]interface{}); ok {
+ nameJ = name["name"].(string)
+ }
+ return nameI < nameJ
+ })
+ serviceMiddleware[service] = middleware
+ }
+
+ jsonData, err := json.Marshal(result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal middleware information: %w", err)
+ }
+
+ return mcp.NewToolResultText(string(jsonData)), nil
+}
+
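+// getAuthHandlers implements the get_auth_handlers tool, returning the main
+// auth handler and any gateway-specific auth handlers together with their
+// source locations.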
+func (m *Manager) getAuthHandlers(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
+ inst, err := m.getApp(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get app: %w", err)
+ }
+
+ md, err := inst.CachedMetadata()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get metadata: %w", err)
+ }
+
+ // Find auth handler definition locations from trace nodes
+ authHandlerDefLocations := make(map[string]map[string]interface{})
+
+ // Scan through all packages to find trace nodes related to auth handlers
+ for _, pkg := range md.Pkgs {
+ for _, node := range pkg.TraceNodes {
+ // Check for auth handler definitions
+ if node.GetAuthHandlerDef() != nil {
+ authHandlerDef := node.GetAuthHandlerDef()
+ serviceName := authHandlerDef.ServiceName
+ handlerName := authHandlerDef.Name
+
+ // Use service name + handler name as a unique identifier
+ handlerID := fmt.Sprintf("%s/%s", serviceName, handlerName)
+
+ authHandlerDefLocations[handlerID] = map[string]interface{}{
+ "filepath": node.Filepath,
+ "line_start": node.SrcLineStart,
+ "line_end": node.SrcLineEnd,
+ "column_start": node.SrcColStart,
+ "column_end": node.SrcColEnd,
+ "service_name": serviceName,
+ }
+ }
+ }
+ }
+
+ // Process the main auth handler if it exists
+ var mainAuthHandler map[string]interface{}
+ if md.AuthHandler != nil {
+ auth := md.AuthHandler
+
+ authData := map[string]interface{}{
+ "name": auth.Name,
+ "doc": auth.Doc,
+ "service_name": auth.ServiceName,
+ "pkg_path": auth.PkgPath,
+ "pkg_name": auth.PkgName,
+ }
+
+ // Add parameter and auth data type information
+ if auth.Params != nil {
+ paramsData, err := protojson.Marshal(auth.Params)
+ if err == nil {
+ var paramsJson interface{}
+ if err := json.Unmarshal(paramsData, &paramsJson); err == nil {
+ authData["params"] = paramsJson
+ }
+ }
+ }
+
+ if auth.AuthData != nil {
+ authDataTypeData, err := protojson.Marshal(auth.AuthData)
+ if err == nil {
+ var authDataJson interface{}
+ if err := json.Unmarshal(authDataTypeData, &authDataJson); err == nil {
+ authData["auth_data"] = authDataJson
+ }
+ }
+ }
+
+ // Add location information if available
+ handlerID := fmt.Sprintf("%s/%s", auth.ServiceName, auth.Name)
+ if location, exists := authHandlerDefLocations[handlerID]; exists {
+ authData["definition"] = map[string]interface{}{
+ "filepath": location["filepath"],
+ "line_start": location["line_start"],
+ "line_end": location["line_end"],
+ "column_start": location["column_start"],
+ "column_end": location["column_end"],
+ }
+ }
+
+ mainAuthHandler = authData
+ }
+
+ // Process gateway auth handlers
+ gatewayAuthHandlers := make(map[string]map[string]interface{})
+
+ for _, gateway := range md.Gateways {
+ if gateway.Explicit != nil && gateway.Explicit.AuthHandler != nil {
+ auth := gateway.Explicit.AuthHandler
+
+ authData := map[string]interface{}{
+ "name": auth.Name,
+ "doc": auth.Doc,
+ "service_name": auth.ServiceName,
+ "pkg_path": auth.PkgPath,
+ "pkg_name": auth.PkgName,
+ "gateway_name": gateway.EncoreName,
+ }
+
+ // Add parameter and auth data type information
+ if auth.Params != nil {
+ paramsData, err := protojson.Marshal(auth.Params)
+ if err == nil {
+ var paramsJson interface{}
+ if err := json.Unmarshal(paramsData, &paramsJson); err == nil {
+ authData["params"] = paramsJson
+ }
+ }
+ }
+
+ if auth.AuthData != nil {
+ authDataTypeData, err := protojson.Marshal(auth.AuthData)
+ if err == nil {
+ var authDataJson interface{}
+ if err := json.Unmarshal(authDataTypeData, &authDataJson); err == nil {
+ authData["auth_data"] = authDataJson
+ }
+ }
+ }
+
+ // Add location information if available
+ handlerID := fmt.Sprintf("%s/%s", auth.ServiceName, auth.Name)
+ if location, exists := authHandlerDefLocations[handlerID]; exists {
+ authData["definition"] = map[string]interface{}{
+ "filepath": location["filepath"],
+ "line_start": location["line_start"],
+ "line_end": location["line_end"],
+ "column_start": location["column_start"],
+ "column_end": location["column_end"],
+ }
+ }
+
+ gatewayAuthHandlers[gateway.EncoreName] = authData
+ }
+ }
+
+ // Build the final result
+ result := map[string]interface{}{
+ "main_auth_handler": mainAuthHandler,
+ "gateway_auth_handlers": gatewayAuthHandlers,
+ "summary": map[string]interface{}{
+ "has_main_auth": mainAuthHandler != nil,
+ "gateway_count": len(md.Gateways),
+ "auth_gateway_count": len(gatewayAuthHandlers),
+ },
+ }
+
+ jsonData, err := json.Marshal(result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal auth handler information: %w", err)
+ }
+
+ return mcp.NewToolResultText(string(jsonData)), nil
+}
diff --git a/cli/daemon/mcp/bucket_tools.go b/cli/daemon/mcp/bucket_tools.go
new file mode 100644
index 0000000000..d75e12dd97
--- /dev/null
+++ b/cli/daemon/mcp/bucket_tools.go
@@ -0,0 +1,154 @@
+package mcp
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+
+ "github.com/mark3labs/mcp-go/mcp"
+
+ "encr.dev/pkg/emulators/storage/gcsemu"
+)
+
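+// registerBucketTools registers the MCP tools for inspecting storage buckets
+// and their objects.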
+func (m *Manager) registerBucketTools() {
+ m.server.AddTool(mcp.NewTool("get_storage_buckets",
+ mcp.WithDescription("Retrieve comprehensive information about all storage buckets in the currently open Encore, including their configurations, access patterns, and the services that interact with them. This tool helps understand the application's storage architecture and data management strategy."),
+ ), m.getStorageBuckets)
+
+ m.server.AddTool(mcp.NewTool("get_objects",
+ mcp.WithDescription("List and retrieve metadata about objects stored in one or more storage buckets. This tool helps inspect the contents of storage buckets and understand the data stored in them."),
+ mcp.WithArray("buckets",
+ mcp.Items(map[string]any{
+ "type": "string",
+ "description": "List of bucket names to list objects from. Each bucket must be defined in the currently open Encore's storage configuration.",
+ })),
+ ), m.listObjects)
+}
+
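+// listObjects implements the get_objects tool by walking the local object
+// storage emulator's file store for each requested bucket.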
+func (m *Manager) listObjects(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
+ app, err := m.getApp(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get app: %w", err)
+ }
+ clusterNS, err := m.ns.GetActive(ctx, app)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get active namespace: %w", err)
+ }
+ dir, err := m.objects.BaseDir(clusterNS.ID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get base directory: %w", err)
+ }
+ store := gcsemu.NewFileStore(dir)
+ buckets, ok := request.Params.Arguments["buckets"].([]any)
+ if !ok {
+ return nil, fmt.Errorf("buckets is not an array")
+ }
+ objects := map[string][]map[string]interface{}{}
+ for _, bucket := range buckets {
+ bucketName, ok := bucket.(string)
+ if !ok {
+ return nil, fmt.Errorf("bucket name is not a string")
+ }
+ var bucketObjects []map[string]interface{}
+ err = store.Walk(ctx, bucketName, func(ctx context.Context, filename string, fInfo os.FileInfo) error {
+ objectInfo := map[string]interface{}{
+ "name": filename,
+ "size": fInfo.Size(),
+ "last_modified": fInfo.ModTime(),
+ "is_directory": fInfo.IsDir(),
+ }
+ bucketObjects = append(bucketObjects, objectInfo)
+ return nil
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to walk bucket objects: %w", err)
+ }
+ objects[bucketName] = bucketObjects
+ }
+ jsonData, err := json.Marshal(objects)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal object information: %w", err)
+ }
+ return mcp.NewToolResultText(string(jsonData)), nil
+}
+
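+// getStorageBuckets implements the get_storage_buckets tool, combining bucket
+// metadata with per-service usage information.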
+func (m *Manager) getStorageBuckets(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
+ inst, err := m.getApp(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get app: %w", err)
+ }
+
+ md, err := inst.CachedMetadata()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get metadata: %w", err)
+ }
+
+ // Build map of services that use each bucket with their operations
+ bucketUsageByService := make(map[string][]map[string]interface{})
+
+ for _, svc := range md.Svcs {
+ for _, bucketUsage := range svc.Buckets {
+ bucketName := bucketUsage.Bucket
+
+ // Convert operations to strings
+ operations := make([]string, 0, len(bucketUsage.Operations))
+ for _, op := range bucketUsage.Operations {
+ operations = append(operations, op.String())
+ }
+
+ // Create usage info
+ usageInfo := map[string]interface{}{
+ "service_name": svc.Name,
+ "operations": operations,
+ }
+
+ // Add to map
+ if _, exists := bucketUsageByService[bucketName]; !exists {
+ bucketUsageByService[bucketName] = make([]map[string]interface{}, 0)
+ }
+ bucketUsageByService[bucketName] = append(bucketUsageByService[bucketName], usageInfo)
+ }
+ }
+
+ // Collect bucket definition locations from trace nodes
+ bucketDefLocations := make(map[string]map[string]interface{})
+
+ // Find bucket definitions in trace nodes if possible
+ // Currently no specific bucket definition node type in the TraceNode,
+ // so we leave this empty for now. This could be expanded in the future
+ // if the metadata provides better tracking.
+
+ // Process all buckets
+ buckets := make([]map[string]interface{}, 0)
+ for _, bucket := range md.Buckets {
+ bucketInfo := map[string]interface{}{
+ "name": bucket.Name,
+ "versioned": bucket.Versioned,
+ "public": bucket.Public,
+ }
+
+ // Add documentation if available
+ if bucket.Doc != nil {
+ bucketInfo["doc"] = *bucket.Doc
+ }
+
+ // Add location information if available
+ if location, exists := bucketDefLocations[bucket.Name]; exists {
+ bucketInfo["definition"] = location
+ }
+
+ // Add service usage information
+ if usages, exists := bucketUsageByService[bucket.Name]; exists {
+ bucketInfo["service_usage"] = usages
+ } else {
+ bucketInfo["service_usage"] = []map[string]interface{}{}
+ }
+
+ buckets = append(buckets, bucketInfo)
+ }
+
+ jsonData, err := json.Marshal(buckets)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal storage buckets information: %w", err)
+ }
+
+ return mcp.NewToolResultText(string(jsonData)), nil
+}
diff --git a/cli/daemon/mcp/cache_tools.go b/cli/daemon/mcp/cache_tools.go
new file mode 100644
index 0000000000..dfa7a18de9
--- /dev/null
+++ b/cli/daemon/mcp/cache_tools.go
@@ -0,0 +1,152 @@
+package mcp
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/mark3labs/mcp-go/mcp"
+ "google.golang.org/protobuf/encoding/protojson"
+)
+
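+// registerCacheTools registers the MCP tool for inspecting cache keyspaces.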
+func (m *Manager) registerCacheTools() {
+ m.server.AddTool(mcp.NewTool("get_cache_keyspaces",
+ mcp.WithDescription("Retrieve comprehensive information about all cache keyspaces in the currently open Encore, including their configurations, usage patterns, and the services that interact with them. This tool helps understand the application's caching strategy and data access patterns."),
+ ), m.getCacheKeyspaces)
+}
+
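+// getCacheKeyspaces implements the get_cache_keyspaces tool, returning each
+// cache cluster and its keyspaces with best-effort source locations derived
+// from trace nodes.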
+func (m *Manager) getCacheKeyspaces(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
+ inst, err := m.getApp(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get app: %w", err)
+ }
+
+ md, err := inst.CachedMetadata()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get metadata: %w", err)
+ }
+
+ // Find keyspace definition locations from trace nodes
+ keyspaceDefLocations := make(map[string]map[string]map[string]interface{})
+
+ // Scan through all packages to find trace nodes related to cache keyspaces
+ for _, pkg := range md.Pkgs {
+ for _, node := range pkg.TraceNodes {
+ // Check for cache keyspace definitions
+ if node.GetCacheKeyspace() != nil {
+ keyspaceDef := node.GetCacheKeyspace()
+ clusterName := keyspaceDef.ClusterName
+ keyspaceName := keyspaceDef.VarName
+
+ // Initialize maps if needed
+ if _, exists := keyspaceDefLocations[clusterName]; !exists {
+ keyspaceDefLocations[clusterName] = make(map[string]map[string]interface{})
+ }
+
+ if _, exists := keyspaceDefLocations[clusterName][keyspaceName]; !exists {
+ keyspaceDefLocations[clusterName][keyspaceName] = map[string]interface{}{
+ "filepath": node.Filepath,
+ "line_start": node.SrcLineStart,
+ "line_end": node.SrcLineEnd,
+ "column_start": node.SrcColStart,
+ "column_end": node.SrcColEnd,
+ "package_path": keyspaceDef.PkgRelPath,
+ }
+ }
+ }
+ }
+ }
+
+ // Build the result
+ result := make([]map[string]interface{}, 0)
+
+ // Process all cache clusters
+ for _, cluster := range md.CacheClusters {
+ clusterInfo := map[string]interface{}{
+ "name": cluster.Name,
+ "eviction_policy": cluster.EvictionPolicy,
+ "doc": cluster.Doc,
+ }
+
+ // Process keyspaces for this cluster
+ keyspaces := make([]map[string]interface{}, 0)
+ for _, keyspace := range cluster.Keyspaces {
+ keyspaceInfo := map[string]interface{}{
+ "service": keyspace.Service,
+ "doc": keyspace.Doc,
+ }
+
+ // Add key and value type information from protojson
+ if keyspace.KeyType != nil {
+ keyTypeData, err := protojson.Marshal(keyspace.KeyType)
+ if err == nil {
+ var keyTypeJson interface{}
+ if err := json.Unmarshal(keyTypeData, &keyTypeJson); err == nil {
+ keyspaceInfo["key_type"] = keyTypeJson
+ }
+ }
+ }
+
+ if keyspace.ValueType != nil {
+ valueTypeData, err := protojson.Marshal(keyspace.ValueType)
+ if err == nil {
+ var valueTypeJson interface{}
+ if err := json.Unmarshal(valueTypeData, &valueTypeJson); err == nil {
+ keyspaceInfo["value_type"] = valueTypeJson
+ }
+ }
+ }
+
+ // Add path pattern if available
+ if keyspace.PathPattern != nil {
+ pathPattern := make([]string, 0)
+ for _, segment := range keyspace.PathPattern.Segments {
+ pathPattern = append(pathPattern, segment.Value)
+ }
+ keyspaceInfo["path_pattern"] = strings.Join(pathPattern, "/")
+ }
+
+ // Add definition location if available
+ // We need to find the keyspace variable name from the definition data
+ // This is approximate as we don't have a direct mapping in the metadata
+ if locations, ok := keyspaceDefLocations[cluster.Name]; ok {
+ for keyspaceName, location := range locations {
+ // Try to match by service
+ if location["package_path"] != "" && keyspace.Service != "" {
+ // If this location is for a keyspace in this service, add it
+ if packageService := findServiceNameForPackage(md, location["package_path"].(string)); packageService == keyspace.Service {
+ keyspaceInfo["name"] = keyspaceName
+ keyspaceInfo["definition"] = map[string]interface{}{
+ "filepath": location["filepath"],
+ "line_start": location["line_start"],
+ "line_end": location["line_end"],
+ "column_start": location["column_start"],
+ "column_end": location["column_end"],
+ }
+ break
+ }
+ }
+ }
+ }
+
+ keyspaces = append(keyspaces, keyspaceInfo)
+ }
+
+ clusterInfo["keyspaces"] = keyspaces
+ result = append(result, clusterInfo)
+ }
+
+ // Sort by cluster name for consistent output
+ sort.Slice(result, func(i, j int) bool {
+ return result[i]["name"].(string) < result[j]["name"].(string)
+ })
+
+ jsonData, err := json.Marshal(result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal cache keyspaces information: %w", err)
+ }
+
+ return mcp.NewToolResultText(string(jsonData)), nil
+}
diff --git a/cli/daemon/mcp/cron_tools.go b/cli/daemon/mcp/cron_tools.go
new file mode 100644
index 0000000000..de926803ba
--- /dev/null
+++ b/cli/daemon/mcp/cron_tools.go
@@ -0,0 +1,109 @@
+package mcp
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/mark3labs/mcp-go/mcp"
+)
+
+func (m *Manager) registerCronTools() {
+ m.server.AddTool(mcp.NewTool("get_cronjobs",
+ mcp.WithDescription("Retrieve detailed information about all scheduled cron jobs in the currently open Encore app, including their schedules and the endpoints they trigger. This tool helps understand the application's background task scheduling and automation capabilities."),
+ ), m.getCronJobs)
+}
+
+func (m *Manager) getCronJobs(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
+ inst, err := m.getApp(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get app: %w", err)
+ }
+
+ md, err := inst.CachedMetadata()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get metadata: %w", err)
+ }
+
+ // Create a map to find service and endpoint locations
+ endpointLocations := make(map[string]map[string]map[string]interface{})
+
+ // Scan through all packages to find trace nodes related to RPC definitions
+ for _, pkg := range md.Pkgs {
+ for _, node := range pkg.TraceNodes {
+ // Check for RPC definitions
+ if node.GetRpcDef() != nil {
+ rpcDef := node.GetRpcDef()
+ serviceName := rpcDef.ServiceName
+ rpcName := rpcDef.RpcName
+
+ // Initialize maps if needed
+ if _, exists := endpointLocations[serviceName]; !exists {
+ endpointLocations[serviceName] = make(map[string]map[string]interface{})
+ }
+
+ if _, exists := endpointLocations[serviceName][rpcName]; !exists {
+ endpointLocations[serviceName][rpcName] = map[string]interface{}{
+ "filepath": node.Filepath,
+ "line_start": node.SrcLineStart,
+ "line_end": node.SrcLineEnd,
+ "column_start": node.SrcColStart,
+ "column_end": node.SrcColEnd,
+ }
+ }
+ }
+ }
+ }
+
+ // Process cron jobs with location information
+ cronjobs := make([]map[string]interface{}, 0)
+ for _, job := range md.CronJobs {
+ jobInfo := map[string]interface{}{
+ "id": job.Id,
+ "title": job.Title,
+ "schedule": job.Schedule,
+ }
+
+ // Add documentation if available
+ if job.Doc != nil {
+ jobInfo["doc"] = *job.Doc
+ }
+
+ // Add endpoint information
+ if job.Endpoint != nil {
+ endpoint := map[string]interface{}{
+ "package": job.Endpoint.Pkg,
+ "name": job.Endpoint.Name,
+ }
+
+ // If we can find the service for this endpoint, add location info
+ for _, svc := range md.Svcs {
+ for _, rpc := range svc.Rpcs {
+ if rpc.Name == job.Endpoint.Name && (svc.RelPath == job.Endpoint.Pkg || svc.Name == findServiceNameForPackage(md, job.Endpoint.Pkg)) {
+ endpoint["service_name"] = svc.Name
+
+ // Add location if we found it
+ if locations, ok := endpointLocations[svc.Name]; ok {
+ if loc, ok := locations[rpc.Name]; ok {
+ endpoint["definition"] = loc
+ }
+ }
+
+ break
+ }
+ }
+ }
+
+ jobInfo["endpoint"] = endpoint
+ }
+
+ cronjobs = append(cronjobs, jobInfo)
+ }
+
+ jsonData, err := json.Marshal(cronjobs)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal cron jobs information: %w", err)
+ }
+
+ return mcp.NewToolResultText(string(jsonData)), nil
+}
diff --git a/cli/daemon/mcp/db_tools.go b/cli/daemon/mcp/db_tools.go
new file mode 100644
index 0000000000..f9247ae516
--- /dev/null
+++ b/cli/daemon/mcp/db_tools.go
@@ -0,0 +1,310 @@
+package mcp
+
+import (
+ "context"
+ "database/sql"
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/lib/pq"
+ "github.com/mark3labs/mcp-go/mcp"
+
+ "encr.dev/cli/daemon/sqldb"
+ "encr.dev/pkg/fns"
+)
+
+func (m *Manager) registerDatabaseTools() {
+ // Add tool for getting all databases and optionally their tables
+ m.server.AddTool(mcp.NewTool("get_databases",
+ mcp.WithDescription("Retrieve metadata about all SQL databases defined in the currently open Encore app. This tool helps understand the database structure and can optionally include detailed table and column information."),
+ mcp.WithBoolean("include_tables", mcp.Description("When true, includes detailed information about each table in the database, including column names, types, and constraints. This is useful for understanding the complete database schema.")),
+ mcp.WithArray("databases",
+ mcp.Items(map[string]any{
+ "type": "string",
+ "description": "Optional list of specific database names to retrieve information for. If not provided, returns information for all databases in the currently open Encore.",
+ })),
+ ), m.getDatabases)
+
+ // Add tool for querying a database
+ m.server.AddTool(mcp.NewTool("query_database",
+ mcp.WithDescription("Execute SQL queries against one or more databases in the currently open Encore app. Queries run with a read-only database role, so this tool is suited to inspecting data rather than modifying it."),
+ mcp.WithArray("queries",
+ mcp.Items(map[string]any{
+ "type": "object",
+ "description": "Array of query objects, where each object must contain 'database' (the database name to query) and 'query' (the SQL query to execute) fields. Multiple queries can be executed in a single call.",
+ "properties": map[string]any{
+ "database": map[string]any{
+ "type": "string",
+ "description": "The database name to query",
+ },
+ "query": map[string]any{
+ "type": "string",
+ "description": "The SQL query to execute",
+ },
+ },
+ "required": []string{"database", "query"},
+ })),
+ ), m.runQuery)
+}
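+
+// Example arguments for the query_database tool (illustrative only; the
+// database name and SQL are hypothetical):
+//
+//	{"queries": [{"database": "users", "query": "SELECT id, email FROM users LIMIT 5"}]}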
+
+func (m *Manager) getDatabases(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
+ inst, err := m.getApp(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get app: %w", err)
+ }
+
+ md, err := inst.CachedMetadata()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get metadata: %w", err)
+ }
+
+ includeTables := false
+ if includeTablesParam, ok := request.Params.Arguments["include_tables"]; ok {
+ includeTables, _ = includeTablesParam.(bool)
+ }
+
+ // Parse databases parameter if provided
+ var filterDBs map[string]bool
+ if dbsParam, ok := request.Params.Arguments["databases"]; ok && dbsParam != nil {
+ dbsArray, ok := dbsParam.([]interface{})
+ if ok && len(dbsArray) > 0 {
+ filterDBs = make(map[string]bool)
+ for _, db := range dbsArray {
+ if dbName, ok := db.(string); ok {
+ filterDBs[dbName] = true
+ }
+ }
+ }
+ }
+
+ // Build database list
+ databases := make([]map[string]interface{}, 0)
+ for _, db := range md.SqlDatabases {
+ // Skip if we have a filter and this database isn't in it
+ if filterDBs != nil && !filterDBs[db.Name] {
+ continue
+ }
+
+ dbInfo := map[string]interface{}{
+ "name": db.Name,
+ "doc": db.Doc,
+ }
+
+ // If we should include tables, get table information
+ if includeTables {
+ tables, err := m.getTablesForDatabase(ctx, db.Name)
+ if err != nil {
+ // Don't fail the whole request if one database fails
+ dbInfo["tables_error"] = err.Error()
+ } else {
+ dbInfo["tables"] = tables
+ }
+ }
+
+ databases = append(databases, dbInfo)
+ }
+
+ jsonData, err := json.Marshal(databases)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal database list: %w", err)
+ }
+
+ return mcp.NewToolResultText(string(jsonData)), nil
+}
+
+func (m *Manager) getTablesForDatabase(ctx context.Context, dbName string) ([]map[string]interface{}, error) {
+ var tables []map[string]interface{}
+
+ err := m.withConn(ctx, dbName, func(db *sql.DB) error {
+ // Query to get tables and their columns from PostgreSQL
+ query := `
+ SELECT
+ t.table_name,
+ ARRAY_AGG(c.column_name ORDER BY c.ordinal_position) as columns,
+ ARRAY_AGG(c.data_type ORDER BY c.ordinal_position) as column_types
+ FROM
+ information_schema.tables t
+ JOIN
+ information_schema.columns c ON t.table_name = c.table_name AND t.table_schema = c.table_schema
+ WHERE
+ t.table_schema = 'public'
+ GROUP BY
+ t.table_name
+ ORDER BY
+ t.table_name;
+ `
+
+ rows, err := db.QueryContext(ctx, query)
+ if err != nil {
+ return fmt.Errorf("failed to query tables: %w", err)
+ }
+ defer rows.Close()
+
+ tables = []map[string]interface{}{}
+
+ for rows.Next() {
+ var tableName string
+ var columns pq.StringArray
+ var columnTypes pq.StringArray
+
+ if err := rows.Scan(&tableName, &columns, &columnTypes); err != nil {
+ return fmt.Errorf("failed to scan row: %w", err)
+ }
+
+ // Create structured column information
+ columnInfo := make([]map[string]string, len(columns))
+ for i := range columns {
+ columnInfo[i] = map[string]string{
+ "name": columns[i],
+ "type": columnTypes[i],
+ }
+ }
+
+ tables = append(tables, map[string]interface{}{
+ "table_name": tableName,
+ "columns": columnInfo,
+ })
+ }
+
+ if err := rows.Err(); err != nil {
+ return fmt.Errorf("error iterating rows: %w", err)
+ }
+
+ return nil
+ })
+
+ return tables, err
+}
+
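+// withConn starts (or reuses) the app's local database cluster in the active
+// namespace, opens a connection pool to the named database using a role with
+// read access, and invokes fn with the pool. The pool is closed when fn
+// returns.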
+func (m *Manager) withConn(ctx context.Context, dbName string, fn func(db *sql.DB) error) error {
+ app, err := m.getApp(ctx)
+ if err != nil {
+ return fmt.Errorf("failed to get app: %w", err)
+ }
+ clusterNS, err := m.ns.GetActive(ctx, app)
+ if err != nil {
+ return fmt.Errorf("failed to get active namespace: %w", err)
+ }
+ md, err := app.CachedMetadata()
+ if err != nil {
+ return fmt.Errorf("failed to get metadata: %w", err)
+ }
+
+ clusterID := sqldb.GetClusterID(app, sqldb.Run, clusterNS)
+ cluster := m.cluster.Create(ctx, &sqldb.CreateParams{
+ ClusterID: clusterID,
+ Memfs: sqldb.Run.Memfs(),
+ })
+ if _, err := cluster.Start(ctx, nil); err != nil {
+ return err
+ } else if err := cluster.Setup(ctx, app.Root(), md); err != nil {
+ return err
+ }
+
+ info, err := cluster.Info(ctx)
+ if err != nil {
+ return fmt.Errorf("failed to get cluster info: %w", err)
+ } else if info.Status != sqldb.Running {
+ return errors.New("cluster not running")
+ }
+
+ admin, ok := info.Encore.First(sqldb.RoleRead)
+ if !ok {
+ return errors.New("unable to find superuser or admin roles")
+ }
+
+ uri := info.ConnURI(dbName, admin)
+
+ pool, err := sql.Open("pgx", uri)
+ if err != nil {
+ return err
+ }
+ defer fns.CloseIgnore(pool)
+
+ return fn(pool)
+}
+
+func (m *Manager) runQuery(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
+ queriesParam, ok := request.Params.Arguments["queries"].([]interface{})
+ if !ok || len(queriesParam) == 0 {
+ return nil, fmt.Errorf("missing or invalid 'queries' parameter")
+ }
+
+ results := make(map[string][]map[string]interface{})
+
+ for _, queryObj := range queriesParam {
+ queryMap, ok := queryObj.(map[string]interface{})
+ if !ok {
+ continue
+ }
+
+ dbName, ok := queryMap["database"].(string)
+ if !ok || dbName == "" {
+ continue
+ }
+
+ sqlQuery, ok := queryMap["query"].(string)
+ if !ok || sqlQuery == "" {
+ continue
+ }
+
+ // Execute the query for this database
+ var queryResults []map[string]interface{}
+ err := m.withConn(ctx, dbName, func(db *sql.DB) error {
+ rows, err := db.QueryContext(ctx, sqlQuery)
+ if err != nil {
+ return fmt.Errorf("failed to execute query: %w", err)
+ }
+ defer rows.Close()
+
+ // Serialize rows to JSON
+ columns, err := rows.Columns()
+ if err != nil {
+ return fmt.Errorf("failed to get columns: %w", err)
+ }
+
+ queryResults = make([]map[string]interface{}, 0)
+ for rows.Next() {
+ values := make([]interface{}, len(columns))
+ valuePtrs := make([]interface{}, len(columns))
+ for i := range values {
+ valuePtrs[i] = &values[i]
+ }
+
+ if err := rows.Scan(valuePtrs...); err != nil {
+ return fmt.Errorf("failed to scan row: %w", err)
+ }
+
+ row := make(map[string]interface{})
+ for i, col := range columns {
+ row[col] = values[i]
+ }
+ queryResults = append(queryResults, row)
+ }
+
+ if err := rows.Err(); err != nil {
+ return fmt.Errorf("error iterating rows: %w", err)
+ }
+
+ return nil
+ })
+
+ // Store results for this query
+ key := fmt.Sprintf("%s: %s", dbName, sqlQuery)
+ if err != nil {
+ results[key] = []map[string]interface{}{
+ {"error": err.Error()},
+ }
+ } else {
+ results[key] = queryResults
+ }
+ }
+
+ jsonData, err := json.Marshal(results)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal results: %w", err)
+ }
+
+ return mcp.NewToolResultText(string(jsonData)), nil
+}
diff --git a/cli/daemon/mcp/docs_tools.go b/cli/daemon/mcp/docs_tools.go
new file mode 100644
index 0000000000..857a90ebb6
--- /dev/null
+++ b/cli/daemon/mcp/docs_tools.go
@@ -0,0 +1,320 @@
+package mcp
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/algolia/algoliasearch-client-go/v3/algolia/opt"
+ "github.com/algolia/algoliasearch-client-go/v3/algolia/search"
+ "github.com/mark3labs/mcp-go/mcp"
+ "golang.org/x/net/html"
+)
+
+func (m *Manager) registerDocsTools() {
+ // Add tool for searching Encore documentation using Algolia
+ m.server.AddTool(mcp.NewTool("search_docs",
+ mcp.WithDescription("Search the Encore documentation using Algolia's search engine. This tool helps find relevant documentation about Encore features, best practices, and examples."),
+ mcp.WithString("query", mcp.Description("The search query to find relevant documentation. Can include keywords, feature names, or specific topics you're looking for.")),
+ mcp.WithNumber("page", mcp.Description("Page number for pagination, starting from 0. Use this to navigate through large result sets.")),
+ mcp.WithNumber("hits_per_page", mcp.Description("Number of results to return per page. Default is 10. Adjust this to control the size of the result set.")),
+ mcp.WithArray("facet_filters",
+ mcp.Items(map[string]any{
+ "type": "string",
+ "description": "Optional array of facet filters to narrow down search results. These can include categories, tags, or other metadata to refine the search.",
+ })),
+ ), m.searchDocs)
+
+ // Add tool for fetching Encore documentation content
+ m.server.AddTool(mcp.NewTool("get_docs",
+ mcp.WithDescription("Retrieve the full content of specific documentation pages. This tool is useful for getting detailed information about specific topics after finding them with search_docs."),
+ mcp.WithArray("paths",
+ mcp.Items(map[string]any{
+ "type": "string",
+ "description": "List of documentation paths to fetch (e.g. ['/docs/concepts', '/docs/services']). These paths should be valid documentation URLs without the domain.",
+ })),
+ ), m.getDocs)
+}
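+
+// Example arguments (illustrative; the query and paths are hypothetical):
+//
+//	search_docs: {"query": "pubsub subscriptions", "hits_per_page": 5}
+//	get_docs:    {"paths": ["/docs/primitives/pubsub"]}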
+
+func (m *Manager) searchDocs(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
+ // Extract parameters from the request
+ query, ok := request.Params.Arguments["query"].(string)
+ if !ok || query == "" {
+ return nil, fmt.Errorf("invalid or missing query parameter")
+ }
+
+ // Default pagination settings
+ page := 0
+ if p, ok := request.Params.Arguments["page"].(float64); ok {
+ page = int(p)
+ }
+
+ hitsPerPage := 10
+ if hpp, ok := request.Params.Arguments["hits_per_page"].(float64); ok {
+ hitsPerPage = int(hpp)
+ }
+
+ // Process facet filters if provided
+ var facetFilters []string
+ if filters, ok := request.Params.Arguments["facet_filters"].([]interface{}); ok {
+ for _, filter := range filters {
+ if filterStr, ok := filter.(string); ok && filterStr != "" {
+ facetFilters = append(facetFilters, filterStr)
+ }
+ }
+ }
+
+ // Set context timeout
+ ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+ defer cancel()
+
+ // Perform the actual search with Algolia
+ result, err := performAlgoliaSearch(ctx, query, page, hitsPerPage, facetFilters)
+ if err != nil {
+ return nil, fmt.Errorf("failed to search docs: %w", err)
+ }
+
+ // Marshal the response
+ jsonData, err := json.Marshal(result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal search results: %w", err)
+ }
+
+ return mcp.NewToolResultText(string(jsonData)), nil
+}
+
+// performAlgoliaSearch performs the actual search against Algolia
+func performAlgoliaSearch(ctx context.Context, query string, page, hitsPerPage int, facetFilters []string) (map[string]interface{}, error) {
+ // Initialize the Algolia client. The app ID and API key are currently
+ // hardcoded; in a production environment they should be loaded from
+ // configuration.
+ appID := "R7DAHI8GEL"
+ apiKey := "85bf0533142cccdbbc6b9deb92b19fdf"
+
+ client := search.NewClient(appID, apiKey)
+ index := client.InitIndex("encore_docs")
+
+ // Build search parameters
+ params := []interface{}{
+ opt.Page(page),
+ opt.HitsPerPage(hitsPerPage),
+ }
+
+ // Add facet filters if any
+ if len(facetFilters) > 0 {
+ // AND all filters together; the option constructor takes variadic arguments, so convert the []string.
+ if len(facetFilters) == 1 {
+ params = append(params, opt.FacetFilter(facetFilters[0]))
+ } else {
+ // Convert []string to []interface{} for compatibility
+ facetFilterInterfaces := make([]interface{}, len(facetFilters))
+ for i, filter := range facetFilters {
+ facetFilterInterfaces[i] = filter
+ }
+ params = append(params, opt.FacetFilterAnd(facetFilterInterfaces...))
+ }
+ }
+
+ // Perform the search
+ res, err := index.Search(query, params...)
+ if err != nil {
+ return nil, fmt.Errorf("algolia search failed: %w", err)
+ }
+
+ // Convert the Algolia response to our expected format
+ result := map[string]interface{}{
+ "hits": res.Hits,
+ "page": res.Page,
+ "nbHits": res.NbHits,
+ "nbPages": res.NbPages,
+ "hitsPerPage": res.HitsPerPage,
+ "processingTimeMS": res.ProcessingTimeMS,
+ "query": query,
+ "params": res.Params,
+ }
+
+ return result, nil
+}
+
+func (m *Manager) getDocs(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
+ // Extract paths parameter from the request
+ var docPaths []string
+ if paths, ok := request.Params.Arguments["paths"].([]interface{}); ok {
+ for _, path := range paths {
+ if pathStr, ok := path.(string); ok && pathStr != "" {
+ docPaths = append(docPaths, pathStr)
+ }
+ }
+ }
+
+ if len(docPaths) == 0 {
+ return nil, fmt.Errorf("no valid documentation paths provided")
+ }
+
+ // Set context timeout
+ ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
+ defer cancel()
+
+ // Fetch content for each path
+ result := make(map[string]interface{})
+ docs := make(map[string]interface{})
+
+ for _, path := range docPaths {
+ // Ensure path starts with a slash
+ if !strings.HasPrefix(path, "/") {
+ path = "/" + path
+ }
+
+ url := "https://encore.dev" + path
+ content, err := fetchDocContent(ctx, url)
+ if err != nil {
+ docs[path] = map[string]interface{}{
+ "error": err.Error(),
+ "success": false,
+ }
+ } else {
+ docs[path] = map[string]interface{}{
+ "content": content,
+ "url": url,
+ "success": true,
+ }
+ }
+ }
+
+ result["docs"] = docs
+ result["summary"] = map[string]interface{}{
+ "total": len(docPaths),
+ "base_url": "https://encore.dev",
+ "requested_at": time.Now().UTC().Format(time.RFC3339),
+ }
+
+ // Marshal the response
+ jsonData, err := json.Marshal(result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal document results: %w", err)
+ }
+
+ return mcp.NewToolResultText(string(jsonData)), nil
+}
+
+// fetchDocContent fetches a URL and returns only the text content of its <main> tag
+func fetchDocContent(ctx context.Context, url string) (string, error) {
+ req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
+ if err != nil {
+ return "", fmt.Errorf("failed to create request: %w", err)
+ }
+
+ // Add appropriate headers to mimic a browser request
+ req.Header.Set("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36")
+ req.Header.Set("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8")
+
+ client := &http.Client{
+ Timeout: 10 * time.Second,
+ }
+
+ resp, err := client.Do(req)
+ if err != nil {
+ return "", fmt.Errorf("failed to fetch URL: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("received non-OK status code: %d", resp.StatusCode)
+ }
+
+ // Parse the HTML document
+ doc, err := html.Parse(resp.Body)
+ if err != nil {
+ return "", fmt.Errorf("failed to parse HTML: %w", err)
+ }
+
+ // Find the <main> element
+ mainNode := findMainElement(doc)
+ if mainNode == nil {
+ return "", fmt.Errorf("no <main> tag found in the document")
+ }
+
+ // Extract text content from the <main> element
+ var textContent strings.Builder
+ extractText(mainNode, &textContent)
+
+ // Clean up the text content
+ cleanedText := cleanText(textContent.String())
+
+ return cleanedText, nil
+}
+
+// findMainElement finds the <main> element in the HTML document
+func findMainElement(n *html.Node) *html.Node {
+ if n.Type == html.ElementNode && strings.ToLower(n.Data) == "main" {
+ return n
+ }
+
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if result := findMainElement(c); result != nil {
+ return result
+ }
+ }
+
+ return nil
+}
+
+// extractText recursively extracts text nodes from an HTML node
+func extractText(n *html.Node, sb *strings.Builder) {
+ // Skip script, style, and non-visible elements
+ if n.Type == html.ElementNode {
+ nodeName := strings.ToLower(n.Data)
+ if nodeName == "script" || nodeName == "style" || nodeName == "noscript" ||
+ nodeName == "meta" || nodeName == "link" || nodeName == "iframe" {
+ return
+ }
+ }
+
+ // Process text nodes
+ if n.Type == html.TextNode {
+ text := strings.TrimSpace(n.Data)
+ if text != "" {
+ sb.WriteString(text)
+ sb.WriteString(" ")
+ }
+ }
+
+ // Recursively process all child nodes
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ extractText(c, sb)
+ }
+
+ // Add line breaks for certain block elements
+ if n.Type == html.ElementNode {
+ nodeName := strings.ToLower(n.Data)
+ if nodeName == "p" || nodeName == "div" || nodeName == "h1" ||
+ nodeName == "h2" || nodeName == "h3" || nodeName == "h4" ||
+ nodeName == "h5" || nodeName == "h6" || nodeName == "li" ||
+ nodeName == "br" || nodeName == "tr" {
+ sb.WriteString("\n")
+ }
+
+ // Add extra line break for more significant sections
+ if nodeName == "section" || nodeName == "article" ||
+ nodeName == "header" || nodeName == "footer" {
+ sb.WriteString("\n\n")
+ }
+ }
+}
+
+// cleanText removes excessive whitespace and normalizes line breaks
+func cleanText(text string) string {
+ // Replace multiple spaces with a single space
+ text = regexp.MustCompile(`\s+`).ReplaceAllString(text, " ")
+
+ // Replace multiple newlines with a maximum of two
+ text = regexp.MustCompile(`\n{3,}`).ReplaceAllString(text, "\n\n")
+
+ // Trim leading/trailing whitespace
+ text = strings.TrimSpace(text)
+
+ return text
+}
diff --git a/cli/daemon/mcp/mcp.go b/cli/daemon/mcp/mcp.go
new file mode 100644
index 0000000000..b9cb3fc346
--- /dev/null
+++ b/cli/daemon/mcp/mcp.go
@@ -0,0 +1,112 @@
+package mcp
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "net/http"
+
+ "github.com/mark3labs/mcp-go/server"
+
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/cli/daemon/engine/trace2"
+ "encr.dev/cli/daemon/namespace"
+ "encr.dev/cli/daemon/objects"
+ "encr.dev/cli/daemon/run"
+ "encr.dev/cli/daemon/sqldb"
+)
+
+type Manager struct {
+ server *server.MCPServer
+ sse *server.SSEServer
+ cluster *sqldb.ClusterManager
+ ns *namespace.Manager
+ traces trace2.Store
+ run *run.Manager
+ objects *objects.ClusterManager
+ apps *apps.Manager
+
+ BaseURL string
+}
+
+type appContextKey struct{}
+
+type appContext struct {
+ AppID string
+}
+
+func WithAppID(ctx context.Context, appID string) context.Context {
+ return context.WithValue(ctx, appContextKey{}, &appContext{AppID: appID})
+}
+
+func GetAppID(ctx context.Context) (string, bool) {
+ if appCtx, ok := ctx.Value(appContextKey{}).(*appContext); ok {
+ return appCtx.AppID, true
+ }
+ return "", false
+}
+
+func NewManager(apps *apps.Manager, cluster *sqldb.ClusterManager, ns *namespace.Manager, traces trace2.Store, runMgr *run.Manager, baseURL string) *Manager {
+ // Create hooks for handling session registration (none are registered yet)
+ hooks := &server.Hooks{}
+
+ // Create a new MCP server
+ s := server.NewMCPServer(
+ "Encore MCP Server",
+ "1.0.0",
+ server.WithToolCapabilities(false),
+ server.WithHooks(hooks),
+ )
+
+ m := &Manager{
+ server: s,
+ sse: server.NewSSEServer(s,
+ server.WithAppendQueryToMessageEndpoint(),
+ server.WithKeepAlive(true),
+ server.WithHTTPContextFunc(addAppToContext)),
+ apps: apps,
+ ns: ns,
+ cluster: cluster,
+ traces: traces,
+ run: runMgr,
+ BaseURL: baseURL,
+ }
+
+ m.registerDatabaseTools()
+ m.registerTraceTools()
+ m.registerAPITools()
+ m.registerPubSubTools()
+ m.registerSrcTools()
+ m.registerBucketTools()
+ m.registerCacheTools()
+ m.registerMetricsTools()
+ m.registerCronTools()
+ m.registerSecretTools()
+ m.registerDocsTools()
+
+ m.registerTraceResources()
+ return m
+}
+
+func addAppToContext(ctx context.Context, r *http.Request) context.Context {
+ if appID := r.URL.Query().Get("app"); appID != "" {
+ return WithAppID(ctx, appID)
+ }
+ return ctx
+}
+
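+// Serve serves the MCP server over SSE on the given listener. Clients select
+// which application to operate on by passing an "app" query parameter (a
+// platform or local app ID) when connecting; addAppToContext stores it on the
+// request context.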
+func (m *Manager) Serve(listener net.Listener) error {
+ return http.Serve(listener, m.sse)
+}
+
+func (m *Manager) getApp(ctx context.Context) (*apps.Instance, error) {
+ appID, ok := GetAppID(ctx)
+ if !ok {
+ return nil, fmt.Errorf("app not found in context")
+ }
+ inst, err := m.apps.FindLatestByPlatformOrLocalID(appID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to find app: %w", err)
+ }
+ return inst, nil
+}
diff --git a/cli/daemon/mcp/metrics_tools.go b/cli/daemon/mcp/metrics_tools.go
new file mode 100644
index 0000000000..582be10f94
--- /dev/null
+++ b/cli/daemon/mcp/metrics_tools.go
@@ -0,0 +1,125 @@
+package mcp
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "sort"
+
+ "github.com/mark3labs/mcp-go/mcp"
+)
+
+func (m *Manager) registerMetricsTools() {
+ m.server.AddTool(mcp.NewTool("get_metrics",
+ mcp.WithDescription("Retrieve comprehensive information about all metrics defined in the currently open Encore app, including their types, labels, documentation, and usage across services. This tool helps understand the application's observability and monitoring capabilities."),
+ ), m.getMetrics)
+}
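+
+// The result groups metrics by service and is shaped roughly as follows
+// (illustrative; the service name is hypothetical):
+//
+//	{"services": {"users": [...]}, "global": [...],
+//	 "summary": {"total_metrics": 3, "service_count": 1, ...}}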
+
+func (m *Manager) getMetrics(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
+ inst, err := m.getApp(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get app: %w", err)
+ }
+
+ md, err := inst.CachedMetadata()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get metadata: %w", err)
+ }
+
+ // Group metrics by service for better organization
+ metricsByService := make(map[string][]map[string]interface{})
+ globalMetrics := make([]map[string]interface{}, 0)
+
+ // Process all metrics
+ for _, metric := range md.Metrics {
+ metricInfo := map[string]interface{}{
+ "name": metric.Name,
+ "kind": metric.Kind.String(),
+ "value_type": metric.ValueType.String(),
+ "doc": metric.Doc,
+ }
+
+ // Add labels if any
+ if len(metric.Labels) > 0 {
+ labels := make([]map[string]interface{}, 0, len(metric.Labels))
+ for _, label := range metric.Labels {
+ labelInfo := map[string]interface{}{
+ "key": label.Key,
+ "type": label.Type.String(),
+ "doc": label.Doc,
+ }
+ labels = append(labels, labelInfo)
+ }
+ metricInfo["labels"] = labels
+ }
+
+ // Add to appropriate group (service-specific or global)
+ if metric.ServiceName != nil {
+ serviceName := *metric.ServiceName
+ if _, exists := metricsByService[serviceName]; !exists {
+ metricsByService[serviceName] = make([]map[string]interface{}, 0)
+ }
+ metricsByService[serviceName] = append(metricsByService[serviceName], metricInfo)
+ } else {
+ globalMetrics = append(globalMetrics, metricInfo)
+ }
+ }
+
+ // Build the final result
+ result := map[string]interface{}{
+ "services": make(map[string]interface{}),
+ "global": globalMetrics,
+ }
+
+ // Add each service's metrics
+ servicesMap := result["services"].(map[string]interface{})
+ for serviceName, metrics := range metricsByService {
+ // Sort metrics by name within each service
+ sort.Slice(metrics, func(i, j int) bool {
+ return metrics[i]["name"].(string) < metrics[j]["name"].(string)
+ })
+ servicesMap[serviceName] = metrics
+ }
+
+ // Also sort global metrics
+ sort.Slice(globalMetrics, func(i, j int) bool {
+ return globalMetrics[i]["name"].(string) < globalMetrics[j]["name"].(string)
+ })
+
+ // Add summary counts
+ summary := map[string]interface{}{
+ "total_metrics": len(md.Metrics),
+ "global_metrics": len(globalMetrics),
+ "service_count": len(metricsByService),
+ "metrics_by_service": make(map[string]int),
+ "metrics_by_kind": make(map[string]int),
+ "metrics_by_type": make(map[string]int),
+ }
+
+ // Count metrics by service
+ for service, metrics := range metricsByService {
+ summary["metrics_by_service"].(map[string]int)[service] = len(metrics)
+ }
+
+ // Count metrics by kind and type
+ kindCounts := make(map[string]int)
+ typeCounts := make(map[string]int)
+ for _, metric := range md.Metrics {
+ kindStr := metric.Kind.String()
+ kindCounts[kindStr] = kindCounts[kindStr] + 1
+
+ typeStr := metric.ValueType.String()
+ typeCounts[typeStr] = typeCounts[typeStr] + 1
+ }
+ summary["metrics_by_kind"] = kindCounts
+ summary["metrics_by_type"] = typeCounts
+
+ result["summary"] = summary
+
+ jsonData, err := json.Marshal(result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal metrics information: %w", err)
+ }
+
+ return mcp.NewToolResultText(string(jsonData)), nil
+}
diff --git a/cli/daemon/mcp/pubsub_tools.go b/cli/daemon/mcp/pubsub_tools.go
new file mode 100644
index 0000000000..dcfb5e2cd4
--- /dev/null
+++ b/cli/daemon/mcp/pubsub_tools.go
@@ -0,0 +1,170 @@
+package mcp
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/mark3labs/mcp-go/mcp"
+ "google.golang.org/protobuf/encoding/protojson"
+)
+
+func (m *Manager) registerPubSubTools() {
+ m.server.AddTool(mcp.NewTool("get_pubsub",
+ mcp.WithDescription("Retrieve detailed information about all PubSub topics and their subscriptions in the currently open Encore app. This includes topic configurations, subscription patterns, message schemas, and the services that publish to or subscribe to each topic."),
+ ), m.getPubSub)
+}
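+
+// Each topic in the result is shaped roughly as follows (illustrative; the
+// values are hypothetical and optional fields appear only when set):
+//
+//	{"name": "orders", "delivery_guarantee": "AT_LEAST_ONCE",
+//	 "publishers": [{"service_name": "checkout"}],
+//	 "subscriptions": [{"name": "order-emails", "service_name": "email"}]}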
+
+func (m *Manager) getPubSub(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
+ inst, err := m.getApp(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get app: %w", err)
+ }
+
+ md, err := inst.CachedMetadata()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get metadata: %w", err)
+ }
+
+ // Create a map to find topic and subscription definitions from trace nodes
+ topicDefLocations := make(map[string]map[string]interface{})
+ subscriptionDefLocations := make(map[string]map[string]map[string]interface{})
+
+ // Scan through all packages to find trace nodes related to pubsub
+ for _, pkg := range md.Pkgs {
+ for _, node := range pkg.TraceNodes {
+ // Check for topic definition nodes
+ if node.GetPubsubTopicDef() != nil {
+ topicDef := node.GetPubsubTopicDef()
+ if _, exists := topicDefLocations[topicDef.TopicName]; !exists {
+ topicDefLocations[topicDef.TopicName] = map[string]interface{}{
+ "filepath": node.Filepath,
+ "line_start": node.SrcLineStart,
+ "line_end": node.SrcLineEnd,
+ "column_start": node.SrcColStart,
+ "column_end": node.SrcColEnd,
+ }
+ }
+ }
+
+ // Check for subscription definition nodes
+ if node.GetPubsubSubscriber() != nil {
+ subDef := node.GetPubsubSubscriber()
+ if _, exists := subscriptionDefLocations[subDef.TopicName]; !exists {
+ subscriptionDefLocations[subDef.TopicName] = make(map[string]map[string]interface{})
+ }
+
+ if _, exists := subscriptionDefLocations[subDef.TopicName][subDef.SubscriberName]; !exists {
+ subscriptionDefLocations[subDef.TopicName][subDef.SubscriberName] = map[string]interface{}{
+ "filepath": node.Filepath,
+ "line_start": node.SrcLineStart,
+ "line_end": node.SrcLineEnd,
+ "column_start": node.SrcColStart,
+ "column_end": node.SrcColEnd,
+ }
+ }
+ }
+ }
+ }
+
+ // Now build the response with locations
+ topics := make([]map[string]interface{}, 0)
+ for _, topic := range md.PubsubTopics {
+ // Extract publishers
+ publishers := make([]map[string]interface{}, 0)
+ for _, publisher := range topic.Publishers {
+ publishers = append(publishers, map[string]interface{}{
+ "service_name": publisher.ServiceName,
+ })
+ }
+
+ // Extract subscriptions
+ subscriptions := make([]map[string]interface{}, 0)
+ for _, subscription := range topic.Subscriptions {
+ subscriptionInfo := map[string]interface{}{
+ "name": subscription.Name,
+ "service_name": subscription.ServiceName,
+ }
+
+ // Add location information for subscription if available
+ if subLocations, topicExists := subscriptionDefLocations[topic.Name]; topicExists {
+ if subLocation, subExists := subLocations[subscription.Name]; subExists {
+ subscriptionInfo["definition"] = subLocation
+ }
+ }
+
+ // Add optional fields if they're set
+ if subscription.AckDeadline > 0 {
+ subscriptionInfo["ack_deadline"] = formatDuration(subscription.AckDeadline)
+ }
+ if subscription.MessageRetention > 0 {
+ subscriptionInfo["message_retention"] = formatDuration(subscription.MessageRetention)
+ }
+ if subscription.MaxConcurrency != nil {
+ subscriptionInfo["max_concurrency"] = *subscription.MaxConcurrency
+ }
+
+ // Add retry policy if available
+ if subscription.RetryPolicy != nil {
+ retryPolicy := map[string]interface{}{}
+ if subscription.RetryPolicy.MinBackoff > 0 {
+ retryPolicy["min_backoff"] = formatDuration(subscription.RetryPolicy.MinBackoff)
+ }
+ if subscription.RetryPolicy.MaxBackoff > 0 {
+ retryPolicy["max_backoff"] = formatDuration(subscription.RetryPolicy.MaxBackoff)
+ }
+ if subscription.RetryPolicy.MaxRetries > 0 {
+ retryPolicy["max_retries"] = subscription.RetryPolicy.MaxRetries
+ }
+ subscriptionInfo["retry_policy"] = retryPolicy
+ }
+
+ subscriptions = append(subscriptions, subscriptionInfo)
+ }
+
+ // Build topic info
+ topicInfo := map[string]interface{}{
+ "name": topic.Name,
+ "publishers": publishers,
+ "subscriptions": subscriptions,
+ "delivery_guarantee": topic.DeliveryGuarantee.String(),
+ }
+
+ // Add location information for topic if available
+ if location, exists := topicDefLocations[topic.Name]; exists {
+ topicInfo["definition"] = location
+ }
+
+ // Add documentation if available
+ if topic.Doc != nil {
+ topicInfo["doc"] = *topic.Doc
+ }
+
+ // Add ordering key if available
+ if topic.OrderingKey != "" {
+ topicInfo["ordering_key"] = topic.OrderingKey
+ }
+
+ // Add message type if available
+ if topic.MessageType != nil {
+ messageTypeData, err := protojson.Marshal(topic.MessageType)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal message type: %w", err)
+ }
+ var messageTypeJson interface{}
+ if err := json.Unmarshal(messageTypeData, &messageTypeJson); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal message type JSON: %w", err)
+ }
+ topicInfo["message_type"] = messageTypeJson
+ }
+
+ topics = append(topics, topicInfo)
+ }
+
+ jsonData, err := json.Marshal(topics)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal PubSub information: %w", err)
+ }
+
+ return mcp.NewToolResultText(string(jsonData)), nil
+}
diff --git a/cli/daemon/mcp/schema_json.go b/cli/daemon/mcp/schema_json.go
new file mode 100644
index 0000000000..16df30a006
--- /dev/null
+++ b/cli/daemon/mcp/schema_json.go
@@ -0,0 +1,391 @@
+package mcp
+
+import (
+ "bytes"
+ "encoding/json"
+ "net/url"
+ "strconv"
+ "strings"
+
+ schema "encr.dev/proto/encore/parser/schema/v1"
+)
+
+// FieldLocation represents where a field is located in the API request/response
+type FieldLocation int
+
+const (
+ FieldLocationBody FieldLocation = 0
+ FieldLocationQuery FieldLocation = 1
+ FieldLocationHeader FieldLocation = 2
+ FieldLocationCookie FieldLocation = 3
+ FieldLocationUnused FieldLocation = 4
+)
+
+// DescribedField is a field with additional metadata
+type DescribedField struct {
+ *schema.Field
+ SrcName string
+ Name string
+ Location FieldLocation
+}
+
+// StructBits generates JSON representations of a struct's fields separated by location
+// It returns query, headers, cookies, and JSON body as strings
+func StructBits(s *schema.Struct, method string, asResponse bool, asGoStruct bool, queryParamsAsObject bool) (query, headers, cookies, jsonBody string) {
+ // Split the fields by location
+ fieldsByLocation := splitFieldsByLocation(s, method, asResponse)
+
+ // Generate query string
+ if len(fieldsByLocation[FieldLocationQuery]) > 0 {
+ if asGoStruct || queryParamsAsObject {
+ query = writeFieldsAsJSON(fieldsByLocation[FieldLocationQuery], asGoStruct)
+ } else {
+ var queryParams []string
+ for _, field := range fieldsByLocation[FieldLocationQuery] {
+ fieldName := field.Name
+ fieldValue := renderFieldValueAsQueryParam(field.Typ)
+
+ queryParams = append(queryParams, url.QueryEscape(fieldName)+"="+fieldValue)
+
+ // If it's a list, add a second parameter to show it's a list
+ if field.Typ.GetList() != nil {
+ queryParams = append(queryParams, url.QueryEscape(fieldName)+"="+fieldValue)
+ }
+ }
+ query = "?" + strings.Join(queryParams, "&")
+ }
+ }
+
+ // Generate headers
+ if len(fieldsByLocation[FieldLocationHeader]) > 0 {
+ headers = writeFieldsAsJSON(fieldsByLocation[FieldLocationHeader], asGoStruct)
+ }
+
+ // Generate cookies
+ if len(fieldsByLocation[FieldLocationCookie]) > 0 {
+ cookies = writeCookiesAsJSON(fieldsByLocation[FieldLocationCookie], asGoStruct)
+ }
+
+ // Generate JSON body
+ if len(fieldsByLocation[FieldLocationBody]) > 0 {
+ jsonBody = writeFieldsAsJSON(fieldsByLocation[FieldLocationBody], asGoStruct)
+ }
+
+ return
+}
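+
+// As an illustrative sketch (hypothetical fields): for method "GET", a struct
+// field Limit (int, no tags) ends up in the query string as "?Limit=0", while
+// a string field with a header wire tag named "X-User-ID" is rendered into
+// the headers JSON as "X-User-ID": "". Cookies and body stay empty in that
+// case.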
+
+// writeFieldsAsJSON renders a list of fields as a JSON object
+func writeFieldsAsJSON(fields []DescribedField, asGoStruct bool) string {
+ var buf bytes.Buffer
+ buf.WriteString("\n")
+
+ for i, f := range fields {
+ fieldName := f.SrcName
+ if !asGoStruct {
+ fieldName = f.Name
+ }
+
+ buf.WriteString(" \"")
+ buf.WriteString(fieldName)
+ buf.WriteString("\": ")
+
+ renderTypeValue(&buf, f.Typ)
+
+ if i < len(fields)-1 {
+ buf.WriteString(",")
+ }
+ buf.WriteString("\n")
+ }
+
+ return buf.String()
+}
+
+// writeCookiesAsJSON renders cookie fields as JSON
+func writeCookiesAsJSON(fields []DescribedField, asGoStruct bool) string {
+ var buf bytes.Buffer
+ buf.WriteString("\n")
+
+ for i, f := range fields {
+ fieldName := f.SrcName
+ if !asGoStruct {
+ fieldName = f.Name
+ }
+
+ buf.WriteString(" \"")
+ buf.WriteString(fieldName)
+ buf.WriteString("\": ")
+
+ // If it's a builtin, render it normally, otherwise render as an empty string
+ if f.Typ.GetBuiltin() != schema.Builtin_ANY {
+ renderTypeValue(&buf, f.Typ)
+ } else {
+ buf.WriteString("\"\"")
+ }
+
+ if i < len(fields)-1 {
+ buf.WriteString(",")
+ }
+ buf.WriteString("\n")
+ }
+
+ return buf.String()
+}
+
+// renderTypeValue renders a type value to the buffer
+func renderTypeValue(buf *bytes.Buffer, typ *schema.Type) {
+ switch {
+ case typ.GetBuiltin() != schema.Builtin_ANY:
+ renderBuiltinValue(buf, typ.GetBuiltin(), false)
+ case typ.GetList() != nil:
+ buf.WriteString("[")
+ renderTypeValue(buf, typ.GetList().Elem)
+ buf.WriteString("]")
+ case typ.GetStruct() != nil:
+ buf.WriteString("{")
+ // Track whether a field has been written so commas are placed correctly
+ // even when fields with JsonName "-" are skipped.
+ first := true
+ for _, f := range typ.GetStruct().Fields {
+ if f.JsonName == "-" {
+ continue
+ }
+ if !first {
+ buf.WriteString(", ")
+ }
+ first = false
+
+ jsonName := f.JsonName
+ if jsonName == "" {
+ jsonName = f.Name
+ }
+
+ buf.WriteString("\"")
+ buf.WriteString(jsonName)
+ buf.WriteString("\": ")
+
+ renderTypeValue(buf, f.Typ)
+ }
+ buf.WriteString("}")
+ case typ.GetMap() != nil:
+ buf.WriteString("{")
+ renderTypeValue(buf, typ.GetMap().Key)
+ buf.WriteString(": ")
+ renderTypeValue(buf, typ.GetMap().Value)
+ buf.WriteString("}")
+ case typ.GetNamed() != nil:
+ // Just render as null for simplicity
+ buf.WriteString("null")
+ case typ.GetPointer() != nil:
+ renderTypeValue(buf, typ.GetPointer().Base)
+ case typ.GetUnion() != nil && len(typ.GetUnion().Types) > 0:
+ // Just render the first type of the union
+ renderTypeValue(buf, typ.GetUnion().Types[0])
+ case typ.GetLiteral() != nil:
+ renderLiteralValue(buf, typ.GetLiteral())
+ default:
+ buf.WriteString("")
+ }
+}
+
+// renderBuiltinValue renders a builtin type value
+func renderBuiltinValue(buf *bytes.Buffer, b schema.Builtin, urlEncode bool) {
+ var value string
+
+ switch b {
+ case schema.Builtin_ANY:
+ value = ""
+ case schema.Builtin_BOOL:
+ value = "false"
+ case schema.Builtin_INT, schema.Builtin_INT8, schema.Builtin_INT16, schema.Builtin_INT32, schema.Builtin_INT64,
+ schema.Builtin_UINT, schema.Builtin_UINT8, schema.Builtin_UINT16, schema.Builtin_UINT32, schema.Builtin_UINT64:
+ value = "0"
+ case schema.Builtin_FLOAT32, schema.Builtin_FLOAT64:
+ value = "0.0"
+ case schema.Builtin_STRING:
+ value = "\"\""
+ case schema.Builtin_BYTES:
+ value = "\"\" /* base64 */"
+ case schema.Builtin_TIME:
+ value = "\"2009-11-10T23:00:00Z\""
+ case schema.Builtin_UUID:
+ value = "\"7d42f515-3517-4e76-be13-30880443546f\""
+ case schema.Builtin_JSON:
+ value = "{}"
+ case schema.Builtin_USER_ID:
+ value = "\"userID\""
+ case schema.Builtin_DECIMAL:
+ value = "\"0.0\""
+ default:
+ value = ""
+ }
+
+ if urlEncode {
+ // Remove quotes for URL encoding if they exist
+ if len(value) >= 2 && value[0] == '"' && value[len(value)-1] == '"' {
+ value = value[1 : len(value)-1]
+ }
+ buf.WriteString(url.QueryEscape(value))
+ } else {
+ buf.WriteString(value)
+ }
+}
+
+// renderLiteralValue renders a literal value
+func renderLiteralValue(buf *bytes.Buffer, lit *schema.Literal) {
+ switch v := lit.Value.(type) {
+ case *schema.Literal_Boolean:
+ if v.Boolean {
+ buf.WriteString("true")
+ } else {
+ buf.WriteString("false")
+ }
+ case *schema.Literal_Int:
+ buf.WriteString(strconv.FormatInt(v.Int, 10))
+ case *schema.Literal_Float:
+ buf.WriteString(strconv.FormatFloat(v.Float, 'f', -1, 64))
+ case *schema.Literal_Str:
+ jsonStr, _ := json.Marshal(v.Str)
+ buf.Write(jsonStr)
+ case *schema.Literal_Null:
+ buf.WriteString("null")
+ default:
+ buf.WriteString("")
+ }
+}
+
+// renderFieldValueAsQueryParam returns a URL-encoded string representation of a field's value
+func renderFieldValueAsQueryParam(typ *schema.Type) string {
+ var buf bytes.Buffer
+
+ if typ.GetBuiltin() != schema.Builtin_ANY {
+ renderBuiltinValue(&buf, typ.GetBuiltin(), true)
+ } else if typ.GetList() != nil {
+ renderTypeValue(&buf, typ.GetList().Elem)
+ } else {
+ buf.WriteString("")
+ }
+
+ return buf.String()
+}
+
+// splitFieldsByLocation categorizes struct fields by their HTTP location
+func splitFieldsByLocation(s *schema.Struct, method string, asResponse bool) map[FieldLocation][]DescribedField {
+ result := make(map[FieldLocation][]DescribedField)
+
+ for _, f := range s.Fields {
+ name, location := fieldNameAndLocation(f, method, asResponse)
+
+ // Skip unused fields
+ if location == FieldLocationUnused {
+ continue
+ }
+
+ result[location] = append(result[location], DescribedField{
+ Field: f,
+ SrcName: f.Name,
+ Name: name,
+ Location: location,
+ })
+ }
+
+ return result
+}
+
+// fieldNameAndLocation determines the name and location of a field based on HTTP method and tags
+func fieldNameAndLocation(f *schema.Field, method string, asResponse bool) (string, FieldLocation) {
+ // For response, all fields go in the body unless explicitly tagged
+ if asResponse {
+ // Check for explicit wire location
+ if f.Wire != nil {
+ if f.Wire.GetHeader() != nil {
+ name := f.Wire.GetHeader().GetName()
+ if name == "" {
+ name = f.Name
+ }
+ return name, FieldLocationHeader
+ } else if f.Wire.GetQuery() != nil {
+ name := f.Wire.GetQuery().GetName()
+ if name == "" {
+ name = f.Name
+ }
+ return name, FieldLocationQuery
+ }
+ }
+
+ // Default response location is body
+ jsonName := f.JsonName
+ if jsonName == "" {
+ jsonName = f.Name
+ }
+ return jsonName, FieldLocationBody
+ }
+
+ // For request, location depends on method and tags
+ isGetLike := method == "GET" || method == "HEAD" || method == "DELETE"
+
+ // Check for explicit wire location
+ if f.Wire != nil {
+ if f.Wire.GetHeader() != nil {
+ name := f.Wire.GetHeader().GetName()
+ if name == "" {
+ name = f.Name
+ }
+ return name, FieldLocationHeader
+ } else if f.Wire.GetQuery() != nil {
+ name := f.Wire.GetQuery().GetName()
+ if name == "" {
+ name = f.Name
+ }
+ return name, FieldLocationQuery
+ }
+ }
+
+ // Check for Cookie
+ for _, tag := range f.Tags {
+ if tag.Key == "cookie" {
+ name := tag.Name
+ if name == "" {
+ name = f.Name
+ }
+ return name, FieldLocationCookie
+ }
+ }
+
+ // For GET-like methods, fields go in query by default
+ if isGetLike {
+ name := f.QueryStringName
+ if name == "-" {
+ return f.Name, FieldLocationUnused
+ } else if name == "" {
+ name = f.Name
+ }
+ return name, FieldLocationQuery
+ }
+
+ // Default request location for POST/PUT/PATCH is body
+ jsonName := f.JsonName
+ if jsonName == "-" {
+ return f.Name, FieldLocationUnused
+ } else if jsonName == "" {
+ jsonName = f.Name
+ }
+ return jsonName, FieldLocationBody
+}
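+
+// In short, the precedence is: explicit wire location (header or query), then
+// a "cookie" struct tag, then the method default (query for GET/HEAD/DELETE
+// requests, body otherwise; response fields default to the body).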
+
+// NamedOrInlineStruct returns the struct type and type arguments for a named or inline struct.
+// Returns nil if the type is neither a named struct nor an inline struct.
+func NamedOrInlineStruct(meta map[uint32]*schema.Decl, t *schema.Type) (*schema.Struct, []*schema.Type) {
+ if t == nil {
+ return nil, nil
+ }
+
+ if named := t.GetNamed(); named != nil {
+ st := meta[named.Id]
+ if st != nil && st.GetType() != nil {
+ if structType := st.GetType().GetStruct(); structType != nil {
+ return structType, named.GetTypeArguments()
+ }
+ }
+ } else if structType := t.GetStruct(); structType != nil {
+ return structType, []*schema.Type{}
+ }
+
+ return nil, nil
+}
diff --git a/cli/daemon/mcp/secret_tools.go b/cli/daemon/mcp/secret_tools.go
new file mode 100644
index 0000000000..35054c7239
--- /dev/null
+++ b/cli/daemon/mcp/secret_tools.go
@@ -0,0 +1,86 @@
+package mcp
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "sort"
+
+ "github.com/mark3labs/mcp-go/mcp"
+)
+
+func (m *Manager) registerSecretTools() {
+ m.server.AddTool(mcp.NewTool("get_secrets",
+ mcp.WithDescription("Retrieve metadata about all secrets used in the currently open Encore app, including which services and packages reference them. This tool helps understand the application's security requirements and secret management strategy."),
+ ), m.getSecrets)
+}
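+
+// Each element of the result is shaped as follows (illustrative; the names
+// are hypothetical):
+//
+//	{"name": "GitHubToken", "service_count": 1,
+//	 "usages": [{"service_name": "repo", "package_path": "./repo"}]}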
+
+func (m *Manager) getSecrets(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
+ inst, err := m.getApp(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get app: %w", err)
+ }
+
+ md, err := inst.CachedMetadata()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get metadata: %w", err)
+ }
+
+ // Build a map of all secrets and the services that use them
+ secretUsageMap := make(map[string][]map[string]interface{})
+
+ // First go through all packages to find secrets
+ for _, pkg := range md.Pkgs {
+ if len(pkg.Secrets) > 0 && pkg.ServiceName != "" {
+ // For each secret in this package
+ for _, secretName := range pkg.Secrets {
+ // Create usage info
+ usageInfo := map[string]interface{}{
+ "service_name": pkg.ServiceName,
+ "package_path": pkg.RelPath,
+ }
+
+ // Add to the map
+ if _, exists := secretUsageMap[secretName]; !exists {
+ secretUsageMap[secretName] = make([]map[string]interface{}, 0)
+ }
+ secretUsageMap[secretName] = append(secretUsageMap[secretName], usageInfo)
+ }
+ }
+ }
+
+ // Build the result
+ secrets := make([]map[string]interface{}, 0)
+
+ // Convert the map to an array
+ for secretName, usages := range secretUsageMap {
+ secretInfo := map[string]interface{}{
+ "name": secretName,
+ "usages": usages,
+ }
+
+ // Count unique services
+ serviceSet := make(map[string]bool)
+ for _, usage := range usages {
+ if svcName, ok := usage["service_name"].(string); ok {
+ serviceSet[svcName] = true
+ }
+ }
+
+ secretInfo["service_count"] = len(serviceSet)
+
+ secrets = append(secrets, secretInfo)
+ }
+
+ // Sort by name for consistent output
+ sort.Slice(secrets, func(i, j int) bool {
+ return secrets[i]["name"].(string) < secrets[j]["name"].(string)
+ })
+
+ jsonData, err := json.Marshal(secrets)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal secrets information: %w", err)
+ }
+
+ return mcp.NewToolResultText(string(jsonData)), nil
+}
diff --git a/cli/daemon/mcp/src_tools.go b/cli/daemon/mcp/src_tools.go
new file mode 100644
index 0000000000..b8460f6d33
--- /dev/null
+++ b/cli/daemon/mcp/src_tools.go
@@ -0,0 +1,73 @@
+package mcp
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/mark3labs/mcp-go/mcp"
+ "google.golang.org/protobuf/encoding/protojson"
+)
+
+func (m *Manager) registerSrcTools() {
+ // Add tool handlers
+ m.server.AddTool(mcp.NewTool("get_metadata",
+ mcp.WithDescription("Retrieve the complete application metadata, including service definitions, database schemas, API endpoints, and other infrastructure components. This tool provides a comprehensive view of the application's architecture and configuration."),
+ ), m.getMetadata)
+
+ // Add tool handlers
+ m.server.AddTool(mcp.NewTool("get_src_files",
+ mcp.WithDescription("Retrieve the contents of one or more source files from the application. This tool is useful for examining specific parts of the codebase or understanding implementation details."),
+ mcp.WithArray("files", mcp.Items(map[string]any{
+ "type": "string",
+ "description": "List of file paths to retrieve, relative to the application root. Each path should point to a valid source file in the project.",
+ })),
+ ), m.getSrcFiles)
+}
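+
+// Example arguments for get_src_files (illustrative; the path is hypothetical):
+//
+//	{"files": ["users/service.go"]}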
+
+func (m *Manager) getMetadata(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
+ inst, err := m.getApp(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get app: %w", err)
+ }
+ md, err := inst.CachedMetadata()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get metadata: %w", err)
+ }
+ data, err := protojson.Marshal(md)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal metadata: %w", err)
+ }
+
+ return mcp.NewToolResultText(string(data)), nil
+}
+
+func (m *Manager) getSrcFiles(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
+ inst, err := m.getApp(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get app: %w", err)
+ }
+
+ files, ok := request.Params.Arguments["files"].([]any)
+ if !ok || len(files) == 0 {
+ return nil, fmt.Errorf("no files provided")
+ }
+
+ rtn := map[string]string{}
+ for _, file := range files {
+ fileStr, ok := file.(string)
+ if !ok {
+ return nil, fmt.Errorf("file path must be a string, got %T", file)
+ }
+ content, err := os.ReadFile(filepath.Join(inst.Root(), fileStr))
+ if err != nil {
+ return nil, fmt.Errorf("failed to read file: %w", err)
+ }
+ rtn[fileStr] = string(content)
+ }
+
+ jsonData, err := json.Marshal(rtn)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal json: %w", err)
+ }
+
+ return mcp.NewToolResultText(string(jsonData)), nil
+}
diff --git a/cli/daemon/mcp/trace_tools.go b/cli/daemon/mcp/trace_tools.go
new file mode 100644
index 0000000000..745ab5030c
--- /dev/null
+++ b/cli/daemon/mcp/trace_tools.go
@@ -0,0 +1,192 @@
+package mcp
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/mark3labs/mcp-go/mcp"
+
+ "encr.dev/cli/daemon/engine/trace2"
+ tracepb2 "encr.dev/proto/encore/engine/trace2"
+)
+
+func (m *Manager) registerTraceResources() {
+ // Register the trace resources
+ m.server.AddResourceTemplate(mcp.NewResourceTemplate(
+ "trace://{id}",
+ "API trace",
+ mcp.WithTemplateDescription("Retrieve detailed information about a specific trace, including all spans, timing information, and associated metadata. This resource is useful for deep debugging of individual requests."),
+ mcp.WithTemplateMIMEType("application/json"),
+ ), m.getTraceResource)
+}
+
+func (m *Manager) registerTraceTools() {
+ // Add tool for listing traces
+ m.server.AddTool(mcp.NewTool("get_traces",
+ mcp.WithDescription("Retrieve a list of request traces from the application, including their timing, status, and associated metadata. This tool helps understand the flow of requests through the system and diagnose issues."),
+ mcp.WithString("service", mcp.Description("Optional service name to filter traces by. Only returns traces that involve the specified service.")),
+ mcp.WithString("endpoint", mcp.Description("Optional endpoint name to filter traces by. Only returns traces that involve the specified endpoint.")),
+ mcp.WithString("error", mcp.Description("Optional filter for traces with errors. Set to 'true' to see only failed traces, 'false' for successful traces, or omit to see all traces.")),
+ mcp.WithString("limit", mcp.Description("Maximum number of traces to return. Helps manage response size when dealing with many traces.")),
+ mcp.WithString("start_time", mcp.Description("ISO format timestamp to filter traces created after this time. Useful for focusing on recent activity.")),
+ mcp.WithString("end_time", mcp.Description("ISO format timestamp to filter traces created before this time. Useful for focusing on a specific time period.")),
+ ), m.listTraces)
+
+ // Add tool for getting a single trace with all spans
+ m.server.AddTool(mcp.NewTool("get_trace_spans",
+ mcp.WithDescription("Retrieve detailed information about one or more traces, including all spans, timing information, and associated metadata. This tool is useful for deep debugging of individual requests."),
+ mcp.WithArray("trace_ids",
+ mcp.Items(map[string]any{
+ "type": "string",
+ "description": "The unique identifiers of the traces to retrieve. These IDs are returned by the get_traces tool.",
+ })),
+ ), m.getTrace)
+}
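+
+// Example arguments for get_traces (illustrative; the values are
+// hypothetical). The limit is passed as a string and parsed, and timestamps
+// use RFC3339:
+//
+//	{"service": "users", "error": "true", "limit": "20",
+//	 "start_time": "2024-01-01T00:00:00Z"}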
+
+func (m *Manager) listTraces(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
+ inst, err := m.getApp(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get app: %w", err)
+ }
+
+ // Build trace query
+ query := &trace2.Query{
+ AppID: inst.PlatformOrLocalID(),
+ Limit: 100, // Default limit
+ }
+
+ if service, ok := request.Params.Arguments["service"].(string); ok && service != "" {
+ query.Service = service
+ }
+ if endpoint, ok := request.Params.Arguments["endpoint"].(string); ok && endpoint != "" {
+ query.Endpoint = endpoint
+ }
+ if errorStr, ok := request.Params.Arguments["error"].(string); ok && errorStr != "" {
+ if errorStr == "true" {
+ isError := true
+ query.IsError = &isError
+ } else if errorStr == "false" {
+ isError := false
+ query.IsError = &isError
+ }
+ }
+ if limitStr, ok := request.Params.Arguments["limit"].(string); ok && limitStr != "" {
+ var limit int
+ if _, err := fmt.Sscanf(limitStr, "%d", &limit); err == nil && limit > 0 {
+ query.Limit = limit
+ }
+ }
+ if startTime, ok := request.Params.Arguments["start_time"].(string); ok && startTime != "" {
+ if t, err := time.Parse(time.RFC3339, startTime); err == nil {
+ query.StartTime = t
+ }
+ }
+ if endTime, ok := request.Params.Arguments["end_time"].(string); ok && endTime != "" {
+ if t, err := time.Parse(time.RFC3339, endTime); err == nil {
+ query.EndTime = t
+ }
+ }
+
+ // Collect traces
+ var traces []*tracepb2.SpanSummary
+ err = m.traces.List(ctx, query, func(span *tracepb2.SpanSummary) bool {
+ traces = append(traces, span)
+ return true
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to list traces: %w", err)
+ }
+
+ // Convert to JSON
+ jsonData, err := json.Marshal(traces)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal traces: %w", err)
+ }
+
+ return mcp.NewToolResultText(string(jsonData)), nil
+}
+
+func (m *Manager) getTrace(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
+ inst, err := m.getApp(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get app: %w", err)
+ }
+
+ traceIDs, ok := request.Params.Arguments["trace_ids"].([]interface{})
+ if !ok || len(traceIDs) == 0 {
+ return nil, fmt.Errorf("trace_ids is required and must be a non-empty array")
+ }
+
+ result := make(map[string][]*tracepb2.TraceEvent)
+
+ for _, traceIDVal := range traceIDs {
+ traceID, ok := traceIDVal.(string)
+ if !ok || traceID == "" {
+ continue // Skip invalid IDs
+ }
+
+ // Collect all events for the trace
+ var events []*tracepb2.TraceEvent
+ err = m.traces.Get(ctx, inst.PlatformOrLocalID(), traceID, func(event *tracepb2.TraceEvent) bool {
+ events = append(events, event)
+ return true
+ })
+ if err != nil {
+ if errors.Is(err, trace2.ErrNotFound) {
+ // Just skip not found traces
+ continue
+ }
+ return nil, fmt.Errorf("failed to get trace %s: %w", traceID, err)
+ }
+
+ result[traceID] = events
+ }
+
+ // Convert to JSON
+ jsonData, err := json.Marshal(result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal traces: %w", err)
+ }
+
+ return mcp.NewToolResultText(string(jsonData)), nil
+}
+
+func (m *Manager) getTraceResource(ctx context.Context, request mcp.ReadResourceRequest) ([]mcp.ResourceContents, error) {
+ inst, err := m.getApp(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get app: %w", err)
+ }
+
+ traceID := strings.TrimPrefix(request.Params.URI, "trace://")
+
+ // Collect all events for the trace
+ var events []*tracepb2.TraceEvent
+ err = m.traces.Get(ctx, inst.PlatformOrLocalID(), traceID, func(event *tracepb2.TraceEvent) bool {
+ events = append(events, event)
+ return true
+ })
+ if err != nil {
+ if errors.Is(err, trace2.ErrNotFound) {
+ return nil, fmt.Errorf("trace %s not found", traceID)
+ }
+ return nil, fmt.Errorf("failed to get trace %s: %w", traceID, err)
+ }
+
+ // Convert to JSON
+ jsonData, err := json.Marshal(events)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal events: %w", err)
+ }
+
+ return []mcp.ResourceContents{
+ mcp.TextResourceContents{
+ URI: request.Params.URI,
+ MIMEType: "application/json",
+ Text: string(jsonData),
+ },
+ }, nil
+}
diff --git a/cli/daemon/mcp/util.go b/cli/daemon/mcp/util.go
new file mode 100644
index 0000000000..cd844d5218
--- /dev/null
+++ b/cli/daemon/mcp/util.go
@@ -0,0 +1,23 @@
+package mcp
+
+import (
+ "time"
+
+ metav1 "encr.dev/proto/encore/parser/meta/v1"
+)
+
+// findServiceNameForPackage returns the service name for a given package path
+func findServiceNameForPackage(md *metav1.Data, pkgPath string) string {
+ for _, pkg := range md.Pkgs {
+ if pkg.RelPath == pkgPath && pkg.ServiceName != "" {
+ return pkg.ServiceName
+ }
+ }
+ return ""
+}
+
+// formatDuration formats a nanosecond duration into a human-readable string
+func formatDuration(nanos int64) string {
+ duration := time.Duration(nanos) * time.Nanosecond
+ return duration.String()
+}
diff --git a/cli/daemon/namespace.go b/cli/daemon/namespace.go
new file mode 100644
index 0000000000..042467f086
--- /dev/null
+++ b/cli/daemon/namespace.go
@@ -0,0 +1,75 @@
+package daemon
+
+import (
+ "context"
+
+ "github.com/golang/protobuf/ptypes/empty"
+
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/cli/daemon/namespace"
+ "encr.dev/pkg/fns"
+ daemonpb "encr.dev/proto/encore/daemon"
+)
+
+func (s *Server) CreateNamespace(ctx context.Context, req *daemonpb.CreateNamespaceRequest) (*daemonpb.Namespace, error) {
+ app, err := s.apps.Track(req.AppRoot)
+ if err != nil {
+ return nil, err
+ }
+ ns, err := s.ns.Create(ctx, app, namespace.Name(req.Name))
+ if err != nil {
+ return nil, err
+ }
+ return ns.ToProto(), nil
+}
+
+func (s *Server) ListNamespaces(ctx context.Context, req *daemonpb.ListNamespacesRequest) (*daemonpb.ListNamespacesResponse, error) {
+ app, err := s.apps.Track(req.AppRoot)
+ if err != nil {
+ return nil, err
+ }
+ nss, err := s.ns.List(ctx, app)
+ if err != nil {
+ return nil, err
+ }
+ protos := fns.Map(nss, (*namespace.Namespace).ToProto)
+ return &daemonpb.ListNamespacesResponse{Namespaces: protos}, nil
+}
+
+func (s *Server) DeleteNamespace(ctx context.Context, req *daemonpb.DeleteNamespaceRequest) (*empty.Empty, error) {
+ app, err := s.apps.Track(req.AppRoot)
+ if err != nil {
+ return nil, err
+ }
+ if err := s.ns.Delete(ctx, app, namespace.Name(req.Name)); err != nil {
+ return nil, err
+ }
+ return &empty.Empty{}, nil
+}
+
+func (s *Server) SwitchNamespace(ctx context.Context, req *daemonpb.SwitchNamespaceRequest) (*daemonpb.Namespace, error) {
+ app, err := s.apps.Track(req.AppRoot)
+ if err != nil {
+ return nil, err
+ }
+
+ if req.Create {
+ _, err := s.ns.Create(ctx, app, namespace.Name(req.Name))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ ns, err := s.ns.Switch(ctx, app, namespace.Name(req.Name))
+ if err != nil {
+ return nil, err
+ }
+ return ns.ToProto(), nil
+}
+
+func (s *Server) namespaceOrActive(ctx context.Context, app *apps.Instance, ns *string) (*namespace.Namespace, error) {
+ if ns == nil {
+ return s.ns.GetActive(ctx, app)
+ }
+ return s.ns.GetByName(ctx, app, namespace.Name(*ns))
+}
diff --git a/cli/daemon/namespace/namespace.go b/cli/daemon/namespace/namespace.go
new file mode 100644
index 0000000000..d7ca1f1c2f
--- /dev/null
+++ b/cli/daemon/namespace/namespace.go
@@ -0,0 +1,339 @@
+package namespace
+
+import (
+ "context"
+ "database/sql"
+ "time"
+
+ "github.com/cockroachdb/errors"
+ "github.com/rs/xid"
+
+ "encr.dev/cli/daemon/apps"
+ daemonpb "encr.dev/proto/encore/daemon"
+)
+
+var (
+ ErrNotFound = errors.New("namespace not found")
+ ErrActive = errors.New("namespace is active")
+)
+
+type (
+ ID string
+ Name string
+)
+
+func (id ID) String() string { return string(id) }
+
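+// ParseID parses s as a namespace ID, reporting whether it is valid.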
+func ParseID(s string) (ID, bool) {
+ id, err := xid.FromString(s)
+ if err != nil {
+ return "", false
+ }
+ return ID(id.String()), true
+}
+
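+// NewManager creates a new namespace Manager backed by the given database.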
+func NewManager(db *sql.DB) *Manager {
+ return &Manager{db: db}
+}
+
+// Manager manages namespaces.
+type Manager struct {
+ db *sql.DB
+ handlers []DeletionHandler
+}
+
+func (mgr *Manager) RegisterDeletionHandler(h DeletionHandler) {
+ mgr.handlers = append(mgr.handlers, h)
+}
+
+type Namespace struct {
+ ID ID
+ App *apps.Instance
+ Name Name
+ Active bool
+ CreatedAt time.Time
+ LastActiveAt *time.Time
+}
+
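+// Create creates a new namespace for the given app.
+// If the app has no active namespace, the new namespace is marked as active.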
+func (m *Manager) Create(ctx context.Context, app *apps.Instance, name Name) (*Namespace, error) {
+ now := time.Now()
+ id := ID(xid.NewWithTime(now).String())
+
+ tx, err := m.db.BeginTx(ctx, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer tx.Rollback() // committed explicitly on success
+
+ _, err = tx.ExecContext(ctx, `
+ INSERT INTO namespace (id, app_id, name, active, created_at)
+ VALUES (?, ?, ?, ?, ?)
+ `, id, app.PlatformOrLocalID(), name, false, now)
+ if err != nil {
+ return nil, errors.Wrap(err, "create namespace")
+ }
+
+ ns := &Namespace{
+ ID: id,
+ App: app,
+ Name: name,
+ CreatedAt: now,
+ }
+
+ // If there is no active namespace, make this one active.
+ {
+ var activeName string
+ err = tx.QueryRowContext(ctx, `
+ SELECT name FROM namespace WHERE app_id = ? AND active = true
+ `, app.PlatformOrLocalID()).Scan(&activeName)
+ if errors.Is(err, sql.ErrNoRows) {
+ // No active namespace; make this one active.
+ _, err = tx.ExecContext(ctx, `
+ UPDATE namespace
+ SET active = true, last_active_at = ?
+ WHERE id = ?
+ `, now, id)
+ if err != nil {
+ return nil, errors.Wrap(err, "create namespace")
+ }
+ ns.Active = true
+ ns.LastActiveAt = &now
+ } else if err != nil {
+ return nil, errors.Wrap(err, "create namespace")
+ }
+ }
+
+ if err := tx.Commit(); err != nil {
+ return nil, errors.Wrap(err, "create namespace")
+ }
+
+ return ns, nil
+}
+
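+// List reports the app's namespaces, sorted by name.
+// If no namespaces exist, a "default" namespace is created and returned.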
+func (m *Manager) List(ctx context.Context, app *apps.Instance) ([]*Namespace, error) {
+ rows, err := m.db.QueryContext(ctx, `
+ SELECT id, name, active, created_at, last_active_at
+ FROM namespace
+ WHERE app_id = ?
+ ORDER BY name ASC
+ `, app.PlatformOrLocalID())
+ if err != nil {
+ return nil, errors.Wrap(err, "list namespaces")
+ }
+ defer rows.Close()
+ var nss []*Namespace
+
+ for rows.Next() {
+ var ns Namespace
+ if err := rows.Scan(&ns.ID, &ns.Name, &ns.Active, &ns.CreatedAt, &ns.LastActiveAt); err != nil {
+ return nil, errors.Wrap(err, "scan namespace")
+ }
+ ns.App = app
+ nss = append(nss, &ns)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, errors.Wrap(err, "list namespaces")
+ }
+
+ // If we have no namespaces at all, create a default one.
+ if len(nss) == 0 {
+ ns, err := m.Create(ctx, app, "default")
+ if err != nil {
+ return nil, err
+ }
+ nss = []*Namespace{ns}
+ }
+
+ return nss, nil
+}
+
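+// GetByName reports the namespace with the given name, or ErrNotFound if none exists.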
+func (m *Manager) GetByName(ctx context.Context, app *apps.Instance, name Name) (*Namespace, error) {
+ var ns Namespace
+ err := m.db.QueryRowContext(ctx, `
+ SELECT id, name, active, created_at, last_active_at
+ FROM namespace
+ WHERE app_id = ? AND name = ?
+ `, app.PlatformOrLocalID(), name).Scan(&ns.ID, &ns.Name, &ns.Active, &ns.CreatedAt, &ns.LastActiveAt)
+ if err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ return nil, ErrNotFound
+ }
+ return nil, errors.Wrap(err, "get namespace")
+ }
+ ns.App = app
+ return &ns, nil
+}
+
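+// GetByID reports the namespace with the given id, or ErrNotFound if none exists.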
+func (m *Manager) GetByID(ctx context.Context, app *apps.Instance, id ID) (*Namespace, error) {
+ var ns Namespace
+ err := m.db.QueryRowContext(ctx, `
+ SELECT id, name, active, created_at, last_active_at
+ FROM namespace
+ WHERE app_id = ? AND id = ?
+ `, app.PlatformOrLocalID(), id).Scan(&ns.ID, &ns.Name, &ns.Active, &ns.CreatedAt, &ns.LastActiveAt)
+ if err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ return nil, ErrNotFound
+ }
+ return nil, errors.Wrap(err, "get namespace")
+ }
+ ns.App = app
+ return &ns, nil
+}
+
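+// Delete deletes the namespace with the given name.
+// It reports ErrActive if the namespace is currently active,
+// and ErrNotFound if no namespace with that name exists.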
+func (m *Manager) Delete(ctx context.Context, app *apps.Instance, name Name) error {
+ tx, err := m.db.BeginTx(ctx, nil)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback() // committed explicitly on success
+
+ var ns Namespace
+ err = tx.QueryRowContext(ctx, `
+ DELETE FROM namespace
+ WHERE app_id = ? AND name = ?
+ RETURNING id, name, active, created_at, last_active_at
+ `, app.PlatformOrLocalID(), name).Scan(&ns.ID, &ns.Name, &ns.Active, &ns.CreatedAt, &ns.LastActiveAt)
+ if err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ return ErrNotFound
+ }
+ return errors.Wrap(err, "delete namespace")
+ }
+ if ns.Active {
+ return ErrActive
+ }
+ ns.App = app
+
+ // Check all the deletion handlers.
+ for _, h := range m.handlers {
+ if err := h.CanDeleteNamespace(ctx, app, &ns); err != nil {
+ return errors.Newf("cannot delete namespace: %v", err)
+ }
+ }
+
+ // Actually delete the namespace.
+ for _, h := range m.handlers {
+ if err := h.DeleteNamespace(ctx, app, &ns); err != nil {
+ return errors.Newf("failed to delete namespace: %v", err)
+ }
+ }
+
+ err = tx.Commit()
+ return errors.Wrap(err, "delete namespace")
+}
+
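+// Switch marks the namespace with the given name as the app's active namespace.
+// The special name "-" switches to the most recently active namespace.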
+func (m *Manager) Switch(ctx context.Context, app *apps.Instance, name Name) (*Namespace, error) {
+ // Resolve the namespace to switch to.
+ var target *Namespace
+
+ // If the name is "-", switch to the previous namespace.
+ if name == "-" {
+ nss, err := m.List(ctx, app)
+ if err != nil {
+ return nil, err
+ }
+
+ // Find the non-active namespace that was most recently active
+ var lastActive *Namespace
+ for _, ns := range nss {
+ if !ns.Active && ns.LastActiveAt != nil {
+ if lastActive == nil || ns.LastActiveAt.After(*lastActive.LastActiveAt) {
+ lastActive = ns
+ }
+ }
+ }
+
+ if lastActive == nil {
+ return nil, ErrNotFound
+ }
+ target = lastActive
+ } else {
+ var err error
+ target, err = m.GetByName(ctx, app, name)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ tx, err := m.db.BeginTx(ctx, nil)
+ if err != nil {
+ return nil, errors.WithStack(err)
+ }
+ defer tx.Rollback() // committed explicitly on success
+
+ // Mark all namespaces as inactive.
+ _, err = tx.ExecContext(ctx, `
+ UPDATE namespace SET active = false
+ WHERE app_id = ?
+ `, app.PlatformOrLocalID())
+ if err != nil {
+ return nil, errors.Wrap(err, "switch namespace")
+ }
+
+ // Mark the selected namespace as active.
+ _, err = tx.ExecContext(ctx, `
+ UPDATE namespace SET active = true, last_active_at = ?
+ WHERE id = ?
+ `, time.Now(), target.ID)
+ if err != nil {
+ return nil, errors.Wrap(err, "switch namespace")
+ }
+
+ if err := tx.Commit(); err != nil {
+ return nil, errors.Wrap(err, "switch namespace")
+ }
+
+ target.Active = true
+ return target, nil
+}
+
+// GetActive returns the active namespace for the given app.
+func (m *Manager) GetActive(ctx context.Context, app *apps.Instance) (*Namespace, error) {
+ var ns Namespace
+ err := m.db.QueryRowContext(ctx, `
+ SELECT id, name, active, created_at, last_active_at
+ FROM namespace
+ WHERE app_id = ? AND active = true
+ `, app.PlatformOrLocalID()).Scan(&ns.ID, &ns.Name, &ns.Active, &ns.CreatedAt, &ns.LastActiveAt)
+ if err != nil && !errors.Is(err, sql.ErrNoRows) {
+ return nil, err
+ } else if err == nil {
+ ns.App = app
+ return &ns, nil
+ }
+
+ // No active namespace.
+
+ // Do we have any namespaces at all?
+ nss, err := m.List(ctx, app)
+ if err != nil {
+ return nil, err
+ } else if len(nss) > 0 {
+ return m.Switch(ctx, app, nss[0].Name)
+ } else {
+ // No namespaces. Create a new one.
+ return m.Create(ctx, app, "default")
+ }
+}
+
+func (ns *Namespace) ToProto() *daemonpb.Namespace {
+ res := &daemonpb.Namespace{
+ Id: string(ns.ID),
+ Name: string(ns.Name),
+ Active: ns.Active,
+ CreatedAt: ns.CreatedAt.String(),
+ }
+ if ns.LastActiveAt != nil {
+ s := ns.LastActiveAt.String()
+ res.LastActiveAt = &s
+ }
+ return res
+}
+
+// DeletionHandler is the interface for components that want to listen for
+// and handle namespace deletion events.
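+//
+// A minimal sketch of an implementation (the diskState type and its baseDir
+// field are hypothetical; compare ClusterManager in cli/daemon/objects):
+//
+//	type diskState struct{ baseDir string }
+//
+//	func (d *diskState) CanDeleteNamespace(ctx context.Context, app *apps.Instance, ns *Namespace) error {
+//		return nil // nothing blocks deletion
+//	}
+//
+//	func (d *diskState) DeleteNamespace(ctx context.Context, app *apps.Instance, ns *Namespace) error {
+//		return os.RemoveAll(filepath.Join(d.baseDir, ns.ID.String()))
+//	}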
+type DeletionHandler interface {
+ // CanDeleteNamespace is called to determine whether the namespace can be deleted
+ // by the component. To signal the namespace cannot be deleted, return a non-nil error.
+ CanDeleteNamespace(ctx context.Context, app *apps.Instance, ns *Namespace) error
+
+ // DeleteNamespace is called when a namespace is deleted.
+ // Because many components are not transactional, a failure while
+ // handling the deletion cannot be fully rolled back.
+ DeleteNamespace(ctx context.Context, app *apps.Instance, ns *Namespace) error
+}
diff --git a/cli/daemon/objects/manager.go b/cli/daemon/objects/manager.go
new file mode 100644
index 0000000000..1023687d9a
--- /dev/null
+++ b/cli/daemon/objects/manager.go
@@ -0,0 +1,57 @@
+package objects
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/cli/daemon/namespace"
+ "encr.dev/pkg/emulators/storage/gcsemu"
+)
+
+// NewClusterManager creates a new ClusterManager.
+func NewClusterManager(ns *namespace.Manager) *ClusterManager {
+ mgr := &ClusterManager{
+ ns: ns,
+ }
+ return mgr
+}
+
+type ClusterManager struct {
+ ns *namespace.Manager
+}
+
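+// BaseDir reports the directory where objects for the given namespace are persisted.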
+func (cm *ClusterManager) BaseDir(ns namespace.ID) (string, error) {
+ cache, err := os.UserCacheDir()
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(cache, "encore", "objects", ns.String()), nil
+}
+
+// CanDeleteNamespace implements namespace.DeletionHandler.
+func (cm *ClusterManager) CanDeleteNamespace(ctx context.Context, app *apps.Instance, ns *namespace.Namespace) error {
+ return nil
+}
+
+// DeleteNamespace implements namespace.DeletionHandler.
+func (cm *ClusterManager) DeleteNamespace(ctx context.Context, app *apps.Instance, ns *namespace.Namespace) error {
+ baseDir, err := cm.BaseDir(ns.ID)
+ if err == nil {
+ err = os.RemoveAll(baseDir)
+ }
+ return err
+}
+
+// PersistentStoreFallback is a public server fallback handler
+// for resolving stores based on the cluster manager's base directory.
+func (cm *ClusterManager) PersistentStoreFallback(id string) (gcsemu.Store, bool) {
+ if baseDir, err := cm.BaseDir(namespace.ID(id)); err == nil {
+ if _, err := os.Stat(baseDir); err == nil {
+ return gcsemu.NewFileStore(baseDir), true
+ }
+ }
+ return nil, false
+}
diff --git a/cli/daemon/objects/objects.go b/cli/daemon/objects/objects.go
new file mode 100644
index 0000000000..d0b3478917
--- /dev/null
+++ b/cli/daemon/objects/objects.go
@@ -0,0 +1,109 @@
+package objects
+
+import (
+ // nosemgrep
+
+ "fmt"
+ "net"
+ "net/http"
+
+ "encr.dev/cli/daemon/namespace"
+ "encr.dev/pkg/emulators/storage/gcsemu"
+ "github.com/cockroachdb/errors"
+ "github.com/rs/xid"
+ "github.com/rs/zerolog/log"
+ "go4.org/syncutil"
+
+ meta "encr.dev/proto/encore/parser/meta/v1"
+)
+
+type Server struct {
+ id string
+ public *PublicBucketServer
+ startOnce syncutil.Once
+ cancel func() // set by Start
+ store gcsemu.Store
+ emu *gcsemu.GcsEmu
+ ln net.Listener
+ srv *http.Server
+ inMemory bool
+}
+
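+// NewInMemoryServer creates an object storage server backed by an in-memory store.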
+func NewInMemoryServer(public *PublicBucketServer) *Server {
+ id := xid.New().String()
+ store := gcsemu.NewMemStore()
+ return newServer(public, id, store, true)
+}
+
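+// NewDirServer creates an object storage server for the given namespace
+// that persists objects to baseDir.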
+func NewDirServer(public *PublicBucketServer, nsID namespace.ID, baseDir string) *Server {
+ store := gcsemu.NewFileStore(baseDir)
+ return newServer(public, nsID.String(), store, false)
+}
+
+func newServer(public *PublicBucketServer, id string, store gcsemu.Store, isInMem bool) *Server {
+ return &Server{
+ public: public,
+ id: id,
+ store: store,
+ emu: gcsemu.NewGcsEmu(gcsemu.Options{Store: store}),
+ inMemory: isInMem,
+ }
+}
+
+func (s *Server) Initialize(md *meta.Data) error {
+ for _, bucket := range md.Buckets {
+ if err := s.emu.InitBucket(bucket.Name); err != nil {
+ return errors.Wrap(err, "initialize object storage bucket")
+ }
+ }
+ return nil
+}
+
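+// Start starts the emulator on a random localhost port.
+// It is a no-op if the server is already running.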
+func (s *Server) Start() error {
+ return s.startOnce.Do(func() error {
+ if s.inMemory {
+ s.public.Register(s.id, s.store)
+ }
+ mux := http.NewServeMux()
+ ln, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return errors.Wrap(err, "listen tcp")
+ }
+ s.emu.Register(mux)
+ s.ln = ln
+ s.srv = &http.Server{Handler: mux}
+
+ go func() {
+ if err := s.srv.Serve(ln); !errors.Is(err, http.ErrServerClosed) {
+ log.Error().Err(err).Msg("unable to listen to gcs server")
+ }
+ }()
+
+ return nil
+ })
+}
+
+func (s *Server) Stop() {
+ _ = s.srv.Close()
+ if s.inMemory {
+ s.public.Deregister(s.id)
+ }
+}
+
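+// Endpoint reports the emulator's base URL, starting the server if necessary.
+// It panics if the server fails to start.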
+func (s *Server) Endpoint() string {
+ // Ensure the server has been started
+ if err := s.Start(); err != nil {
+ panic(err)
+ }
+ port := s.ln.Addr().(*net.TCPAddr).Port
+ return fmt.Sprintf("http://localhost:%d", port)
+}
+
+func (s *Server) PublicBaseURL() string {
+ return fmt.Sprintf("%s/%s", s.public.BaseAddr(), s.id)
+}
+
+// IsUsed reports whether the application uses object storage at all.
+func IsUsed(md *meta.Data) bool {
+ return len(md.Buckets) > 0
+}
diff --git a/cli/daemon/objects/public.go b/cli/daemon/objects/public.go
new file mode 100644
index 0000000000..f1003909b2
--- /dev/null
+++ b/cli/daemon/objects/public.go
@@ -0,0 +1,211 @@
+package objects
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "google.golang.org/api/storage/v1"
+
+ "encr.dev/pkg/emulators/storage/gcsemu"
+)
+
+// Fallback is a function that returns a store for a given namespace.
+// It is used for resolving namespace ids to stores, where
+// the store is not pre-registered by Register.
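+//
+// A minimal sketch of a fallback (baseDir is a hypothetical directory;
+// compare ClusterManager.PersistentStoreFallback in cli/daemon/objects):
+//
+//	fallback := func(nsID string) (gcsemu.Store, bool) {
+//		dir := filepath.Join(baseDir, nsID)
+//		if _, err := os.Stat(dir); err == nil {
+//			return gcsemu.NewFileStore(dir), true
+//		}
+//		return nil, false
+//	}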
+type Fallback func(namespace string) (gcsemu.Store, bool)
+
+// NewPublicBucketServer creates a new PublicBucketServer.
+// If fallback is nil, no fallback will be used.
+func NewPublicBucketServer(baseAddr string, fallback Fallback) *PublicBucketServer {
+ mux := http.NewServeMux()
+ srv := &PublicBucketServer{
+ mux: mux,
+ baseAddr: baseAddr,
+ fallback: fallback,
+ namespaces: make(map[string]gcsemu.Store),
+ }
+ mux.HandleFunc("/{namespace}/{bucket}/{object...}", srv.handler)
+ return srv
+}
+
+type PublicBucketServer struct {
+ mux *http.ServeMux
+ baseAddr string
+ fallback Fallback
+
+ mu sync.RWMutex
+ namespaces map[string]gcsemu.Store
+}
+
+func (s *PublicBucketServer) Serve(ln net.Listener) error {
+ return http.Serve(ln, s)
+}
+
+func (s *PublicBucketServer) Register(namespace string, store gcsemu.Store) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.namespaces[namespace] = store
+}
+
+func (s *PublicBucketServer) Deregister(namespace string) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ delete(s.namespaces, namespace)
+}
+
+func (s *PublicBucketServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ s.mux.ServeHTTP(w, req)
+}
+
+func (s *PublicBucketServer) BaseAddr() string {
+ return s.baseAddr
+}
+
+func (s *PublicBucketServer) handler(w http.ResponseWriter, req *http.Request) {
+ nsID := req.PathValue("namespace")
+ bucketName := req.PathValue("bucket")
+ objName := req.PathValue("object")
+
+ // Determine which store to use
+ s.mu.RLock()
+ store, ok := s.namespaces[nsID]
+ s.mu.RUnlock()
+ if !ok && s.fallback != nil {
+ store, ok = s.fallback(nsID)
+ }
+ if !ok {
+ http.Error(w, "unknown namespace", http.StatusNotFound)
+ return
+ }
+ switch req.Method {
+ case "OPTIONS":
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+ w.Header().Set("Access-Control-Allow-Methods", "PUT, GET, HEAD")
+ w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Content-Encoding, Date, X-Goog-Generation, X-Goog-Metageneration")
+ w.Header().Set("Access-Control-Expose-Headers", "Content-Type, Content-Length, Content-Encoding, Date, X-Goog-Generation, X-Goog-Metageneration")
+ case "GET", "HEAD":
+ _, isSigned := (queryLowerCase(req))["x-goog-signature"]
+ if isSigned {
+ err := validateGcsSignedRequest(req, time.Now())
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ }
+ obj, contents, err := store.Get("", bucketName, objName)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ } else if obj == nil {
+ http.Error(w, "object not found", http.StatusNotFound)
+ return
+ }
+
+ if obj.ContentType != "" {
+ w.Header().Set("Content-Type", obj.ContentType)
+ }
+ if obj.Etag != "" {
+ w.Header().Set("Etag", obj.Etag)
+ }
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+ w.Header().Set("Access-Control-Expose-Headers", "Content-Type, Content-Length, Content-Encoding, Date, X-Goog-Generation, X-Goog-Metageneration")
+ w.Header().Set("Content-Length", strconv.Itoa(len(contents)))
+ w.Header().Set("Accept-Ranges", "bytes")
+
+ // Only write the body for GET requests, not HEAD
+ if req.Method == "GET" {
+ http.ServeContent(w, req, obj.Name, time.Time{}, bytes.NewReader(contents))
+ }
+ case "PUT":
+ err := validateGcsSignedRequest(req, time.Now())
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ buf, err := io.ReadAll(req.Body)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ metaIn := parseObjectMeta(req)
+ err = store.Add(bucketName, objName, buf, &metaIn)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ // Read back the object so we can add the etag value to the response.
+ metaOut, _, err := store.Get("", bucketName, objName)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+ w.Header().Set("Access-Control-Expose-Headers", "Content-Type, Content-Length, Content-Encoding, Date, X-Goog-Generation, X-Goog-Metageneration")
+ w.Header().Set("Etag", metaOut.Etag)
+ default:
+ http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
+ }
+}
+
+// Only GCS is supported for local development
+func validateGcsSignedRequest(req *http.Request, now time.Time) error {
+ const dateLayout = "20060102T150405Z"
+ const gracePeriod = time.Duration(30) * time.Second
+
+ query := queryLowerCase(req)
+
+ // We don't actually verify the signature; we only check that the signed-URL params are present and non-empty.
+
+ for _, s := range []string{
+ "x-goog-signature",
+ "x-goog-credential",
+ "x-goog-date",
+ "x-goog-expires"} {
+ if len(query[s]) <= 0 {
+ return fmt.Errorf("missing or empty query param %q", s)
+ }
+ }
+
+ t0, err := time.Parse(dateLayout, query["x-goog-date"])
+ if err != nil {
+ return errors.New("failed to parse x-goog-date")
+ }
+ if t0.After(now.Add(gracePeriod)) {
+ return errors.New("URL expiration base date is in the future")
+ }
+
+ td, err := strconv.Atoi(query["x-goog-expires"])
+ if err != nil {
+ return errors.New("failed to parse x-goog-expires value into an integer")
+ }
+ t := t0.Add(time.Duration(td) * time.Second)
+
+ if t.Before(now.Add(-gracePeriod)) {
+ return errors.New("URL is expired")
+ }
+
+ return nil
+}
+
+func queryLowerCase(req *http.Request) map[string]string {
+ query := map[string]string{}
+ for k, vs := range req.URL.Query() {
+ query[strings.ToLower(k)] = vs[0]
+ }
+ return query
+}
+
+func parseObjectMeta(req *http.Request) storage.Object {
+ return storage.Object{ContentType: req.Header.Get("Content-Type")}
+}
diff --git a/cli/daemon/pubsub/nsq.go b/cli/daemon/pubsub/nsq.go
new file mode 100644
index 0000000000..b4f589415e
--- /dev/null
+++ b/cli/daemon/pubsub/nsq.go
@@ -0,0 +1,133 @@
+package pubsub
+
+import (
+ "os"
+ "strings"
+
+ "github.com/cockroachdb/errors"
+ "github.com/nsqio/go-nsq"
+ "github.com/nsqio/nsq/nsqd"
+ "github.com/rs/zerolog"
+ "github.com/rs/zerolog/log"
+ "go4.org/syncutil"
+)
+
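+// NSQDaemon is a locally running NSQ daemon, used to implement Pub/Sub topics during local development.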
+type NSQDaemon struct {
+ nsqd *nsqd.NSQD
+ startOnce syncutil.Once
+
+ Opts *nsqd.Options
+}
+
+func (n *NSQDaemon) Stats() (*nsqd.Stats, error) {
+ if n.nsqd == nil {
+ return nil, errors.New("nsqd not started")
+ }
+ stats := n.nsqd.GetStats("", "", true)
+ return &stats, nil
+}
+
+func (n *NSQDaemon) isReady() error {
+ p, err := nsq.NewProducer(n.Addr(), nsq.NewConfig())
+ if err != nil {
+ return err
+ }
+ p.SetLogger(&logAdapter{"nsq producer"}, nsq.LogLevelWarning)
+ err = p.Ping()
+ p.Stop()
+ if err != nil {
+ return err
+ }
+ return n.nsqd.GetError()
+}
+
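+// Addr reports the TCP address the daemon is listening on.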
+func (n *NSQDaemon) Addr() string {
+ return n.nsqd.RealTCPAddr().String()
+}
+
+func (n *NSQDaemon) Start() error {
+ return n.startOnce.Do(func() error {
+ if n.Opts == nil {
+ n.Opts = nsqd.NewOptions()
+ tmpDir, err := os.MkdirTemp("", "encore-nsqd")
+ if err != nil {
+ return errors.Wrap(err, "failed to create tmp nsqd datapath")
+ }
+ n.Opts.DataPath = tmpDir
+
+ n.Opts.LogLevel = nsqd.LOG_WARN
+ n.Opts.Logger = &logAdapter{"nsqd"}
+
+ // Take the default address options and scope them down to localhost
+ // (to prevent firewall warnings / permission requests), then set the
+ // port to 0 so the OS picks any free port.
+ n.Opts.TCPAddress = "127.0.0.1:0"
+ n.Opts.HTTPAddress = "127.0.0.1:0"
+ n.Opts.HTTPSAddress = "127.0.0.1:0"
+ n.Opts.MaxMsgSize = 10 * 1024 * 1024 // 10MB
+ }
+ nsq, err := nsqd.New(n.Opts)
+ if err != nil {
+ return errors.Wrap(err, "failed to create new nsqd")
+ }
+ n.nsqd = nsq
+ go func() {
+ err = nsq.Main()
+ if err != nil {
+ log.Err(err).Msg("failed to start nsqd")
+ }
+ }()
+ // Ping the daemon to make sure it has started correctly
+ return n.isReady()
+ })
+}
+
+func (n *NSQDaemon) Stop() {
+ if n.nsqd != nil {
+ n.nsqd.Exit()
+ }
+}
+
+type logAdapter struct{ serviceName string }
+
+var _ nsqd.Logger = (*logAdapter)(nil)
+
+func (l *logAdapter) Output(maxdepth int, s string) error {
+ // Attempt to extract the level, start with cutting on ":"
+ lvl, logMsg, found := strings.Cut(s, ":")
+ if !found || strings.Contains(lvl, " ") {
+ // then if that fails or we have a space in that cut, try cutting on the first space
+ newLvl, suffix, _ := strings.Cut(lvl, " ")
+ lvl = newLvl
+
+ if found {
+ logMsg = suffix + ":" + logMsg
+ } else {
+ logMsg = suffix
+ }
+ }
+
+ // Attempt to convert the level string to a zerolog level
+ logLevel := l.OutputLevel(lvl)
+ if logLevel == zerolog.NoLevel {
+ // and if that fails, then just log the message
+ logMsg = s
+ }
+
+ log.WithLevel(logLevel).Str("service", l.serviceName).Msg(strings.TrimSpace(logMsg))
+
+ return nil
+}
+
+func (l *logAdapter) OutputLevel(lvl string) zerolog.Level {
+ switch strings.ToLower(lvl) {
+ case "debug", "dbg":
+ return zerolog.DebugLevel
+ case "info", "inf":
+ return zerolog.InfoLevel
+ case "warn", "wrn":
+ return zerolog.WarnLevel
+ case "error", "err":
+ return zerolog.ErrorLevel
+ case "fatal":
+ return zerolog.FatalLevel
+ default:
+ log.Warn().Msg("unknown level: " + lvl)
+ return zerolog.NoLevel
+ }
+}
diff --git a/cli/daemon/pubsub/utils.go b/cli/daemon/pubsub/utils.go
new file mode 100644
index 0000000000..ca89e4eb4f
--- /dev/null
+++ b/cli/daemon/pubsub/utils.go
@@ -0,0 +1,10 @@
+package pubsub
+
+import (
+ meta "encr.dev/proto/encore/parser/meta/v1"
+)
+
+// IsUsed reports whether the application uses pubsub at all.
+func IsUsed(md *meta.Data) bool {
+ return len(md.PubsubTopics) > 0
+}
diff --git a/cli/daemon/redis/redis.go b/cli/daemon/redis/redis.go
new file mode 100644
index 0000000000..439cd47d93
--- /dev/null
+++ b/cli/daemon/redis/redis.go
@@ -0,0 +1,104 @@
+package redis
+
+import (
+ mathrand "math/rand" // nosemgrep
+ "time"
+
+ "github.com/alicebob/miniredis/v2"
+ "github.com/cockroachdb/errors"
+ "go4.org/syncutil"
+
+ meta "encr.dev/proto/encore/parser/meta/v1"
+)
+
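+// Server is an in-memory Redis server, implemented with miniredis, used for local development.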
+type Server struct {
+ startOnce syncutil.Once
+ mini *miniredis.Miniredis
+ cleanup *time.Ticker
+ quit chan struct{}
+ addr string
+}
+
+const tickInterval = 1 * time.Second
+
+func New() *Server {
+ return &Server{
+ mini: miniredis.NewMiniRedis(),
+ quit: make(chan struct{}),
+ }
+}
+
+func (s *Server) Start() error {
+ return s.startOnce.Do(func() error {
+ if err := s.mini.Start(); err != nil {
+ return errors.Wrap(err, "failed to start redis server")
+ }
+ s.addr = s.mini.Addr()
+ s.cleanup = time.NewTicker(tickInterval)
+ go s.doCleanup()
+ return nil
+ })
+}
+
+func (s *Server) Stop() {
+ s.mini.Close()
+ s.cleanup.Stop()
+ close(s.quit)
+}
+
+func (s *Server) Miniredis() *miniredis.Miniredis {
+ return s.mini
+}
+
+func (s *Server) Addr() string {
+ // Ensure the server has been started
+ if err := s.Start(); err != nil {
+ panic(err)
+ }
+ return s.addr
+}
+
+func (s *Server) doCleanup() {
+ var acc time.Duration
+ const cleanupInterval = 15 * time.Second
+
+ for {
+ select {
+ case <-s.quit:
+ return
+ case <-s.cleanup.C:
+ }
+ s.mini.FastForward(tickInterval)
+
+ // Clean up keys every so often
+ acc += tickInterval
+ if acc > cleanupInterval {
+ acc -= cleanupInterval
+ s.clearKeys()
+ }
+ }
+}
+
+// clearKeys clears random keys to get the redis server
+// down to 100 persisted keys, as a simple way to bound
+// the max memory usage.
+func (s *Server) clearKeys() {
+ const maxKeys = 100
+ keys := s.mini.Keys()
+ if n := len(keys); n > maxKeys {
+ toDelete := n - maxKeys
+ deleted := 0
+ for deleted < toDelete {
+ id := mathrand.Intn(len(keys))
+ if keys[id] != "" {
+ s.mini.Del(keys[id])
+ keys[id] = "" // mark it as deleted
+ deleted++
+ }
+ }
+ }
+}
+
+// IsUsed reports whether the application uses redis at all.
+func IsUsed(md *meta.Data) bool {
+ return len(md.CacheClusters) > 0
+}
diff --git a/cli/daemon/run.go b/cli/daemon/run.go
index 5f7bb56582..d40d774278 100644
--- a/cli/daemon/run.go
+++ b/cli/daemon/run.go
@@ -1,281 +1,266 @@
package daemon
import (
- "context"
+ "encoding/json"
"fmt"
- "io/ioutil"
- "os/exec"
- "path/filepath"
+ "net"
+ "net/url"
+ "os"
+ "strings"
"time"
- "encr.dev/cli/daemon/internal/appfile"
- "encr.dev/cli/daemon/internal/manifest"
+ "github.com/logrusorgru/aurora/v3"
+ "github.com/rs/zerolog/log"
+
"encr.dev/cli/daemon/run"
- "encr.dev/cli/daemon/sqldb"
- "encr.dev/parser"
+ "encr.dev/internal/optracker"
+ "encr.dev/internal/userconfig"
+ "encr.dev/internal/version"
+ "encr.dev/pkg/fns"
daemonpb "encr.dev/proto/encore/daemon"
- meta "encr.dev/proto/encore/parser/meta/v1"
- "github.com/rs/zerolog/log"
- "golang.org/x/mod/modfile"
)
// Run runs the application.
func (s *Server) Run(req *daemonpb.RunRequest, stream daemonpb.Daemon_RunServer) error {
- sendErr := func(err error) {
- stream.Send(&daemonpb.RunMessage{
- Msg: &daemonpb.RunMessage_Output{Output: &daemonpb.CommandOutput{
- Stderr: []byte(err.Error() + "\n"),
+ ctx := stream.Context()
+ slog := &streamLog{stream: stream, buffered: true}
+ stderr := slog.Stderr(false)
+
+ sendExit := func(code int32) {
+ _ = stream.Send(&daemonpb.CommandMessage{
+ Msg: &daemonpb.CommandMessage_Exit{Exit: &daemonpb.CommandExit{
+ Code: code,
}},
})
- stream.Send(&daemonpb.RunMessage{
- Msg: &daemonpb.RunMessage_Exit{Exit: &daemonpb.CommandExit{
- Code: 1,
- }},
- })
- }
-
- // Prefetch secrets if the app is linked.
- if appSlug, err := appfile.Slug(req.AppRoot); err == nil && appSlug != "" {
- s.sm.Prefetch(appSlug)
}
- // Parse the app to figure out what infrastructure is needed.
- parse, err := s.parseApp(req.AppRoot, req.WorkingDir, false)
+ userConfig, err := userconfig.ForApp(req.AppRoot).Get()
if err != nil {
- sendErr(err)
+ _, _ = fmt.Fprintln(stderr, aurora.Sprintf(aurora.Red("failed to load config: %v"), err))
+ sendExit(1)
return nil
}
- man, err := manifest.ReadOrCreate(req.AppRoot)
+ ctx, tracer, err := s.beginTracing(ctx, req.AppRoot, req.WorkingDir, req.TraceFile)
if err != nil {
- sendErr(err)
+ _, _ = fmt.Fprintln(stderr, aurora.Sprintf(aurora.Red("failed to begin tracing: %v"), err))
+ sendExit(1)
return nil
}
- s.cacheAppRoot(man.AppID, req.AppRoot)
+ defer fns.CloseIgnore(tracer)
- clusterID := man.AppID
- dbSetupErr := make(chan error, 1)
-
- // Set up the database only if the app requires it.
- if requiresSQLDB(parse.Meta) {
- cluster := s.cm.Init(stream.Context(), &sqldb.InitParams{
- ClusterID: clusterID,
- Meta: parse.Meta,
- Memfs: false,
- })
- if _, err := exec.LookPath("docker"); err != nil {
- sendErr(fmt.Errorf("This application requires docker to run since it uses an SQL database. Install docker first."))
- return nil
+ // ListenAddr should always be passed but guard against old clients.
+ listenAddr := req.ListenAddr
+ if listenAddr == "" {
+ listenAddr = ":4000"
+ }
+ ln, err := net.Listen("tcp", listenAddr)
+ if err != nil {
+ if errIsAddrInUse(err) {
+ _, _ = fmt.Fprintln(stderr, aurora.Sprintf(aurora.Red("Failed to run on %s - port is already in use"), listenAddr))
+ } else {
+ _, _ = fmt.Fprintln(stderr, aurora.Sprintf(aurora.Red("Failed to run on %s - %v"), listenAddr, err))
}
- if err := cluster.Start(streamLog{stream: runStreamAdapter{stream}}); err != nil {
- sendErr(fmt.Errorf("Database setup failed: %v", err))
- return nil
+ if host, port, ok := findAvailableAddr(listenAddr); ok {
+ if host == "localhost" || host == "127.0.0.1" {
+ _, _ = fmt.Fprintf(stderr, "Note: port %d is available; specify %s to use it\n",
+ port, aurora.Sprintf(aurora.Cyan("--port=%d"), port))
+ } else {
+ _, _ = fmt.Fprintf(stderr, "Note: address %s:%d is available; specify %s to use it\n",
+ host, port, aurora.Sprintf(aurora.Cyan("--listen=%s:%d"), host, port))
+ }
+ } else {
+ _, _ = fmt.Fprintf(stderr, "Note: specify %s to run on another port\n",
+ aurora.Cyan("--port=NUMBER"))
}
- // Set up the database asynchronously since it can take a while.
- go func() {
- if err := cluster.CreateAndMigrate(stream.Context(), req.AppRoot, parse.Meta); err != nil {
- dbSetupErr <- err
- }
- }()
+ sendExit(1)
+ return nil
}
+ defer fns.CloseIgnore(ln)
- // Hold the stream mutex so we can set up the stream map
- // before output starts.
- s.mu.Lock()
- run, err := s.mgr.Start(stream.Context(), run.StartParams{
- AppRoot: req.AppRoot,
- AppID: man.AppID,
- WorkingDir: req.WorkingDir,
- DBClusterID: clusterID,
- Parse: parse,
- Watch: req.Watch,
- })
+ app, err := s.apps.Track(req.AppRoot)
if err != nil {
- s.mu.Unlock()
- sendErr(err)
+ _, _ = fmt.Fprintln(stderr, aurora.Sprintf(aurora.Red("failed to resolve app: %v"), err))
+ sendExit(1)
return nil
}
- s.streams[run.ID] = stream
- s.mu.Unlock()
- pid := run.Proc().Pid
- _ = stream.Send(&daemonpb.RunMessage{
- Msg: &daemonpb.RunMessage_Started{Started: &daemonpb.RunStarted{
- RunId: run.ID,
- Pid: int32(pid),
- Port: int32(run.Port),
- }},
- })
-
- // Wait for the run to close, or the database setup to fail.
- select {
- case <-run.Done():
- case err := <-dbSetupErr:
- log.Error().Err(err).Str("appID", run.AppID).Msg("failed to setup db")
- sendErr(fmt.Errorf("Database setup failed: %v", err))
+ ns, err := s.namespaceOrActive(ctx, app, req.Namespace)
+ if err != nil {
+ _, _ = fmt.Fprintln(stderr, aurora.Sprintf(aurora.Red("failed to resolve namespace: %v"), err))
+ sendExit(1)
+ return nil
}
- s.mu.Lock()
- delete(s.streams, run.ID)
- s.mu.Unlock()
- return nil
-}
+ ops := optracker.New(stderr, stream)
+ defer ops.AllDone() // Kill the tracker when we exit this function
-// Test runs tests.
-func (s *Server) Test(req *daemonpb.TestRequest, stream daemonpb.Daemon_TestServer) error {
- sendErr := func(err error) {
- stream.Send(&daemonpb.CommandMessage{
- Msg: &daemonpb.CommandMessage_Output{Output: &daemonpb.CommandOutput{
- Stderr: []byte(err.Error() + "\n"),
- }},
- })
- streamExit(stream, 1)
- }
+ // Check for available update before we start the proc
+ // so the output from the proc doesn't race with our
+ // prints below.
+ newVer := s.availableUpdate()
- // Prefetch secrets if the app is linked.
- if appSlug, err := appfile.Slug(req.AppRoot); err == nil && appSlug != "" {
- s.sm.Prefetch(appSlug)
- }
+ // If force upgrade has been enabled, force the upgrade now before we try to run the app.
+ if newVer != nil && newVer.ForceUpgrade {
+ _, _ = fmt.Fprintf(stderr, aurora.Red("An urgent security update for Encore is available.").String()+"\n")
+ if newVer.SecurityNotes != "" {
+ _, _ = fmt.Fprintf(stderr, aurora.Sprintf(aurora.Yellow("%s"), newVer.SecurityNotes)+"\n")
+ }
- // Parse the app to figure out what infrastructure is needed.
- parse, err := s.parseApp(req.AppRoot, req.WorkingDir, true /* parse tests */)
- if err != nil {
- sendErr(err)
- return nil
- }
+ _, _ = fmt.Fprintf(stderr, "Upgrading Encore to %v...\n", newVer.Version())
+ if err := newVer.DoUpgrade(stderr, stderr); err != nil {
+ _, _ = fmt.Fprintf(stderr, aurora.Sprintf(aurora.Red("Upgrade failed: %v"), err)+"\n")
+ }
- man, err := manifest.ReadOrCreate(req.AppRoot)
- if err != nil {
- sendErr(err)
+ slog.FlushBuffers()
+ sendExit(1) // Kill the client
+ os.Exit(1) // Kill the daemon too
return nil
}
- s.cacheAppRoot(man.AppID, req.AppRoot)
-
- setupCtx, setupCancel := context.WithTimeout(context.Background(), 30*time.Second)
- clusterID := man.AppID + "-test"
- cluster := s.cm.Init(setupCtx, &sqldb.InitParams{
- ClusterID: clusterID,
- Memfs: true,
- Meta: parse.Meta,
- })
- // Set up the database asynchronously since it can take a while.
- dbSetupErr := make(chan error, 1)
- go func() {
- defer setupCancel()
- if err := cluster.Start(streamLog{stream: stream}); err != nil {
- dbSetupErr <- err
- } else if err := cluster.Recreate(setupCtx, req.AppRoot, nil, parse.Meta); err != nil {
- dbSetupErr <- err
- }
- }()
+ // Hold the stream mutex so we can set up the stream map
+ // before output starts.
+ s.mu.Lock()
- testCtx, cancel := context.WithCancel(stream.Context())
- defer cancel()
+ // If the listen addr contains no interface, render it as "localhost:port"
+ // instead of just ":port".
+ displayListenAddr := listenAddr
+ if strings.HasPrefix(listenAddr, ":") {
+ displayListenAddr = "localhost" + listenAddr
+ }
- testResults := make(chan error, 1)
- go func() {
- testResults <- s.mgr.Test(testCtx, run.TestParams{
- AppRoot: req.AppRoot,
- WorkingDir: req.WorkingDir,
- DBClusterID: clusterID,
- Args: req.Args,
- Stdout: &streamWriter{stream: stream, stderr: false},
- Stderr: &streamWriter{stream: stream, stderr: true},
- })
- }()
+ browser := run.BrowserModeFromProto(req.Browser)
+ if browser == run.BrowserModeAuto {
+ browser = run.BrowserModeFromConfig(userConfig)
+ }
- select {
- case err := <-dbSetupErr:
- sendErr(err)
- return nil
- case err := <-testResults:
- if err != nil {
- sendErr(err)
+ runInstance, err := s.mgr.Start(ctx, run.StartParams{
+ App: app,
+ NS: ns,
+ WorkingDir: req.WorkingDir,
+ Listener: ln,
+ ListenAddr: displayListenAddr,
+ Watch: req.Watch,
+ Environ: req.Environ,
+ OpsTracker: ops,
+ Browser: browser,
+ Debug: run.DebugModeFromProto(req.DebugMode),
+ })
+ if err != nil {
+ s.mu.Unlock()
+ if errList := run.AsErrorList(err); errList != nil {
+ _ = errList.SendToStream(stream)
} else {
- streamExit(stream, 0)
+ errStr := err.Error()
+ if !strings.HasSuffix(errStr, "\n") {
+ errStr += "\n"
+ }
+ _, _ = stderr.Write([]byte(errStr))
}
+ sendExit(1)
return nil
}
-}
-
-// Check checks the app for compilation errors.
-func (s *Server) Check(req *daemonpb.CheckRequest, stream daemonpb.Daemon_CheckServer) error {
- log := newStreamLogger(stream)
- err := s.mgr.Check(stream.Context(), req.AppRoot, req.WorkingDir)
- if err != nil {
- log.Error().Msg(err.Error())
- streamExit(stream, 1)
- } else {
- streamExit(stream, 0)
- }
- return nil
-}
-
-// OnStart implements run.EventListener.
-func (s *Server) OnStart(r *run.Run) {}
-
-// OnReload implements run.EventListener.
-func (s *Server) OnReload(r *run.Run) {}
-
-// OnStop implements run.EventListener.
-func (s *Server) OnStop(r *run.Run) {}
-
-// OnStdout implements run.EventListener.
-func (s *Server) OnStdout(r *run.Run, line []byte) {
- s.mu.Lock()
- stream, ok := s.streams[r.ID]
+ defer runInstance.Close()
+ s.streams[runInstance.ID] = slog
s.mu.Unlock()
- if ok {
- stream.Send(&daemonpb.RunMessage{Msg: &daemonpb.RunMessage_Output{
- Output: &daemonpb.CommandOutput{Stdout: line},
- }})
- }
-}
+ ops.AllDone()
-// OnStderr implements run.EventListener.
-func (s *Server) OnStderr(r *run.Run, line []byte) {
- s.mu.Lock()
- stream, ok := s.streams[r.ID]
- s.mu.Unlock()
+ secrets, _ := s.sm.Load(app).Get(ctx, nil)
+ externalDBs := map[string]string{}
+ for key, val := range secrets.Values {
+ if db, ok := strings.CutPrefix(key, "sqldb::"); ok {
+ var connCfg struct {
+ ConnString string `json:"connection_string"`
+ }
+ err := json.Unmarshal([]byte(val), &connCfg)
+ if err != nil {
+ log.Warn().Err(err).Str("key", key).Msg("failed to unmarshal connection string")
+ continue
+ }
+ connURL, err := url.Parse(connCfg.ConnString)
+ if err != nil {
+ log.Warn().Err(err).Str("key", key).Msg("failed to parse connection string")
+ continue
+ }
+ connURL.User = url.User(connURL.User.Username())
+ externalDBs[db] = connURL.String()
- if ok {
- stream.Send(&daemonpb.RunMessage{Msg: &daemonpb.RunMessage_Output{
- Output: &daemonpb.CommandOutput{Stderr: line},
- }})
+ }
}
-}
-
-// parseApp parses the app.
-func (s *Server) parseApp(appRoot, workingDir string, parseTests bool) (*parser.Result, error) {
- modPath := filepath.Join(appRoot, "go.mod")
- modData, err := ioutil.ReadFile(modPath)
- if err != nil {
- return nil, err
+ _, _ = stderr.Write([]byte("\n"))
+ _, _ = fmt.Fprintf(stderr, " Encore development server running!\n\n")
+
+ _, _ = fmt.Fprintf(stderr, " Your API is running at: %s\n", aurora.Cyan("http://"+runInstance.ListenAddr))
+ _, _ = fmt.Fprintf(stderr, " Development Dashboard URL: %s\n", aurora.Cyan(fmt.Sprintf(
+ "%s/%s", s.mgr.DashBaseURL, app.PlatformOrLocalID())))
+ _, _ = fmt.Fprintf(stderr, " MCP SSE URL: %s\n", aurora.Cyan(fmt.Sprintf(
+ "%s/sse?appID=%s", s.mcp.BaseURL, app.PlatformOrLocalID())))
+
+ if ns := runInstance.NS; !ns.Active || ns.Name != "default" {
+ _, _ = fmt.Fprintf(stderr, " Namespace: %s\n", aurora.Cyan(ns.Name))
+ if len(externalDBs) > 0 {
+ _, _ = fmt.Fprintln(stderr, " External databases:")
+ }
}
- mod, err := modfile.Parse(modPath, modData, nil)
- if err != nil {
- return nil, err
+ for db, connStr := range externalDBs {
+ _, _ = fmt.Fprintf(stderr, " %s: %s\n", db, aurora.Cyan(connStr))
}
-
- cfg := &parser.Config{
- AppRoot: appRoot,
- Version: "",
- ModulePath: mod.Module.Mod.Path,
- WorkingDir: workingDir,
- ParseTests: parseTests,
+ if req.DebugMode == daemonpb.RunRequest_DEBUG_ENABLED {
+ // Print the pid for debugging. Currently we only support this if we have a default gateway.
+ if gw, ok := runInstance.ProcGroup().Gateways["api-gateway"]; ok {
+ _, _ = fmt.Fprintf(stderr, " Process ID: %d\n", aurora.Cyan(gw.Pid))
+ }
+ }
+ // Log which experiments are enabled, if any
+ if exp := runInstance.ProcGroup().Experiments.List(); len(exp) > 0 {
+ strs := make([]string, len(exp))
+ for i, e := range exp {
+ strs[i] = string(e)
+ }
+ _, _ = fmt.Fprintf(stderr, " Enabled experiment(s): %s\n", aurora.Yellow(strings.Join(strs, ", ")))
}
- return parser.Parse(cfg)
-}
-func requiresSQLDB(md *meta.Data) bool {
- for _, svc := range md.Svcs {
- if len(svc.Migrations) > 0 {
- return true
+ // If there's a newer version available, print a message.
+ if newVer != nil {
+ if newVer.SecurityUpdate {
+ _, _ = stderr.Write([]byte(aurora.Sprintf(
+ aurora.Yellow("\n New Encore release available with security updates: %s (you have %s)\n Update with: encore version update\n"),
+ newVer.Version(), version.Version)))
+
+ if newVer.SecurityNotes != "" {
+ _, _ = stderr.Write([]byte(aurora.Sprintf(
+ aurora.Faint("\n %s\n"),
+ newVer.SecurityNotes)))
+ }
+ } else {
+ _, _ = stderr.Write([]byte(aurora.Sprintf(
+ aurora.Faint("\n New Encore release available: %s (you have %s)\n Update with: encore version update\n"),
+ newVer.Version(), version.Version)))
}
}
- return false
+ _, _ = stderr.Write([]byte("\n"))
+
+ slog.FlushBuffers()
+
+ go func() {
+ // Wait a little bit for the app to start
+ select {
+ case <-runInstance.Done():
+ return
+ case <-time.After(5 * time.Second):
+ if proc := runInstance.ProcGroup(); proc != nil {
+ showFirstRunExperience(runInstance, proc.Meta, stderr)
+ }
+ }
+ }()
+
+ <-runInstance.Done() // wait for run to complete
+
+ s.mu.Lock()
+ delete(s.streams, runInstance.ID)
+ s.mu.Unlock()
+ return nil
}
diff --git a/cli/daemon/run/call.go b/cli/daemon/run/call.go
new file mode 100644
index 0000000000..c98608d045
--- /dev/null
+++ b/cli/daemon/run/call.go
@@ -0,0 +1,329 @@
+package run
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/rs/zerolog/log"
+ "github.com/tailscale/hujson"
+
+ "encr.dev/parser/encoding"
+ v1 "encr.dev/proto/encore/parser/meta/v1"
+)
+
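+// ApiCallParams are the parameters for calling an API endpoint of a running app.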
+type ApiCallParams struct {
+ AppID string
+ Service string
+ Endpoint string
+ Path string
+ Method string
+ Payload []byte
+ AuthPayload []byte `json:"auth_payload,omitempty"`
+ AuthToken string `json:"auth_token,omitempty"`
+ CorrelationID string `json:"correlation_id,omitempty"`
+}
+
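+// CallAPI makes an API call to the given endpoint of a running app,
+// reporting the response status, body, and trace id.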
+func CallAPI(ctx context.Context, run *Run, p *ApiCallParams) (map[string]any, error) {
+ log := log.With().Str("app_id", p.AppID).Str("path", p.Path).Str("service", p.Service).Str("endpoint", p.Endpoint).Logger()
+ if run == nil {
+ log.Error().Str("app_id", p.AppID).Msg("dash: cannot make api call: app not running")
+ return nil, fmt.Errorf("app not running")
+ }
+ proc := run.ProcGroup()
+ if proc == nil {
+ log.Error().Str("app_id", p.AppID).Msg("dash: cannot make api call: app not running")
+ return nil, fmt.Errorf("app not running")
+ }
+
+ baseURL := "http://" + run.ListenAddr
+ req, err := prepareRequest(ctx, baseURL, proc.Meta, p)
+ if err != nil {
+ log.Error().Err(err).Msg("dash: unable to prepare request")
+ return nil, err
+ }
+
+ if p.CorrelationID != "" {
+ req.Header.Set("X-Correlation-ID", p.CorrelationID)
+ }
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ log.Error().Err(err).Msg("dash: api call failed")
+ return nil, err
+ }
+ body, _ := io.ReadAll(resp.Body)
+ _ = resp.Body.Close()
+
+ // Encode the body back into a Go style struct
+ if resp.StatusCode >= 200 && resp.StatusCode < 300 {
+ body = handleResponse(proc.Meta, p, resp.Header, body)
+ }
+
+ log.Info().Int("status", resp.StatusCode).Msg("dash: api call completed")
+ return map[string]interface{}{
+ "status": resp.Status,
+ "status_code": resp.StatusCode,
+ "body": body,
+ "trace_id": resp.Header.Get("X-Encore-Trace-Id"),
+ }, nil
+}
+
+// findRPC finds the RPC with the given service and endpoint name.
+// If it cannot be found it reports nil.
+func findRPC(md *v1.Data, service, endpoint string) *v1.RPC {
+ for _, svc := range md.Svcs {
+ if svc.Name == service {
+ for _, rpc := range svc.Rpcs {
+ if rpc.Name == endpoint {
+ return rpc
+ }
+ }
+ break
+ }
+ }
+ return nil
+}
+
+// prepareRequest prepares a request for sending based on the given ApiCallParams.
+func prepareRequest(ctx context.Context, baseURL string, md *v1.Data, p *ApiCallParams) (*http.Request, error) {
+ reqSpec := newHTTPRequestSpec()
+ rpc := findRPC(md, p.Service, p.Endpoint)
+ if rpc == nil {
+ return nil, fmt.Errorf("unknown service/endpoint: %s/%s", p.Service, p.Endpoint)
+ }
+
+ rpcEncoding, err := encoding.DescribeRPC(md, rpc, nil)
+ if err != nil {
+ return nil, fmt.Errorf("describe rpc: %v", err)
+ }
+
+ // Add request encoding
+ {
+ reqEnc := rpcEncoding.RequestEncodingForMethod(p.Method)
+ if reqEnc == nil {
+ return nil, fmt.Errorf("unsupported method: %s (supports: %s)", p.Method, strings.Join(rpc.HttpMethods, ","))
+ }
+ if len(p.Payload) > 0 {
+ if err := addToRequest(reqSpec, p.Payload, reqEnc.ParameterEncodingMapByName()); err != nil {
+ return nil, fmt.Errorf("encode request params: %v", err)
+ }
+ }
+ }
+
+ // Add auth encoding, if any
+ if h := md.AuthHandler; h != nil {
+ auth, err := encoding.DescribeAuth(md, h.Params, nil)
+ if err != nil {
+ return nil, fmt.Errorf("describe auth: %v", err)
+ }
+ if auth.LegacyTokenFormat {
+ reqSpec.Header.Set("Authorization", "Bearer "+p.AuthToken)
+ } else {
+ if err := addToRequest(reqSpec, p.AuthPayload, auth.ParameterEncodingMapByName()); err != nil {
+ return nil, fmt.Errorf("encode auth params: %v", err)
+ }
+ }
+ }
+
+ var body io.Reader = nil
+ if reqSpec.Body != nil {
+ data, _ := json.Marshal(reqSpec.Body)
+ body = bytes.NewReader(data)
+ if reqSpec.Header["Content-Type"] == nil {
+ reqSpec.Header.Set("Content-Type", "application/json")
+ }
+ }
+
+ reqURL := baseURL + p.Path
+ if len(reqSpec.Query) > 0 {
+ reqURL += "?" + reqSpec.Query.Encode()
+ }
+
+ req, err := http.NewRequestWithContext(ctx, p.Method, reqURL, body)
+ if err != nil {
+ return nil, err
+ }
+ for k, v := range reqSpec.Header {
+ req.Header[k] = v
+ }
+ for _, c := range reqSpec.Cookies {
+ req.AddCookie(c)
+ }
+ return req, nil
+}
+
+func handleResponse(md *v1.Data, p *ApiCallParams, headers http.Header, body []byte) []byte {
+ rpc := findRPC(md, p.Service, p.Endpoint)
+ if rpc == nil {
+ return body
+ }
+
+ encodingOptions := &encoding.Options{}
+ rpcEncoding, err := encoding.DescribeRPC(md, rpc, encodingOptions)
+ if err != nil {
+ return body
+ }
+
+ decoded := map[string]json.RawMessage{}
+ if err := json.Unmarshal(body, &decoded); err != nil {
+ return body
+ }
+
+ members := make([]hujson.ObjectMember, 0)
+ if rpcEncoding.ResponseEncoding != nil {
+ for i, m := range rpcEncoding.ResponseEncoding.HeaderParameters {
+ value := headers.Get(m.Name)
+
+ var beforeExtra []byte
+ if i == 0 {
+ beforeExtra = []byte("\n // HTTP Headers\n ")
+ }
+
+ members = append(members, hujson.ObjectMember{
+ Name: hujson.Value{Value: hujson.String(m.Name), BeforeExtra: beforeExtra},
+ Value: hujson.Value{Value: hujson.String(value)},
+ })
+ }
+
+ for i, m := range rpcEncoding.ResponseEncoding.BodyParameters {
+ value, ok := decoded[m.Name]
+ if !ok {
+ value = []byte("null")
+ }
+
+ var beforeExtra []byte
+ if i == 0 {
+ if len(rpcEncoding.ResponseEncoding.HeaderParameters) > 0 {
+ beforeExtra = []byte("\n\n // JSON Payload\n ")
+ } else {
+ beforeExtra = []byte("\n ")
+ }
+ }
+
+ // nosemgrep: trailofbits.go.invalid-usage-of-modified-variable.invalid-usage-of-modified-variable
+ hValue, err := hujson.Parse(value)
+ if err != nil {
+ hValue = hujson.Value{Value: hujson.Literal(value)}
+ }
+
+ members = append(members, hujson.ObjectMember{
+ Name: hujson.Value{Value: hujson.String(m.Name), BeforeExtra: beforeExtra},
+ Value: hValue,
+ })
+ }
+ }
+
+ value := hujson.Value{Value: &hujson.Object{Members: members}}
+ value.Format()
+ return value.Pack()
+}
+
+// httpRequestSpec specifies how the HTTP request should be generated.
+type httpRequestSpec struct {
+ // Body are the fields to encode as the JSON body.
+ // If nil, no body is added.
+ Body map[string]json.RawMessage
+
+ // Header are the HTTP headers to set in the request.
+ Header http.Header
+
+ // Query are the query string fields to set.
+ Query url.Values
+
+ // Cookies are the cookies to send.
+ Cookies []*http.Cookie
+}
+
+func newHTTPRequestSpec() *httpRequestSpec {
+ return &httpRequestSpec{
+ Body: nil, // to distinguish between no body and "{}".
+ Header: make(http.Header),
+ Query: make(url.Values),
+ }
+}
+
+// addToRequest decodes rawPayload and adds it to the request according to the given parameter encodings.
+// The body argument is where body parameters are added; other parameter locations are added
+// directly to the request object itself.
+func addToRequest(req *httpRequestSpec, rawPayload []byte, params map[string][]*encoding.ParameterEncoding) error {
+ payload, err := hujson.Parse(rawPayload)
+ if err != nil {
+ return fmt.Errorf("invalid payload: %v", err)
+ }
+ vals, ok := payload.Value.(*hujson.Object)
+ if !ok {
+ return fmt.Errorf("invalid payload: expected JSON object, got %s", payload.Pack())
+ }
+
+ seenKeys := make(map[string]int)
+
+ for _, kv := range vals.Members {
+ lit, _ := kv.Name.Value.(hujson.Literal)
+ key := lit.String()
+ val := kv.Value
+ val.Standardize()
+
+ if matches := params[key]; len(matches) > 0 {
+ // Get the index of this particular match, in case we have conflicts.
+ idx := seenKeys[key]
+ seenKeys[key]++
+ if idx < len(matches) {
+ param := matches[idx]
+ switch param.Location {
+ case encoding.Body:
+ if req.Body == nil {
+ req.Body = make(map[string]json.RawMessage)
+ }
+ req.Body[param.WireFormat] = val.Pack()
+
+ case encoding.Query:
+ switch v := val.Value.(type) {
+ case hujson.Literal:
+ req.Query.Add(param.WireFormat, v.String())
+ case *hujson.Array:
+ for _, elem := range v.Elements {
+ if lit, ok := elem.Value.(hujson.Literal); ok {
+ req.Query.Add(param.WireFormat, lit.String())
+ } else {
+ return fmt.Errorf("unsupported value type for query string array element: %T", elem.Value)
+ }
+ }
+ default:
+ return fmt.Errorf("unsupported value type for query string: %T", v)
+ }
+
+ case encoding.Header:
+ switch v := val.Value.(type) {
+ case hujson.Literal:
+ req.Header.Add(param.WireFormat, v.String())
+ default:
+ return fmt.Errorf("unsupported value type for header: %T", v)
+ }
+
+ case encoding.Cookie:
+ switch v := val.Value.(type) {
+ case hujson.Literal:
+ // nosemgrep
+ req.Cookies = append(req.Cookies, &http.Cookie{
+ Name: param.WireFormat,
+ Value: v.String(),
+ })
+ default:
+ return fmt.Errorf("unsupported value type for cookie: %T", v)
+ }
+
+ default:
+ return fmt.Errorf("unsupported parameter location %v", param.Location)
+ }
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/cli/daemon/run/check.go b/cli/daemon/run/check.go
new file mode 100644
index 0000000000..2d68a95c6c
--- /dev/null
+++ b/cli/daemon/run/check.go
@@ -0,0 +1,109 @@
+package run
+
+import (
+ "context"
+ "runtime"
+
+ "github.com/cockroachdb/errors"
+
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/internal/version"
+ "encr.dev/pkg/builder"
+ "encr.dev/pkg/builder/builderimpl"
+ "encr.dev/pkg/cueutil"
+ "encr.dev/pkg/fns"
+ "encr.dev/pkg/vcs"
+)
+
+type CheckParams struct {
+ // App is the app to start.
+ App *apps.Instance
+
+ // WorkingDir is the working dir, for formatting
+ // error messages with relative paths.
+ WorkingDir string
+
+ // CodegenDebug, if true, specifies to keep the output
+ // around for codegen debugging purposes.
+ CodegenDebug bool
+
+ // Environ are the environment variables to set,
+ // in the same format as os.Environ().
+ Environ []string
+
+ // Tests specifies whether to parse and codegen for tests as well.
+ Tests bool
+}
+
+// Check checks the app for errors.
+// It reports a buildDir (if available) when CodegenDebug is true.
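+//
+// A minimal usage sketch (the *apps.Instance named app is hypothetical):
+//
+//	buildDir, err := mgr.Check(ctx, CheckParams{
+//		App:          app,
+//		WorkingDir:   ".",
+//		CodegenDebug: true,
+//		Environ:      os.Environ(),
+//	})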
+func (mgr *Manager) Check(ctx context.Context, p CheckParams) (buildDir string, err error) {
+ expSet, err := p.App.Experiments(p.Environ)
+ if err != nil {
+ return "", err
+ }
+
+ // TODO: We should check that all secret keys are defined as well.
+
+ vcsRevision := vcs.GetRevision(p.App.Root())
+ buildInfo := builder.BuildInfo{
+ BuildTags: builder.LocalBuildTags,
+ CgoEnabled: true,
+ StaticLink: false,
+ DebugMode: builder.DebugModeDisabled,
+ Environ: p.Environ,
+ GOOS: runtime.GOOS,
+ GOARCH: runtime.GOARCH,
+ KeepOutput: p.CodegenDebug,
+ Revision: vcsRevision.Revision,
+ UncommittedChanges: vcsRevision.Uncommitted,
+
+ // Use the local JS runtime if this is a development build.
+ UseLocalJSRuntime: version.Channel == version.DevBuild,
+ }
+
+ bld := builderimpl.Resolve(p.App.Lang(), expSet)
+ defer fns.CloseIgnore(bld)
+ parse, err := bld.Parse(ctx, builder.ParseParams{
+ Build: buildInfo,
+ App: p.App,
+ Experiments: expSet,
+ WorkingDir: p.WorkingDir,
+ ParseTests: p.Tests,
+ })
+ if err != nil {
+ return "", err
+ }
+ if err := p.App.CacheMetadata(parse.Meta); err != nil {
+ return "", errors.Wrap(err, "cache metadata")
+ }
+
+ // Validate the service configs.
+ _, err = bld.ServiceConfigs(ctx, builder.ServiceConfigsParams{
+ Parse: parse,
+ CueMeta: &cueutil.Meta{
+ // Dummy data to satisfy config validation.
+ APIBaseURL: "http://localhost:0",
+ EnvName: "encore-check",
+ EnvType: cueutil.EnvType_Development,
+ CloudType: cueutil.CloudType_Local,
+ },
+ })
+ if err != nil {
+ return "", err
+ }
+
+ result, err := bld.Compile(ctx, builder.CompileParams{
+ Build: buildInfo,
+ App: p.App,
+ Parse: parse,
+ OpTracker: nil, // TODO
+ Experiments: expSet,
+ WorkingDir: p.WorkingDir,
+ })
+
+ if result != nil && len(result.Outputs) > 0 {
+ buildDir = result.Outputs[0].GetArtifactDir().ToIO()
+ }
+ return buildDir, err
+}
diff --git a/cli/daemon/run/errors.go b/cli/daemon/run/errors.go
new file mode 100644
index 0000000000..20b7757881
--- /dev/null
+++ b/cli/daemon/run/errors.go
@@ -0,0 +1,20 @@
+package run
+
+import (
+ "errors"
+
+ "encr.dev/pkg/errlist"
+ "encr.dev/v2/internals/perr"
+)
+
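+// AsErrorList converts err into an *errlist.List if possible.
+// It reports nil if err is neither an errlist nor a perr list.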
+func AsErrorList(err error) *errlist.List {
+ if errList := errlist.Convert(err); errList != nil {
+ return errList
+ }
+
+ list := &perr.ListAsErr{}
+ if errors.As(err, &list) {
+ return &errlist.List{List: list.ErrorList()}
+ }
+ return nil
+}
diff --git a/cli/daemon/run/exec_command.go b/cli/daemon/run/exec_command.go
new file mode 100644
index 0000000000..529a7ea098
--- /dev/null
+++ b/cli/daemon/run/exec_command.go
@@ -0,0 +1,193 @@
+package run
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "time"
+
+ "github.com/cockroachdb/errors"
+
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/cli/daemon/namespace"
+ "encr.dev/cli/daemon/run/infra"
+ "encr.dev/internal/optracker"
+ "encr.dev/internal/version"
+ "encr.dev/pkg/builder"
+ "encr.dev/pkg/builder/builderimpl"
+ "encr.dev/pkg/cueutil"
+ "encr.dev/pkg/fns"
+ "encr.dev/pkg/option"
+ "encr.dev/pkg/promise"
+ "encr.dev/pkg/vcs"
+)
+
+// ExecCommandParams groups the parameters for the ExecCommand method.
+type ExecCommandParams struct {
+ // App is the app to execute the script for.
+ App *apps.Instance
+
+ // NS is the namespace to use.
+ NS *namespace.Namespace
+
+ // Command is the command to execute.
+ Command string
+
+ // ScriptArgs are the arguments to pass to the command.
+ ScriptArgs []string
+
+ // WorkingDir is the working dir to execute the script from.
+ // It's relative to the app root.
+ WorkingDir string
+
+ // Environ are the environment variables to set when running the command,
+ // in the same format as os.Environ().
+ Environ []string
+
+ // Stdout and Stderr are where "go test" output should be written.
+ Stdout, Stderr io.Writer
+
+ OpTracker *optracker.OpTracker
+}
+
+// ExecCommand executes the given command with the application's
+// infrastructure and runtime environment set up.
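+//
+// A minimal usage sketch (the app, namespace, and tracker are hypothetical):
+//
+//	err := mgr.ExecCommand(ctx, ExecCommandParams{
+//		App:        app,
+//		NS:         ns,
+//		Command:    "npm",
+//		ScriptArgs: []string{"run", "seed"},
+//		Stdout:     os.Stdout,
+//		Stderr:     os.Stderr,
+//		OpTracker:  tracker,
+//	})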
+func (mgr *Manager) ExecCommand(ctx context.Context, p ExecCommandParams) (err error) {
+ expSet, err := p.App.Experiments(p.Environ)
+ if err != nil {
+ return err
+ }
+
+ rm := infra.NewResourceManager(p.App, mgr.ClusterMgr, mgr.ObjectsMgr, mgr.PublicBuckets, p.NS, p.Environ, mgr.DBProxyPort, false)
+ defer rm.StopAll()
+
+ tracker := p.OpTracker
+ jobs := optracker.NewAsyncBuildJobs(ctx, p.App.PlatformOrLocalID(), tracker)
+
+ // Parse the app to figure out what infrastructure is needed.
+ start := time.Now()
+ parseOp := tracker.Add("Building Encore application graph", start)
+ topoOp := tracker.Add("Analyzing service topology", start)
+
+ bld := builderimpl.Resolve(p.App.Lang(), expSet)
+ defer fns.CloseIgnore(bld)
+ vcsRevision := vcs.GetRevision(p.App.Root())
+ buildInfo := builder.BuildInfo{
+ BuildTags: builder.LocalBuildTags,
+ CgoEnabled: true,
+ StaticLink: false,
+ DebugMode: builder.DebugModeDisabled,
+ Environ: p.Environ,
+ GOOS: runtime.GOOS,
+ GOARCH: runtime.GOARCH,
+ KeepOutput: false,
+ Revision: vcsRevision.Revision,
+ UncommittedChanges: vcsRevision.Uncommitted,
+
+ // Use the local JS runtime if this is a development build.
+ UseLocalJSRuntime: version.Channel == version.DevBuild,
+ }
+
+ parse, err := bld.Parse(ctx, builder.ParseParams{
+ Build: buildInfo,
+ App: p.App,
+ Experiments: expSet,
+ WorkingDir: p.WorkingDir,
+ ParseTests: false,
+ })
+ if err != nil {
+ // Don't use the error itself in tracker.Fail, as it will lead to duplicate error output.
+ tracker.Fail(parseOp, errors.New("parse error"))
+ return err
+ }
+ if err := p.App.CacheMetadata(parse.Meta); err != nil {
+ return errors.Wrap(err, "cache metadata")
+ }
+ tracker.Done(parseOp, 500*time.Millisecond)
+ tracker.Done(topoOp, 300*time.Millisecond)
+
+ rm.StartRequiredServices(jobs, parse.Meta)
+
+ var secrets map[string]string
+ if usesSecrets(parse.Meta) {
+ jobs.Go("Fetching application secrets", true, 150*time.Millisecond, func(ctx context.Context) error {
+ data, err := mgr.Secret.Load(p.App).Get(ctx, expSet)
+ if err != nil {
+ return err
+ }
+ secrets = data.Values
+ return nil
+ })
+ }
+
+ apiBaseURL := fmt.Sprintf("http://localhost:%d", mgr.RuntimePort)
+
+ configProm := promise.New(func() (*builder.ServiceConfigsResult, error) {
+ return bld.ServiceConfigs(ctx, builder.ServiceConfigsParams{
+ Parse: parse,
+ CueMeta: &cueutil.Meta{
+ APIBaseURL: apiBaseURL,
+ EnvName: "local",
+ EnvType: cueutil.EnvType_Development,
+ CloudType: cueutil.CloudType_Local,
+ },
+ })
+ })
+
+ if err := jobs.Wait(); err != nil {
+ return err
+ }
+
+ gateways := make(map[string]GatewayConfig)
+ for _, gw := range parse.Meta.Gateways {
+ gateways[gw.EncoreName] = GatewayConfig{
+ BaseURL: apiBaseURL,
+ Hostnames: []string{"localhost"},
+ }
+ }
+
+ cfg, err := configProm.Get(ctx)
+ if err != nil {
+ return err
+ }
+
+ authKey := genAuthKey()
+ configGen := &RuntimeConfigGenerator{
+ app: p.App,
+ infraManager: rm,
+ md: parse.Meta,
+ AppID: option.Some(GenID()),
+ EnvID: option.Some(GenID()),
+ TraceEndpoint: option.Some(fmt.Sprintf("http://localhost:%d/trace", mgr.RuntimePort)),
+ AuthKey: authKey,
+ Gateways: gateways,
+ DefinedSecrets: secrets,
+ SvcConfigs: cfg.Configs,
+ IncludeMetaEnv: bld.NeedsMeta(),
+ }
+ procConf, err := configGen.AllInOneProc()
+ if err != nil {
+ return err
+ }
+ procEnv, err := configGen.ProcEnvs(procConf, bld.UseNewRuntimeConfig())
+ if err != nil {
+ return errors.Wrap(err, "compute proc envs")
+ }
+
+ defaultEnv := []string{"ENCORE_RUNTIME_LOG=error"}
+ env := append(defaultEnv, p.Environ...)
+ env = append(env, procConf.ExtraEnv...)
+ env = append(env, procEnv...)
+
+ tracker.AllDone()
+
+ // nosemgrep: go.lang.security.audit.dangerous-exec-command.dangerous-exec-command
+ cmd := exec.CommandContext(ctx, p.Command, p.ScriptArgs...)
+ cmd.Dir = filepath.Join(p.App.Root(), p.WorkingDir)
+ cmd.Stdout = p.Stdout
+ cmd.Stderr = p.Stderr
+ cmd.Env = env
+ return cmd.Run()
+}
diff --git a/cli/daemon/run/exec_script.go b/cli/daemon/run/exec_script.go
new file mode 100644
index 0000000000..ece65995f9
--- /dev/null
+++ b/cli/daemon/run/exec_script.go
@@ -0,0 +1,237 @@
+package run
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "slices"
+ "time"
+
+ "github.com/cockroachdb/errors"
+
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/cli/daemon/namespace"
+ "encr.dev/cli/daemon/run/infra"
+ encoreEnv "encr.dev/internal/env"
+ "encr.dev/internal/lookpath"
+ "encr.dev/internal/optracker"
+ "encr.dev/internal/version"
+ "encr.dev/pkg/builder"
+ "encr.dev/pkg/builder/builderimpl"
+ "encr.dev/pkg/cueutil"
+ "encr.dev/pkg/fns"
+ "encr.dev/pkg/option"
+ "encr.dev/pkg/paths"
+ "encr.dev/pkg/promise"
+ "encr.dev/pkg/vcs"
+)
+
+// ExecScriptParams groups the parameters for the ExecScript method.
+type ExecScriptParams struct {
+ // App is the app to execute the script for.
+ App *apps.Instance
+
+ // NS is the namespace to use.
+ NS *namespace.Namespace
+
+ // MainPkg is the package path to the command to execute.
+ MainPkg paths.Pkg
+
+ // ScriptArgs are the arguments to pass to the script binary.
+ ScriptArgs []string
+
+ // WorkingDir is the working dir to execute the script from.
+ // It's relative to the app root.
+ WorkingDir string
+
+ // Environ are the environment variables to set when running the script,
+ // in the same format as os.Environ().
+ Environ []string
+
+ // Stdout and Stderr are where "go test" output should be written.
+ Stdout, Stderr io.Writer
+
+ OpTracker *optracker.OpTracker
+}
+
+// ExecScript compiles and executes the script whose main package is given by MainPkg.
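+//
+// A minimal usage sketch (the app, namespace, tracker, and package path are hypothetical):
+//
+//	err := mgr.ExecScript(ctx, ExecScriptParams{
+//		App:        app,
+//		NS:         ns,
+//		MainPkg:    paths.Pkg("example.com/app/scripts/seed"),
+//		ScriptArgs: []string{"--dry-run"},
+//		Stdout:     os.Stdout,
+//		Stderr:     os.Stderr,
+//		OpTracker:  tracker,
+//	})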
+func (mgr *Manager) ExecScript(ctx context.Context, p ExecScriptParams) (err error) {
+ expSet, err := p.App.Experiments(p.Environ)
+ if err != nil {
+ return err
+ }
+
+ rm := infra.NewResourceManager(p.App, mgr.ClusterMgr, mgr.ObjectsMgr, mgr.PublicBuckets, p.NS, p.Environ, mgr.DBProxyPort, false)
+ defer rm.StopAll()
+
+ tracker := p.OpTracker
+ jobs := optracker.NewAsyncBuildJobs(ctx, p.App.PlatformOrLocalID(), tracker)
+
+ // Parse the app to figure out what infrastructure is needed.
+ start := time.Now()
+ parseOp := tracker.Add("Building Encore application graph", start)
+ topoOp := tracker.Add("Analyzing service topology", start)
+
+ bld := builderimpl.Resolve(p.App.Lang(), expSet)
+ defer fns.CloseIgnore(bld)
+ vcsRevision := vcs.GetRevision(p.App.Root())
+ buildInfo := builder.BuildInfo{
+ BuildTags: builder.LocalBuildTags,
+ CgoEnabled: true,
+ StaticLink: false,
+ DebugMode: builder.DebugModeDisabled,
+ Environ: p.Environ,
+ GOOS: runtime.GOOS,
+ GOARCH: runtime.GOARCH,
+ KeepOutput: false,
+ Revision: vcsRevision.Revision,
+ UncommittedChanges: vcsRevision.Uncommitted,
+ MainPkg: option.Some(p.MainPkg),
+
+ // Use the local JS runtime if this is a development build.
+ UseLocalJSRuntime: version.Channel == version.DevBuild,
+ }
+
+ parse, err := bld.Parse(ctx, builder.ParseParams{
+ Build: buildInfo,
+ App: p.App,
+ Experiments: expSet,
+ WorkingDir: p.WorkingDir,
+ ParseTests: false,
+ })
+ if err != nil {
+ // Don't use the error itself in tracker.Fail, as it will lead to duplicate error output.
+ tracker.Fail(parseOp, errors.New("parse error"))
+ return err
+ }
+ if err := p.App.CacheMetadata(parse.Meta); err != nil {
+ return errors.Wrap(err, "cache metadata")
+ }
+ tracker.Done(parseOp, 500*time.Millisecond)
+ tracker.Done(topoOp, 300*time.Millisecond)
+
+ rm.StartRequiredServices(jobs, parse.Meta)
+
+ var secrets map[string]string
+ if usesSecrets(parse.Meta) {
+ jobs.Go("Fetching application secrets", true, 150*time.Millisecond, func(ctx context.Context) error {
+ data, err := mgr.Secret.Load(p.App).Get(ctx, expSet)
+ if err != nil {
+ return err
+ }
+ secrets = data.Values
+ return nil
+ })
+ }
+
+ apiBaseURL := fmt.Sprintf("http://localhost:%d", mgr.RuntimePort)
+
+ configProm := promise.New(func() (*builder.ServiceConfigsResult, error) {
+ return bld.ServiceConfigs(ctx, builder.ServiceConfigsParams{
+ Parse: parse,
+ CueMeta: &cueutil.Meta{
+ APIBaseURL: apiBaseURL,
+ EnvName: "local",
+ EnvType: cueutil.EnvType_Development,
+ CloudType: cueutil.CloudType_Local,
+ },
+ })
+ })
+
+ var build *builder.CompileResult
+ jobs.Go("Compiling application source code", false, 0, func(ctx context.Context) (err error) {
+ build, err = bld.Compile(ctx, builder.CompileParams{
+ Build: buildInfo,
+ App: p.App,
+ Parse: parse,
+ OpTracker: tracker,
+ Experiments: expSet,
+ WorkingDir: p.WorkingDir,
+ })
+ if err != nil {
+ return errors.Wrap(err, "compile error on exec")
+ }
+ return nil
+ })
+
+ if err := jobs.Wait(); err != nil {
+ return err
+ }
+
+ gateways := make(map[string]GatewayConfig)
+ for _, gw := range parse.Meta.Gateways {
+ gateways[gw.EncoreName] = GatewayConfig{
+ BaseURL: apiBaseURL,
+ Hostnames: []string{"localhost"},
+ }
+ }
+
+ outputs := build.Outputs
+ if len(outputs) != 1 {
+ return errors.New("ExecScript currently only supports a single build output")
+ }
+ entrypoints := outputs[0].GetEntrypoints()
+ if len(entrypoints) != 1 {
+ return errors.New("ExecScript currently only supports a single entrypoint")
+ }
+ proc := entrypoints[0].Cmd.Expand(outputs[0].GetArtifactDir())
+
+ cfg, err := configProm.Get(ctx)
+ if err != nil {
+ return err
+ }
+
+ authKey := genAuthKey()
+ configGen := &RuntimeConfigGenerator{
+ app: p.App,
+ infraManager: rm,
+ md: parse.Meta,
+ AppID: option.Some(GenID()),
+ EnvID: option.Some(GenID()),
+ TraceEndpoint: option.Some(fmt.Sprintf("http://localhost:%d/trace", mgr.RuntimePort)),
+ AuthKey: authKey,
+ Gateways: gateways,
+ DefinedSecrets: secrets,
+ SvcConfigs: cfg.Configs,
+ IncludeMetaEnv: bld.NeedsMeta(),
+ }
+ procConf, err := configGen.AllInOneProc()
+ if err != nil {
+ return err
+ }
+ procEnv, err := configGen.ProcEnvs(procConf, bld.UseNewRuntimeConfig())
+ if err != nil {
+ return errors.Wrap(err, "compute proc envs")
+ }
+
+ env := append(os.Environ(), proc.Env...)
+ env = append(env, p.Environ...)
+ env = append(env, procConf.ExtraEnv...)
+
+ env = append(env, procEnv...)
+ env = append(env, encodeServiceConfigs(cfg.Configs)...)
+ if runtimeLibPath := encoreEnv.EncoreRuntimeLib(); runtimeLibPath != "" {
+ env = append(env, "ENCORE_RUNTIME_LIB="+runtimeLibPath)
+ }
+
+ tracker.AllDone()
+
+ cwd := filepath.Join(p.App.Root(), p.WorkingDir)
+ binary, err := lookpath.InDir(cwd, env, proc.Command[0])
+ if err != nil {
+ return err
+ }
+
+ args := append(slices.Clone(proc.Command[1:]), p.ScriptArgs...)
+ // nosemgrep: go.lang.security.audit.dangerous-exec-command.dangerous-exec-command
+ cmd := exec.CommandContext(ctx, binary, args...)
+ cmd.Dir = filepath.Join(p.App.Root(), p.WorkingDir)
+ cmd.Stdout = p.Stdout
+ cmd.Stderr = p.Stderr
+ cmd.Env = env
+ return cmd.Run()
+}
diff --git a/cli/daemon/run/http.go b/cli/daemon/run/http.go
index 5beb87724c..e7d3730cba 100644
--- a/cli/daemon/run/http.go
+++ b/cli/daemon/run/http.go
@@ -1,77 +1,39 @@
package run
import (
- "context"
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/binary"
"fmt"
- "net"
"net/http"
- "net/http/httputil"
- "strconv"
- "strings"
+ "time"
+
+ "encore.dev/appruntime/exported/config"
)
// ServeHTTP implements http.Handler by forwarding the request to the currently running process.
func (r *Run) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- endpoint := strings.TrimLeft(req.URL.Path, "/")
- if endpoint == "" {
- // If this appears to be a browser, serve a redirect to the dashboard.
- // Otherwise, give a helpful error message for terminals and such.
- dashURL := fmt.Sprintf("http://localhost:%d/%s", r.mgr.DashPort, r.AppID)
- if ua := req.Header.Get("User-Agent"); strings.Contains(ua, "Gecko") {
- http.Redirect(w, req, dashURL, http.StatusFound)
- return
- }
-
- http.Error(w, "No endpoint given. Make API calls to /service.Endpoint instead."+
- "Visit the browser dashboard at: "+dashURL, http.StatusBadRequest)
- return
- }
-
- proc := r.proc.Load().(*Proc)
- proc.forwardReq(endpoint, w, req)
+ proc := r.proc.Load().(*ProcGroup)
+ proc.ProxyReq(w, req)
}
-// forwardReq forwards the request to the Encore app.
-func (p *Proc) forwardReq(endpoint string, w http.ResponseWriter, req *http.Request) {
- if req.Method == "OPTIONS" {
- w.Header().Set("Access-Control-Allow-Origin", "*")
- w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
- w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization")
- w.WriteHeader(200)
- return
- }
- // director is a simplified version from httputil.NewSingleHostReverseProxy.
- director := func(r *http.Request) {
- r.URL.Scheme = "http"
- r.URL.Host = "localhost:" + strconv.Itoa(p.Run.Port)
- r.URL.Path = "/" + endpoint
- r.URL.RawQuery = req.URL.RawQuery
- if _, ok := r.Header["User-Agent"]; !ok {
- // explicitly disable User-Agent so it's not set to default value
- r.Header.Set("User-Agent", "")
- }
- }
- // modifyResponse sets the appropriate CORS headers for local development.
- modifyResponse := func(r *http.Response) error {
- r.Header.Set("Access-Control-Allow-Origin", "*")
- r.Header.Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
- r.Header.Set("Access-Control-Allow-Headers", "Content-Type, Authorization")
- return nil
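+// addAuthKeyToRequest authenticates the request with the given auth key.
+// It sets the Date header, computes an HMAC-SHA256 over "<date>\x00<path>",
+// prefixes the MAC with the big-endian key ID, and stores the result
+// base64-encoded in the X-Encore-Auth header.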
+func addAuthKeyToRequest(req *http.Request, authKey config.EncoreAuthKey) {
+ if req.Header == nil {
+ req.Header = make(http.Header)
}
- // Create a transport that connects over yamux.
- // Normally transports should be long-lived, but since we disable keep-alives
- // and don't create real TCP connections we can get away with this.
- transport := &http.Transport{
- DisableKeepAlives: true,
- DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
- return p.client.Open()
- },
- }
+ date := time.Now().UTC().Format(http.TimeFormat)
+ req.Header.Set("Date", date)
- (&httputil.ReverseProxy{
- Director: director,
- ModifyResponse: modifyResponse,
- Transport: transport,
- }).ServeHTTP(w, req)
+ mac := hmac.New(sha256.New, authKey.Data)
+ _, _ = fmt.Fprintf(mac, "%s\x00%s", date, req.URL.Path)
+
+ bytes := make([]byte, 4, 4+sha256.Size)
+ binary.BigEndian.PutUint32(bytes[0:4], authKey.KeyID)
+ bytes = mac.Sum(bytes)
+ auth := base64.RawStdEncoding.EncodeToString(bytes)
+ req.Header.Set("X-Encore-Auth", auth)
}
+
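+// TestHeaderDisablePlatformAuth is a request header that, when set,
+// disables adding platform auth to requests proxied to the app.
+// It is intended for use in tests only.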
+const TestHeaderDisablePlatformAuth = "X-Encore-Test-Disable-Platform-Auth"
diff --git a/cli/daemon/run/infra/encorecloudtesting.go b/cli/daemon/run/infra/encorecloudtesting.go
new file mode 100644
index 0000000000..66a40dd5cd
--- /dev/null
+++ b/cli/daemon/run/infra/encorecloudtesting.go
@@ -0,0 +1,56 @@
+package infra
+
+import (
+ "encoding/base64"
+ "strconv"
+
+ "github.com/cockroachdb/errors"
+
+ "go.encore.dev/platform-sdk/pkg/auth"
+
+ "encore.dev/appruntime/exported/config"
+)
+
+// setTestEncoreCloud sets the Encore Cloud API configuration to use a local
+// Encore Cloud API server.
+//
+// It returns true if one has been configured, or false if not.
+//
+// To use it the `encore run` command must be started with the following environment variables:
+// - ENCORECLOUD_LOCAL_SERVER: the URL of the local Encore Cloud API server
+// - ENCORECLOUD_LOCAL_KEY_ID: the ID of the key to use for authentication
+// - ENCORECLOUD_LOCAL_KEY_DATA: the base64-encoded data of the key to use for authentication
+func (rm *ResourceManager) setTestEncoreCloud(cfg *config.Runtime) (useLocalCloudServer bool, err error) {
+ localServer := rm.environ.Get("ENCORECLOUD_LOCAL_SERVER")
+ if localServer == "" {
+ return false, nil
+ }
+
+ // Get the key and secret
+ keyIDStr := rm.environ.Get("ENCORECLOUD_LOCAL_KEY_ID")
+ keyData64 := rm.environ.Get("ENCORECLOUD_LOCAL_KEY_DATA")
+ if keyIDStr == "" || keyData64 == "" {
+ return false, errors.New("ENCORECLOUD_LOCAL_KEY_ID and ENCORECLOUD_LOCAL_KEY_DATA must be set if using ENCORECLOUD_LOCAL_SERVER")
+ }
+
+ keyID, err := strconv.Atoi(keyIDStr)
+ if err != nil || keyID <= 0 {
+ return false, errors.New("ENCORECLOUD_LOCAL_KEY_ID must be a positive integer")
+ }
+
+ keyData, err := base64.StdEncoding.DecodeString(keyData64)
+ if err != nil {
+ return false, errors.New("ENCORECLOUD_LOCAL_KEY_DATA must be a valid base64 string")
+ }
+
+ cfg.EncoreCloudAPI = &config.EncoreCloudAPI{
+ Server: localServer,
+ AuthKeys: []auth.Key{
+ {
+ KeyID: uint32(keyID),
+ Data: keyData,
+ },
+ },
+ }
+ return true, nil
+}
diff --git a/cli/daemon/run/infra/infra.go b/cli/daemon/run/infra/infra.go
new file mode 100644
index 0000000000..5f02ee11e3
--- /dev/null
+++ b/cli/daemon/run/infra/infra.go
@@ -0,0 +1,477 @@
+package infra
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/cockroachdb/errors"
+ "github.com/rs/zerolog"
+ "github.com/rs/zerolog/log"
+
+ "encore.dev/appruntime/exported/config"
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/cli/daemon/namespace"
+ "encr.dev/cli/daemon/objects"
+ "encr.dev/cli/daemon/pubsub"
+ "encr.dev/cli/daemon/redis"
+ "encr.dev/cli/daemon/sqldb"
+ "encr.dev/internal/optracker"
+ "encr.dev/pkg/environ"
+ meta "encr.dev/proto/encore/parser/meta/v1"
+)
+
+type Type string
+
+const (
+ PubSub Type = "pubsub"
+ Cache Type = "cache"
+ SQLDB Type = "sqldb"
+ Objects Type = "objects"
+)
+
+const (
+ // this ID is used in the Encore Cloud README file as an example
+ // on how to create a topic resource
+ encoreCloudExampleTopicID = "res_0o9ioqnrirflhhm3t720"
+
+ // this ID is used in the Encore Cloud README file as an example
+ // on how to create a subscription on the above topic
+ encoreCloudExampleSubscriptionID = "res_0o9ioqnrirflhhm3t730"
+)
+
+// ResourceManager manages a set of infrastructure resources
+// to support the running Encore application.
+type ResourceManager struct {
+ app *apps.Instance
+ dbProxyPort int
+ sqlMgr *sqldb.ClusterManager
+ objectsMgr *objects.ClusterManager
+ publicBuckets *objects.PublicBucketServer
+ ns *namespace.Namespace
+ environ environ.Environ
+ log zerolog.Logger
+ forTests bool
+
+ mutex sync.Mutex
+ servers map[Type]Resource
+}
+
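+// NewResourceManager constructs a ResourceManager for the given app,
+// namespace, and infrastructure cluster managers.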
+func NewResourceManager(app *apps.Instance, sqlMgr *sqldb.ClusterManager, objectsMgr *objects.ClusterManager, publicBuckets *objects.PublicBucketServer, ns *namespace.Namespace, environ environ.Environ, dbProxyPort int, forTests bool) *ResourceManager {
+ return &ResourceManager{
+ app: app,
+ dbProxyPort: dbProxyPort,
+ sqlMgr: sqlMgr,
+ objectsMgr: objectsMgr,
+ publicBuckets: publicBuckets,
+ ns: ns,
+ environ: environ,
+ forTests: forTests,
+
+ servers: make(map[Type]Resource),
+ log: log.With().Str("app_id", app.PlatformOrLocalID()).Logger(),
+ }
+}
+
+func (rm *ResourceManager) StopAll() {
+ rm.mutex.Lock()
+ defer rm.mutex.Unlock()
+
+ rm.log.Info().Int("num", len(rm.servers)).Msg("Stopping all resource services")
+
+ for _, daemon := range rm.servers {
+ daemon.Stop()
+ }
+}
+
+type Resource interface {
+ // Stop shuts down the resource.
+ Stop()
+}
+
+// StartRequiredServices starts the services required by the current application,
+// if they are not already running, based on the given metadata.
+func (rm *ResourceManager) StartRequiredServices(a *optracker.AsyncBuildJobs, md *meta.Data) {
+ if sqldb.IsUsed(md) && rm.GetSQLCluster() == nil {
+ a.Go("Creating PostgreSQL database cluster", true, 300*time.Millisecond, rm.StartSQLCluster(a, md))
+ }
+
+ if pubsub.IsUsed(md) && rm.GetPubSub() == nil {
+ a.Go("Starting PubSub daemon", true, 250*time.Millisecond, rm.StartPubSub)
+ }
+
+ if redis.IsUsed(md) && rm.GetRedis() == nil {
+ a.Go("Starting Redis server", true, 250*time.Millisecond, rm.StartRedis)
+ }
+
+ if objects.IsUsed(md) && rm.GetObjects() == nil {
+ a.Go("Starting Object Storage server", true, 250*time.Millisecond, rm.StartObjects(md))
+ }
+}
+
+// StartPubSub starts a PubSub daemon.
+func (rm *ResourceManager) StartPubSub(ctx context.Context) error {
+ nsqd := &pubsub.NSQDaemon{}
+ err := nsqd.Start()
+ if err != nil {
+ return err
+ }
+
+ rm.mutex.Lock()
+ rm.servers[PubSub] = nsqd
+ rm.mutex.Unlock()
+ return nil
+}
+
+// GetPubSub returns the PubSub daemon if it is running; otherwise it returns nil.
+func (rm *ResourceManager) GetPubSub() *pubsub.NSQDaemon {
+ rm.mutex.Lock()
+ defer rm.mutex.Unlock()
+
+ if daemon, found := rm.servers[PubSub]; found {
+ return daemon.(*pubsub.NSQDaemon)
+ }
+ return nil
+}
+
+// StartRedis starts a Redis server.
+func (rm *ResourceManager) StartRedis(ctx context.Context) error {
+ srv := redis.New()
+ err := srv.Start()
+ if err != nil {
+ return err
+ }
+
+ rm.mutex.Lock()
+ rm.servers[Cache] = srv
+ rm.mutex.Unlock()
+ return nil
+}
+
+// GetRedis returns the Redis server if it is running; otherwise it returns nil.
+func (rm *ResourceManager) GetRedis() *redis.Server {
+ rm.mutex.Lock()
+ defer rm.mutex.Unlock()
+
+ if srv, found := rm.servers[Cache]; found {
+ return srv.(*redis.Server)
+ }
+ return nil
+}
+
+// StartObjects starts an Object Storage server.
+func (rm *ResourceManager) StartObjects(md *meta.Data) func(context.Context) error {
+ return func(ctx context.Context) error {
+ var srv *objects.Server
+ if rm.forTests {
+ srv = objects.NewInMemoryServer(rm.publicBuckets)
+ } else {
+ if rm.objectsMgr == nil {
+ return fmt.Errorf("StartObjects: no Object Storage cluster manager provided")
+ } else if rm.publicBuckets == nil {
+ return fmt.Errorf("StartObjects: no Object Storage public bucket server provided")
+ }
+ baseDir, err := rm.objectsMgr.BaseDir(rm.ns.ID)
+ if err != nil {
+ return err
+ }
+ srv = objects.NewDirServer(rm.publicBuckets, rm.ns.ID, baseDir)
+ }
+
+ if err := srv.Initialize(md); err != nil {
+ return err
+ } else if err := srv.Start(); err != nil {
+ return err
+ }
+
+ rm.mutex.Lock()
+ rm.servers[Objects] = srv
+ rm.mutex.Unlock()
+ return nil
+ }
+}
+
+// GetObjects returns the Object Storage server if it is running; otherwise it returns nil.
+func (rm *ResourceManager) GetObjects() *objects.Server {
+ rm.mutex.Lock()
+ defer rm.mutex.Unlock()
+
+ if srv, found := rm.servers[Objects]; found {
+ return srv.(*objects.Server)
+ }
+ return nil
+}
+
+func (rm *ResourceManager) StartSQLCluster(a *optracker.AsyncBuildJobs, md *meta.Data) func(ctx context.Context) error {
+ return func(ctx context.Context) error {
+ // This can be the case in tests.
+ if rm.sqlMgr == nil {
+ return fmt.Errorf("StartSQLCluster: no SQL Cluster manager provided")
+ }
+
+ typ := sqldb.Run
+ if rm.forTests {
+ typ = sqldb.Test
+ }
+
+ if err := rm.sqlMgr.Ready(); err != nil {
+ return err
+ }
+
+ cluster := rm.sqlMgr.Create(ctx, &sqldb.CreateParams{
+ ClusterID: sqldb.GetClusterID(rm.app, typ, rm.ns),
+ Memfs: typ.Memfs(),
+ })
+
+ if _, err := cluster.Start(ctx, a.Tracker()); err != nil {
+ return errors.Wrap(err, "failed to start cluster")
+ }
+
+ rm.mutex.Lock()
+ rm.servers[SQLDB] = cluster
+ rm.mutex.Unlock()
+
+ // Set up the database asynchronously since it can take a while.
+ if rm.forTests {
+ a.Go("Recreating databases", true, 250*time.Millisecond, func(ctx context.Context) error {
+ err := cluster.Recreate(ctx, rm.app.Root(), nil, md)
+ if err != nil {
+ rm.log.Error().Err(err).Msg("failed to recreate db")
+ return err
+ }
+ return nil
+ })
+ } else {
+ a.Go("Running database migrations", true, 250*time.Millisecond, func(ctx context.Context) error {
+ err := cluster.SetupAndMigrate(ctx, rm.app.Root(), md.SqlDatabases)
+ if err != nil {
+ rm.log.Error().Err(err).Msg("failed to setup db")
+ return err
+ }
+ return nil
+ })
+ }
+
+ return nil
+ }
+}
+
+// GetSQLCluster returns the SQL cluster if it is running; otherwise it returns nil.
+func (rm *ResourceManager) GetSQLCluster() *sqldb.Cluster {
+ rm.mutex.Lock()
+ defer rm.mutex.Unlock()
+
+ if cluster, found := rm.servers[SQLDB]; found {
+ return cluster.(*sqldb.Cluster)
+ }
+ return nil
+}
+
+// UpdateConfig updates the given config with infrastructure information.
+// Note that all the requisite services must have started up already,
+// which in practice means that (*optracker.AsyncBuildJobs).Wait must have returned first.
+func (rm *ResourceManager) UpdateConfig(cfg *config.Runtime, md *meta.Data, dbProxyPort int) error {
+ useLocalEncoreCloudAPIForTesting, err := rm.setTestEncoreCloud(cfg)
+ if err != nil {
+ return err
+ }
+
+ if cluster := rm.GetSQLCluster(); cluster != nil {
+ srv := &config.SQLServer{
+ Host: "localhost:" + strconv.Itoa(dbProxyPort),
+ }
+ serverID := len(cfg.SQLServers)
+ cfg.SQLServers = append(cfg.SQLServers, srv)
+
+ for _, db := range md.SqlDatabases {
+ cfg.SQLDatabases = append(cfg.SQLDatabases, &config.SQLDatabase{
+ ServerID: serverID,
+ EncoreName: db.Name,
+ DatabaseName: db.Name,
+ User: "encore",
+ Password: cluster.Password,
+ })
+ }
+
+ // Configure max connections based on 96 connections
+ // divided evenly among the databases.
+ if n := len(cfg.SQLDatabases); n > 0 {
+ maxConns := 96 / n
+ for _, db := range cfg.SQLDatabases {
+ db.MaxConnections = maxConns
+ }
+ }
+ }
+
+ if nsq := rm.GetPubSub(); nsq != nil {
+ provider := &config.PubsubProvider{
+ NSQ: &config.NSQProvider{
+ Host: nsq.Addr(),
+ },
+ }
+ providerID := len(cfg.PubsubProviders)
+ cfg.PubsubProviders = append(cfg.PubsubProviders, provider)
+
+ // If we're testing the Encore Cloud API locally, add an Encore Cloud provider and use it instead of NSQ.
+ if useLocalEncoreCloudAPIForTesting {
+ providerID = len(cfg.PubsubProviders)
+ cfg.PubsubProviders = append(cfg.PubsubProviders, &config.PubsubProvider{
+ EncoreCloud: &config.EncoreCloudPubsubProvider{},
+ })
+ }
+
+ cfg.PubsubTopics = make(map[string]*config.PubsubTopic)
+ for _, t := range md.PubsubTopics {
+ providerName := t.Name
+ if useLocalEncoreCloudAPIForTesting {
+ providerName = encoreCloudExampleTopicID
+ }
+
+ topicCfg := &config.PubsubTopic{
+ ProviderID: providerID,
+ EncoreName: t.Name,
+ ProviderName: providerName,
+ Subscriptions: make(map[string]*config.PubsubSubscription),
+ }
+
+ for _, s := range t.Subscriptions {
+ subscriptionID := t.Name
+ if useLocalEncoreCloudAPIForTesting {
+ subscriptionID = encoreCloudExampleSubscriptionID
+ }
+
+ topicCfg.Subscriptions[s.Name] = &config.PubsubSubscription{
+ ID: subscriptionID,
+ EncoreName: s.Name,
+ ProviderName: s.Name,
+ }
+ }
+
+ cfg.PubsubTopics[t.Name] = topicCfg
+ }
+ }
+
+ if redis := rm.GetRedis(); redis != nil {
+ srv := &config.RedisServer{
+ Host: redis.Addr(),
+ }
+ serverID := len(cfg.RedisServers)
+ cfg.RedisServers = append(cfg.RedisServers, srv)
+
+ for _, cluster := range md.CacheClusters {
+ cfg.RedisDatabases = append(cfg.RedisDatabases, &config.RedisDatabase{
+ ServerID: serverID,
+ Database: 0,
+ EncoreName: cluster.Name,
+ KeyPrefix: cluster.Name + "/",
+ })
+ }
+ }
+
+ return nil
+}
+
+// SQLServerConfig returns the SQL server configuration.
+func (rm *ResourceManager) SQLServerConfig() (config.SQLServer, error) {
+ cluster := rm.GetSQLCluster()
+ if cluster == nil {
+ return config.SQLServer{}, errors.New("no SQL cluster found")
+ }
+
+ srvCfg := config.SQLServer{
+ Host: "localhost:" + strconv.Itoa(rm.dbProxyPort),
+ }
+
+ return srvCfg, nil
+}
+
+// SQLDatabaseConfig returns the SQL server and database configuration for the given database.
+func (rm *ResourceManager) SQLDatabaseConfig(db *meta.SQLDatabase) (config.SQLDatabase, error) {
+ cluster := rm.GetSQLCluster()
+ if cluster == nil {
+ return config.SQLDatabase{}, errors.New("no SQL cluster found")
+ }
+
+ dbCfg := config.SQLDatabase{
+ EncoreName: db.Name,
+ DatabaseName: db.Name,
+ User: "encore",
+ Password: cluster.Password,
+ }
+
+ return dbCfg, nil
+}
+
+// PubSubProviderConfig returns the PubSub provider configuration.
+func (rm *ResourceManager) PubSubProviderConfig() (config.PubsubProvider, error) {
+ nsq := rm.GetPubSub()
+ if nsq == nil {
+ return config.PubsubProvider{}, errors.New("no PubSub server found")
+ }
+
+ return config.PubsubProvider{
+ NSQ: &config.NSQProvider{
+ Host: nsq.Addr(),
+ },
+ }, nil
+}
+
+// PubSubTopicConfig returns the PubSub provider and topic configuration for the given topic.
+func (rm *ResourceManager) PubSubTopicConfig(topic *meta.PubSubTopic) (config.PubsubProvider, config.PubsubTopic, error) {
+ providerCfg, err := rm.PubSubProviderConfig()
+ if err != nil {
+ return config.PubsubProvider{}, config.PubsubTopic{}, err
+ }
+
+ topicCfg := config.PubsubTopic{
+ EncoreName: topic.Name,
+ ProviderName: topic.Name,
+ Subscriptions: make(map[string]*config.PubsubSubscription),
+ }
+
+ return providerCfg, topicCfg, nil
+}
+
+// PubSubSubscriptionConfig returns the PubSub subscription configuration for the given subscription.
+func (rm *ResourceManager) PubSubSubscriptionConfig(_ *meta.PubSubTopic, sub *meta.PubSubTopic_Subscription) (config.PubsubSubscription, error) {
+ subCfg := config.PubsubSubscription{
+ ID: sub.Name,
+ EncoreName: sub.Name,
+ ProviderName: sub.Name,
+ }
+
+ return subCfg, nil
+}
+
+// RedisConfig returns the Redis server and database configuration for the given database.
+func (rm *ResourceManager) RedisConfig(redis *meta.CacheCluster) (config.RedisServer, config.RedisDatabase, error) {
+ server := rm.GetRedis()
+ if server == nil {
+ return config.RedisServer{}, config.RedisDatabase{}, errors.New("no Redis server found")
+ }
+
+ srvCfg := config.RedisServer{
+ Host: server.Addr(),
+ }
+
+ dbCfg := config.RedisDatabase{
+ EncoreName: redis.Name,
+ KeyPrefix: redis.Name + "/",
+ }
+
+ return srvCfg, dbCfg, nil
+}
+
+// BucketProviderConfig returns the bucket provider configuration.
+func (rm *ResourceManager) BucketProviderConfig() (config.BucketProvider, string, error) {
+ obj := rm.GetObjects()
+ if obj == nil {
+ return config.BucketProvider{}, "", errors.New("no object storage found")
+ }
+
+ return config.BucketProvider{
+ GCS: &config.GCSBucketProvider{
+ Endpoint: obj.Endpoint(),
+ },
+ }, obj.PublicBaseURL(), nil
+}
diff --git a/cli/daemon/run/manager.go b/cli/daemon/run/manager.go
index 6ff0e07e40..54fad63170 100644
--- a/cli/daemon/run/manager.go
+++ b/cli/daemon/run/manager.go
@@ -2,28 +2,37 @@ package run
import (
"fmt"
- "net"
"sort"
- "strconv"
"sync"
+ "time"
+ "github.com/cockroachdb/errors"
+ "github.com/rs/xid"
+
+ encore "encore.dev"
+ "encore.dev/appruntime/exported/config"
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/cli/daemon/objects"
+ "encr.dev/cli/daemon/run/infra"
"encr.dev/cli/daemon/secret"
+ "encr.dev/cli/daemon/sqldb"
+ "encr.dev/pkg/errlist"
+ meta "encr.dev/proto/encore/parser/meta/v1"
)
-// BasePort is the default port Encore apps start listening on.
-const BasePort = 4060
-
// Manager manages the set of running applications.
type Manager struct {
- RuntimePort int // port for Encore runtime
- DBProxyPort int // port for sqldb proxy
- DashPort int // port for dev dashboard
- Secret *secret.Manager
+ RuntimePort int // port for Encore runtime
+ DBProxyPort int // port for sqldb proxy
+ DashBaseURL string // base url for the dev dashboard
+ Secret *secret.Manager
+ ClusterMgr *sqldb.ClusterManager
+ ObjectsMgr *objects.ClusterManager
+ PublicBuckets *objects.PublicBucketServer
listeners []EventListener
-
- mu sync.Mutex
- runs map[string]*Run // id -> run
+ mu sync.Mutex
+ runs map[string]*Run // id -> run
}
// EventListener is the interface for listening to events
@@ -31,6 +40,8 @@ type Manager struct {
type EventListener interface {
// OnStart is called when a run starts.
OnStart(r *Run)
+ // OnCompileStart is called when a run starts compiling.
+ OnCompileStart(r *Run)
// OnReload is called when a run reloads.
OnReload(r *Run)
// OnStop is called when a run stops.
@@ -39,15 +50,17 @@ type EventListener interface {
OnStdout(r *Run, out []byte)
// OnStderr is called when a run outputs something on stderr.
OnStderr(r *Run, out []byte)
+ // OnError is called when a run encounters an error.
+ OnError(r *Run, err *errlist.List)
}
// FindProc finds the proc with the given id.
// It reports nil if no such proc was found.
-func (mgr *Manager) FindProc(procID string) *Proc {
+func (mgr *Manager) FindProc(procID string) *ProcGroup {
mgr.mu.Lock()
defer mgr.mu.Unlock()
for _, run := range mgr.runs {
- if p := run.Proc(); p != nil && p.ID == procID {
+ if p := run.ProcGroup(); p != nil && p.ID == procID {
return p
}
}
@@ -60,8 +73,13 @@ func (mgr *Manager) FindRunByAppID(appID string) *Run {
mgr.mu.Lock()
defer mgr.mu.Unlock()
for _, run := range mgr.runs {
- if run.AppID == appID {
- return run
+ if appID == run.App.PlatformID() || appID == run.App.LocalID() {
+ select {
+ case <-run.Done():
+ // exited
+ default:
+ return run
+ }
}
}
return nil
@@ -76,34 +94,17 @@ func (mgr *Manager) ListRuns() []*Run {
}
mgr.mu.Unlock()
- sort.Slice(runs, func(i, j int) bool { return runs[i].AppID < runs[j].AppID })
+ sort.Slice(runs, func(i, j int) bool { return runs[i].App.PlatformOrLocalID() < runs[j].App.PlatformOrLocalID() })
return runs
}
-// newListener attempts to find an unused port at BasePort or above
-// and opens a TCP listener on localhost for that port.
-func (mgr *Manager) newListener() (ln net.Listener, port int, err error) {
- // Try up to 10 ports, plus however many processes we have
- mgr.mu.Lock()
- n := len(mgr.runs)
- mgr.mu.Unlock()
- for i := 0; i < (10 + n); i++ {
- port := BasePort + i
- ln, err := net.Listen("tcp", "localhost:"+strconv.Itoa(port))
- if err == nil {
- return ln, port, nil
- }
- }
- return nil, 0, fmt.Errorf("could not find available port in %d-%d", BasePort, BasePort+(n+9))
-}
-
// AddListener adds an event listener to mgr.
// It must be called before starting the first run.
func (mgr *Manager) AddListener(ln EventListener) {
mgr.listeners = append(mgr.listeners, ln)
}
-func (mgr *Manager) runStdout(r *Run, out []byte) {
+func (mgr *Manager) RunStdout(r *Run, out []byte) {
// Make sure the run has started before we start outputting
<-r.started
for _, ln := range mgr.listeners {
@@ -111,10 +112,128 @@ func (mgr *Manager) runStdout(r *Run, out []byte) {
}
}
-func (mgr *Manager) runStderr(r *Run, out []byte) {
+func (mgr *Manager) RunStderr(r *Run, out []byte) {
// Make sure the run has started before we start outputting
<-r.started
for _, ln := range mgr.listeners {
ln.OnStderr(r, out)
}
}
+
+func (mgr *Manager) RunError(r *Run, err *errlist.List) {
+ for _, ln := range mgr.listeners {
+ ln.OnError(r, err)
+ }
+}
+
+type parseAppParams struct {
+ App *apps.Instance
+ Environ []string
+ WorkingDir string
+ ParseTests bool
+ ScriptMainPkg string
+}
+
+type generateConfigParams struct {
+ App *apps.Instance
+ RM *infra.ResourceManager
+ Meta *meta.Data
+
+ ForTests bool
+ AuthKey config.EncoreAuthKey
+ APIBaseURL string
+
+ ConfigAppID string
+ ConfigEnvID string
+
+ ExternalCalls bool
+}
+
+// generateServiceDiscoveryMap generates a map of service names to
+// the address the Encore daemon listens on to forward requests to that service binary.
+func (mgr *Manager) generateServiceDiscoveryMap(p generateConfigParams) (map[string]config.Service, error) {
+ services := make(map[string]config.Service)
+
+ // Add all the services from the app
+ for _, svc := range p.Meta.Svcs {
+ services[svc.Name] = config.Service{
+ Name: svc.Name,
+ // For now all services are hosted by the same running instance
+ URL: p.APIBaseURL,
+ Protocol: config.Http,
+ ServiceAuth: mgr.getInternalServiceToServiceAuthMethod(),
+ }
+ }
+
+ return services, nil
+}
+
+// getInternalServiceToServiceAuthMethod returns the auth method to use
+// when making service to service calls locally.
+//
+// This currently always returns the "encore-auth" method, but in the future
+// this function will allow us to use environment variables to configure
+// the auth method and test different auth methods locally.
+func (mgr *Manager) getInternalServiceToServiceAuthMethod() config.ServiceAuth {
+ return config.ServiceAuth{Method: "encore-auth"}
+}
+
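+// generateConfig generates the runtime configuration for a local run or test,
+// including CORS settings, service discovery, and any started infrastructure.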
+func (mgr *Manager) generateConfig(p generateConfigParams) (*config.Runtime, error) {
+ envType := encore.EnvDevelopment
+ if p.ForTests {
+ envType = encore.EnvTest
+ }
+
+ globalCORS, err := p.App.GlobalCORS()
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to get global CORS")
+ }
+
+ deployID := xid.New().String()
+ if p.ForTests {
+ deployID = "clitest_" + deployID
+ } else {
+ deployID = "run_" + deployID
+ }
+
+ serviceDiscovery, err := mgr.generateServiceDiscoveryMap(p)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to generate service discovery map")
+ }
+
+ cfg := &config.Runtime{
+ AppID: p.ConfigAppID,
+ AppSlug: p.App.PlatformID(),
+ APIBaseURL: p.APIBaseURL,
+ DeployID: deployID,
+ DeployedAt: time.Now().UTC(), // Force UTC to not cause confusion
+ EnvID: p.ConfigEnvID,
+ EnvName: "local",
+ EnvCloud: string(encore.CloudLocal),
+ EnvType: string(envType),
+ TraceEndpoint: fmt.Sprintf("http://localhost:%d/trace", mgr.RuntimePort),
+ AuthKeys: []config.EncoreAuthKey{p.AuthKey},
+ CORS: &config.CORS{
+ Debug: globalCORS.Debug,
+ AllowOriginsWithCredentials: []string{
+ // Allow all origins with credentials for local development;
+ // since the app only runs on localhost during development, this is safe.
+ config.UnsafeAllOriginWithCredentials,
+ },
+ AllowOriginsWithoutCredentials: []string{"*"},
+ ExtraAllowedHeaders: globalCORS.AllowHeaders,
+ ExtraExposedHeaders: globalCORS.ExposeHeaders,
+ AllowPrivateNetworkAccess: true,
+ },
+ ServiceDiscovery: serviceDiscovery,
+ ServiceAuth: []config.ServiceAuth{
+ mgr.getInternalServiceToServiceAuthMethod(),
+ },
+ DynamicExperiments: nil, // All experiments would be included in the static config here
+ }
+
+ if err := p.RM.UpdateConfig(cfg, p.Meta, mgr.DBProxyPort); err != nil {
+ return nil, err
+ }
+ return cfg, nil
+}
diff --git a/cli/daemon/run/manager_test.go b/cli/daemon/run/manager_test.go
deleted file mode 100644
index c34d0fc861..0000000000
--- a/cli/daemon/run/manager_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package run
-
-import (
- "testing"
-
- qt "github.com/frankban/quicktest"
- "go.uber.org/goleak"
-)
-
-// TestNewListener tests that newListener tries multiple ports.
-func TestNewListener(t *testing.T) {
- defer goleak.VerifyNone(t, goleak.IgnoreCurrent())
- c := qt.New(t)
- mgr := &Manager{}
-
- ln1, port1, err1 := mgr.newListener()
- ln2, port2, err2 := mgr.newListener()
- defer closeAll(ln1, ln2)
- c.Assert(err1, qt.IsNil)
- c.Assert(err2, qt.IsNil)
- c.Assert(port1 >= BasePort && port1 <= (BasePort+10), qt.IsTrue)
- c.Assert(port2, qt.Equals, port1+1)
-
- // newListener should pick up the original port again once ln1 is closed.
- ln1.Close()
- ln3, port3, err3 := mgr.newListener()
- defer closeAll(ln3)
- c.Assert(err3, qt.IsNil)
- c.Assert(port3, qt.Equals, port1)
-}
diff --git a/cli/daemon/run/proc_groups.go b/cli/daemon/run/proc_groups.go
new file mode 100644
index 0000000000..2ed5627d7b
--- /dev/null
+++ b/cli/daemon/run/proc_groups.go
@@ -0,0 +1,532 @@
+package run
+
+import (
+ "context"
+ "io"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "net/netip"
+ "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+ "github.com/cockroachdb/errors"
+ "github.com/rs/zerolog"
+
+ "encore.dev/appruntime/apisdk/api/transport"
+ "encore.dev/appruntime/exported/config"
+ "encore.dev/appruntime/exported/experiments"
+ "encr.dev/cli/daemon/internal/sym"
+ "encr.dev/internal/lookpath"
+ "encr.dev/pkg/builder"
+ "encr.dev/pkg/fns"
+ "encr.dev/pkg/noopgateway"
+ "encr.dev/pkg/noopgwdesc"
+ meta "encr.dev/proto/encore/parser/meta/v1"
+)
+
+type procGroupOptions struct {
+ Ctx context.Context
+ ProcID string // unique process id
+ Run *Run // the run the process belongs to
+ Meta *meta.Data // app metadata snapshot
+ Experiments *experiments.Set // enabled experiments
+ AuthKey config.EncoreAuthKey
+ Logger RunLogger
+ WorkingDir string
+ ConfigGen *RuntimeConfigGenerator
+}
+
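+// newProcGroup constructs a ProcGroup from the given options.
+// The group's processes are not started; call Start to launch them.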
+func newProcGroup(opts procGroupOptions) *ProcGroup {
+ p := &ProcGroup{
+ ID: opts.ProcID,
+ Run: opts.Run,
+ Meta: opts.Meta,
+ Experiments: opts.Experiments,
+ workingDir: opts.WorkingDir,
+ ctx: opts.Ctx,
+ logger: opts.Logger,
+ log: opts.Run.log.With().Str("proc_id", opts.ProcID).Logger(),
+ ConfigGen: opts.ConfigGen,
+
+ symParsed: make(chan struct{}),
+ Services: make(map[string]*Proc),
+ Gateways: make(map[string]*Proc),
+ authKey: opts.AuthKey,
+ }
+
+ p.procCond.L = &p.procMu
+ return p
+}
+
+// ProcGroup represents a running Encore application.
+//
+// It is a collection of [Proc]'s that are all part of the same application,
+// where each [Proc] represents one or more services, or an API gateway.
+type ProcGroup struct {
+ ID string // unique process id
+ Run *Run // the run the process belongs to
+ Meta *meta.Data // app metadata snapshot
+ Experiments *experiments.Set // enabled experiments
+
+ Gateways map[string]*Proc // the gateway processes, by name (if any)
+ Services map[string]*Proc // all the service processes by name
+
+ ConfigGen *RuntimeConfigGenerator // generates runtime configuration
+
+ procMu sync.Mutex // protects both allProcesses and runningProcs
+ procCond sync.Cond // used to signal a change in runningProcs
+ allProcesses []*Proc // all processes in the group
+ runningProcs uint32 // number of running processes
+
+ ctx context.Context
+ logger RunLogger
+ log zerolog.Logger
+ workingDir string
+
+ // Used for proxying requests when there is no gateway.
+ noopGW *noopgateway.Gateway
+
+ authKey config.EncoreAuthKey
+ sym *sym.Table
+ symErr error
+ symParsed chan struct{} // closed when sym and symErr are set
+}
+
+func (pg *ProcGroup) ProxyReq(w http.ResponseWriter, req *http.Request) {
+ // Currently we only support proxying to the default gateway.
+ // Need to rethink how this should work when we support multiple gateways.
+ if gw, ok := pg.Gateways["api-gateway"]; ok {
+ gw.ProxyReq(w, req)
+ } else {
+ pg.noopGW.ServeHTTP(w, req)
+ }
+}
+
+// Done returns a channel that is closed when all processes in the group have exited.
+func (pg *ProcGroup) Done() <-chan struct{} {
+ c := make(chan struct{})
+ go func() {
+ pg.procMu.Lock()
+ defer pg.procMu.Unlock()
+
+ for pg.runningProcs > 0 {
+ // Wait for a process to exit; procCond is broadcast each time one does.
+ pg.procCond.Wait()
+ }
+
+ close(c)
+ }()
+
+ return c
+}
+
+// Start starts all the processes in the group.
+func (pg *ProcGroup) Start() (err error) {
+ pg.procMu.Lock()
+ defer pg.procMu.Unlock()
+
+ for _, p := range pg.allProcesses {
+ if err = p.start(); err != nil {
+ p.Kill()
+ return err
+ }
+ }
+
+ pg.noopGW = newNoopGateway(pg)
+ return nil
+}
+
+// Close closes all processes in the group and waits for them to shut down.
+// It can safely be called multiple times.
+func (pg *ProcGroup) Close() {
+ var wg sync.WaitGroup
+ pg.procMu.Lock()
+ wg.Add(len(pg.allProcesses))
+ for _, p := range pg.allProcesses {
+ go func(p *Proc) {
+ p.Close()
+ wg.Done()
+ }(p)
+ }
+
+ pg.procMu.Unlock()
+
+ wg.Wait()
+}
+
+// Kill kills all the processes in the group.
+// It does not wait for them to exit.
+func (pg *ProcGroup) Kill() {
+ pg.procMu.Lock()
+ defer pg.procMu.Unlock()
+
+ for _, p := range pg.allProcesses {
+ p.Kill()
+ }
+}
+
+// parseSymTable parses the symbol table of the binary at binPath
+// and stores the result in p.sym and p.symErr.
+func (pg *ProcGroup) parseSymTable(binPath string) {
+ parse := func() (*sym.Table, error) {
+ f, err := os.Open(binPath)
+ if err != nil {
+ return nil, err
+ }
+ defer fns.CloseIgnore(f)
+ return sym.Load(f)
+ }
+
+ defer close(pg.symParsed)
+ pg.sym, pg.symErr = parse()
+}
+
+// SymTable waits for the proc's symbol table to be parsed and then returns it.
+// ctx is used to cancel the wait.
+func (pg *ProcGroup) SymTable(ctx context.Context) (*sym.Table, error) {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case <-pg.symParsed:
+ return pg.sym, pg.symErr
+ }
+}
+
+// newProc creates a new process in the group, wires up its reverse proxy,
+// and registers it with the group.
+func (pg *ProcGroup) newProc(processName string, listenAddr netip.AddrPort) (*Proc, error) {
+ dst := &url.URL{
+ Scheme: "http",
+ Host: listenAddr.String(),
+ }
+ proxy := &httputil.ReverseProxy{
+ // Enable h2c for the proxy.
+ Transport: transport.NewH2CTransport(http.DefaultTransport),
+ Rewrite: func(r *httputil.ProxyRequest) {
+ r.SetURL(dst)
+ r.Out.Header["X-Forwarded-For"] = r.In.Header["X-Forwarded-For"]
+ r.SetXForwarded()
+ // Copy the Host header over.
+ r.Out.Host = r.In.Host
+
+ // Add the auth key unless the test header is set.
+ if r.Out.Header.Get(TestHeaderDisablePlatformAuth) == "" {
+ addAuthKeyToRequest(r.Out, pg.authKey)
+ }
+ },
+ }
+
+ p := &Proc{
+ group: pg,
+ log: pg.log.With().Str("proc", processName).Logger(),
+ listenAddr: listenAddr,
+ httpProxy: proxy,
+ exit: make(chan struct{}),
+ }
+
+ pg.procMu.Lock()
+ pg.allProcesses = append(pg.allProcesses, p)
+ pg.procMu.Unlock()
+
+ return p, nil
+}
+
+func (pg *ProcGroup) NewAllInOneProc(spec builder.Cmd, listenAddr netip.AddrPort, env []string) error {
+ p, err := pg.newProc("all-in-one", listenAddr)
+ if err != nil {
+ return err
+ }
+
+ // Append both the command-specific env and the base environment.
+ env = append(env, spec.Env...)
+
+ cwd := filepath.Join(pg.Run.App.Root(), pg.workingDir)
+ binary, err := lookpath.InDir(cwd, env, spec.Command[0])
+ if err != nil {
+ return err
+ }
+
+ // This is safe since the command comes from our build.
+ // nosemgrep go.lang.security.audit.dangerous-exec-command.dangerous-exec-command
+ cmd := exec.CommandContext(pg.ctx, binary, spec.Command[1:]...)
+ cmd.Env = env
+ cmd.Dir = cwd
+
+ // Proxy stdout and stderr to the given app logger, if any.
+ if l := pg.logger; l != nil {
+ cmd.Stdout = newLogWriter(pg.Run, l.RunStdout)
+ cmd.Stderr = newLogWriter(pg.Run, l.RunStderr)
+ }
+
+ p.cmd = cmd
+
+ // Assign all the gateways to this process.
+ for _, gw := range pg.Meta.Gateways {
+ pg.Gateways[gw.EncoreName] = p
+ }
+
+ return nil
+}
+
+func (pg *ProcGroup) NewProcForService(serviceName string, listenAddr netip.AddrPort, spec builder.Cmd, env []string) error {
+ if !listenAddr.IsValid() {
+ return errors.New("invalid listen address")
+ }
+
+ p, err := pg.newProc(serviceName, listenAddr)
+ if err != nil {
+ return err
+ }
+ pg.Services[serviceName] = p
+
+ // Append both the command-specific env and the base environment.
+ env = append(env, spec.Env...)
+
+ cwd := filepath.Join(pg.Run.App.Root(), pg.workingDir)
+ binary, err := lookpath.InDir(cwd, env, spec.Command[0])
+ if err != nil {
+ return err
+ }
+
+ // This is safe since the command comes from our build.
+ // nosemgrep go.lang.security.audit.dangerous-exec-command.dangerous-exec-command
+ cmd := exec.CommandContext(pg.ctx, binary, spec.Command[1:]...)
+ cmd.Env = env
+ cmd.Dir = cwd
+
+ // Proxy stdout and stderr to the given app logger, if any.
+ if l := pg.logger; l != nil {
+ cmd.Stdout = newLogWriter(pg.Run, l.RunStdout)
+ cmd.Stderr = newLogWriter(pg.Run, l.RunStderr)
+ }
+
+ p.cmd = cmd
+
+ return nil
+}
+
+func (pg *ProcGroup) NewProcForGateway(gatewayName string, listenAddr netip.AddrPort, spec builder.Cmd, env []string) error {
+ if !listenAddr.IsValid() {
+ return errors.New("invalid listen address")
+ }
+
+ p, err := pg.newProc("gateway-"+gatewayName, listenAddr)
+ if err != nil {
+ return err
+ }
+ pg.Gateways[gatewayName] = p
+
+ // Append both the command-specific env and the base environment.
+ env = append(env, spec.Env...)
+
+ cwd := filepath.Join(pg.Run.App.Root(), pg.workingDir)
+ binary, err := lookpath.InDir(cwd, env, spec.Command[0])
+ if err != nil {
+ return err
+ }
+
+ // This is safe since the command comes from our build.
+ // nosemgrep go.lang.security.audit.dangerous-exec-command.dangerous-exec-command
+ cmd := exec.CommandContext(pg.ctx, binary, spec.Command[1:]...)
+ cmd.Env = env
+ cmd.Dir = cwd
+
+ // Bound the wait time to ensure prompt live reload if something goes wrong
+ // with IO copying.
+ cmd.WaitDelay = 500 * time.Millisecond
+
+ // Proxy stdout and stderr to the given app logger, if any.
+ if l := pg.logger; l != nil {
+ cmd.Stdout = newLogWriter(pg.Run, l.RunStdout)
+ cmd.Stderr = newLogWriter(pg.Run, l.RunStderr)
+ }
+
+ p.cmd = cmd
+
+ return nil
+}
+
+type warning struct {
+ Title string
+ Help string
+}
+
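+// Warnings reports warnings about the process group's configuration,
+// such as secrets that are not defined.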
+func (pg *ProcGroup) Warnings() (rtn []warning) {
+ if missing := pg.ConfigGen.MissingSecrets(); len(missing) > 0 {
+ rtn = append(rtn, warning{
+ Title: "secrets not defined: " + strings.Join(missing, ", "),
+ Help: "undefined secrets are left empty for local development only.\nsee https://encore.dev/docs/primitives/secrets for more information",
+ })
+ }
+
+ return rtn
+}
+
+// Proc represents a single Encore process running within a [ProcGroup].
+type Proc struct {
+ group *ProcGroup // The group this process belongs to
+ log zerolog.Logger // The logger for this process
+ exit chan struct{} // closed when the process has exited
+ cmd *exec.Cmd // The command for this specific process
+
+ listenAddr netip.AddrPort // The port the HTTP server of the process should listen on
+ httpProxy *httputil.ReverseProxy // The reverse proxy for the HTTP server of the process
+
+ // The following fields are only valid after Start() has been called.
+ Started atomic.Bool // whether the process has started
+ StartedAt time.Time // when the process started
+ Pid int // the OS process id
+}
+
+// Start starts the process and returns immediately.
+//
+// If the process has already been started, this is a no-op.
+func (p *Proc) Start() error {
+ p.group.procMu.Lock()
+ defer p.group.procMu.Unlock()
+
+ return p.start()
+}
+
+// start starts the process and returns immediately
+//
+// It must be called while locked under the p.group.procMu lock.
+func (p *Proc) start() error {
+ if !p.Started.CompareAndSwap(false, true) {
+ return nil
+ }
+
+ if err := p.cmd.Start(); err != nil {
+ return errors.Wrap(err, "could not start process")
+ }
+ p.log.Info().Str("addr", p.listenAddr.String()).Msg("process started")
+ p.group.runningProcs++
+
+ p.Pid = p.cmd.Process.Pid
+ p.StartedAt = time.Now()
+
+ // Start watching the process for when it quits.
+ go func() {
+ defer close(p.exit)
+
+ // Wait for the process to exit.
+ err := p.cmd.Wait()
+ if err != nil && p.group.ctx.Err() == nil {
+ p.log.Error().Err(err).Msg("process exited with error")
+ } else {
+ p.log.Info().Msg("process exited successfully")
+ }
+
+ // Flush the logs in case the output did not end in a newline.
+ for _, w := range [...]io.Writer{p.cmd.Stdout, p.cmd.Stderr} {
+ if w != nil {
+ w.(*logWriter).Flush()
+ }
+ }
+ }()
+
+ // When the process exits, decrement the running count for the group
+ // and wake up any goroutines waiting on the running count to shrink.
+ go func() {
+ <-p.exit
+ p.group.procMu.Lock()
+ defer p.group.procMu.Unlock()
+ p.group.runningProcs--
+
+ p.group.procCond.Broadcast()
+ }()
+
+ return nil
+}
+
+// Close closes the process and waits for it to exit.
+// It is safe to call Close multiple times.
+func (p *Proc) Close() {
+ if err := p.cmd.Process.Signal(os.Interrupt); err != nil {
+ // If there's an error sending the signal, just kill the process.
+ // This might happen because Interrupt is not supported on Windows.
+ p.Kill()
+ }
+
+ timer := time.NewTimer(gracefulShutdownTime + (500 * time.Millisecond))
+ defer timer.Stop()
+
+ select {
+ case <-p.exit:
+ // already exited
+ case <-timer.C:
+ p.group.log.Error().Msg("timed out waiting for process to exit; killing")
+ p.Kill()
+ <-p.exit
+ }
+}
+
+// ProxyReq proxies the request to the Encore app.
+func (p *Proc) ProxyReq(w http.ResponseWriter, req *http.Request) {
+ p.httpProxy.ServeHTTP(w, req)
+}
+
+// Kill causes the Process to exit immediately. Kill does not wait until
+// the Process has actually exited. This only kills the Process itself,
+// not any other processes it may have started.
+func (p *Proc) Kill() {
+ if p.cmd != nil && p.cmd.Process != nil {
+ _ = p.cmd.Process.Kill()
+ }
+}
+
+// pollUntilProcessIsListening polls the listen address until
+// the process is actively listening, five seconds have passed,
+// or the context is canceled, whichever happens first.
+//
+// It reports true if the process is listening on return, false otherwise.
+func (p *Proc) pollUntilProcessIsListening(ctx context.Context) (ok bool) {
+ b := backoff.NewExponentialBackOff()
+ b.InitialInterval = 50 * time.Millisecond
+ b.MaxInterval = 250 * time.Millisecond
+ b.MaxElapsedTime = 5 * time.Second
+
+ err := backoff.Retry(func() error {
+ if err := ctx.Err(); err != nil {
+ return backoff.Permanent(err)
+ }
+
+ conn, err := (&net.Dialer{}).DialContext(ctx, "tcp", p.listenAddr.String())
+ if err == nil {
+ _ = conn.Close()
+ }
+ return err
+ }, b)
+ return err == nil
+}
+
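+// newNoopGateway constructs a pass-through gateway that routes requests
+// directly to the service processes, for use when the build produced no
+// dedicated API gateway process.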
+func newNoopGateway(pg *ProcGroup) *noopgateway.Gateway {
+ svcDiscovery := make(map[noopgateway.ServiceName]string)
+ for _, svc := range pg.Meta.Svcs {
+ if proc, ok := pg.Services[svc.Name]; ok {
+ svcDiscovery[noopgateway.ServiceName(svc.Name)] = proc.listenAddr.String()
+ }
+ }
+
+ desc := noopgwdesc.Describe(pg.Meta, svcDiscovery)
+ gw := noopgateway.New(desc)
+
+ gw.Rewrite = func(rp *httputil.ProxyRequest) {
+ // Copy the Host header over.
+ rp.Out.Host = rp.In.Host
+
+ // Add the auth key unless the test header is set.
+ if rp.Out.Header.Get(TestHeaderDisablePlatformAuth) == "" {
+ addAuthKeyToRequest(rp.Out, pg.authKey)
+ }
+ }
+
+ return gw
+}
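+
+// The Rewrite hook above has the same shape as net/http/httputil's
+// ReverseProxy.Rewrite (Go 1.20+), whose ProxyRequest type the noop gateway
+// reuses. A rough standalone equivalent, assuming a plain ReverseProxy and a
+// target URL rather than the internal noopgateway types:
+//
+//	proxy := &httputil.ReverseProxy{
+//		Rewrite: func(pr *httputil.ProxyRequest) {
+//			pr.SetURL(target)        // route to the backend
+//			pr.Out.Host = pr.In.Host // preserve the original Host header
+//		},
+//	}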
diff --git a/cli/daemon/run/run.go b/cli/daemon/run/run.go
index b28b64af1b..fabccdfdc9 100644
--- a/cli/daemon/run/run.go
+++ b/cli/daemon/run/run.go
@@ -6,105 +6,177 @@ import (
"context"
"crypto/rand"
"encoding/base64"
+ "encoding/binary"
"fmt"
- "io"
- "io/ioutil"
"net"
"net/http"
- "os"
- "os/exec"
- "path/filepath"
+ "net/netip"
+ "runtime"
+ "slices"
"sort"
- "strconv"
+ "strings"
"sync/atomic"
"time"
+ "github.com/cockroachdb/errors"
+ "github.com/logrusorgru/aurora/v3"
+ "github.com/rs/xid"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
- "golang.org/x/mod/modfile"
-
- "encr.dev/cli/daemon/internal/appfile"
- "encr.dev/cli/internal/env"
- "encr.dev/cli/internal/xos"
- "encr.dev/compiler"
- "encr.dev/parser"
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/h2c"
+ "golang.org/x/sync/errgroup"
+
+ "encore.dev/appruntime/exported/config"
+ "encore.dev/appruntime/exported/experiments"
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/cli/daemon/namespace"
+ "encr.dev/cli/daemon/run/infra"
+ "encr.dev/cli/daemon/secret"
+ "encr.dev/internal/optracker"
+ "encr.dev/internal/userconfig"
+ "encr.dev/internal/version"
+ "encr.dev/pkg/builder"
+ "encr.dev/pkg/builder/builderimpl"
+ "encr.dev/pkg/cueutil"
+ "encr.dev/pkg/option"
+ "encr.dev/pkg/promise"
+ "encr.dev/pkg/svcproxy"
+ "encr.dev/pkg/vcs"
+ daemonpb "encr.dev/proto/encore/daemon"
meta "encr.dev/proto/encore/parser/meta/v1"
- "github.com/hashicorp/yamux"
)
// Run represents a running Encore application.
type Run struct {
- ID string // unique ID for this instance of the running app
- AppID string // unique identifier for the app
- AppSlug string // the optional app slug, if linked to encore.dev
- Root string // the filesystem path to the app root
- Port int // the port the app is listening on
-
+ ID string // unique ID for this instance of the running app
+ App *apps.Instance
+ ListenAddr string // the address the app is listening on
+ SvcProxy *svcproxy.SvcProxy
+ ResourceManager *infra.ResourceManager
+ NS *namespace.Namespace
+
+ Builder builder.Impl
log zerolog.Logger
- mgr *Manager
- params *StartParams
+ Mgr *Manager
+ Params *StartParams
+ secrets *secret.LoadResult
+
ctx context.Context // ctx is closed when the run is to exit
proc atomic.Value // current process
exited chan struct{} // exit is closed when the run has fully exited
started chan struct{} // started is closed once the run has fully started
-
}
// StartParams groups the parameters for the Run method.
type StartParams struct {
- // AppRoot is the application root.
- AppRoot string
+ // App is the app to start.
+ App *apps.Instance
- // AppID is the unique app id, as defined by the manifest.
- AppID string
+ // NS is the namespace to use.
+ NS *namespace.Namespace
// WorkingDir is the working dir, for formatting
// error messages with relative paths.
WorkingDir string
- // DBClusterID is the database cluster id to connect to.
- DBClusterID string
-
- // Parse is the parse result for the initial run of the app.
- // If nil the app is parsed before starting.
- Parse *parser.Result
-
// Watch enables watching for code changes for live reloading.
Watch bool
+
+ Listener net.Listener // listener to use
+ ListenAddr string // address we're listening on
+
+ // Environ are the environment variables to set for the running app,
+ // in the same format as os.Environ().
+ Environ []string
+
+ // The Ops tracker being used for this run.
+ OpsTracker *optracker.OpTracker
+
+ // Browser specifies the browser mode to use.
+ Browser BrowserMode
+
+ // Debug specifies whether to compile the application for debugging.
+ Debug builder.DebugMode
+}
+
+// BrowserMode specifies how to open the browser when starting 'encore run'.
+type BrowserMode int
+
+const (
+ BrowserModeAuto BrowserMode = iota // open if not already open
+ BrowserModeNever // never open
+ BrowserModeAlways // always open
+)
+
+func BrowserModeFromConfig(cfg *userconfig.Config) BrowserMode {
+ switch cfg.RunBrowser {
+ case "never":
+ return BrowserModeNever
+ case "always":
+ return BrowserModeAlways
+ default:
+ return BrowserModeAuto
+ }
+}
+
+func BrowserModeFromProto(b daemonpb.RunRequest_BrowserMode) BrowserMode {
+ switch b {
+ case daemonpb.RunRequest_BROWSER_AUTO:
+ return BrowserModeAuto
+ case daemonpb.RunRequest_BROWSER_NEVER:
+ return BrowserModeNever
+ case daemonpb.RunRequest_BROWSER_ALWAYS:
+ return BrowserModeAlways
+ default:
+ return BrowserModeAuto
+ }
+}
+
+func DebugModeFromProto(d daemonpb.RunRequest_DebugMode) builder.DebugMode {
+ switch d {
+ case daemonpb.RunRequest_DEBUG_DISABLED:
+ return builder.DebugModeDisabled
+ case daemonpb.RunRequest_DEBUG_ENABLED:
+ return builder.DebugModeEnabled
+ case daemonpb.RunRequest_DEBUG_BREAK:
+ return builder.DebugModeBreak
+ default:
+ return builder.DebugModeDisabled
+ }
}
// Start starts the application.
// Its lifetime is bounded by ctx.
func (mgr *Manager) Start(ctx context.Context, params StartParams) (run *Run, err error) {
- ln, port, err := mgr.newListener()
- if err != nil {
- return nil, err
- }
- defer func() {
- if err != nil {
- ln.Close()
- }
- }()
+ logger := log.With().Str("app_id", params.App.PlatformOrLocalID()).Logger()
- appSlug, err := appfile.Slug(params.AppRoot)
+ svcProxy, err := svcproxy.New(ctx, logger)
if err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "failed to create service proxy")
}
run = &Run{
- ID: genID(),
- AppID: params.AppID,
- Root: params.AppRoot,
- AppSlug: appSlug,
- Port: port,
-
- log: log.With().Str("appID", params.AppID).Logger(),
- mgr: mgr,
- params: ¶ms,
- ctx: ctx,
- exited: make(chan struct{}),
- started: make(chan struct{}),
- }
+ ID: GenID(),
+ App: params.App,
+ NS: params.NS,
+ ResourceManager: infra.NewResourceManager(params.App, mgr.ClusterMgr, mgr.ObjectsMgr, mgr.PublicBuckets, params.NS, params.Environ, mgr.DBProxyPort, false),
+ ListenAddr: params.ListenAddr,
+ SvcProxy: svcProxy,
+ log: logger,
+ Mgr: mgr,
+ Params: ¶ms,
+ secrets: mgr.Secret.Load(params.App),
+ ctx: ctx,
+ exited: make(chan struct{}),
+ started: make(chan struct{}),
+ }
+ defer func(r *Run) {
+ // Stop all the resource servers if we exit due to an error
+ if err != nil {
+ r.Close()
+ }
+ }(run)
// Add the run to our map before starting to avoid
// racing with initialization (though it's unlikely to ever matter).
@@ -115,7 +187,10 @@ func (mgr *Manager) Start(ctx context.Context, params StartParams) (run *Run, er
mgr.runs[run.ID] = run
mgr.mu.Unlock()
- if err := run.start(ln); err != nil {
+ if err := run.start(params.Listener, params.OpsTracker); err != nil {
+ if errList := AsErrorList(err); errList != nil {
+ return nil, errList
+ }
return nil, err
}
@@ -128,17 +203,37 @@ func (mgr *Manager) Start(ctx context.Context, params StartParams) (run *Run, er
return run, nil
}
-// runLogger is the interface for listening to run logs.
+func (r *Run) Close() {
+ if r.Builder != nil {
+ _ = r.Builder.Close()
+ }
+ r.SvcProxy.Close()
+ r.ResourceManager.StopAll()
+}
+
+// RunLogger is the interface for listening to run logs.
// The log methods are called for each logline on stdout and stderr respectively.
-type runLogger interface {
- runStdout(r *Run, line []byte)
- runStderr(r *Run, line []byte)
+type RunLogger interface {
+ RunStdout(r *Run, line []byte)
+ RunStderr(r *Run, line []byte)
}
-// Proc returns the current running process.
+// ProcGroup returns the current running process group.
// It may have already exited.
-func (r *Run) Proc() *Proc {
- return r.proc.Load().(*Proc)
+// If the proc has not yet started, it may return nil.
+//
+// If run is nil then nil is returned.
+func (r *Run) ProcGroup() *ProcGroup {
+ if r == nil {
+ return nil
+ }
+
+ p, _ := r.proc.Load().(*ProcGroup)
+ return p
+}
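+
+// Note: the comma-ok assertion matters here. Before the first StoreProc
+// call, proc.Load returns nil, and asserting nil to *ProcGroup with the
+// comma-ok form yields a nil pointer instead of panicking.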
+
+func (r *Run) StoreProc(p *ProcGroup) {
+ r.proc.Store(p)
}
// Done returns a channel that is closed when the run is closed.
@@ -148,47 +243,21 @@ func (r *Run) Done() <-chan struct{} {
// Reload rebuilds the app and, if successful,
// starts a new proc and switches over.
-func (r *Run) Reload() (*Proc, error) {
- modPath := filepath.Join(r.Root, "go.mod")
- modData, err := ioutil.ReadFile(modPath)
- if err != nil {
- return nil, err
- }
- mod, err := modfile.Parse(modPath, modData, nil)
- if err != nil {
- return nil, err
- }
-
- cfg := &parser.Config{
- AppRoot: r.Root,
- Version: "",
- ModulePath: mod.Module.Mod.Path,
- WorkingDir: r.params.WorkingDir,
- ParseTests: false,
- }
- parse, err := parser.Parse(cfg)
- if err != nil {
- return nil, err
- }
-
- p, err := r.buildAndStart(r.ctx, parse)
+func (r *Run) Reload() error {
+ err := r.buildAndStart(r.ctx, nil, true)
if err != nil {
- return nil, err
+ return err
}
- prev := r.Proc()
- r.proc.Store(p)
- prev.close()
-
- for _, ln := range r.mgr.listeners {
+ for _, ln := range r.Mgr.listeners {
ln.OnReload(r)
}
- return p, nil
+ return nil
}
// start starts the application and serves requests over HTTP using ln.
-func (r *Run) start(ln net.Listener) (err error) {
+func (r *Run) start(ln net.Listener, tracker *optracker.OpTracker) (err error) {
defer func() {
if err != nil {
// This is closed below when err == nil,
@@ -198,46 +267,49 @@ func (r *Run) start(ln net.Listener) (err error) {
}
}()
- p, err := r.buildAndStart(r.ctx, r.params.Parse)
+ err = r.buildAndStart(r.ctx, tracker, false)
if err != nil {
return err
}
- r.proc.Store(p)
// Below this line the function must never return an error
- // in order to only ensure we close r.exited exactly once.
+ // in order to ensure we close r.exited exactly once.
go func() {
- for _, ln := range r.mgr.listeners {
+ for _, ln := range r.Mgr.listeners {
ln.OnStart(r)
}
close(r.started)
}()
+ // Wrap the handler with h2c support to enable HTTP/2 in cleartext
+ // (the std http library only accepts HTTP/2 over TLS).
+ // We need this to be able to forward e.g. gRPC requests to the app.
+ handler := h2c.NewHandler(r, &http2.Server{})
+
// Run the http server until the app exits.
- srv := &http.Server{Addr: ln.Addr().String(), Handler: r}
+ srv := &http.Server{Addr: ln.Addr().String(), Handler: handler}
go func() {
- if err := srv.Serve(ln); err != http.ErrServerClosed {
+ if err := srv.Serve(ln); !errors.Is(err, http.ErrServerClosed) {
r.log.Error().Err(err).Msg("could not serve")
}
}()
go func() {
<-r.ctx.Done()
- srv.Close()
- ln.Close()
+ _ = srv.Close()
}()
- // Monitor the running proc and close the app when it exits.
+ // Monitor the running proc and close r.exited when it exits.
go func() {
for {
- p := r.proc.Load().(*Proc)
+ p := r.proc.Load().(*ProcGroup)
<-p.Done()
// p exited, but it could have been a reload.
// Check to make sure p is still the active proc.
- p2 := r.proc.Load().(*Proc)
+ p2 := r.proc.Load().(*ProcGroup)
if p2 == p {
// We're done.
- for _, ln := range r.mgr.listeners {
+ for _, ln := range r.Mgr.listeners {
ln.OnStop(r)
}
close(r.exited)
@@ -251,241 +323,363 @@ func (r *Run) start(ln net.Listener) (err error) {
// buildAndStart builds the app, starts the proc, and cleans up
// the build dir when it exits.
// The proc exits when ctx is canceled.
-func (r *Run) buildAndStart(ctx context.Context, parse *parser.Result) (p *Proc, err error) {
+func (r *Run) buildAndStart(ctx context.Context, tracker *optracker.OpTracker, isReload bool) error {
// Return early if the ctx is already canceled.
if err := ctx.Err(); err != nil {
- return nil, err
+ return err
}
- cfg := &compiler.Config{
- Version: "",
- WorkingDir: r.params.WorkingDir,
- CgoEnabled: true,
- EncoreRuntimePath: env.EncoreRuntimePath(),
- EncoreGoRoot: env.EncoreGoRoot(),
- Parse: parse,
+ for _, ln := range r.Mgr.listeners {
+ ln.OnCompileStart(r)
}
- build, err := compiler.Build(r.Root, cfg)
+ jobs := optracker.NewAsyncBuildJobs(ctx, r.App.PlatformOrLocalID(), tracker)
+
+ // Parse the app source code to figure out what infrastructure is needed.
+ start := time.Now()
+ parseOp := tracker.Add("Building Encore application graph", start)
+ topoOp := tracker.Add("Analyzing service topology", start)
+
+ expSet, err := r.App.Experiments(r.Params.Environ)
if err != nil {
- return nil, fmt.Errorf("compile error: %v", err)
+ return err
+ }
+
+ if r.Builder == nil {
+ r.Builder = builderimpl.Resolve(r.App.Lang(), expSet)
+ }
+
+ vcsRevision := vcs.GetRevision(r.App.Root())
+ buildInfo := builder.BuildInfo{
+ BuildTags: builder.LocalBuildTags,
+ CgoEnabled: true,
+ StaticLink: false,
+ DebugMode: r.Params.Debug,
+ Environ: r.Params.Environ,
+ GOOS: runtime.GOOS,
+ GOARCH: runtime.GOARCH,
+ KeepOutput: false,
+ Revision: vcsRevision.Revision,
+ UncommittedChanges: vcsRevision.Uncommitted,
+
+ // Use the local JS runtime if this is a development build.
+ UseLocalJSRuntime: version.Channel == version.DevBuild,
}
+
+ // A context that is canceled when the proc exits.
+ procCtx, cancelProcCtx := context.WithCancel(ctx)
+
+ // Cancel the proc context if we exit with a non-nil error.
defer func() {
if err != nil {
- os.RemoveAll(build.Dir)
+ cancelProcCtx()
}
}()
- var secrets map[string]string
- if usesSecrets(r.params.Parse.Meta) {
- if r.AppSlug == "" {
- return nil, fmt.Errorf("the app defines secrets, but is not yet linked to encore.dev; link it with `encore app link` to use secrets")
+ parse, err := r.Builder.Parse(procCtx, builder.ParseParams{
+ Build: buildInfo,
+ App: r.App,
+ Experiments: expSet,
+ WorkingDir: r.Params.WorkingDir,
+ ParseTests: false,
+ })
+ if err != nil {
+ // Don't use the error itself in tracker.Fail, as it will lead to duplicate error output.
+ tracker.Fail(parseOp, errors.New("parse error"))
+ return err
+ }
+
+ if err := r.App.CacheMetadata(parse.Meta); err != nil {
+ return errors.Wrap(err, "cache metadata")
+ }
+ tracker.Done(parseOp, 500*time.Millisecond)
+ tracker.Done(topoOp, 300*time.Millisecond)
+
+ r.ResourceManager.StartRequiredServices(jobs, parse.Meta)
+
+ configProm := promise.New(func() (*builder.ServiceConfigsResult, error) {
+ return r.Builder.ServiceConfigs(ctx, builder.ServiceConfigsParams{
+ Parse: parse,
+ CueMeta: &cueutil.Meta{
+ APIBaseURL: fmt.Sprintf("http://%s", r.ListenAddr),
+ EnvName: "local",
+ EnvType: cueutil.EnvType_Development,
+ CloudType: cueutil.CloudType_Local,
+ },
+ })
+ })
+
+ var build *builder.CompileResult
+ jobs.Go("Compiling application source code", false, 0, func(ctx context.Context) (err error) {
+ build, err = r.Builder.Compile(ctx, builder.CompileParams{
+ Build: buildInfo,
+ App: r.App,
+ Parse: parse,
+ OpTracker: tracker,
+ Experiments: expSet,
+ WorkingDir: r.Params.WorkingDir,
+ Environ: r.Params.Environ,
+ })
+ if err != nil {
+ return errors.Wrap(err, "compile error")
}
- data, err := r.mgr.Secret.Get(ctx, r.AppSlug)
+ return nil
+ })
+
+ var secrets map[string]string
+ jobs.Go("Fetching application secrets", true, 150*time.Millisecond, func(ctx context.Context) error {
+ data, err := r.secrets.Get(ctx, expSet)
if err != nil {
- return nil, err
+ return err
}
secrets = data.Values
+ return nil
+ })
+
+ if err := jobs.Wait(); err != nil {
+ return err
}
- p, err = r.startProc(&startProcParams{
- Ctx: ctx,
- BuildDir: build.Dir,
- BinPath: build.Exe,
- Meta: build.Parse.Meta,
- Logger: r.mgr,
- RuntimePort: r.mgr.RuntimePort,
- DBProxyPort: r.mgr.DBProxyPort,
- DBClusterID: r.params.DBClusterID,
- Secrets: secrets,
+ svcCfg, err := configProm.Get(ctx)
+ if err != nil {
+ return err
+ }
+
+ startOp := tracker.Add("Starting Encore application", start)
+ newProcess, err := r.StartProcGroup(&StartProcGroupParams{
+ Ctx: ctx,
+ Outputs: build.Outputs,
+ Meta: parse.Meta,
+ Logger: r.Mgr,
+ Secrets: secrets,
+ ServiceConfigs: svcCfg.Configs,
+ Environ: r.Params.Environ,
+ WorkingDir: r.Params.WorkingDir,
+ IsReload: isReload,
+ Experiments: expSet,
})
if err != nil {
- return nil, err
+ tracker.Fail(startOp, err)
+ return err
}
+
+ // Close the proc context when the proc exits.
go func() {
- <-p.Done()
- os.RemoveAll(build.Dir)
+ select {
+ case <-procCtx.Done():
+ // Already done
+ case <-newProcess.Done():
+ cancelProcCtx()
+ }
+ }()
+
+ previousProcess := r.proc.Swap(newProcess)
+ if previousProcess != nil {
+ previousProcess.(*ProcGroup).Close()
+ }
+
+ tracker.Done(startOp, 50*time.Millisecond)
+
+ go func() {
+ // Wait one second before logging any warnings (such as missing secrets).
+ time.Sleep(1 * time.Second)
+
+ // Log any warnings.
+ for _, warning := range newProcess.Warnings() {
+ line := "\n" + aurora.Red(fmt.Sprintf("warning: %s", warning.Title)).String() + "\n" +
+ aurora.Gray(16, fmt.Sprintf("note: %s", warning.Help)).String() + "\n\n"
+ r.Mgr.RunStderr(r, []byte(line))
+ }
}()
- return p, nil
-}
-// Proc represents a running Encore process.
-type Proc struct {
- ID string // unique process id
- Run *Run // the run the process belongs to
- Pid int // the OS process id
- Meta *meta.Data // app metadata snapshot
- Started time.Time // when the process started
-
- log zerolog.Logger
- exit chan struct{} // closed when the process has exited
- cmd *exec.Cmd
- reqWr *os.File
- respRd *os.File
- buildDir string
- client *yamux.Session
+ return nil
}
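+
+// configProm above uses the internal promise package; as a hedged sketch of
+// the assumed semantics (start the work immediately, block in Get until the
+// result is ready), a single-use channel-based equivalent might look like:
+//
+//	func newPromise[T any](fn func() (T, error)) func(context.Context) (T, error) {
+//		type result struct {
+//			val T
+//			err error
+//		}
+//		ch := make(chan result, 1)
+//		go func() {
+//			v, err := fn()
+//			ch <- result{v, err}
+//		}()
+//		// The returned function plays the role of Get; the result can be
+//		// received once.
+//		return func(ctx context.Context) (T, error) {
+//			select {
+//			case r := <-ch:
+//				return r.val, r.err
+//			case <-ctx.Done():
+//				var zero T
+//				return zero, ctx.Err()
+//			}
+//		}
+//	}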
-type startProcParams struct {
- Ctx context.Context
- BuildDir string
- BinPath string
- Meta *meta.Data
- Secrets map[string]string
- RuntimePort int
- DBProxyPort int
- DBClusterID string
- Logger runLogger
+type StartProcGroupParams struct {
+ Ctx context.Context
+ Outputs []builder.BuildOutput
+ Meta *meta.Data
+ Secrets map[string]string
+ ServiceConfigs map[string]string
+ Logger RunLogger
+ Environ []string
+ WorkingDir string
+ IsReload bool
+ Experiments *experiments.Set
}
-// startProc starts a single actual OS process for app.
-func (r *Run) startProc(params *startProcParams) (p *Proc, err error) {
- pid := genID()
- p = &Proc{
- ID: pid,
- Run: r,
- Meta: params.Meta,
- exit: make(chan struct{}),
- buildDir: params.BuildDir,
- log: r.log.With().Str("procID", pid).Logger(),
- }
-
- cmd := exec.Command(params.BinPath)
- cmd.Env = []string{
- "ENCORE_APP_ID=" + r.ID,
- "ENCORE_PROC_ID=" + p.ID,
- "ENCORE_RUNTIME_ADDRESS=localhost:" + strconv.Itoa(params.RuntimePort),
- "ENCORE_SQLDB_ADDRESS=localhost:" + strconv.Itoa(params.DBProxyPort),
- "ENCORE_SQLDB_PASSWORD=" + params.DBClusterID,
- "ENCORE_SECRETS=" + encodeSecretsEnv(params.Secrets),
- }
- p.cmd = cmd
-
- // Proxy stdout and stderr to the given app logger, if any.
- if l := params.Logger; l != nil {
- cmd.Stdout = newLogWriter(r, l.runStdout)
- cmd.Stderr = newLogWriter(r, l.runStderr)
- }
-
- // Set up extra file descriptors for communicating requests/responses:
- // - reqRd is for reading incoming requests (handed over procchild)
- // - reqWr is for writing incoming requests
- // - respRd is for reading responses
- // - respWr is for writing responses (handed over to proc)
- reqRd, reqWr, err1 := os.Pipe()
- respRd, respWr, err2 := os.Pipe()
- defer func() {
- // Close all the files if we return an error.
- if err != nil {
- closeAll(reqRd, reqWr, respRd, respWr)
+const gracefulShutdownTime = 10 * time.Second
+
+// StartProcGroup starts the OS processes for the app and returns the resulting process group.
+func (r *Run) StartProcGroup(params *StartProcGroupParams) (p *ProcGroup, err error) {
+ pid := GenID()
+
+ userEnv := append([]string{
+ "ENCORE_RUNTIME_LOG=error",
+ // Always include internal messages when developing locally.
+ "ENCORE_API_INCLUDE_INTERNAL_MESSAGE=1",
+ }, params.Environ...)
+
+ daemonProxyAddr, err := netip.ParseAddrPort(strings.ReplaceAll(r.ListenAddr, "localhost", "127.0.0.1"))
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to parse listen address: %s", r.ListenAddr)
+ }
+ gatewayBaseURL := fmt.Sprintf("http://%s", daemonProxyAddr)
+ gateways := make(map[string]GatewayConfig)
+ for _, gw := range params.Meta.Gateways {
+ gateways[gw.EncoreName] = GatewayConfig{
+ BaseURL: gatewayBaseURL,
+ Hostnames: []string{"localhost"},
}
- }()
- if err := firstErr(err1, err2); err != nil {
- return nil, err
- } else if err := xos.ArrangeExtraFiles(cmd, reqRd, respWr); err != nil {
- return nil, err
}
- if err := cmd.Start(); err != nil {
- return nil, err
- }
- p.log.Info().Msg("started process")
- defer func() {
+ authKey := genAuthKey()
+ p = newProcGroup(procGroupOptions{
+ ProcID: pid,
+ Run: r,
+ AuthKey: authKey,
+ ConfigGen: &RuntimeConfigGenerator{
+ app: r.App,
+ infraManager: r.ResourceManager,
+ md: params.Meta,
+ AppID: option.Some(r.ID),
+ EnvID: option.Some(pid),
+ TraceEndpoint: option.Some(fmt.Sprintf("http://localhost:%d/trace", r.Mgr.RuntimePort)),
+ AuthKey: authKey,
+ Gateways: gateways,
+ DefinedSecrets: params.Secrets,
+ SvcConfigs: params.ServiceConfigs,
+ DeployID: option.Some(fmt.Sprintf("run_%s", xid.New().String())),
+ IncludeMetaEnv: r.Builder.NeedsMeta(),
+ },
+ Experiments: params.Experiments,
+ Meta: params.Meta,
+ Ctx: params.Ctx,
+ WorkingDir: params.WorkingDir,
+ Logger: params.Logger,
+ })
+
+ if isSingleProc(params.Outputs) {
+ conf, err := p.ConfigGen.AllInOneProc()
if err != nil {
- cmd.Process.Kill()
+ return nil, err
}
- }()
- // Close the files we handed over to the child.
- closeAll(reqRd, respWr)
+ entrypoint := params.Outputs[0].GetEntrypoints()[0]
- rwc := &struct {
- io.ReadCloser
- io.Writer
- }{
- ReadCloser: ioutil.NopCloser(respRd),
- Writer: reqWr,
- }
- p.client, err = yamux.Client(rwc, yamux.DefaultConfig())
- if err != nil {
- return nil, fmt.Errorf("could not initialize connection: %v", err)
- }
+ // Generate the environment variables for the process.
+ procEnv, err := p.ConfigGen.ProcEnvs(conf, entrypoint.UseRuntimeConfigV2)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to generate environment variables")
+ }
- p.reqWr = reqWr
- p.respRd = respRd
- p.Pid = cmd.Process.Pid
- p.Started = time.Now()
+ env := slices.Clone(userEnv)
+ env = append(env, procEnv...)
- // Monitor the context and close the process when it is done.
- go func() {
- select {
- case <-params.Ctx.Done():
- p.close()
- case <-p.exit:
+ // We're running everything inside a single process.
+ cmd := entrypoint.Cmd.Expand(params.Outputs[0].GetArtifactDir())
+ if err := p.NewAllInOneProc(cmd, conf.ListenAddr, env); err != nil {
+ return nil, err
+ }
+ } else {
+ var (
+ svcConfs map[string]*ProcConfig
+ gwConfs map[string]*ProcConfig
+ )
+
+ if r.Builder.UseNewRuntimeConfig() {
+ _, svcConfs, gwConfs, err = p.ConfigGen.ProcPerServiceWithNewRuntimeConfig(r.SvcProxy)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ svcConfs, gwConfs, err = p.ConfigGen.ProcPerService(r.SvcProxy)
+ if err != nil {
+ return nil, err
+ }
}
- }()
- go p.waitForExit()
- return p, nil
-}
+ for _, o := range params.Outputs {
+ for _, ep := range o.GetEntrypoints() {
+ cmd := ep.Cmd.Expand(o.GetArtifactDir())
+ // Create a process for each service.
+ for _, svcName := range ep.Services {
+ // Generate the environment variables for the process.
+ procConf, ok := svcConfs[svcName]
+ if !ok {
+ return nil, errors.Newf("unknown service %q", svcName)
+ }
+ procEnv, err := p.ConfigGen.ProcEnvs(procConf, ep.UseRuntimeConfigV2)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to generate environment variables")
+ }
+
+ env := slices.Clone(userEnv)
+ env = append(env, procEnv...)
+
+ if err := p.NewProcForService(svcName, procConf.ListenAddr, cmd, env); err != nil {
+ return nil, err
+ }
+ }
-// Done returns a channel that is closed when the process has exited.
-func (p *Proc) Done() <-chan struct{} {
- return p.exit
-}
+ for _, gwName := range ep.Gateways {
+ procConf, ok := gwConfs[gwName]
+ if !ok {
+ return nil, errors.Newf("unknown gateway %q", gwName)
+ }
-// close closes the process and waits for it to shutdown.
-// It can safely be called multiple times.
-func (p *Proc) close() {
- p.reqWr.Close()
- timer := time.NewTimer(10 * time.Second)
- defer timer.Stop()
- select {
- case <-p.exit:
- case <-timer.C:
- // The process didn't exit after 10s
- p.log.Error().Msg("timed out waiting for process to exit; killing")
- p.cmd.Process.Kill()
- <-p.exit
- }
-}
+ procEnv, err := p.ConfigGen.ProcEnvs(procConf, ep.UseRuntimeConfigV2)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to generate environment variables")
+ }
-func (p *Proc) waitForExit() {
- defer close(p.exit)
- defer closeAll(p.reqWr, p.respRd)
+ env := slices.Clone(userEnv)
+ env = append(env, procEnv...)
- if err := p.cmd.Wait(); err != nil {
- p.log.Error().Err(err).Msg("process exited with error")
- } else {
- p.log.Info().Err(err).Msg("process exited successfully")
+ if err := p.NewProcForGateway(gwName, procConf.ListenAddr, cmd, env); err != nil {
+ return nil, err
+ }
+ }
+ }
+ }
}
- // Flush the logs in case the output did not end in a newline.
- for _, w := range [...]io.Writer{p.cmd.Stdout, p.cmd.Stderr} {
- if w != nil {
- w.(*logWriter).Flush()
- }
+ // Start the application's processes.
+ if err := p.Start(); err != nil {
+ return nil, err
}
-}
+ defer func() {
+ if err != nil {
+ p.Kill()
+ }
+ }()
-// closeAll closes all the given closers, skipping ones that are nil.
-func closeAll(closers ...io.Closer) {
- for _, c := range closers {
- if c != nil {
- c.Close()
+ // Monitor the context and close the process group when the context is done.
+ go func() {
+ select {
+ case <-params.Ctx.Done():
+ p.Close()
+ case <-p.Done():
}
- }
-}
+ }()
-// firstErr reports the first non-nil error out of errs.
-// If all are nil, it reports nil.
-func firstErr(errs ...error) error {
- for _, err := range errs {
- if err != nil {
- return err
+ // If this is a live reload, wait for the process to be ready.
+ // This way we ensure requests are always hitting a running server,
+ // in case something (such as a batch job) is sending requests mid-reload.
+ if params.IsReload {
+ g, ctx := errgroup.WithContext(params.Ctx)
+ for _, gw := range p.Gateways {
+ gw := gw
+ g.Go(func() error {
+ gw.pollUntilProcessIsListening(ctx)
+ return nil
+ })
}
+ _ = g.Wait()
}
- return nil
+
+ return p, nil
}
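+
+// A small note on the errgroup use above: every goroutine returns nil, so
+// the group is used purely for its WithContext cancellation and Wait
+// semantics; a plain sync.WaitGroup would behave the same here.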
// logWriter is an io.Writer that buffers incoming logs
@@ -498,7 +692,7 @@ type logWriter struct {
}
func newLogWriter(run *Run, fn func(*Run, []byte)) *logWriter {
- const maxLine = 10 * 1024
+ const maxLine = 100 * 1024
return &logWriter{
run: run,
fn: fn,
@@ -541,9 +735,9 @@ func (w *logWriter) Flush() {
}
}
-// genID generates a random run/process id.
+// GenID generates a random run/process id.
// It panics if it cannot get random bytes.
-func genID() string {
+func GenID() string {
var b [8]byte
if _, err := rand.Read(b[:]); err != nil {
panic("cannot generate random data: " + err.Error())
@@ -574,7 +768,7 @@ func encodeSecretsEnv(secrets map[string]string) string {
buf.WriteString(k)
buf.WriteByte('=')
- buf.WriteString(base64.RawStdEncoding.EncodeToString([]byte(secrets[k])))
+ buf.WriteString(base64.RawURLEncoding.EncodeToString([]byte(secrets[k])))
}
return buf.String()
}
@@ -587,3 +781,46 @@ func usesSecrets(md *meta.Data) bool {
}
return false
}
+
+func genAuthKey() config.EncoreAuthKey {
+ // Read a uint32 from crypto/rand to use as the key ID.
+ var kidBytes [4]byte
+ if _, err := rand.Read(kidBytes[:]); err != nil {
+ panic("cannot generate random data: " + err.Error())
+ }
+ kid := binary.BigEndian.Uint32(kidBytes[:])
+
+ var b [16]byte
+ if _, err := rand.Read(b[:]); err != nil {
+ panic("cannot generate random data: " + err.Error())
+ }
+ return config.EncoreAuthKey{KeyID: kid, Data: b[:]}
+}
+
+// CanDeleteNamespace implements namespace.DeletionHandler.
+func (m *Manager) CanDeleteNamespace(ctx context.Context, app *apps.Instance, ns *namespace.Namespace) error {
+ // Check if any of the active runs are using this namespace.
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ for _, r := range m.runs {
+ if r.NS.ID == ns.ID && r.ctx.Err() == nil {
+ return errors.New("namespace is in use by 'encore run'")
+ }
+ }
+ return nil
+}
+
+// DeleteNamespace implements namespace.DeletionHandler.
+func (m *Manager) DeleteNamespace(ctx context.Context, app *apps.Instance, ns *namespace.Namespace) error {
+ // We don't need to do anything here; we only implement DeletionHandler for
+ // the CanDeleteNamespace check.
+ return nil
+}
+
+func isSingleProc(outputs []builder.BuildOutput) bool {
+ if len(outputs) != 1 {
+ return false
+ }
+ return len(outputs[0].GetEntrypoints()) == 1
+}
diff --git a/cli/daemon/run/run_test.go b/cli/daemon/run/run_test.go
deleted file mode 100644
index 743374f641..0000000000
--- a/cli/daemon/run/run_test.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package run
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "net/http/httptest"
- "os"
- "path/filepath"
- "testing"
-
- "encr.dev/cli/internal/env"
- "encr.dev/compiler"
- qt "github.com/frankban/quicktest"
- "go.uber.org/goleak"
-)
-
-// TestStartProc tests that (*app).startProc correctly starts Encore processes
-// for sending requests.
-func TestStartProc(t *testing.T) {
- defer goleak.VerifyNone(t, goleak.IgnoreCurrent())
- run := &Run{ID: genID()}
- c := qt.New(t)
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- build := testBuild(c, "./testdata/echo")
- p, err := run.startProc(&startProcParams{
- Ctx: ctx,
- BuildDir: build.Dir,
- BinPath: build.Exe,
- Meta: build.Parse.Meta,
- RuntimePort: 0,
- DBProxyPort: 0,
- Logger: testRunLogger{t},
- })
- c.Assert(err, qt.IsNil)
- defer p.close()
- run.proc.Store(p)
-
- // Send a simple message and make sure it is echoed back.
- input := struct{ Message string }{Message: "hello"}
- body, _ := json.Marshal(&input)
-
- w := httptest.NewRecorder()
- req := httptest.NewRequest("POST", "/echo.Echo", bytes.NewReader(body))
- run.ServeHTTP(w, req)
- c.Assert(w.Code, qt.Equals, 200)
- c.Assert(w.Body.Bytes(), qt.JSONEquals, input)
-}
-
-// TestProcClosedOnCtxCancel tests that the proc is closed when
-// the given ctx is cancelled.
-func TestProcClosedOnCtxCancel(t *testing.T) {
- defer goleak.VerifyNone(t, goleak.IgnoreCurrent())
- app := &Run{ID: genID()}
- c := qt.New(t)
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- build := testBuild(c, "./testdata/echo")
- p, err := app.startProc(&startProcParams{
- Ctx: ctx,
- BuildDir: build.Dir,
- BinPath: build.Exe,
- Meta: build.Parse.Meta,
- RuntimePort: 0,
- DBProxyPort: 0,
- Logger: testRunLogger{t},
- })
- c.Assert(err, qt.IsNil)
- cancel()
- <-p.Done()
-}
-
-// testBuild is a helper that compiles the app situated at appRoot
-// and cleans up the build dir during test cleanup.
-func testBuild(c *qt.C, appRoot string) *compiler.Result {
- wd, err := os.Getwd()
- c.Assert(err, qt.IsNil)
- runtimePath := filepath.Join(wd, "../../../compiler/runtime")
- build, err := compiler.Build("./testdata/echo", &compiler.Config{
- EncoreRuntimePath: runtimePath,
- EncoreGoRoot: env.EncoreGoRoot(),
- })
- c.Assert(err, qt.IsNil)
- c.Cleanup(func() {
- os.RemoveAll(build.Dir)
- })
- return build
-}
-
-// testRunLogger implements runLogger by calling t.Log.
-type testRunLogger struct {
- t *testing.T
-}
-
-func (l testRunLogger) runStdout(r *Run, line []byte) {
- line = bytes.TrimSuffix(line, []byte{'\n'})
- l.t.Log(string(line))
-}
-
-func (l testRunLogger) runStderr(r *Run, line []byte) {
- line = bytes.TrimSuffix(line, []byte{'\n'})
- l.t.Log(string(line))
-}
diff --git a/cli/daemon/run/runtime_config2.go b/cli/daemon/run/runtime_config2.go
new file mode 100644
index 0000000000..acf2a2603a
--- /dev/null
+++ b/cli/daemon/run/runtime_config2.go
@@ -0,0 +1,927 @@
+package run
+
+import (
+ "bytes"
+ "compress/gzip"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "net"
+ "net/netip"
+ "os"
+ "slices"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/cockroachdb/errors"
+ "github.com/jackc/pgx/v5"
+ "github.com/rs/xid"
+ "go4.org/syncutil"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/durationpb"
+
+ "encore.dev/appruntime/exported/config"
+ encoreEnv "encr.dev/internal/env"
+ "encr.dev/pkg/appfile"
+ "encr.dev/pkg/fns"
+ "encr.dev/pkg/option"
+ "encr.dev/pkg/rtconfgen"
+ "encr.dev/pkg/svcproxy"
+ meta "encr.dev/proto/encore/parser/meta/v1"
+ runtimev1 "encr.dev/proto/encore/runtime/v1"
+)
+
+const (
+ runtimeCfgEnvVar = "ENCORE_RUNTIME_CONFIG"
+ appSecretsEnvVar = "ENCORE_APP_SECRETS"
+ serviceCfgEnvPrefix = "ENCORE_CFG_"
+ listenEnvVar = "ENCORE_LISTEN_ADDR"
+ metaEnvVar = "ENCORE_APP_META"
+)
+
+type RuntimeConfigGenerator struct {
+ initOnce syncutil.Once
+ md *meta.Data
+
+ // The application to generate the config for
+ app interface {
+ PlatformID() string
+ PlatformOrLocalID() string
+ GlobalCORS() (appfile.CORS, error)
+ AppFile() (*appfile.File, error)
+ BuildSettings() (appfile.Build, error)
+ }
+
+ // The infra manager to use.
+ infraManager interface {
+ SQLServerConfig() (config.SQLServer, error)
+ PubSubProviderConfig() (config.PubsubProvider, error)
+
+ SQLDatabaseConfig(db *meta.SQLDatabase) (config.SQLDatabase, error)
+ PubSubTopicConfig(topic *meta.PubSubTopic) (config.PubsubProvider, config.PubsubTopic, error)
+ PubSubSubscriptionConfig(topic *meta.PubSubTopic, sub *meta.PubSubTopic_Subscription) (config.PubsubSubscription, error)
+ RedisConfig(redis *meta.CacheCluster) (config.RedisServer, config.RedisDatabase, error)
+ BucketProviderConfig() (config.BucketProvider, string, error)
+ }
+
+ AppID option.Option[string]
+ EnvID option.Option[string]
+ EnvName option.Option[string]
+ EnvType option.Option[runtimev1.Environment_Type]
+ EnvCloud option.Option[runtimev1.Environment_Cloud]
+ TraceEndpoint option.Option[string]
+ DeployID option.Option[string]
+ Gateways map[string]GatewayConfig
+ AuthKey config.EncoreAuthKey
+
+ // Whether to include the metadata as an environment variable.
+ IncludeMetaEnv bool
+
+ // The values of defined secrets.
+ DefinedSecrets map[string]string
+ // The configs, per service.
+ SvcConfigs map[string]string
+
+ conf *rtconfgen.Builder
+ authKeys []*runtimev1.EncoreAuthKey
+}
+
+type GatewayConfig struct {
+ BaseURL string
+ Hostnames []string
+}
+
+func (g *RuntimeConfigGenerator) initialize() error {
+ return g.initOnce.Do(func() error {
+ g.conf = rtconfgen.NewBuilder()
+
+ newRid := func() string { return "res_" + xid.New().String() }
+
+ if deployID, ok := g.DeployID.Get(); ok {
+ g.conf.DeployID(deployID)
+ }
+ g.conf.DeployedAt(time.Now())
+
+ g.conf.Env(&runtimev1.Environment{
+ AppId: g.AppID.GetOrElseF(g.app.PlatformOrLocalID),
+ AppSlug: g.app.PlatformID(),
+ EnvId: g.EnvID.GetOrElse("local"),
+ EnvName: g.EnvName.GetOrElse("local"),
+ EnvType: g.EnvType.GetOrElse(runtimev1.Environment_TYPE_DEVELOPMENT),
+ Cloud: g.EnvCloud.GetOrElse(runtimev1.Environment_CLOUD_LOCAL),
+ })
+
+ toSecret := func(b []byte) *runtimev1.SecretData {
+ return &runtimev1.SecretData{
+ Source: &runtimev1.SecretData_Embedded{Embedded: b},
+ }
+ }
+ ak := g.AuthKey
+ g.authKeys = []*runtimev1.EncoreAuthKey{{Id: ak.KeyID, Data: toSecret(ak.Data)}}
+
+ g.conf.EncorePlatform(&runtimev1.EncorePlatform{
+ PlatformSigningKeys: g.authKeys,
+ EncoreCloud: nil,
+ })
+
+ if traceEndpoint, ok := g.TraceEndpoint.Get(); ok {
+ sampleRate := 1.0
+ if val, err := strconv.ParseFloat(os.Getenv("ENCORE_TRACE_SAMPLING_RATE"), 64); err == nil {
+ sampleRate = min(max(val, 0), 1)
+ }
+ g.conf.TracingProvider(&runtimev1.TracingProvider{
+ Rid: newRid(),
+ Provider: &runtimev1.TracingProvider_Encore{
+ Encore: &runtimev1.TracingProvider_EncoreTracingProvider{
+ TraceEndpoint: traceEndpoint,
+ SamplingRate: &sampleRate,
+ },
+ },
+ })
+ }
+
+ appFile, err := g.app.AppFile()
+ if err != nil {
+ return errors.Wrap(err, "failed to get app's build settings")
+ }
+ for _, svc := range g.md.Svcs {
+ cfg := &runtimev1.HostedService{
+ Name: svc.Name,
+ LogConfig: ptrOrNil(appFile.LogLevel),
+ }
+
+ if appFile.Build.WorkerPooling {
+ n := int32(0)
+ cfg.WorkerThreads = &n
+ }
+ g.conf.ServiceConfig(cfg)
+ }
+
+ g.conf.AuthMethods([]*runtimev1.ServiceAuth{
+ {
+ AuthMethod: &runtimev1.ServiceAuth_EncoreAuth_{
+ EncoreAuth: &runtimev1.ServiceAuth_EncoreAuth{
+ AuthKeys: g.authKeys,
+ },
+ },
+ },
+ })
+
+ g.conf.DefaultGracefulShutdown(&runtimev1.GracefulShutdown{
+ Total: durationpb.New(10 * time.Second),
+ ShutdownHooks: durationpb.New(4 * time.Second),
+ Handlers: durationpb.New(2 * time.Second),
+ })
+
+ for _, gw := range g.md.Gateways {
+ cors, err := g.app.GlobalCORS()
+ if err != nil {
+ return errors.Wrap(err, "failed to generate global CORS config")
+ }
+
+ g.conf.Infra.Gateway(&runtimev1.Gateway{
+ Rid: newRid(),
+ EncoreName: gw.EncoreName,
+ BaseUrl: g.Gateways[gw.EncoreName].BaseURL,
+ Hostnames: g.Gateways[gw.EncoreName].Hostnames,
+ Cors: &runtimev1.Gateway_CORS{
+ Debug: cors.Debug,
+ DisableCredentials: false,
+ ExtraAllowedHeaders: cors.AllowHeaders,
+ ExtraExposedHeaders: cors.ExposeHeaders,
+
+ AllowedOriginsWithCredentials: &runtimev1.Gateway_CORS_UnsafeAllowAllOriginsWithCredentials{
+ UnsafeAllowAllOriginsWithCredentials: true,
+ },
+
+ AllowedOriginsWithoutCredentials: &runtimev1.Gateway_CORSAllowedOrigins{
+ AllowedOrigins: []string{"*"},
+ },
+
+ AllowPrivateNetworkAccess: true,
+ },
+ })
+ }
+
+ if len(g.md.PubsubTopics) > 0 {
+ pubsubConfig, err := g.infraManager.PubSubProviderConfig()
+ if err != nil {
+ return errors.Wrap(err, "failed to generate pubsub provider config")
+ }
+
+ cluster := g.conf.Infra.PubSubCluster(&runtimev1.PubSubCluster{
+ Rid: newRid(),
+ Provider: &runtimev1.PubSubCluster_Nsq{
+ Nsq: &runtimev1.PubSubCluster_NSQ{Hosts: []string{pubsubConfig.NSQ.Host}},
+ },
+ })
+
+ for _, topic := range g.md.PubsubTopics {
+ topicRid := newRid()
+
+ var deliveryGuarantee runtimev1.PubSubTopic_DeliveryGuarantee
+ switch topic.DeliveryGuarantee {
+ case meta.PubSubTopic_AT_LEAST_ONCE:
+ deliveryGuarantee = runtimev1.PubSubTopic_DELIVERY_GUARANTEE_AT_LEAST_ONCE
+ case meta.PubSubTopic_EXACTLY_ONCE:
+ deliveryGuarantee = runtimev1.PubSubTopic_DELIVERY_GUARANTEE_EXACTLY_ONCE
+ default:
+ return errors.Newf("unknown delivery guarantee %q", topic.DeliveryGuarantee)
+ }
+
+ cluster.PubSubTopic(&runtimev1.PubSubTopic{
+ Rid: topicRid,
+ EncoreName: topic.Name,
+ CloudName: topic.Name,
+ DeliveryGuarantee: deliveryGuarantee,
+ OrderingAttr: ptrOrNil(topic.OrderingKey),
+ ProviderConfig: nil,
+ })
+
+ for _, sub := range topic.Subscriptions {
+ cluster.PubSubSubscription(&runtimev1.PubSubSubscription{
+ Rid: newRid(),
+ TopicEncoreName: topic.Name,
+ SubscriptionEncoreName: sub.Name,
+ TopicCloudName: topic.Name,
+ SubscriptionCloudName: sub.Name,
+ PushOnly: false,
+ ProviderConfig: nil,
+ })
+ }
+ }
+ }
+
+ if len(g.md.SqlDatabases) > 0 {
+ srvConfig, err := g.infraManager.SQLServerConfig()
+ if err != nil {
+ return errors.Wrap(err, "failed to generate SQL server config")
+ }
+
+ cluster := g.conf.Infra.SQLCluster(&runtimev1.SQLCluster{
+ Rid: newRid(),
+ })
+
+ var tlsConfig *runtimev1.TLSConfig
+ if srvConfig.ServerCACert != "" {
+ tlsConfig = &runtimev1.TLSConfig{
+ ServerCaCert: &srvConfig.ServerCACert,
+ }
+ }
+
+ cluster.SQLServer(&runtimev1.SQLServer{
+ Rid: newRid(),
+ Kind: runtimev1.ServerKind_SERVER_KIND_PRIMARY,
+ Host: srvConfig.Host,
+ TlsConfig: tlsConfig,
+ })
+
+ for _, db := range g.md.SqlDatabases {
+ if externalDB, ok := g.DefinedSecrets["sqldb::"+db.Name]; ok {
+ var extCfg struct {
+ ConnectionString string `json:"connection_string"`
+ }
+ if err := json.Unmarshal([]byte(externalDB), &extCfg); err != nil {
+ return errors.Wrapf(err, "failed to unmarshal external DB config for %q", db.Name)
+ }
+ pCfg, err := pgx.ParseConfig(extCfg.ConnectionString)
+ if err != nil {
+ return errors.Wrapf(err, "failed to parse external DB connection string for %q", db.Name)
+ }
+ cluster := g.conf.Infra.SQLCluster(&runtimev1.SQLCluster{
+ Rid: newRid(),
+ })
+ cluster.SQLServer(&runtimev1.SQLServer{
+ Rid: newRid(),
+ Kind: runtimev1.ServerKind_SERVER_KIND_PRIMARY,
+ Host: pCfg.Host,
+ TlsConfig: &runtimev1.TLSConfig{
+ DisableCaValidation: true,
+ },
+ })
+ // Generate a role rid based on the cluster+username combination.
+ roleRid := fmt.Sprintf("role:%s:%s", cluster.Val.Rid, pCfg.User)
+ g.conf.Infra.SQLRole(&runtimev1.SQLRole{
+ Rid: roleRid,
+ Username: pCfg.User,
+ Password: toSecret([]byte(pCfg.Password)),
+ ClientCertRid: nil,
+ })
+ cluster.SQLDatabase(&runtimev1.SQLDatabase{
+ Rid: newRid(),
+ EncoreName: db.Name,
+ CloudName: pCfg.Database,
+ ConnPools: nil,
+ }).AddConnectionPool(&runtimev1.SQLConnectionPool{
+ IsReadonly: false,
+ RoleRid: roleRid,
+ MinConnections: int32(0),
+ MaxConnections: int32(0),
+ })
+ } else {
+ dbConfig, err := g.infraManager.SQLDatabaseConfig(db)
+ if err != nil {
+ return errors.Wrap(err, "failed to generate SQL database config")
+ }
+
+ // Generate a role rid based on the cluster+username combination.
+ roleRid := fmt.Sprintf("role:%s:%s", cluster.Val.Rid, dbConfig.User)
+ g.conf.Infra.SQLRole(&runtimev1.SQLRole{
+ Rid: roleRid,
+ Username: dbConfig.User,
+ Password: toSecret([]byte(dbConfig.Password)),
+ ClientCertRid: nil,
+ })
+ cluster.SQLDatabase(&runtimev1.SQLDatabase{
+ Rid: newRid(),
+ EncoreName: dbConfig.EncoreName,
+ CloudName: dbConfig.DatabaseName,
+ ConnPools: nil,
+ }).AddConnectionPool(&runtimev1.SQLConnectionPool{
+ IsReadonly: false,
+ RoleRid: roleRid,
+ MinConnections: int32(dbConfig.MinConnections),
+ MaxConnections: int32(dbConfig.MaxConnections),
+ })
+
+ }
+
+ }
+ }
+
+ if len(g.md.CacheClusters) > 0 {
+ for _, cl := range g.md.CacheClusters {
+ srvConfig, dbConfig, err := g.infraManager.RedisConfig(cl)
+ if err != nil {
+ return errors.Wrap(err, "failed to generate Redis cluster config")
+ }
+
+ cluster := g.conf.Infra.RedisCluster(&runtimev1.RedisCluster{
+ Rid: newRid(),
+ Servers: nil,
+ })
+
+ // Generate a role rid based on the cluster+username combination.
+ roleRid := fmt.Sprintf("role:%s:%s", cluster.Val.Rid, srvConfig.User)
+ g.conf.Infra.RedisRoleFn(roleRid, func() *runtimev1.RedisRole {
+ r := &runtimev1.RedisRole{
+ Rid: roleRid,
+ ClientCertRid: nil,
+ }
+ switch {
+ case srvConfig.User != "" && srvConfig.Password != "":
+ r.Auth = &runtimev1.RedisRole_Acl{Acl: &runtimev1.RedisRole_AuthACL{
+ Username: srvConfig.User,
+ Password: toSecret([]byte(srvConfig.Password)),
+ }}
+ case srvConfig.Password != "":
+ r.Auth = &runtimev1.RedisRole_AuthString{AuthString: toSecret([]byte(srvConfig.Password))}
+ default:
+ r.Auth = nil
+ }
+ return r
+ })
+
+ var tlsConfig *runtimev1.TLSConfig
+ if srvConfig.EnableTLS || srvConfig.ServerCACert != "" {
+ tlsConfig = &runtimev1.TLSConfig{
+ ServerCaCert: ptrOrNil(srvConfig.ServerCACert),
+ }
+ }
+
+ cluster.RedisServer(&runtimev1.RedisServer{
+ Rid: newRid(),
+ Host: srvConfig.Host,
+ Kind: runtimev1.ServerKind_SERVER_KIND_PRIMARY,
+ TlsConfig: tlsConfig,
+ })
+ cluster.RedisDatabase(&runtimev1.RedisDatabase{
+ Rid: newRid(),
+ EncoreName: dbConfig.EncoreName,
+ DatabaseIdx: int32(dbConfig.Database),
+ KeyPrefix: ptrOrNil(dbConfig.KeyPrefix),
+ ConnPools: nil,
+ }).AddConnectionPool(&runtimev1.RedisConnectionPool{
+ IsReadonly: false,
+ RoleRid: roleRid,
+ MinConnections: int32(dbConfig.MinConnections),
+ MaxConnections: int32(dbConfig.MaxConnections),
+ })
+ }
+ }
+
+ if len(g.md.Buckets) > 0 {
+ bktProviderConfig, publicBaseURL, err := g.infraManager.BucketProviderConfig()
+ if err != nil {
+ return errors.Wrap(err, "failed to generate bucket provider config")
+ }
+
+ cluster := g.conf.Infra.BucketCluster(&runtimev1.BucketCluster{
+ Rid: newRid(),
+ Provider: &runtimev1.BucketCluster_Gcs{
+ Gcs: &runtimev1.BucketCluster_GCS{
+ Endpoint: &bktProviderConfig.GCS.Endpoint,
+ Anonymous: true,
+ LocalSign: &runtimev1.BucketCluster_GCS_LocalSignOptions{
+ BaseUrl: publicBaseURL,
+ AccessId: "dummy-sa@encore.local",
+ PrivateKey: reverseString(dummyPrivateKeyReversed),
+ },
+ },
+ },
+ })
+
+ for _, bkt := range g.md.Buckets {
+ bktRid := newRid()
+
+ var publicURL *string
+ if bkt.Public {
+ u := publicBaseURL + "/" + bkt.Name
+ publicURL = &u
+ }
+ cluster.Bucket(&runtimev1.Bucket{
+ Rid: bktRid,
+ EncoreName: bkt.Name,
+ CloudName: bkt.Name,
+ PublicBaseUrl: publicURL,
+ })
+ }
+ }
+
+ for secretName, secretVal := range g.DefinedSecrets {
+ g.conf.Infra.AppSecret(&runtimev1.AppSecret{
+ Rid: newRid(),
+ EncoreName: secretName,
+ Data: toSecret([]byte(secretVal)),
+ })
+ }
+
+ return nil
+ })
+}
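+
+// Note: syncutil.Once (go4.org/syncutil) differs from sync.Once in that Do
+// returns an error and, on failure, leaves the Once unmarked so the next
+// call retries. That is what makes initialize safe to call from every entry
+// point (AllInOneProc, ProcPerService, ForTests, and so on).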
+
+type ProcConfig struct {
+ // The runtime config to add to the process, if any.
+ Runtime option.Option[*runtimev1.RuntimeConfig]
+
+ ListenAddr netip.AddrPort
+ ExtraEnv []string
+}
+
+func (g *RuntimeConfigGenerator) ProcPerService(proxy *svcproxy.SvcProxy) (services, gateways map[string]*ProcConfig, err error) {
+ if err := g.initialize(); err != nil {
+ return nil, nil, err
+ }
+
+ services = make(map[string]*ProcConfig)
+ gateways = make(map[string]*ProcConfig)
+
+ newRid := func() string { return "res_" + xid.New().String() }
+
+ sd := &runtimev1.ServiceDiscovery{Services: make(map[string]*runtimev1.ServiceDiscovery_Location)}
+
+ svcListenAddr := make(map[string]netip.AddrPort)
+ for _, svc := range g.md.Svcs {
+ listenAddr, err := freeLocalhostAddress()
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "failed to find free localhost address")
+ }
+ svcListenAddr[svc.Name] = listenAddr
+ sd.Services[svc.Name] = &runtimev1.ServiceDiscovery_Location{
+ BaseUrl: proxy.RegisterService(svc.Name, listenAddr),
+ AuthMethods: []*runtimev1.ServiceAuth{
+ {
+ AuthMethod: &runtimev1.ServiceAuth_EncoreAuth_{
+ EncoreAuth: &runtimev1.ServiceAuth_EncoreAuth{
+ AuthKeys: g.authKeys,
+ },
+ },
+ },
+ },
+ }
+ }
+
+ // Set up the service processes.
+ for _, svc := range g.md.Svcs {
+ conf, err := g.conf.Deployment(newRid()).
+ ServiceDiscovery(sd).
+ HostsServices(svc.Name).
+ ReduceWithMeta(g.md).
+ BuildRuntimeConfig()
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "failed to generate runtime config")
+ }
+
+ usedSecrets := secretsUsedByServices(g.md, svc.Name)
+ listenAddr := svcListenAddr[svc.Name]
+ configEnvs := g.encodeConfigs(svc.Name)
+
+ services[svc.Name] = &ProcConfig{
+ Runtime: option.Some(conf),
+ ListenAddr: listenAddr,
+ ExtraEnv: append([]string{
+ fmt.Sprintf("%s=%s", appSecretsEnvVar, g.encodeSecrets(usedSecrets)),
+ }, configEnvs...),
+ }
+ }
+
+ // Set up the gateways.
+ for _, gw := range g.md.Gateways {
+ conf, err := g.conf.Deployment(newRid()).ServiceDiscovery(sd).HostsGateways(gw.EncoreName).ReduceWithMeta(g.md).BuildRuntimeConfig()
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "failed to generate runtime config")
+ }
+ listenAddr, err := freeLocalhostAddress()
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "failed to find free localhost address")
+ }
+ gateways[gw.EncoreName] = &ProcConfig{
+ Runtime: option.Some(conf),
+ ListenAddr: listenAddr,
+ ExtraEnv: []string{},
+ }
+ }
+
+ return
+}
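+
+// For illustration (service names and ports hypothetical), an app with
+// services "users" and "orders" would yield per-service configs along these
+// lines:
+//
+//	users:  listen 127.0.0.1:52114, reaches orders via its svcproxy URL
+//	orders: listen 127.0.0.1:53891, reaches users via its svcproxy URL
+//
+// Every service receives the full service-discovery map, so cross-service
+// calls are routed through the proxy regardless of which process hosts the
+// callee.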
+
+func (g *RuntimeConfigGenerator) AllInOneProc() (*ProcConfig, error) {
+ if err := g.initialize(); err != nil {
+ return nil, err
+ }
+
+ newRid := func() string { return "res_" + xid.New().String() }
+
+ sd := &runtimev1.ServiceDiscovery{Services: make(map[string]*runtimev1.ServiceDiscovery_Location)}
+
+ d := g.conf.Deployment(newRid()).ServiceDiscovery(sd)
+ for _, gw := range g.md.Gateways {
+ d.HostsGateways(gw.EncoreName)
+ }
+ for _, svc := range g.md.Svcs {
+ d.HostsServices(svc.Name)
+ }
+
+ conf, err := d.ReduceWithMeta(g.md).BuildRuntimeConfig()
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to generate runtime config")
+ }
+
+ listenAddr, err := freeLocalhostAddress()
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to find free localhost address")
+ }
+
+ configEnvs := g.encodeConfigs(fns.Map(g.md.Svcs, func(svc *meta.Service) string { return svc.Name })...)
+
+ return &ProcConfig{
+ Runtime: option.Some(conf),
+ ListenAddr: listenAddr,
+ ExtraEnv: append([]string{
+ fmt.Sprintf("%s=%s", appSecretsEnvVar, encodeSecretsEnv(g.DefinedSecrets)),
+ }, configEnvs...),
+ }, nil
+}
+
+func (g *RuntimeConfigGenerator) ProcPerServiceWithNewRuntimeConfig(proxy *svcproxy.SvcProxy) (conf *runtimev1.RuntimeConfig, services, gateways map[string]*ProcConfig, err error) {
+ if err := g.initialize(); err != nil {
+ return nil, nil, nil, err
+ }
+
+ if len(g.SvcConfigs) > 0 {
+ return nil, nil, nil, errors.New("service configs not yet supported")
+ }
+
+ services = make(map[string]*ProcConfig)
+ gateways = make(map[string]*ProcConfig)
+
+ newRid := func() string { return "res_" + xid.New().String() }
+
+ sd := &runtimev1.ServiceDiscovery{Services: make(map[string]*runtimev1.ServiceDiscovery_Location)}
+
+ svcListenAddr := make(map[string]netip.AddrPort)
+ var svcNames []string
+ for _, svc := range g.md.Svcs {
+ svcNames = append(svcNames, svc.Name)
+ listenAddr, err := freeLocalhostAddress()
+ if err != nil {
+ return nil, nil, nil, errors.Wrap(err, "failed to find free localhost address")
+ }
+ svcListenAddr[svc.Name] = listenAddr
+ sd.Services[svc.Name] = &runtimev1.ServiceDiscovery_Location{
+ BaseUrl: proxy.RegisterService(svc.Name, listenAddr),
+ AuthMethods: []*runtimev1.ServiceAuth{
+ {
+ AuthMethod: &runtimev1.ServiceAuth_EncoreAuth_{
+ EncoreAuth: &runtimev1.ServiceAuth_EncoreAuth{
+ AuthKeys: g.authKeys,
+ },
+ },
+ },
+ },
+ }
+ }
+
+ for _, svc := range g.md.Svcs {
+ conf, err = g.conf.Deployment(newRid()).
+ ServiceDiscovery(sd).
+ HostsServices(svc.Name).
+ ReduceWithMeta(g.md).
+ BuildRuntimeConfig()
+ if err != nil {
+ return nil, nil, nil, errors.Wrap(err, "failed to generate runtime config")
+ }
+
+ listenAddr := svcListenAddr[svc.Name]
+ services[svc.Name] = &ProcConfig{
+ Runtime: option.Some(conf),
+ ListenAddr: listenAddr,
+ }
+ }
+
+ // Set up the gateways.
+ for _, gw := range g.md.Gateways {
+ listenAddr, err := freeLocalhostAddress()
+ if err != nil {
+ return nil, nil, nil, errors.Wrap(err, "failed to find free localhost address")
+ }
+
+ conf, err = g.conf.Deployment(newRid()).
+ ServiceDiscovery(sd).
+ HostsGateways(gw.EncoreName).
+ //ReduceWithMeta(g.md).
+ BuildRuntimeConfig()
+ if err != nil {
+ return nil, nil, nil, errors.Wrap(err, "failed to generate runtime config")
+ }
+ gateways[gw.EncoreName] = &ProcConfig{
+ Runtime: option.Some(conf),
+ ListenAddr: listenAddr,
+ }
+ }
+
+ return
+}
+
+func (g *RuntimeConfigGenerator) ForTests(newRuntimeConf bool) (envs []string, err error) {
+ if err := g.initialize(); err != nil {
+ return nil, err
+ }
+
+ newRid := func() string { return "res_" + xid.New().String() }
+
+ sd := &runtimev1.ServiceDiscovery{Services: make(map[string]*runtimev1.ServiceDiscovery_Location)}
+
+ d := g.conf.Deployment(newRid()).ServiceDiscovery(sd)
+ for _, gw := range g.md.Gateways {
+ d.HostsGateways(gw.EncoreName)
+ }
+ for _, svc := range g.md.Svcs {
+ d.HostsServices(svc.Name)
+ }
+
+ conf, err := d.ReduceWithMeta(g.md).BuildRuntimeConfig()
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to generate runtime config")
+ }
+
+ var runtimeCfgStr string
+ if newRuntimeConf {
+ runtimeCfgBytes, err := proto.Marshal(conf)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to marshal runtime config")
+ }
+ gzipped := gzipBytes(runtimeCfgBytes)
+ runtimeCfgStr = "gzip:" + base64.StdEncoding.EncodeToString(gzipped)
+ } else {
+ // We don't use secretEnvs because for local development we use
+ // plaintext secrets across the board.
+ var secretEnvs map[string][]byte = nil
+
+ runtimeCfg, err := rtconfgen.ToLegacy(conf, secretEnvs)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to generate runtime config")
+ }
+ runtimeCfgBytes, err := json.Marshal(runtimeCfg)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to marshal runtime config")
+ }
+ runtimeCfgStr = base64.RawURLEncoding.EncodeToString(runtimeCfgBytes)
+ }
+
+ envs = append(envs,
+ fmt.Sprintf("%s=%s", appSecretsEnvVar, encodeSecretsEnv(g.DefinedSecrets)),
+ fmt.Sprintf("%s=%s", runtimeCfgEnvVar, runtimeCfgStr),
+ )
+
+ svcNames := fns.Map(g.md.Svcs, func(svc *meta.Service) string { return svc.Name })
+ envs = append(envs, g.encodeConfigs(svcNames...)...)
+
+ if g.IncludeMetaEnv {
+ metaBytes, err := proto.Marshal(g.md)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to marshal metadata")
+ }
+ gzipped := gzipBytes(metaBytes)
+ metaEnvStr := "gzip:" + base64.StdEncoding.EncodeToString(gzipped)
+ envs = append(envs, fmt.Sprintf("%s=%s", metaEnvVar, metaEnvStr))
+ }
+
+ if runtimeLibPath := encoreEnv.EncoreRuntimeLib(); runtimeLibPath != "" {
+ envs = append(envs, "ENCORE_RUNTIME_LIB="+runtimeLibPath)
+ }
+
+ return envs, nil
+}
+
+func ptrOrNil[T comparable](val T) *T {
+ var zero T
+ if val == zero {
+ return nil
+ }
+ return &val
+}
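+
+// For example, ptrOrNil("") and ptrOrNil(0) return nil, while a non-zero
+// value comes back as a pointer, so zero values are simply omitted from the
+// generated protobuf config.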
+
+func (g *RuntimeConfigGenerator) ProcEnvs(proc *ProcConfig, useRuntimeConfigV2 bool) ([]string, error) {
+ env := append([]string{
+ fmt.Sprintf("%s=%s", listenEnvVar, proc.ListenAddr.String()),
+ }, proc.ExtraEnv...)
+
+ if rt, ok := proc.Runtime.Get(); ok {
+ var runtimeCfgStr string
+
+ if useRuntimeConfigV2 {
+ runtimeCfgBytes, err := proto.Marshal(rt)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to marshal runtime config")
+ }
+ gzipped := gzipBytes(runtimeCfgBytes)
+ runtimeCfgStr = "gzip:" + base64.StdEncoding.EncodeToString(gzipped)
+ } else {
+ // We don't use secretEnvs because for local development we use
+ // plaintext secrets across the board.
+ var secretEnvs map[string][]byte = nil
+
+ runtimeCfg, err := rtconfgen.ToLegacy(rt, secretEnvs)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to generate runtime config")
+ }
+
+ runtimeCfgBytes, err := json.Marshal(runtimeCfg)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to marshal runtime config")
+ }
+ runtimeCfgStr = base64.RawURLEncoding.EncodeToString(runtimeCfgBytes)
+ }
+
+ env = append(env, fmt.Sprintf("%s=%s", runtimeCfgEnvVar, runtimeCfgStr))
+ }
+
+ if g.IncludeMetaEnv {
+ metaBytes, err := proto.Marshal(g.md)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to marshal metadata")
+ }
+ gzipped := gzipBytes(metaBytes)
+ metaEnvStr := "gzip:" + base64.StdEncoding.EncodeToString(gzipped)
+ env = append(env, fmt.Sprintf("%s=%s", metaEnvVar, metaEnvStr))
+ }
+
+ if runtimeLibPath := encoreEnv.EncoreRuntimeLib(); runtimeLibPath != "" {
+ env = append(env, "ENCORE_RUNTIME_LIB="+runtimeLibPath)
+ }
+
+ return env, nil
+}
+
+func (g *RuntimeConfigGenerator) MissingSecrets() []string {
+ var missing []string
+ for _, pkg := range g.md.Pkgs {
+ for _, name := range pkg.Secrets {
+ if _, ok := g.DefinedSecrets[name]; !ok {
+ missing = append(missing, name)
+ }
+ }
+ }
+
+ sort.Strings(missing)
+ missing = slices.Compact(missing)
+ return missing
+}
+
+func (g *RuntimeConfigGenerator) encodeSecrets(secretNames map[string]bool) string {
+ vals := make(map[string]string)
+ for name := range secretNames {
+ vals[name] = g.DefinedSecrets[name]
+ }
+ return encodeSecretsEnv(vals)
+}
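+
+// The app runtime reverses this encoding on startup; a hedged sketch of the
+// decode path (assuming entries are comma-separated key=base64url pairs,
+// matching encodeSecretsEnv in run.go):
+//
+//	func decodeSecretsEnv(s string) (map[string]string, error) {
+//		out := make(map[string]string)
+//		for _, kv := range strings.Split(s, ",") {
+//			k, v, ok := strings.Cut(kv, "=")
+//			if !ok {
+//				return nil, fmt.Errorf("malformed secret entry %q", kv)
+//			}
+//			data, err := base64.RawURLEncoding.DecodeString(v)
+//			if err != nil {
+//				return nil, err
+//			}
+//			out[k] = string(data)
+//		}
+//		return out, nil
+//	}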
+
+func (g *RuntimeConfigGenerator) encodeConfigs(svcNames ...string) []string {
+ envs := make([]string, 0, len(svcNames))
+ for _, svcName := range svcNames {
+ cfgStr, ok := g.SvcConfigs[svcName]
+ if !ok {
+ continue
+ }
+ envs = append(envs,
+ fmt.Sprintf(
+ "%s%s=%s",
+ serviceCfgEnvPrefix,
+ strings.ToUpper(svcName),
+ base64.RawURLEncoding.EncodeToString([]byte(cfgStr)),
+ ),
+ )
+ }
+
+ return envs
+}
+
+// secretsUsedByServices returns the set of secrets that are accessible by the given services, using the metadata for access control.
+func secretsUsedByServices(md *meta.Data, svcNames ...string) (secretNames map[string]bool) {
+ svcNameSet := make(map[string]bool)
+ for _, name := range svcNames {
+ svcNameSet[name] = true
+ }
+
+ secretNames = make(map[string]bool)
+ for _, pkg := range md.Pkgs {
+ if len(pkg.Secrets) > 0 && (pkg.ServiceName == "" || svcNameSet[pkg.ServiceName]) {
+ for _, secret := range pkg.Secrets {
+ secretNames[secret] = true
+ }
+ }
+ }
+ return secretNames
+}
+
+// freeLocalhostAddress returns a free localhost address with an OS-assigned port.
+func freeLocalhostAddress() (netip.AddrPort, error) {
+ l, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return netip.AddrPort{}, err
+ }
+ defer func() { _ = l.Close() }()
+
+ return l.Addr().(*net.TCPAddr).AddrPort(), nil
+}
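+
+// This is the usual "port 0" trick: bind, let the OS pick a free port, read
+// it back, and close the listener. There is an unavoidable window in which
+// another process could claim the port before our service binds it, an
+// accepted trade-off for local development. Hypothetical use:
+//
+//	addr, err := freeLocalhostAddress()
+//	if err != nil {
+//		return err
+//	}
+//	env = append(env, listenEnvVar+"="+addr.String())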
+
+func encodeServiceConfigs(svcCfgs map[string]string) []string {
+ envs := make([]string, 0, len(svcCfgs))
+ for serviceName, cfgString := range svcCfgs {
+ envs = append(envs, "ENCORE_CFG_"+strings.ToUpper(serviceName)+"="+base64.RawURLEncoding.EncodeToString([]byte(cfgString)))
+ }
+ slices.Sort(envs)
+ return envs
+}
+
+func gzipBytes(data []byte) []byte {
+ var buf bytes.Buffer
+ w := gzip.NewWriter(&buf)
+ _, _ = w.Write(data)
+ _ = w.Close()
+ return buf.Bytes()
+}
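+
+// The consuming runtime presumably strips the "gzip:" prefix and reverses
+// this encoding; a sketch of that decode path (assumed, not shown in this
+// change):
+//
+//	func gunzipConfig(s string) ([]byte, error) {
+//		raw, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(s, "gzip:"))
+//		if err != nil {
+//			return nil, err
+//		}
+//		zr, err := gzip.NewReader(bytes.NewReader(raw))
+//		if err != nil {
+//			return nil, err
+//		}
+//		defer func() { _ = zr.Close() }()
+//		return io.ReadAll(zr)
+//	}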
+
+func reverseString(s string) string {
+ runes := []rune(s)
+ for i, j := 0, len(runes)-1; i < j; i, j = i+1, j-1 {
+ runes[i], runes[j] = runes[j], runes[i]
+ }
+ return string(runes)
+}
+
+// We lightly obfuscate the PK to trigger fewer of the tools that warn about
+// keys in source code.
+//
+// $ tail -r pk.pem | rev
+const dummyPrivateKeyReversed = `-----YEK ETAVIRP DNE-----
+=AOz3eEM5xAe/71Tfx3sQNkW
+4FXBCChkppSrCoQnR6pBeP31wu0S0UTTNDhNmSYcerdSFbRhyZOzNRnhF9o1h5D5
++gKkhRZkC33z5+0p8aWwOVWJY8MDycHwvEYvtwcXLNZBHI8L8++mhp0uFz5c5sNM
+pPRyurcUY36iDzx7hAJcAGoAvXJwVzTmzXBZtvFPs6Alc5gHti2W1l2bz2mwOV77
+BA9xAW4R6EHVTnqaoXvxvocW5Z9I0ecJzx0NPfkXBriW1lNclAnkoRAYqziasa6C
+WIxePQ2VRFbnLu7XR1M/xqg00GHFV0fTlNPo95lC6tl0PAdoupOX1lwjH3rQnTkB
+Y4BgBKQQJ8F0PPTSMAvyK1bcHP2Iob8UFxyHuPOm11aHYwM4VZvmHm8jX/8vz4eb
+6kbNbEkWzfJbbEen/EJLR1XtzvTdjs9bQnJvhQMZmPGzQalqHcVuilQX+PFV4ezM
+A23w1HCIq6vZqXLO8rXhe8S5hImwVSAKq6TK5dlYPOTIBp66lCQgBKwjkcQcX7tq
+mr44FuVB7hqBMfnCB0kKcs1SuYgmfUQE41JGInsqjdpaFOwzQi4Jcx7TK44p9vn2
+ik6i/hN7JSVA8kMImWIxtL18uVC/Rg0RpM2vcjd+pfgUDifZ1FVYCiL3WlEzDBlZ
+bSmYdd57T70mEEiuV8QmGiIRrk6kZAMP4CQgBKQ4mIYJX2RJQ1j0V+iXwY/bg+N5
+DPEWLB0w6ReZapNy4DSEMD1zm6IWUuo3rGfCsSKUD0xFR/YkauO5Q+GI2gKvmj5V
+MRiysBL/8PCBwKiFKo1MFjCUfbV/ks49/OJYSOi9WIJiXEg5Tm56BDTH6I8rNdU1
+lGIimbKIuzEBWUHsyDQgBKQQ8O/PDCI/SJSPYjkxw1fpX022hUvVW9pvtmd6v0vX
+M5kMBkT60IwTWhF0DoAx4Uyn4rlPiJy5TUwjC0po/aCRV+ug5C+wIRTCtVCpqRyz
+GeB4U/3WXHmSulzK5Dw4ADfbWSP0dAbNNOaFI4y6u+acEl5MFt3GN/jieITLsZNK
+X18B7zHj7LR2f5k3xiJJ/7uNFl8SCcnVquvEI1qslUSTLEPCNoiy5iX/VVTmVNwv
+dUi92s5oFMyJOFW5joggeeQ55BN6EsjQTnj/XetnpPe5wf5vvptHg5HOcUjJPmIJ
+vsGpMXoyCh3mzdQPMUJM9Ha8DKlACadqTjdid9ZsAAYLAEggCEAABMgAvulUiO2B
+FkdtezbN/f5vpPbr4knO22xylfkUp5Uw0W/HxtntXXobF42guEEiie49zki5fPHK
+vAMC7bOERRLV4v35Dd9QV/KFe0FxqEfm8bFDM6FoA4c0qnkDaKbMhdvxxs0wVFRm
+BukfBCLOt+W/XyFhZvUKkxgbcOjXV7HRFQGI+GZnrf00qbCRNOCdlYLoYX1kf3pQ
+eNY6o9ZCJxIDO+dUATCoP3tmP4hvonrjGfpek99D4Ye3+iDwg0AxDW+bt9qoRFew
+VdOuGmooPaDDxn95q5IghRhrvrEaHpkN/EZiNEAJWQkZa9wkxGye5T9hMZRBjUkt
+wGPTyf02fuGquCQABIoAAEgAjSggwcKBCSAAFEQAB0w9GikhqkgBNADABIQvEIIM
+-----YEK ETAVIRP NIGEB-----`
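+
+// Note that reversing the whole string undoes the obfuscation, since a full
+// reversal flips both the line order and each line's characters (the inverse
+// of `tail -r pk.pem | rev`). For illustration:
+//
+// dummyPrivateKey := reverseString(dummyPrivateKeyReversed)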
diff --git a/cli/daemon/run/testdata/echo/echo/echo.go b/cli/daemon/run/testdata/echo/echo/echo.go
deleted file mode 100644
index 1e79fd447a..0000000000
--- a/cli/daemon/run/testdata/echo/echo/echo.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package echo
-
-import "context"
-
-type Data struct {
- Message string
-}
-
-// Echo echoes back the request data.
-//encore:api public
-func Echo(ctx context.Context, params *Data) (*Data, error) {
- return params, nil
-}
diff --git a/cli/daemon/run/testdata/echo/go.mod b/cli/daemon/run/testdata/echo/go.mod
deleted file mode 100644
index 6bd3a78c37..0000000000
--- a/cli/daemon/run/testdata/echo/go.mod
+++ /dev/null
@@ -1 +0,0 @@
-module encore.app
diff --git a/cli/daemon/run/tests.go b/cli/daemon/run/tests.go
index 7f67887d8c..52a56d9012 100644
--- a/cli/daemon/run/tests.go
+++ b/cli/daemon/run/tests.go
@@ -2,94 +2,229 @@ package run
import (
"context"
+ "fmt"
"io"
- "os"
- "strconv"
+ "runtime"
+ "strings"
- "encr.dev/cli/daemon/internal/appfile"
- "encr.dev/cli/internal/env"
- "encr.dev/compiler"
- "encr.dev/parser"
+ "github.com/cockroachdb/errors"
+ "github.com/rs/xid"
+
+ "encore.dev/appruntime/exported/experiments"
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/cli/daemon/namespace"
+ "encr.dev/cli/daemon/run/infra"
+ "encr.dev/cli/daemon/secret"
+ "encr.dev/internal/optracker"
+ "encr.dev/internal/version"
+ "encr.dev/pkg/builder"
+ "encr.dev/pkg/builder/builderimpl"
+ "encr.dev/pkg/cueutil"
+ "encr.dev/pkg/fns"
+ "encr.dev/pkg/option"
+ "encr.dev/pkg/paths"
+ "encr.dev/pkg/vcs"
+ runtimev1 "encr.dev/proto/encore/runtime/v1"
)
-// Check checks the app for errors.
-func (mgr *Manager) Check(ctx context.Context, appRoot, relwd string) error {
- // TODO: We should check that all secret keys are defined as well.
- cfg := &compiler.Config{
- Version: "", // not needed until we start storing trace metadata
- WorkingDir: relwd,
- CgoEnabled: true,
- EncoreRuntimePath: env.EncoreRuntimePath(),
- EncoreGoRoot: env.EncoreGoRoot(),
+// TestParams groups the parameters for the Test method.
+type TestParams struct {
+ *TestSpecParams
+
+ // Stdout and Stderr are where "go test" output should be written.
+ Stdout, Stderr io.Writer
+}
+
+// Test runs the tests.
+func (mgr *Manager) Test(ctx context.Context, params TestParams) (err error) {
+ expSet, err := params.App.Experiments(params.Environ)
+ if err != nil {
+ return err
}
- result, err := compiler.Build(appRoot, cfg)
- if err == nil {
- os.RemoveAll(result.Dir)
+ bld := builderimpl.Resolve(params.App.Lang(), expSet)
+ defer fns.CloseIgnore(bld)
+
+ spec, err := mgr.testSpec(ctx, bld, expSet, params.TestSpecParams)
+ if err != nil {
+ return err
}
- return err
+
+ workingDir := paths.RootedFSPath(params.App.Root(), params.WorkingDir)
+ return bld.RunTests(ctx, builder.RunTestsParams{
+ Spec: spec,
+ WorkingDir: workingDir,
+ Stdout: params.Stdout,
+ Stderr: params.Stderr,
+ })
}
-// TestParams groups the parameters for the Test method.
-type TestParams struct {
- // AppRoot is the application root.
- AppRoot string
+// TestSpecParams are the parameters for computing a test spec.
+type TestSpecParams struct {
+ // App is the app to test.
+ App *apps.Instance
- // AppID is the unique app id, as defined by the manifest.
- AppID string
+ // NS is the namespace to use.
+ NS *namespace.Namespace
+
+ // Secrets are the secrets to use.
+ Secrets *secret.LoadResult
+
+ // Args are the arguments to pass to the test command.
+ Args []string
// WorkingDir is the working dir, for formatting
// error messages with relative paths.
WorkingDir string
- // DBClusterID is the database cluster id to connect to.
- DBClusterID string
+ // Environ are the environment variables to set when running the tests,
+ // in the same format as os.Environ().
+ Environ []string
- // Parse is the parse result for the initial run of the app.
- // If nil the app is parsed before starting.
- Parse *parser.Result
-
- // Args are the arguments to pass to "go test".
- Args []string
+ // CodegenDebug, if true, specifies to keep the output
+ // around for codegen debugging purposes.
+ CodegenDebug bool
+}
- // Stdout and Stderr are where "go test" output should be written.
- Stdout, Stderr io.Writer
+type TestSpecResponse struct {
+ Command string
+ Args []string
+ Environ []string
}
-// Test runs the tests.
-func (mgr *Manager) Test(ctx context.Context, params TestParams) (err error) {
- appSlug, err := appfile.Slug(params.AppRoot)
+// TestSpec returns how to run the tests.
+func (mgr *Manager) TestSpec(ctx context.Context, params TestSpecParams) (*TestSpecResponse, error) {
+ expSet, err := params.App.Experiments(params.Environ)
if err != nil {
- return err
+ return nil, err
}
+ bld := builderimpl.Resolve(params.App.Lang(), expSet)
+ defer fns.CloseIgnore(bld)
+ spec, err := mgr.testSpec(ctx, bld, expSet, &params)
+ if err != nil {
+ return nil, err
+ }
+ return &TestSpecResponse{
+ Command: spec.Command,
+ Args: spec.Args,
+ Environ: spec.Environ,
+ }, nil
+}
+
+// testSpec returns how to run the tests.
+func (mgr *Manager) testSpec(ctx context.Context, bld builder.Impl, expSet *experiments.Set, params *TestSpecParams) (*builder.TestSpecResult, error) {
var secrets map[string]string
- if appSlug != "" {
- data, err := mgr.Secret.Get(ctx, appSlug)
+ if params.Secrets != nil {
+ secretData, err := params.Secrets.Get(ctx, expSet)
if err != nil {
- return err
+ return nil, err
+ }
+ secrets = secretData.Values
+ // remove db override secrets for tests
+ for k := range secrets {
+ if strings.HasPrefix(k, "sqldb::") {
+ delete(secrets, k)
+ }
+ }
+ }
+
+ vcsRevision := vcs.GetRevision(params.App.Root())
+ buildInfo := builder.BuildInfo{
+ BuildTags: builder.LocalBuildTags,
+ CgoEnabled: true,
+ StaticLink: false,
+ DebugMode: builder.DebugModeDisabled,
+ Environ: params.Environ,
+ GOOS: runtime.GOOS,
+ GOARCH: runtime.GOARCH,
+ KeepOutput: params.CodegenDebug,
+ Revision: vcsRevision.Revision,
+ UncommittedChanges: vcsRevision.Uncommitted,
+
+ // Use the local JS runtime if this is a development build.
+ UseLocalJSRuntime: version.Channel == version.DevBuild,
+ }
+
+ parse, err := bld.Parse(ctx, builder.ParseParams{
+ Build: buildInfo,
+ App: params.App,
+ Experiments: expSet,
+ WorkingDir: params.WorkingDir,
+ ParseTests: true,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := params.App.CacheMetadata(parse.Meta); err != nil {
+ return nil, errors.Wrap(err, "cache metadata")
+ }
+
+ rm := infra.NewResourceManager(params.App, mgr.ClusterMgr, mgr.ObjectsMgr, mgr.PublicBuckets, params.NS, nil, mgr.DBProxyPort, true)
+
+ jobs := optracker.NewAsyncBuildJobs(ctx, params.App.PlatformOrLocalID(), nil)
+ rm.StartRequiredServices(jobs, parse.Meta)
+
+ // Note: jobs.Wait must be called before generateConfig.
+ if err := jobs.Wait(); err != nil {
+ return nil, err
+ }
+
+ gateways := make(map[string]GatewayConfig)
+ gatewayBaseURL := fmt.Sprintf("http://localhost:%d", mgr.RuntimePort)
+ for _, gw := range parse.Meta.Gateways {
+ gateways[gw.EncoreName] = GatewayConfig{
+ BaseURL: gatewayBaseURL,
+ Hostnames: []string{"localhost"},
}
- secrets = data.Values
}
- cfg := &compiler.Config{
- Version: "", // not needed until we start storing trace metadata
- WorkingDir: params.WorkingDir,
- CgoEnabled: true,
- EncoreRuntimePath: env.EncoreRuntimePath(),
- EncoreGoRoot: env.EncoreGoRoot(),
- Test: &compiler.TestConfig{
- Env: []string{
- "ENCORE_ENV_ID=test",
- "ENCORE_PROC_ID=test",
- "ENCORE_RUNTIME_ADDRESS=localhost:" + strconv.Itoa(mgr.RuntimePort),
- "ENCORE_SQLDB_ADDRESS=localhost:" + strconv.Itoa(mgr.DBProxyPort),
- "ENCORE_SQLDB_PASSWORD=" + params.DBClusterID,
- "ENCORE_SECRETS=" + encodeSecretsEnv(secrets),
- },
- Args: params.Args,
- Stdout: params.Stdout,
- Stderr: params.Stderr,
+ cfg, err := bld.ServiceConfigs(ctx, builder.ServiceConfigsParams{
+ Parse: parse,
+ CueMeta: &cueutil.Meta{
+ APIBaseURL: gatewayBaseURL,
+ EnvName: "local",
+ EnvType: cueutil.EnvType_Test,
+ CloudType: cueutil.CloudType_Local,
},
+ })
+ if err != nil {
+ return nil, err
}
- return compiler.Test(ctx, params.AppRoot, cfg)
+
+ authKey := genAuthKey()
+ configGen := &RuntimeConfigGenerator{
+ app: params.App,
+ infraManager: rm,
+ md: parse.Meta,
+ AppID: option.Some(params.App.PlatformOrLocalID()),
+ EnvID: option.Some("test"),
+ TraceEndpoint: option.Some(fmt.Sprintf("http://localhost:%d/trace", mgr.RuntimePort)),
+ AuthKey: authKey,
+ Gateways: gateways,
+ DefinedSecrets: secrets,
+ SvcConfigs: cfg.Configs,
+ EnvName: option.Some("test"),
+ EnvType: option.Some(runtimev1.Environment_TYPE_TEST),
+ DeployID: option.Some(fmt.Sprintf("clitest_%s", xid.New().String())),
+ IncludeMetaEnv: bld.NeedsMeta(),
+ }
+
+ env, err := configGen.ForTests(bld.UseNewRuntimeConfig())
+ if err != nil {
+ return nil, err
+ }
+ env = append(env, encodeServiceConfigs(cfg.Configs)...)
+
+ return bld.TestSpec(ctx, builder.TestSpecParams{
+ Compile: builder.CompileParams{
+ Build: buildInfo,
+ App: params.App,
+ Parse: parse,
+ OpTracker: nil,
+ Experiments: expSet,
+ WorkingDir: params.WorkingDir,
+ },
+ Env: append(params.Environ, env...),
+ Args: params.Args,
+ })
}
diff --git a/cli/daemon/run/watch.go b/cli/daemon/run/watch.go
index 7108495f9e..3b453d1194 100644
--- a/cli/daemon/run/watch.go
+++ b/cli/daemon/run/watch.go
@@ -2,57 +2,70 @@ package run
import (
"path/filepath"
- "time"
+ "strings"
- "github.com/rjeczalik/notify"
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/pkg/watcher"
)
// watch watches the given app for changes, and reports
// them on c.
func (mgr *Manager) watch(run *Run) error {
- evs := make(chan notify.EventInfo)
- if err := notify.Watch(filepath.Join(run.Root, "..."), evs, notify.All); err != nil {
+ sub, err := run.App.Watch(func(i *apps.Instance, event []watcher.Event) {
+ if IgnoreEvents(event) {
+ return
+ }
+
+ mgr.RunStdout(run, []byte("Changes detected, recompiling...\n"))
+ if err := run.Reload(); err != nil {
+ if errList := AsErrorList(err); errList != nil {
+ mgr.RunError(run, errList)
+ } else {
+ errStr := err.Error()
+ if !strings.HasSuffix(errStr, "\n") {
+ errStr += "\n"
+ }
+ mgr.RunStderr(run, []byte(errStr))
+ }
+ } else {
+ mgr.RunStdout(run, []byte("Reloaded successfully.\n"))
+ }
+ })
+ if err != nil {
return err
}
go func() {
<-run.Done()
- notify.Stop(evs)
+ run.App.Unwatch(sub)
}()
- go func() {
- for {
- select {
- case <-run.Done():
- return
- case ev := <-evs:
- if ignoreEvent(run.Root, ev) {
- continue
- }
- // We've seen that some editors like vim rename the .go files to another extension,
- // which breaks our parser since it doesn't recognize the file as a .go file.
- // This race is annoying, but in practice a 100ms delay is imperceptible since
- // the user is busy working in their editor.
- time.Sleep(100 * time.Millisecond)
- mgr.runStdout(run, []byte("Changes detected, recompiling...\n"))
- if _, err := run.Reload(); err != nil {
- mgr.runStderr(run, []byte(err.Error()))
- } else {
- mgr.runStdout(run, []byte("Reloaded successfully.\n"))
- }
- }
- }
- }()
return nil
}
-func ignoreEvent(appRoot string, ev notify.EventInfo) bool {
- path := ev.Path()
+// IgnoreEvents reports whether _all_ events are on files that should be ignored,
+// either because they do not impact the running app or because they are the
+// result of Encore itself generating code.
+func IgnoreEvents(events []watcher.Event) bool {
+ for _, event := range events {
+ if !ignoreEvent(event) {
+ return false
+ }
+ }
+ return true
+}
+
+func ignoreEvent(ev watcher.Event) bool {
+ filename := filepath.Base(ev.Path)
+ if strings.HasPrefix(strings.ToLower(filename), "encore.gen.") {
+ // Ignore generated code
+ return true
+ }
- // Ignore non-Go files
- ext := filepath.Ext(path)
+ // Ignore files which wouldn't impact the running app
+ ext := filepath.Ext(ev.Path)
switch ext {
- case ".go", ".sql", ".mod", ".sum", ".app":
+ case ".go", ".sql", ".mod", ".sum", ".work", ".app", ".cue",
+ ".ts", ".js", ".tsx", ".jsx", ".mts", ".mjs", ".cjs", ".cts":
return false
default:
return true
diff --git a/cli/daemon/runtime/runtime.go b/cli/daemon/runtime/runtime.go
deleted file mode 100644
index c0d8ff04e5..0000000000
--- a/cli/daemon/runtime/runtime.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package runtime
-
-import (
- "encoding/base64"
- "fmt"
- "io/ioutil"
- "net/http"
- "time"
-
- "encr.dev/cli/daemon/run"
- "encr.dev/cli/daemon/runtime/trace"
- "encr.dev/proto/encore/server/remote"
- "github.com/rs/zerolog/log"
-)
-
-type server struct {
- runMgr *run.Manager
- ts *trace.Store
- rc remote.RemoteClient
-}
-
-func NewServer(runMgr *run.Manager, ts *trace.Store, rc remote.RemoteClient) http.Handler {
- s := &server{runMgr: runMgr, ts: ts, rc: rc}
- return s
-}
-
-// ServeHTTP implements http.Handler.
-func (s *server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- switch req.URL.Path {
- case "/trace":
- s.RecordTrace(w, req)
- default:
- http.Error(w, "Not Found", http.StatusNotFound)
- }
-}
-
-func (s *server) RecordTrace(w http.ResponseWriter, req *http.Request) {
- pid := req.Header.Get("X-Encore-Proc-ID")
- if pid == "" {
- http.Error(w, "missing X-Encore-Proc-ID header", http.StatusBadRequest)
- return
- }
- traceID, err := parseTraceID(req.Header.Get("X-Encore-Trace-ID"))
- if err != nil {
- http.Error(w, "invalid X-Encore-Trace-ID header: "+err.Error(), http.StatusBadRequest)
- return
- }
-
- proc := s.runMgr.FindProc(pid)
- if proc == nil {
- http.Error(w, "process "+pid+" not running", http.StatusBadRequest)
- return
- }
-
- data, err := ioutil.ReadAll(req.Body)
- if err != nil {
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
- }
-
- reqs, err := trace.Parse(traceID, data)
- if err != nil {
- log.Error().Err(err).Msg("runtime: could not parse trace")
- http.Error(w, "could not parse trace: "+err.Error(), http.StatusBadRequest)
- return
- }
-
- if len(reqs) == 0 {
- // Probably a 401 Unauthorized; drop it for now
- // since we can't visualize it nicely
- return
- }
-
- tm := &trace.TraceMeta{
- ID: traceID,
- Reqs: reqs,
- AppID: proc.Run.AppID,
- Date: time.Now(),
- Meta: proc.Meta,
- }
-
- err = s.ts.Store(req.Context(), tm)
- if err != nil {
- http.Error(w, "could not record trace:"+err.Error(), http.StatusInternalServerError)
- return
- }
-}
-
-func parseTraceID(s string) (id trace.ID, err error) {
- parsedID, err := base64.RawURLEncoding.DecodeString(s)
- if err != nil {
- return id, err
- }
- if len(parsedID) != len(id) {
- return id, fmt.Errorf("bad length")
- }
- copy(id[:], parsedID)
- return id, nil
-}
diff --git a/cli/daemon/runtime/trace/trace.go b/cli/daemon/runtime/trace/trace.go
deleted file mode 100644
index 879781b8be..0000000000
--- a/cli/daemon/runtime/trace/trace.go
+++ /dev/null
@@ -1,699 +0,0 @@
-package trace
-
-import (
- "context"
- "encoding/binary"
- "fmt"
- "sync"
- "time"
-
- tracepb "encr.dev/proto/encore/engine/trace"
- metapb "encr.dev/proto/encore/parser/meta/v1"
- "github.com/rs/zerolog/log"
-)
-
-type ID [16]byte
-
-type TraceMeta struct {
- ID ID
- Reqs []*tracepb.Request
- AppID string
- EnvID string
- Date time.Time
- Meta *metapb.Data
-}
-
-// A Store stores traces received from running applications.
-type Store struct {
- trmu sync.Mutex
- traces map[string][]*TraceMeta
-
- lnmu sync.Mutex
- ln map[chan<- *TraceMeta]struct{}
-}
-
-func NewStore() *Store {
- return &Store{
- traces: make(map[string][]*TraceMeta),
- ln: make(map[chan<- *TraceMeta]struct{}),
- }
-}
-
-func (st *Store) Listen(ch chan<- *TraceMeta) {
- st.lnmu.Lock()
- st.ln[ch] = struct{}{}
- st.lnmu.Unlock()
-}
-
-func (st *Store) Store(ctx context.Context, tr *TraceMeta) error {
- st.trmu.Lock()
- st.traces[tr.AppID] = append(st.traces[tr.AppID], tr)
- st.trmu.Unlock()
-
- st.lnmu.Lock()
- defer st.lnmu.Unlock()
- for ch := range st.ln {
- // Don't block trying to send
- select {
- case ch <- tr:
- default:
- }
- }
- return nil
-}
-
-func (st *Store) List(appID string) []*TraceMeta {
- st.trmu.Lock()
- tr := st.traces[appID]
- st.trmu.Unlock()
- return tr
-}
-
-func Parse(traceID ID, data []byte) ([]*tracepb.Request, error) {
- id := &tracepb.TraceID{
- Low: bin.Uint64(traceID[:8]),
- High: bin.Uint64(traceID[8:]),
- }
- tp := &traceParser{
- traceReader: traceReader{buf: data},
- traceID: id,
- reqMap: make(map[uint64]*tracepb.Request),
- txMap: make(map[uint64]*tracepb.DBTransaction),
- queryMap: make(map[uint64]*tracepb.DBQuery),
- callMap: make(map[uint64]interface{}),
- goMap: make(map[goKey]*tracepb.Goroutine),
- httpMap: make(map[uint64]*tracepb.HTTPCall),
- }
- if err := tp.Parse(); err != nil {
- return nil, err
- }
- return tp.reqs, nil
-}
-
-type goKey struct {
- spanID uint64
- goid uint32
-}
-
-type traceParser struct {
- traceReader
- traceID *tracepb.TraceID
- reqs []*tracepb.Request
- reqMap map[uint64]*tracepb.Request
- txMap map[uint64]*tracepb.DBTransaction
- queryMap map[uint64]*tracepb.DBQuery
- callMap map[uint64]interface{} // *RPCCall or *AuthCall
- httpMap map[uint64]*tracepb.HTTPCall
- goMap map[goKey]*tracepb.Goroutine
-}
-
-func (tp *traceParser) Parse() error {
- for i := 0; !tp.Done(); i++ {
- ev := tp.Byte()
- ts := tp.Uint64()
- size := int(tp.Uint32())
- startOff := tp.Offset()
-
- var err error
- switch ev {
- case 0x01:
- err = tp.requestStart(ts)
- case 0x02:
- err = tp.requestEnd(ts)
- case 0x03:
- err = tp.goroutineStart(ts)
- case 0x04:
- err = tp.goroutineEnd(ts)
- case 0x05:
- err = tp.goroutineClear(ts)
- case 0x06:
- err = tp.transactionStart(ts)
- case 0x07:
- err = tp.transactionEnd(ts)
- case 0x08:
- err = tp.queryStart(ts)
- case 0x09:
- err = tp.queryEnd(ts)
- case 0x0A:
- err = tp.callStart(ts)
- case 0x0B:
- err = tp.callEnd(ts)
- case 0x0C, 0x0D:
- // Skip these events for now
- tp.Skip(size)
-
- case 0x0E:
- err = tp.httpStart(ts)
- case 0x0F:
- err = tp.httpEnd(ts)
- case 0x10:
- err = tp.httpBodyClosed(ts)
-
- default:
- log.Error().Int("idx", i).Hex("event", []byte{ev}).Msg("trace: unknown event type, skipping")
- tp.Skip(size)
- err = nil
- }
- if err != nil {
- return fmt.Errorf("event #%d: parsing event=%x: %v", i, ev, err)
- }
-
- if tp.Overflow() {
- return fmt.Errorf("event #%d: invalid trace format (reader overflow parsing event %x)", i, ev)
- } else if off, want := tp.Offset(), startOff+size; off < want {
- log.Error().Int("idx", i).Hex("event", []byte{ev}).Int("remainingBytes", want-off).Msg("trace: parser did not consume whole frame, skipping ahead")
- tp.Skip(want - off)
- } else if off > want {
- return fmt.Errorf("event #%d: parser (event=%x) exceeded frame size by %d bytes", i, ev, off-want)
- }
- }
-
- return nil
-}
-
-func (tp *traceParser) requestStart(ts uint64) error {
- var typ tracepb.Request_Type
- switch b := tp.Byte(); b {
- case 0x01:
- typ = tracepb.Request_RPC
- case 0x02:
- typ = tracepb.Request_AUTH
- default:
- return fmt.Errorf("unknown request type %x", b)
- }
-
- req := &tracepb.Request{
- TraceId: tp.traceID,
- SpanId: tp.Uint64(),
- ParentSpanId: tp.Uint64(),
- StartTime: tp.Uint64(),
- // EndTime not set yet
- Goid: uint32(tp.UVarint()),
- CallLoc: int32(tp.UVarint()),
- DefLoc: int32(tp.UVarint()),
- Uid: tp.String(),
- Type: typ,
- }
- // We use event timestamps instead
- req.StartTime = ts
-
- for n, i := tp.UVarint(), uint64(0); i < n; i++ {
- size := tp.UVarint()
- if size > (10 << 20) {
- return fmt.Errorf("input too large: %d bytes", size)
- }
- input := make([]byte, size)
- tp.Bytes(input)
- req.Inputs = append(req.Inputs, input)
- }
- tp.reqs = append(tp.reqs, req)
- tp.reqMap[req.SpanId] = req
- return nil
-}
-
-func (tp *traceParser) requestEnd(ts uint64) error {
- spanID := tp.Uint64()
- req, ok := tp.reqMap[spanID]
- if !ok {
- return fmt.Errorf("unknown request span: %v", spanID)
- }
- // dur := ts - rd.startTs
- req.EndTime = ts
-
- if tp.Byte() == 0 {
- // No error
- for n, i := tp.UVarint(), uint64(0); i < n; i++ {
- size := tp.UVarint()
- if size > (10 << 20) {
- return fmt.Errorf("input too large: %d bytes", size)
- }
- output := make([]byte, size)
- tp.Bytes(output)
- req.Outputs = append(req.Outputs, output)
- }
- } else {
- msg := tp.ByteString()
- if len(msg) == 0 {
- msg = []byte("unknown error")
- }
- req.Err = msg
- }
- return nil
-}
-
-func (tp *traceParser) goroutineStart(ts uint64) error {
- spanID := tp.Uint64()
- req, ok := tp.reqMap[spanID]
- if !ok {
- return fmt.Errorf("unknown request span id: %v", spanID)
- }
- goid := tp.Uint32()
- g := &tracepb.Goroutine{
- Goid: goid,
- CallLoc: 0, // not yet supported
- StartTime: ts,
- }
- k := goKey{spanID: spanID, goid: goid}
- req.Events = append(req.Events, &tracepb.Event{
- Data: &tracepb.Event_Goroutine{Goroutine: g},
- })
- tp.goMap[k] = g
- return nil
-}
-
-func (tp *traceParser) goroutineEnd(ts uint64) error {
- spanID := tp.Uint64()
- goid := tp.Uint32()
- k := goKey{spanID: spanID, goid: goid}
- g, ok := tp.goMap[k]
- if !ok {
- return fmt.Errorf("unknown goroutine id: %v", goid)
- }
- g.EndTime = ts
- delete(tp.goMap, k)
- return nil
-}
-
-func (tp *traceParser) goroutineClear(ts uint64) error {
- spanID := tp.Uint64()
- goid := tp.Uint32()
- k := goKey{spanID: spanID, goid: goid}
- g, ok := tp.goMap[k]
- if !ok {
- return fmt.Errorf("unknown goroutine id: %v/%v", spanID, goid)
- }
- g.EndTime = ts
- delete(tp.goMap, k)
- return nil
-}
-
-func (tp *traceParser) transactionStart(ts uint64) error {
- txid := tp.UVarint()
- spanID := tp.Uint64()
- req, ok := tp.reqMap[spanID]
- if !ok {
- return fmt.Errorf("unknown request span: %v", spanID)
- }
- goid := uint32(tp.UVarint())
- tx := &tracepb.DBTransaction{
- Goid: goid,
- StartLoc: int32(tp.UVarint()),
- StartTime: ts,
- }
- tp.txMap[txid] = tx
- req.Events = append(req.Events, &tracepb.Event{
- Data: &tracepb.Event_Tx{Tx: tx},
- })
- return nil
-}
-
-func (tp *traceParser) transactionEnd(ts uint64) error {
- txid := tp.UVarint()
- _ = tp.Uint64() // spanID
- tx, ok := tp.txMap[txid]
- if !ok {
- return fmt.Errorf("unknown transaction id: %v", txid)
- }
- _ = uint32(tp.UVarint()) // goid
- compl := tp.Byte()
- endLoc := int32(tp.UVarint())
- errMsg := tp.ByteString()
-
- // It's possible to get multiple transaction end events.
- // Ignore them for now; we will expose this information later.
- if tx.EndTime == 0 {
- tx.EndTime = ts
- tx.EndLoc = endLoc
- tx.Err = errMsg
- switch compl {
- case 0:
- tx.Completion = tracepb.DBTransaction_ROLLBACK
- case 1:
- tx.Completion = tracepb.DBTransaction_COMMIT
- default:
- return fmt.Errorf("unknown completion type: %x", compl)
- }
- }
- return nil
-}
-
-func (tp *traceParser) queryStart(ts uint64) error {
- qid := tp.UVarint()
- spanID := tp.Uint64()
- req, ok := tp.reqMap[spanID]
- if !ok {
- return fmt.Errorf("unknown request span: %v", spanID)
- }
- txid := tp.UVarint()
- goid := uint32(tp.UVarint())
- q := &tracepb.DBQuery{
- Goid: goid,
- CallLoc: int32(tp.UVarint()),
- StartTime: ts,
- Query: tp.ByteString(),
- }
- tp.queryMap[qid] = q
-
- if txid != 0 {
- tx, ok := tp.txMap[txid]
- if !ok {
- return fmt.Errorf("unknown transaction id: %v", txid)
- }
- tx.Queries = append(tx.Queries, q)
- } else {
- req.Events = append(req.Events, &tracepb.Event{
- Data: &tracepb.Event_Query{Query: q},
- })
- }
-
- return nil
-}
-
-func (tp *traceParser) queryEnd(ts uint64) error {
- qid := tp.UVarint()
- q, ok := tp.queryMap[qid]
- if !ok {
- return fmt.Errorf("unknown query id: %v", qid)
- }
- q.EndTime = ts
- q.Err = tp.ByteString()
- return nil
-}
-
-func (tp *traceParser) callStart(ts uint64) error {
- callID := tp.UVarint()
- spanID := tp.Uint64()
- childSpanID := tp.Uint64()
- req, ok := tp.reqMap[spanID]
- if !ok {
- return fmt.Errorf("unknown request span: %v", spanID)
- }
- c := &tracepb.RPCCall{
- SpanId: childSpanID,
- Goid: uint32(tp.UVarint()),
- CallLoc: int32(tp.UVarint()),
- DefLoc: int32(tp.UVarint()),
- StartTime: ts,
- }
- tp.callMap[callID] = c
- req.Events = append(req.Events, &tracepb.Event{
- Data: &tracepb.Event_Rpc{Rpc: c},
- })
- return nil
-}
-
-func (tp *traceParser) callEnd(ts uint64) error {
- callID := tp.UVarint()
- errMsg := tp.ByteString()
- c, ok := tp.callMap[callID].(*tracepb.RPCCall)
- if !ok {
- return fmt.Errorf("unknown call: %v ", callID)
- }
- c.EndTime = ts
- c.Err = errMsg
- delete(tp.callMap, callID)
- return nil
-}
-
-func (tp *traceParser) httpStart(ts uint64) error {
- callID := tp.UVarint()
- spanID := tp.Uint64()
- childSpanID := tp.Uint64()
- req, ok := tp.reqMap[spanID]
- if !ok {
- return fmt.Errorf("unknown request span: %v", spanID)
- }
- c := &tracepb.HTTPCall{
- SpanId: childSpanID,
- Goid: uint32(tp.UVarint()),
- Method: tp.String(),
- Url: tp.String(),
- StartTime: ts,
- }
- tp.httpMap[callID] = c
- req.Events = append(req.Events, &tracepb.Event{
- Data: &tracepb.Event_Http{Http: c},
- })
- return nil
-}
-
-func (tp *traceParser) httpEnd(ts uint64) error {
- callID := tp.UVarint()
- errMsg := tp.ByteString()
- status := tp.UVarint()
- c, ok := tp.httpMap[callID]
- if !ok {
- return fmt.Errorf("unknown call: %v ", callID)
- }
- c.EndTime = ts
- c.Err = errMsg
- c.StatusCode = uint32(status)
-
- numEvents := tp.UVarint()
- c.Events = make([]*tracepb.HTTPTraceEvent, 0, numEvents)
- for i := 0; i < int(numEvents); i++ {
- ev, err := tp.httpEvent()
- if err != nil {
- return err
- }
- c.Events = append(c.Events, ev)
- }
-
- return nil
-}
-
-func (tp *traceParser) httpBodyClosed(ts uint64) error {
- callID := tp.UVarint()
- _ = tp.ByteString() // close error
- c, ok := tp.httpMap[callID]
- if !ok {
- return fmt.Errorf("unknown call: %v ", callID)
- }
- c.BodyClosedTime = ts
- delete(tp.httpMap, callID)
- return nil
-}
-
-func (tp *traceParser) httpEvent() (*tracepb.HTTPTraceEvent, error) {
- code := tracepb.HTTPTraceEventCode(tp.Byte())
- ts := tp.Int64()
- ev := &tracepb.HTTPTraceEvent{
- Code: code,
- Time: uint64(ts),
- }
-
- switch code {
- case tracepb.HTTPTraceEventCode_GET_CONN:
- ev.Data = &tracepb.HTTPTraceEvent_GetConn{
- GetConn: &tracepb.HTTPGetConnData{
- HostPort: tp.String(),
- },
- }
-
- case tracepb.HTTPTraceEventCode_GOT_CONN:
- ev.Data = &tracepb.HTTPTraceEvent_GotConn{
- GotConn: &tracepb.HTTPGotConnData{
- Reused: tp.Bool(),
- WasIdle: tp.Bool(),
- IdleDurationNs: tp.Int64(),
- },
- }
-
- case tracepb.HTTPTraceEventCode_GOT_FIRST_RESPONSE_BYTE:
- // no data
-
- case tracepb.HTTPTraceEventCode_GOT_1XX_RESPONSE:
- ev.Data = &tracepb.HTTPTraceEvent_Got_1XxResponse{
- Got_1XxResponse: &tracepb.HTTPGot1XxResponseData{
- Code: int32(tp.Varint()),
- },
- }
-
- case tracepb.HTTPTraceEventCode_DNS_START:
- ev.Data = &tracepb.HTTPTraceEvent_DnsStart{
- DnsStart: &tracepb.HTTPDNSStartData{
- Host: tp.String(),
- },
- }
-
- case tracepb.HTTPTraceEventCode_DNS_DONE:
- data := &tracepb.HTTPDNSDoneData{
- Err: tp.ByteString(),
- }
- addrs := int(tp.UVarint())
- for j := 0; j < addrs; j++ {
- data.Addrs = append(data.Addrs, &tracepb.DNSAddr{
- Ip: tp.ByteString(),
- })
- }
- ev.Data = &tracepb.HTTPTraceEvent_DnsDone{DnsDone: data}
-
- case tracepb.HTTPTraceEventCode_CONNECT_START:
- ev.Data = &tracepb.HTTPTraceEvent_ConnectStart{
- ConnectStart: &tracepb.HTTPConnectStartData{
- Network: tp.String(),
- Addr: tp.String(),
- },
- }
-
- case tracepb.HTTPTraceEventCode_CONNECT_DONE:
- ev.Data = &tracepb.HTTPTraceEvent_ConnectDone{
- ConnectDone: &tracepb.HTTPConnectDoneData{
- Network: tp.String(),
- Addr: tp.String(),
- Err: tp.ByteString(),
- },
- }
-
- case tracepb.HTTPTraceEventCode_TLS_HANDSHAKE_START:
- // no data
-
- case tracepb.HTTPTraceEventCode_TLS_HANDSHAKE_DONE:
- ev.Data = &tracepb.HTTPTraceEvent_TlsHandshakeDone{
- TlsHandshakeDone: &tracepb.HTTPTLSHandshakeDoneData{
- Err: tp.ByteString(),
- TlsVersion: tp.Uint32(),
- CipherSuite: tp.Uint32(),
- ServerName: tp.String(),
- NegotiatedProtocol: tp.String(),
- },
- }
-
- case tracepb.HTTPTraceEventCode_WROTE_HEADERS:
- // no data
-
- case tracepb.HTTPTraceEventCode_WROTE_REQUEST:
- ev.Data = &tracepb.HTTPTraceEvent_WroteRequest{
- WroteRequest: &tracepb.HTTPWroteRequestData{
- Err: tp.ByteString(),
- },
- }
-
- case tracepb.HTTPTraceEventCode_WAIT_100_CONTINUE:
- // no data
-
- default:
- return nil, fmt.Errorf("unknown http event %v", code)
- }
- return ev, nil
-}
-
-var bin = binary.LittleEndian
-
-type traceReader struct {
- buf []byte
- off int
- err bool
-}
-
-func (tr *traceReader) Offset() int {
- return tr.off
-}
-
-func (tr *traceReader) Done() bool {
- return tr.off >= len(tr.buf)
-}
-
-func (tr *traceReader) Overflow() bool {
- return tr.err
-}
-
-func (tr *traceReader) Bytes(b []byte) {
- n := copy(b, tr.buf[tr.off:])
- tr.off += n
- if len(b) > n {
- tr.err = true
- }
-}
-
-func (tr *traceReader) Skip(n int) {
- tr.off += n
- if tr.off > len(tr.buf) {
- tr.off = len(tr.buf)
- tr.err = true
- }
-}
-
-func (tr *traceReader) Byte() byte {
- var buf [1]byte
- tr.Bytes(buf[:])
- return buf[0]
-}
-
-func (tr *traceReader) Bool() bool {
- return tr.Byte() != 0
-}
-
-func (tr *traceReader) String() string {
- return string(tr.ByteString())
-}
-
-func (tr *traceReader) ByteString() []byte {
- size := tr.UVarint()
- b := make([]byte, int(size))
- tr.Bytes(b)
- return b
-}
-
-func (tr *traceReader) Time() time.Time {
- ns := tr.Int64()
- return time.Unix(0, ns)
-}
-
-func (tr *traceReader) Int32() int32 {
- u := tr.Uint32()
- var v int32
- if u&1 == 0 {
- v = int32(u >> 1)
- } else {
- v = ^int32(u >> 1)
- }
- return v
-}
-
-func (tr *traceReader) Uint32() uint32 {
- var buf [4]byte
- tr.Bytes(buf[:])
- return bin.Uint32(buf[:])
-}
-
-func (tr *traceReader) Int64() int64 {
- u := tr.Uint64()
- var v int64
- if u&1 == 0 {
- v = int64(u >> 1)
- } else {
- v = ^int64(u >> 1)
- }
- return v
-}
-
-func (tr *traceReader) Uint64() uint64 {
- var buf [8]byte
- tr.Bytes(buf[:])
- return bin.Uint64(buf[:])
-}
-
-func (tr *traceReader) Varint() int64 {
- u := tr.UVarint()
- var v int64
- if u&1 == 0 {
- v = int64(u >> 1)
- } else {
- v = ^int64(u >> 1)
- }
- return v
-}
-
-func (tr *traceReader) UVarint() uint64 {
- var u uint64
- for i := 0; tr.off < len(tr.buf); i += 7 {
- b := tr.buf[tr.off]
- u |= uint64(b&^0x80) << i
- tr.off++
- if b&0x80 == 0 {
- break
- }
- }
- return u
-}
diff --git a/cli/daemon/schema.go b/cli/daemon/schema.go
new file mode 100644
index 0000000000..42ccf8e3a7
--- /dev/null
+++ b/cli/daemon/schema.go
@@ -0,0 +1,170 @@
+package daemon
+
+import (
+ "fmt"
+ "time"
+
+ jsoniter "github.com/json-iterator/go"
+
+ meta "encr.dev/proto/encore/parser/meta/v1"
+ schema "encr.dev/proto/encore/parser/schema/v1"
+)
+
+// genSchema generates a JSON payload to match the schema.
+func genSchema(meta *meta.Data, decl *schema.Type) []byte {
+ if decl == nil {
+ return nil
+ }
+ r := &schemaRenderer{
+ Stream: jsoniter.NewStream(jsoniter.ConfigDefault, nil, 256),
+ meta: meta,
+ seenDecls: make(map[uint32]*schema.Decl),
+ }
+ return r.Render(decl)
+}
+
+type schemaRenderer struct {
+ *jsoniter.Stream
+ meta *meta.Data
+ seenDecls map[uint32]*schema.Decl
+ typeArgs []*schema.Type
+}
+
+func (r *schemaRenderer) Render(d *schema.Type) []byte {
+ r.renderType(d)
+ return r.Buffer()
+}
+
+func (r *schemaRenderer) renderType(typ *schema.Type) {
+ switch typ := typ.Typ.(type) {
+ case *schema.Type_Struct:
+ r.renderStruct(typ.Struct)
+ case *schema.Type_Map:
+ r.renderMap(typ.Map)
+ case *schema.Type_List:
+ r.renderList(typ.List)
+ case *schema.Type_Builtin:
+ r.renderBuiltin(typ.Builtin)
+ case *schema.Type_Named:
+ r.renderNamed(typ.Named)
+ case *schema.Type_Pointer:
+ r.renderType(typ.Pointer.Base)
+ case *schema.Type_Union:
+ r.renderType(typ.Union.Types[0])
+ case *schema.Type_Literal:
+ switch v := typ.Literal.Value.(type) {
+ case *schema.Literal_Str:
+ r.WriteString(v.Str)
+ case *schema.Literal_Int:
+ r.WriteInt(int(v.Int))
+ case *schema.Literal_Float:
+ r.WriteFloat64(v.Float)
+ case *schema.Literal_Boolean:
+ r.WriteBool(v.Boolean)
+ case *schema.Literal_Null:
+ r.WriteNil()
+ default:
+ panic(fmt.Sprintf("unknown literal type %T", v))
+ }
+ case *schema.Type_TypeParameter:
+ if idx := typ.TypeParameter.ParamIdx; len(r.typeArgs) > int(idx) {
+ r.renderType(r.typeArgs[idx])
+ } else {
+ r.WriteNil()
+ }
+ case *schema.Type_Config:
+ // Config wrappers are transparent here; render the element type.
+ r.renderType(typ.Config.Elem)
+ default:
+ panic(fmt.Sprintf("unknown schema type %T", typ))
+ }
+}
+
+func (r *schemaRenderer) renderStruct(s *schema.Struct) {
+ r.WriteObjectStart()
+ written := false
+ for _, f := range s.Fields {
+ n := f.JsonName
+ if n == "-" {
+ continue
+ } else if n == "" {
+ n = f.Name
+ }
+
+ if written {
+ r.WriteMore()
+ }
+ r.WriteObjectField(n)
+ r.renderType(f.Typ)
+ written = true
+ }
+ r.WriteObjectEnd()
+}
+
+func (r *schemaRenderer) renderMap(m *schema.Map) {
+ r.WriteObjectStart()
+ r.renderType(m.Key)
+ r.WriteRaw(": ")
+ r.renderType(m.Value)
+ r.WriteObjectEnd()
+}
+
+func (r *schemaRenderer) renderList(l *schema.List) {
+ r.WriteArrayStart()
+ r.renderType(l.Elem)
+ r.WriteArrayEnd()
+}
+
+func (r *schemaRenderer) renderBuiltin(b schema.Builtin) {
+ switch b {
+ case schema.Builtin_ANY:
+ r.WriteString("")
+ case schema.Builtin_BOOL:
+ r.WriteBool(true)
+ case schema.Builtin_INT, schema.Builtin_INT8, schema.Builtin_INT16, schema.Builtin_INT32, schema.Builtin_INT64,
+ schema.Builtin_UINT, schema.Builtin_UINT8, schema.Builtin_UINT16, schema.Builtin_UINT32, schema.Builtin_UINT64:
+ r.WriteInt(1)
+ case schema.Builtin_FLOAT32, schema.Builtin_FLOAT64:
+ r.WriteRaw("2.3")
+ case schema.Builtin_STRING:
+ r.WriteString("hello")
+ case schema.Builtin_BYTES:
+ r.WriteString("YmFzZTY0Cg==") // "base64"
+ case schema.Builtin_TIME:
+ s, _ := time.Now().MarshalText()
+ r.WriteString(string(s))
+ case schema.Builtin_UUID:
+ r.WriteString("7d42f515-3517-4e76-be13-30880443546f")
+ case schema.Builtin_JSON:
+ r.WriteObjectStart()
+ r.WriteObjectField("some json data")
+ r.WriteBool(true)
+ r.WriteObjectEnd()
+ case schema.Builtin_USER_ID:
+ r.WriteString("userID")
+ default:
+ r.WriteString("")
+ }
+}
+
+func (r *schemaRenderer) renderNamed(n *schema.Named) {
+ if _, ok := r.seenDecls[n.Id]; ok {
+ // Already seen this name before
+ r.WriteNil()
+ return
+ }
+
+ // Store type arguments in scope. Restore the previous
+ // type arguments when we're done.
+ prevTypeArgs := r.typeArgs
+ defer func() {
+ r.typeArgs = prevTypeArgs
+ }()
+ r.typeArgs = n.TypeArguments
+
+ // Avoid infinite recursion
+ decl := r.meta.Decls[n.Id]
+ r.seenDecls[n.Id] = decl
+ r.renderType(decl.Type)
+ delete(r.seenDecls, n.Id)
+}
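+
+// For illustration: given a declaration such as
+//
+// type Data struct {
+// Message string
+// }
+//
+// genSchema renders a representative payload like {"Message":"hello"},
+// using the placeholder values from renderBuiltin above.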
diff --git a/cli/daemon/secret/secret.go b/cli/daemon/secret/secret.go
index 3844318b8f..e32bcaee1d 100644
--- a/cli/daemon/secret/secret.go
+++ b/cli/daemon/secret/secret.go
@@ -2,27 +2,37 @@
package secret
import (
+ "bytes"
"context"
"encoding/json"
+ "errors"
"fmt"
+ "io/fs"
"os"
"path/filepath"
"sync"
"time"
- "encr.dev/proto/encore/server/remote"
+ "cuelang.org/go/cue"
+ "cuelang.org/go/cue/cuecontext"
+ "cuelang.org/go/cue/load"
"github.com/rs/zerolog/log"
+ "go4.org/syncutil"
"golang.org/x/sync/singleflight"
+
+ "encore.dev/appruntime/exported/experiments"
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/cli/internal/platform"
+ "encr.dev/pkg/xos"
)
// New returns a new manager.
-func New(rc remote.RemoteClient) *Manager {
- return &Manager{rc: rc, cache: make(map[string]*Data)}
+func New() *Manager {
+ return &Manager{cache: make(map[string]*Data)}
}
// Manager manages the secrets cache for running Encore apps.
type Manager struct {
- rc remote.RemoteClient
group singleflight.Group
pollOnce sync.Once
@@ -32,116 +42,182 @@ type Manager struct {
// Data is a snapshot of an Encore app's development secret values.
type Data struct {
- // Synced is when the values were last synced.
+ // Synced is when the values were last synced,
+ // or the zero value if no sync has taken place.
Synced time.Time
// Values is a key-value map of defined secrets.
Values map[string]string
}
-// Get gets the secrets for the given app.
-func (f *Manager) Get(ctx context.Context, appSlug string) (*Data, error) {
- f.pollOnce.Do(f.startPolling)
+type LoadResult struct {
+ mgr *Manager
+ app *apps.Instance
- // Do we have the secrets in our cache?
- f.mu.Lock()
- data, ok := f.cache[appSlug]
- f.mu.Unlock()
- if ok {
- return data, nil
+ once syncutil.Once
+ ch <-chan singleflight.Result
+ initial singleflight.Result
+
+ localSecretMu sync.Mutex
+}
+
+// Load loads the secrets for the given app.
+// If the app isn't linked to the platform, (*LoadResult).Get resolves to empty secret data.
+func (mgr *Manager) Load(app *apps.Instance) *LoadResult {
+ mgr.pollOnce.Do(mgr.startPolling)
+
+ // Ignore cases when the app isn't linked.
+ if app.PlatformID() == "" {
+ return &LoadResult{mgr: mgr, app: app}
}
- // Do we have them on disk?
- if data, err := f.readFromDisk(appSlug); err == nil {
- f.mu.Lock()
- f.cache[appSlug] = data
- f.mu.Unlock()
- return data, nil
+ ch := mgr.fetch(app.PlatformID(), false)
+ return &LoadResult{mgr: mgr, app: app, ch: ch}
+}
+
+// Get returns the result of the prefetch.
+// It blocks until the initial fetch is ready or until ctx is cancelled.
+// For subsequent calls to Get (such as during live reload), it returns any
+// more recent data that has been subsequently cached.
+func (lr *LoadResult) Get(ctx context.Context, expSet *experiments.Set) (data *Data, err error) {
+ defer func() {
+ if err == nil {
+ // load.Instances in cue is not safe for concurrent access.
+ // https://github.com/cue-lang/cue/issues/1746
+ lr.localSecretMu.Lock()
+ defer lr.localSecretMu.Unlock()
+ // Return a new data object so we don't write the overrides to the cache.
+ data, err = applyLocalOverrides(lr.app, data)
+ }
+ }()
+
+ if lr == nil || lr.app.PlatformID() == "" {
+ return &Data{}, nil
+ }
+
+ // Fetch the initial result the first time.
+ err = lr.once.Do(func() error {
+ select {
+ case lr.initial = <-lr.ch:
+ // The fetch was successful so mark the Once as completed.
+ return nil
+ case <-ctx.Done():
+ // We timed out before the fetch completed.
+ return ctx.Err()
+ }
+ })
+ if err != nil {
+ return nil, err
}
- return f.fetch(appSlug)
+ initial, _ := lr.initial.Val.(*Data)
+ haveInitial := lr.initial.Err == nil
+ cached, haveCache := lr.mgr.loadFromCache(lr.app.PlatformID())
+
+ switch {
+ case haveCache && haveInitial:
+ // Which is most recent?
+ if initial.Synced.After(cached.Synced) {
+ return initial, nil
+ } else {
+ return cached, nil
+ }
+
+ case haveCache:
+ return cached, nil
+
+ case haveInitial:
+ return initial, nil
+
+ default:
+ // We have a prefetch error; return it.
+ return nil, lr.initial.Err
+ }
}
// UpdateKey updates the cached secret key to the given value.
-func (f *Manager) UpdateKey(appSlug, key, value string) {
- f.mu.Lock()
- defer f.mu.Unlock()
- if data, ok := f.cache[appSlug]; ok {
+func (mgr *Manager) UpdateKey(appSlug, key, value string) {
+ mgr.mu.Lock()
+ defer mgr.mu.Unlock()
+ if data, ok := mgr.cache[appSlug]; ok {
vals := make(map[string]string)
for k, v := range data.Values {
vals[k] = v
}
vals[key] = value
- f.cache[appSlug] = &Data{
+ mgr.cache[appSlug] = &Data{
Synced: time.Now(),
Values: vals,
}
- if err := f.writeToDisk(appSlug, data); err != nil {
+ if err := mgr.writeToDisk(appSlug, data); err != nil {
log.Error().Err(err).Msg("failed to write secrets to disk cache")
}
}
}
-// Prefetch fires off a background task to prefetch secrets for appSlug.
-func (f *Manager) Prefetch(appSlug string) {
- // Ignore cases when the app isn't linked.
- if appSlug != "" {
- go f.fetch(appSlug)
- }
-}
-
// fetch fetches secrets from the server.
// mu must not be held when running.
-func (f *Manager) fetch(appSlug string) (*Data, error) {
- data, err, _ := f.group.Do(appSlug, func() (interface{}, error) {
+func (mgr *Manager) fetch(appSlug string, poll bool) <-chan singleflight.Result {
+ return mgr.group.DoChan(appSlug, func() (any, error) {
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
- resp, err := f.rc.GetSecrets(ctx, &remote.GetSecretsRequest{
- AppSlug: appSlug,
- })
+ secrets, err := platform.GetLocalSecretValues(ctx, appSlug, poll)
if err != nil {
return nil, fmt.Errorf("fetch secrets for %s: %v", appSlug, err)
}
data := &Data{
Synced: time.Now(),
- Values: make(map[string]string),
- }
- for _, s := range resp.Secrets {
- data.Values[s.Key] = s.Value
+ Values: secrets,
}
// Update our caches
- f.mu.Lock()
- f.cache[appSlug] = data
- f.mu.Unlock()
- if err := f.writeToDisk(appSlug, data); err != nil {
+ mgr.mu.Lock()
+ mgr.cache[appSlug] = data
+ mgr.mu.Unlock()
+ if err := mgr.writeToDisk(appSlug, data); err != nil {
log.Error().Err(err).Msg("failed to write secrets to disk cache")
}
return data, nil
})
- if err != nil {
- return nil, err
+}
+
+func (mgr *Manager) loadFromCache(appSlug string) (*Data, bool) {
+ // Do we have the secrets in our cache?
+ mgr.mu.Lock()
+ data, ok := mgr.cache[appSlug]
+ mgr.mu.Unlock()
+ if ok {
+ return data, true
}
- return data.(*Data), nil
+
+ // Do we have them on disk?
+ if data, err := mgr.readFromDisk(appSlug); err == nil {
+ mgr.mu.Lock()
+ mgr.cache[appSlug] = data
+ mgr.mu.Unlock()
+ return data, true
+ }
+ return nil, false
}
// startPolling begins polling for secret updates every 5 minutes for the apps
// that have been run.
-func (f *Manager) startPolling() {
+func (mgr *Manager) startPolling() {
go func() {
for range time.Tick(5 * time.Minute) {
var slugs []string
- f.mu.Lock()
- for s := range f.cache {
+ mgr.mu.Lock()
+ for s := range mgr.cache {
slugs = append(slugs, s)
}
- f.mu.Unlock()
+ mgr.mu.Unlock()
for _, s := range slugs {
- if _, err := f.fetch(s); err != nil {
- log.Error().Err(err).Str("appID", s).Msg("failed to sync secrets")
+ res := <-mgr.fetch(s, true)
+ if res.Err != nil {
+ log.Error().Err(res.Err).Str("app_id", s).Msg("failed to sync secrets")
} else {
- log.Info().Str("appID", s).Msg("successfully synced app secrets")
+ log.Info().Str("app_id", s).Msg("successfully synced app secrets")
}
}
}
@@ -150,14 +226,14 @@ func (f *Manager) startPolling() {
// writeToDisk serializes the secret data and writes it to disk
// readable only for the current user.
-func (f *Manager) writeToDisk(appSlug string, data *Data) (err error) {
+func (mgr *Manager) writeToDisk(appSlug string, data *Data) (err error) {
defer func() {
if err != nil {
err = fmt.Errorf("write secrets %s: %v", appSlug, err)
}
}()
- path, err := f.secretsPath(appSlug)
+ path, err := mgr.secretsPath(appSlug)
if err != nil {
return err
}
@@ -174,18 +250,18 @@ func (f *Manager) writeToDisk(appSlug string, data *Data) (err error) {
if err != nil {
return err
}
- return os.WriteFile(path, out, 0600)
+ return xos.WriteFile(path, out, 0600)
}
// readFromDisk reads the cached secrets from disk.
-func (f *Manager) readFromDisk(appSlug string) (data *Data, err error) {
+func (mgr *Manager) readFromDisk(appSlug string) (data *Data, err error) {
defer func() {
if err != nil {
err = fmt.Errorf("read secrets %s: %v", appSlug, err)
}
}()
- path, err := f.secretsPath(appSlug)
+ path, err := mgr.secretsPath(appSlug)
if err != nil {
return nil, err
}
@@ -199,10 +275,62 @@ func (f *Manager) readFromDisk(appSlug string) (data *Data, err error) {
}
// secretsPath returns the file path to where the given app's secrets are stored on disk.
-func (f *Manager) secretsPath(appSlug string) (string, error) {
+func (mgr *Manager) secretsPath(appSlug string) (string, error) {
dir, err := os.UserCacheDir()
if err != nil {
return "", err
}
return filepath.Join(dir, "encore", "secrets", appSlug+".json"), nil
}
+
+// applyLocalOverrides parses the local secrets override file, if any,
+// and returns a new Data object with the overrides applied.
+//
+// If there are no overrides, src is returned directly.
+// The original src data object is never modified.
+func applyLocalOverrides(app *apps.Instance, src *Data) (*Data, error) {
+ const name = ".secrets.local.cue"
+ data, err := os.ReadFile(filepath.Join(app.Root(), name))
+ if err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ return src, nil
+ }
+ return nil, err
+ }
+
+ updated := &Data{
+ Synced: src.Synced,
+ Values: make(map[string]string, len(src.Values)),
+ }
+ for k, v := range src.Values {
+ updated.Values[k] = v
+ }
+
+ ctx := cuecontext.New()
+ loadCfg := &load.Config{
+ Stdin: bytes.NewReader(data),
+ }
+
+ inst := load.Instances([]string{"-"}, loadCfg)[0]
+ if inst.Err != nil {
+ return nil, fmt.Errorf("parse local secrets: %v", inst.Err)
+ }
+ secrets := ctx.BuildInstance(inst)
+ if err := secrets.Err(); err != nil {
+ return nil, fmt.Errorf("parse local secrets: %v", err)
+ }
+
+ it, err := secrets.Fields(cue.Hidden(false), cue.Concrete(true))
+ if err != nil {
+ return nil, fmt.Errorf("parse local secrets: %v", err)
+ }
+ for it.Next() {
+ key := it.Selector().String()
+ val, err := it.Value().String()
+ if err != nil {
+ return nil, fmt.Errorf("parse local secrets: secret key %s is not a string", key)
+ }
+ updated.Values[key] = val
+ }
+ return updated, nil
+}
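+
+// For illustration, a .secrets.local.cue file is a set of concrete top-level
+// string fields (the key names below are hypothetical):
+//
+// GitHubAPIToken: "local-dev-token"
+// SSHPrivateKey: "local-dev-key"
+//
+// Each such field overrides the synced value for that secret key.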
diff --git a/cli/daemon/sqldb/cluster.go b/cli/daemon/sqldb/cluster.go
index 014f68ec1f..94a3396f0b 100644
--- a/cli/daemon/sqldb/cluster.go
+++ b/cli/daemon/sqldb/cluster.go
@@ -1,44 +1,58 @@
package sqldb
import (
- "bytes"
"context"
- "encoding/json"
- "errors"
"fmt"
- "os/exec"
+ "net"
+ "strconv"
+ "strings"
"sync"
+ "sync/atomic"
"time"
- "encr.dev/cli/daemon/internal/runlog"
- meta "encr.dev/proto/encore/parser/meta/v1"
+ "github.com/cockroachdb/errors"
+ "github.com/jackc/pgx/v5"
"github.com/rs/zerolog"
"go4.org/syncutil"
"golang.org/x/sync/errgroup"
+ "encr.dev/internal/optracker"
+ meta "encr.dev/proto/encore/parser/meta/v1"
+
// stdlib registers the "pgx" driver to database/sql.
- _ "github.com/jackc/pgx/v4/stdlib"
+ _ "github.com/jackc/pgx/v5/stdlib"
)
// Cluster represents a running database Cluster.
type Cluster struct {
- ID string // cluster ID
- Memfs bool // use an an in-memory filesystem?
+ ID ClusterID // cluster ID
+ Memfs bool // use an in-memory filesystem?
+ Password string // randomly generated password for this cluster
- HostPort string // available after Ready() is done
-
- log zerolog.Logger
+ driver Driver
+ log zerolog.Logger
startOnce syncutil.Once
// started is closed when the cluster has been successfully started.
started chan struct{}
+ // cachedStatus is the cached cluster status; it should be accessed
+ // via status().
+ cachedStatus atomic.Pointer[ClusterStatus]
+
+ Roles EncoreRoles // set by Start
+
// Ctx is canceled when the cluster is being torn down.
Ctx context.Context
cancel func() // for canceling Ctx
- mu sync.Mutex
- dbs map[string]*DB // name -> db
+ mu sync.Mutex
+ dbs map[string]*DB // name -> db
+ isExternal func(name string) bool
+}
+
+func (c *Cluster) Stop() {
+ // no-op
}
// Ready returns a channel that is closed when the cluster is up and running.
@@ -46,10 +60,11 @@ func (c *Cluster) Ready() <-chan struct{} {
return c.started
}
-// Start creates the container if necessary and starts it.
+// Start creates the cluster if necessary and starts it.
// If the cluster is already running it does nothing.
-func (c *Cluster) Start(log runlog.Log) error {
- return c.startOnce.Do(func() (err error) {
+func (c *Cluster) Start(ctx context.Context, tracker *optracker.OpTracker) (*ClusterStatus, error) {
+ var status *ClusterStatus
+ err := c.startOnce.Do(func() (err error) {
c.log.Debug().Msg("starting cluster")
defer func() {
if err == nil {
@@ -60,162 +75,195 @@ func (c *Cluster) Start(log runlog.Log) error {
}
}()
- // Ensure the docker image exists first.
- if err := pullImage(log, dockerImage); err != nil {
- return fmt.Errorf("pull docker image %s: %v", dockerImage, err)
+ st, err := c.driver.CreateCluster(ctx, &CreateParams{
+ ClusterID: c.ID,
+ Memfs: c.Memfs,
+ Tracker: tracker,
+ }, c.log)
+ if err != nil {
+ return errors.WithStack(err)
}
+ status = st
+ c.cachedStatus.Store(st)
+ go c.pollStatus()
- ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
- defer cancel()
+ // Set up the roles.
+ c.Roles, err = c.setupRoles(ctx, st)
- cname := containerName(c.ID)
- status, err := c.Status(ctx)
- if err != nil {
- c.log.Error().Err(err).Msg("failed to get container status")
- return err
- }
+ return err
+ })
- switch status.Status {
- case Running:
- c.HostPort = status.HostPort
- c.log.Debug().Str("hostport", c.HostPort).Msg("cluster already running")
- return nil
+ if err != nil {
+ return nil, errors.WithStack(err)
+ } else if status == nil {
+ // We've already set it up; query the current status
+ return c.Status(ctx)
+ }
+ return status, nil
+}
- case Stopped:
- c.log.Debug().Msg("cluster stopped, restarting")
- if out, err := exec.CommandContext(ctx, "docker", "start", cname).CombinedOutput(); err != nil {
- return fmt.Errorf("could not start sqldb container: %s (%v)", string(out), err)
- }
- // Grab the port
- status, err = c.Status(ctx)
- if err != nil {
- return err
- }
- c.HostPort = status.HostPort
- c.log.Debug().Str("hostport", c.HostPort).Msg("cluster started")
- return nil
-
- case NotFound:
- c.log.Debug().Msg("cluster not found, creating")
- args := []string{
- "run",
- "-d",
- "-p", "5432",
- "-e", "POSTGRES_USER=encore",
- "-e", "POSTGRES_PASSWORD=" + c.ID,
- "-e", "POSTGRES_DB=postgres",
- "--name", cname,
- }
- if c.Memfs {
- args = append(args,
- "--mount", "type=tmpfs,destination=/var/lib/postgresql/data",
- dockerImage,
- "-c", "fsync=off",
- )
- } else {
- args = append(args, dockerImage)
- }
+// setupRoles ensures the necessary database roles exist
+// for admin/write/read access.
+func (c *Cluster) setupRoles(ctx context.Context, st *ClusterStatus) (EncoreRoles, error) {
+ uri := st.ConnURI(st.Config.RootDatabase, st.Config.Superuser)
+ conn, err := pgx.Connect(ctx, uri)
+ if err != nil {
+ return nil, fmt.Errorf("connect: %v", err)
+ }
+ defer conn.Close(context.Background())
- cmd := exec.CommandContext(ctx, "docker", args...)
- if out, err := cmd.CombinedOutput(); err != nil {
- return fmt.Errorf("could not start sql database as docker container: %s: %v", out, err)
+ roles, err := c.determineRoles(ctx, st, conn)
+ if err != nil {
+ return nil, fmt.Errorf("determine roles: %v", err)
+ }
+
+ for _, role := range roles {
+ sanitizedUsername := (pgx.Identifier{role.Username}).Sanitize()
+ c.log.Debug().Str("role", role.Username).Msg("creating role")
+ _, err := conn.Exec(ctx, `
+ CREATE USER `+sanitizedUsername+`
+ WITH LOGIN ENCRYPTED PASSWORD `+quoteString(role.Password)+`
+ `)
+ if err != nil {
+ var exists bool
+ err2 := conn.QueryRow(context.Background(), `
+ SELECT COALESCE(MAX(oid), 0) > 0 AS exists
+ FROM pg_roles
+ WHERE rolname = $1
+ `, role.Username).Scan(&exists)
+ if err2 != nil {
+ c.log.Error().Err(err2).Str("role", role.Username).Msg("unable to lookup role")
+ return nil, fmt.Errorf("get role %q: %v", role.Username, err2)
+ } else if !exists {
+ c.log.Error().Err(err).Str("role", role.Username).Msg("unable to create role")
+ return nil, fmt.Errorf("create role %q: %v", role.Username, err)
}
+ c.log.Debug().Str("role", role.Username).Msg("role already exists")
+ }
- // Now that the container is running, grab the host port.
- status, err := c.Status(ctx)
+ // Add cluster-level permissions.
+ switch role.Type {
+ case RoleAdmin:
+ // Grant admins the ability to create databases.
+ _, err := conn.Exec(ctx, `
+ ALTER USER `+sanitizedUsername+` CREATEDB CREATEROLE
+ `)
if err != nil {
- return err
+ c.log.Error().Err(err).Str("role", role.Username).Msg("unable to grant CREATEDB")
+ return nil, fmt.Errorf("grant CREATEDB to %q: %v", role.Username, err)
}
- c.HostPort = status.HostPort
- c.log.Debug().Str("hostport", c.HostPort).Msg("cluster created")
- return nil
-
- default:
- return fmt.Errorf("unknown cluster status %q", status.Status)
}
- })
+ }
+
+ return roles, nil
}
-// initDBs adds the databases from md to the cluster's database map.
-// It does not create or migrate them.
-func (c *Cluster) initDBs(md *meta.Data, reinit bool) {
- if md == nil {
- return
- }
+// determineRoles determines the roles to create based on the server version.
+func (c *Cluster) determineRoles(ctx context.Context, st *ClusterStatus, conn *pgx.Conn) (EncoreRoles, error) {
+ // We always support an admin role (PostgreSQL 11+)
+
+ // We support read/write roles on PostgreSQL 14+ only,
+ // as support for predefined roles was added then.
+ var supportsPredefinedRoles bool
+ {
+ var version string
+ if err := conn.QueryRow(ctx, "SHOW server_version").Scan(&version); err != nil {
+ return nil, fmt.Errorf("determine server version: %v", err)
+ }
+ c.log.Debug().Str("version", version).Msg("got postgres server version")
- // Create the databases we need in our cluster map.
- c.mu.Lock()
- for _, svc := range md.Svcs {
- if len(svc.Migrations) > 0 {
- db, ok := c.dbs[svc.Name]
- if ok && reinit {
- db.CloseConns()
- }
- if !ok || reinit {
- c.initDB(svc.Name)
- }
+ major, _, _ := strings.Cut(version, ".")
+ if n, err := strconv.Atoi(major); err != nil {
+ return nil, fmt.Errorf("determine server version: %v", err)
+ } else if n >= 14 {
+ supportsPredefinedRoles = true
}
}
- c.mu.Unlock()
+
+ // For older PostgreSQL versions, fall back to the superuser role set up at cluster creation.
+ roles := EncoreRoles{st.Config.Superuser}
+ if supportsPredefinedRoles {
+ // On PostgreSQL 14+, add dedicated admin/write/read roles as well.
+ roles = append(roles,
+ Role{RoleAdmin, "encore-admin", "admin"},
+ Role{RoleWrite, "encore-write", "write"},
+ Role{RoleRead, "encore-read", "read"},
+ )
+ }
+ return roles, nil
}
// initDB initializes the database for svc and adds it to c.dbs.
// The cluster mutex must be held.
-func (c *Cluster) initDB(name string) *DB {
+func (c *Cluster) initDB(encoreName string) *DB {
+ driverName := encoreName
+ if !c.driver.Meta().ClusterIsolation {
+ driverName += fmt.Sprintf("-%s-%s", c.ID.NS.App.PlatformOrLocalID(), c.ID.Type)
+
+ // Add the namespace id, as long as it's not the default namespace
+ // (for backwards compatibility).
+ if c.ID.NS.Name != "default" {
+ driverName += "-" + string(c.ID.NS.ID)
+ }
+ }
+
dbCtx, cancel := context.WithCancel(c.Ctx)
db := &DB{
- Name: name,
- Cluster: c,
+ EncoreName: encoreName,
+ Cluster: c,
+ driverName: driverName,
+
+ // Use a template database when running tests.
+ template: c.ID.Type == Test,
Ctx: dbCtx,
cancel: cancel,
ready: make(chan struct{}),
- log: c.log.With().Str("db", name).Logger(),
+ log: c.log.With().Str("db", encoreName).Logger(),
}
- c.dbs[name] = db
+ c.dbs[encoreName] = db
return db
}
-// Create creates the given databases.
-func (c *Cluster) Create(ctx context.Context, appRoot string, md *meta.Data) error {
+// Setup sets up the given databases.
+func (c *Cluster) Setup(ctx context.Context, appRoot string, md *meta.Data) error {
c.log.Debug().Msg("creating cluster")
g, ctx := errgroup.WithContext(ctx)
+ g.SetLimit(50)
c.mu.Lock()
- for _, svc := range md.Svcs {
- if len(svc.Migrations) == 0 {
+
+ for _, dbMeta := range md.SqlDatabases {
+ dbMeta := dbMeta
+ db, ok := c.dbs[dbMeta.Name]
+ if c.IsExternalDB(dbMeta.Name) {
continue
}
-
- svc := svc
- db, ok := c.dbs[svc.Name]
if !ok {
- c.mu.Unlock()
- return fmt.Errorf("database %s not initialized", svc.Name)
+ db = c.initDB(dbMeta.Name)
}
- g.Go(func() error { return db.Setup(ctx, appRoot, svc, false, false) })
+ g.Go(func() error { return db.Setup(ctx, appRoot, dbMeta, false, false) })
}
c.mu.Unlock()
return g.Wait()
}
-// CreateAndMigrate creates and migrates the given databases.
-func (c *Cluster) CreateAndMigrate(ctx context.Context, appRoot string, md *meta.Data) error {
+// SetupAndMigrate creates and migrates the given databases.
+func (c *Cluster) SetupAndMigrate(ctx context.Context, appRoot string, dbs []*meta.SQLDatabase) error {
c.log.Debug().Msg("creating and migrating cluster")
g, ctx := errgroup.WithContext(ctx)
+ g.SetLimit(50)
c.mu.Lock()
- for _, svc := range md.Svcs {
- if len(svc.Migrations) == 0 {
+ for _, dbMeta := range dbs {
+ if c.IsExternalDB(dbMeta.Name) {
continue
}
-
- svc := svc
- db, ok := c.dbs[svc.Name]
+ dbMeta := dbMeta
+ db, ok := c.dbs[dbMeta.Name]
if !ok {
- c.mu.Unlock()
- return fmt.Errorf("database %s not initialized", svc.Name)
+ db = c.initDB(dbMeta.Name)
}
- g.Go(func() error { return db.Setup(ctx, appRoot, svc, true, false) })
+ g.Go(func() error { return db.Setup(ctx, appRoot, dbMeta, true, false) })
}
c.mu.Unlock()
return g.Wait()
@@ -229,28 +277,43 @@ func (c *Cluster) GetDB(name string) (*DB, bool) {
return db, ok
}
-// Recreate recreates the databases for the given services.
-// If services is the nil slice it recreates all databases.
-func (c *Cluster) Recreate(ctx context.Context, appRoot string, services []string, md *meta.Data) error {
+func (c *Cluster) IsExternalDB(name string) bool {
+ if c.isExternal == nil {
+ return false
+ }
+ return c.isExternal(name)
+}
+
+// Recreate recreates the databases for the given database names.
+// If databaseNames is the nil slice it recreates all databases.
+func (c *Cluster) Recreate(ctx context.Context, appRoot string, databaseNames []string, md *meta.Data) error {
c.log.Debug().Msg("recreating cluster")
var filter map[string]bool
- if services != nil {
+ if databaseNames != nil {
filter = make(map[string]bool)
- for _, svc := range services {
- filter[svc] = true
+ for _, name := range databaseNames {
+ filter[name] = true
}
}
g, ctx := errgroup.WithContext(ctx)
+ g.SetLimit(50)
c.mu.Lock()
- for _, svc := range md.Svcs {
- svc := svc
- if len(svc.Migrations) > 0 && (filter == nil || filter[svc.Name]) {
- db, ok := c.dbs[svc.Name]
+ for _, dbMeta := range md.SqlDatabases {
+ dbMeta := dbMeta
+ if filter == nil || filter[dbMeta.Name] {
+ db, ok := c.dbs[dbMeta.Name]
+ if c.IsExternalDB(dbMeta.Name) {
+ if filter[dbMeta.Name] {
+ c.mu.Unlock()
+ return fmt.Errorf("cannot reset %q: resetting external databases is disabled", dbMeta.Name)
+ }
+ continue
+ }
if !ok {
- db = c.initDB(svc.Name)
+ db = c.initDB(dbMeta.Name)
}
- g.Go(func() error { return db.Setup(ctx, appRoot, svc, true, true) })
+ g.Go(func() error { return db.Setup(ctx, appRoot, dbMeta, true, true) })
}
}
c.mu.Unlock()
@@ -259,85 +322,124 @@ func (c *Cluster) Recreate(ctx context.Context, appRoot string, services []strin
return err
}
-// Status reports the status of the cluster.
+// Status reports the cluster's status.
func (c *Cluster) Status(ctx context.Context) (*ClusterStatus, error) {
- cname := containerName(c.ID)
- out, err := exec.CommandContext(ctx, "docker", "container", "inspect", cname).CombinedOutput()
- if err == exec.ErrNotFound {
- return nil, errors.New("docker not found: is it installed and in your PATH?")
- } else if err != nil {
- // Docker returns a non-zero exit code if the container does not exist.
- // Try to tell this apart from an error by parsing the output.
- if bytes.Contains(out, []byte("No such container")) {
- return &ClusterStatus{Status: NotFound}, nil
- }
- return nil, fmt.Errorf("docker container inspect failed: %s (%v)", out, err)
+ if st := c.cachedStatus.Load(); st != nil {
+ return st, nil
}
+ return c.updateStatusFromDriver(ctx)
+}
- var resp []struct {
- Name string
- State struct {
- Running bool
- }
- NetworkSettings struct {
- Ports map[string][]struct {
- HostIP string
- HostPort string
- }
+func (c *Cluster) updateStatusFromDriver(ctx context.Context) (*ClusterStatus, error) {
+ st, err := c.driver.ClusterStatus(ctx, c.ID)
+ if err == nil {
+ c.cachedStatus.Store(st)
+ }
+ return st, err
+}
+
+// pollStatus polls the driver for status changes.
+func (c *Cluster) pollStatus() {
+ ch := time.NewTicker(10 * time.Second)
+ defer ch.Stop()
+
+ for {
+ select {
+ case <-ch.C:
+ ctx, cancel := context.WithTimeout(c.Ctx, 5*time.Second)
+ _, _ = c.updateStatusFromDriver(ctx)
+ cancel()
+
+ case <-c.Ctx.Done():
+ return
}
}
- if err := json.Unmarshal(out, &resp); err != nil {
- return nil, fmt.Errorf("parse `docker container inspect` response: %v", err)
+}
+
+// Info reports information about a cluster.
+func (c *Cluster) Info(ctx context.Context) (*ClusterInfo, error) {
+ st, err := c.Start(ctx, nil)
+ if err != nil {
+ return nil, err
}
- for _, c := range resp {
- if c.Name == "/"+cname {
- status := &ClusterStatus{Status: Stopped}
- if c.State.Running {
- status.Status = Running
- }
- ports := c.NetworkSettings.Ports["5432/tcp"]
- if len(ports) > 0 {
- status.HostPort = ports[0].HostIP + ":" + ports[0].HostPort
- }
- return status, nil
+
+ info := &ClusterInfo{ClusterStatus: st}
+ info.Encore = c.Roles
+ return info, nil
+}
+
+// ClusterInfo returns information about a cluster.
+type ClusterInfo struct {
+ *ClusterStatus
+
+ // Encore contains the roles to use to connect for an Encore app.
+ // It is set if and only if the cluster is running.
+ Encore EncoreRoles
+}
+
+// ConnURI reports the connection URI to connect to the given database
+// in the cluster, authenticating with the given role.
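+// For example, with Host "127.0.0.1:5432" the result is of the form
+// "user=encore-admin password=... dbname=mydb host=127.0.0.1 port=5432".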
+func (s *ClusterStatus) ConnURI(database string, r Role) string {
+ uri := fmt.Sprintf("user=%s password=%s dbname=%s", r.Username, r.Password, database)
+
+ // Handle different ways of expressing the host
+ cfg := s.Config
+ if strings.HasPrefix(cfg.Host, "/") {
+ uri += " host=" + cfg.Host // unix socket
+ } else if host, port, err := net.SplitHostPort(cfg.Host); err == nil {
+ uri += fmt.Sprintf(" host=%s port=%s", host, port) // host:port
+ } else {
+ uri += " host=" + cfg.Host // hostname
+ }
+
+ return uri
+}
+
+// EncoreRoles describes the credentials to use when connecting
+// to the cluster as an Encore user.
+type EncoreRoles []Role
+
+func (roles EncoreRoles) Superuser() (Role, bool) { return roles.find(RoleSuperuser) }
+func (roles EncoreRoles) Admin() (Role, bool) { return roles.find(RoleAdmin) }
+func (roles EncoreRoles) Write() (Role, bool) { return roles.find(RoleWrite) }
+func (roles EncoreRoles) Read() (Role, bool) { return roles.find(RoleRead) }
+
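+// First returns the first role found among the given types, tried in order.
+// For example, roles.First(RoleAdmin, RoleSuperuser) prefers the admin role
+// and falls back to the superuser role if no admin role exists.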
+func (roles EncoreRoles) First(typs ...RoleType) (Role, bool) {
+ for _, typ := range typs {
+ if r, ok := roles.find(typ); ok {
+ return r, true
+ }
+ }
+ return Role{}, false
+}
+
+func (roles EncoreRoles) find(typ RoleType) (Role, bool) {
+ for _, r := range roles {
+ if r.Type == typ {
+ return r, true
}
}
- return &ClusterStatus{Status: NotFound}, nil
+ return Role{}, false
}
-// ContainerStatus represents the status of a container.
-type ContainerStatus string
+type RoleType string
+
+func (r RoleType) String() string { return string(r) }
const (
- // Running indicates the cluster container is running.
- Running ContainerStatus = "running"
- // Stopped indicates the container cluster exists but is not running.
- Stopped ContainerStatus = "stopped"
- // NotFound indicates the container cluster does not exist.
- NotFound ContainerStatus = "notfound"
+ RoleSuperuser RoleType = "superuser"
+ RoleAdmin RoleType = "admin"
+ RoleWrite RoleType = "write"
+ RoleRead RoleType = "read"
)
-// ClusterStatus rerepsents the status of a database cluster.
-type ClusterStatus struct {
- // Status is the status of the underlying container.
- Status ContainerStatus
- // HostPort is the host and port for connecting to the database.
- // It is only set when Status == Running.
- HostPort string
+type Role struct {
+ Type RoleType
+ Username string
+ Password string
}
-// containerName computes the container name for a given clusterID.
-func containerName(clusterID string) string {
- return "sqldb-" + clusterID
-}
-
-func pullImage(log runlog.Log, image string) error {
- if err := exec.Command("docker", "image", "inspect", image).Run(); err == nil {
- return nil
- }
- fmt.Fprintf(log.Stderr(), "Docker image %q does not exist locally, pulling...\n", image)
- cmd := exec.Command("docker", "pull", image)
- cmd.Stdout = log.Stdout()
- cmd.Stderr = log.Stderr()
- return cmd.Run()
+// quoteString quotes a string for use in SQL.
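+// For example, quoteString("it's") returns "'it''s'".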
+func quoteString(str string) string {
+ return "'" + strings.ReplaceAll(str, "'", "''") + "'"
}
diff --git a/cli/daemon/sqldb/cluster_test.go b/cli/daemon/sqldb/cluster_test.go
deleted file mode 100644
index a0ce243761..0000000000
--- a/cli/daemon/sqldb/cluster_test.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package sqldb
-
-import (
- "context"
- "crypto/rand"
- "encoding/base32"
- "os/exec"
- "testing"
-
- "encr.dev/cli/daemon/internal/runlog"
- qt "github.com/frankban/quicktest"
-)
-
-func TestClusterManager_StartDelete(t *testing.T) {
- if testing.Short() {
- t.Log("skipping cluster test when running in short mode")
- t.SkipNow()
- }
- c := qt.New(t)
- cm := NewClusterManager()
- ctx := context.Background()
- clusterID := genClusterID(c)
- cl := cm.Init(ctx, &InitParams{ClusterID: clusterID})
- err := cl.Start(runlog.OS())
- c.Assert(err, qt.IsNil)
- c.Assert(cl, qt.Not(qt.IsNil))
-
- cname := containerName(clusterID)
- err = exec.Command("docker", "container", "inspect", cname).Run()
- c.Assert(err, qt.IsNil)
-
- err = cm.Delete(ctx, clusterID)
- c.Assert(err, qt.IsNil)
- out, err := exec.Command("docker", "container", "inspect", cname).CombinedOutput()
- c.Assert(err, qt.Not(qt.IsNil))
- c.Assert(string(out), qt.Contains, "No such container")
-}
-
-func TestClusterManager_Get(t *testing.T) {
- if testing.Short() {
- t.Log("skipping cluster test when running in short mode")
- t.SkipNow()
- }
-
- c := qt.New(t)
- cm := NewClusterManager()
- cl := testCluster(c, cm)
- cluster, ok := cm.Get(cl.ID)
- c.Assert(ok, qt.IsTrue)
- c.Assert(cluster, qt.Equals, cl)
- c.Assert(cluster, qt.Not(qt.IsNil))
- c.Assert(cluster.ID, qt.Equals, cl.ID)
-}
-
-func testCluster(c *qt.C, cm *ClusterManager) *Cluster {
- ctx := context.Background()
- clusterID := genClusterID(c)
- cl := cm.Init(ctx, &InitParams{ClusterID: clusterID})
- err := cl.Start(runlog.OS())
- c.Assert(err, qt.IsNil)
- c.Assert(cl, qt.Not(qt.IsNil))
- c.Cleanup(func() {
- err := cm.Delete(context.Background(), clusterID)
- c.Assert(err, qt.IsNil)
- })
- return cl
-}
-
-var encoding = base32.NewEncoding("23456789abcdefghikmnopqrstuvwxyz").WithPadding(base32.NoPadding)
-
-func genClusterID(c *qt.C) string {
- var data [3]byte
- _, err := rand.Read(data[:])
- c.Assert(err, qt.IsNil)
- return "sqldb-internal-test-" + encoding.EncodeToString(data[:])
-}
diff --git a/cli/daemon/sqldb/db.go b/cli/daemon/sqldb/db.go
index 392254c6f4..cf4b6e60e8 100644
--- a/cli/daemon/sqldb/db.go
+++ b/cli/daemon/sqldb/db.go
@@ -4,20 +4,30 @@ import (
"context"
"database/sql"
"fmt"
+ "io/fs"
+ "path/filepath"
"sync"
"time"
- meta "encr.dev/proto/encore/parser/meta/v1"
+ "github.com/cockroachdb/errors"
"github.com/golang-migrate/migrate/v4"
+ "github.com/golang-migrate/migrate/v4/database"
"github.com/golang-migrate/migrate/v4/database/postgres"
- "github.com/jackc/pgx/v4"
+ "github.com/golang-migrate/migrate/v4/source"
+ "github.com/jackc/pgx/v5"
"github.com/rs/zerolog"
+
+ "encr.dev/pkg/fns"
+ "encr.dev/pkg/option"
+ meta "encr.dev/proto/encore/parser/meta/v1"
)
// DB represents a single database instance within a cluster.
type DB struct {
- Name string // database name
- Cluster *Cluster
+ EncoreName string
+ Cluster *Cluster
+
+ driverName string
// Ctx is canceled when the database is being torn down.
Ctx context.Context
@@ -31,16 +41,32 @@ type DB struct {
migrated bool
+ // template indicates the database is backed by a template database.
+ template bool
+
log zerolog.Logger
}
+// ApplicationCloudName reports the "cloud name" of the application-facing database.
+func (db *DB) ApplicationCloudName() string {
+ return db.driverName
+}
+
+// TemplateCloudName reports the "cloud name" of the template database, if any.
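+// For example, a driver name of "mydb-myapp-test" yields "mydb-myapp-test_template".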
+func (db *DB) TemplateCloudName() option.Option[string] {
+ if db.template {
+ return option.Some(db.driverName + "_template")
+ }
+ return option.None[string]()
+}
+
// Ready returns a channel that is closed when the database is up and running.
func (db *DB) Ready() <-chan struct{} {
return db.ready
}
// Setup sets up the database, (re)creating it if necessary and running schema migrations.
-func (db *DB) Setup(ctx context.Context, appRoot string, svc *meta.Service, migrate, recreate bool) (err error) {
+func (db *DB) Setup(ctx context.Context, appRoot string, dbMeta *meta.SQLDatabase, migrate, recreate bool) (err error) {
db.log.Debug().Msg("setting up database")
db.setupMu.Lock()
defer db.setupMu.Unlock()
@@ -57,43 +83,89 @@ func (db *DB) Setup(ctx context.Context, appRoot string, svc *meta.Service, migr
}()
if recreate {
- if err := db.Drop(ctx); err != nil {
- return fmt.Errorf("drop db %s: %v", db.Name, err)
+ if err := db.drop(ctx); err != nil {
+ return err
}
}
- if err := db.Create(ctx); err != nil {
- return fmt.Errorf("create db %s: %v", db.Name, err)
- }
- if migrate || recreate || !db.migrated {
- if err := db.Migrate(ctx, appRoot, svc); err != nil {
- // Only report an error if we asked to migrate or recreate.
- // Otherwise we might fail to open a database shell when there
- // is a migration issue.
- if migrate || recreate {
- return fmt.Errorf("migrate db %s: %v", db.Name, err)
+
+ setupDB := func(cloudName string) error {
+ if err := db.doCreate(ctx, cloudName, option.None[string]()); err != nil {
+ return errors.Wrapf(err, "create db %s: %v", cloudName, err)
+ }
+
+ if err := db.ensureRoles(ctx, cloudName, db.Cluster.Roles...); err != nil {
+ return fmt.Errorf("ensure db roles %s: %v", cloudName, err)
+ }
+
+ if migrate || recreate || !db.migrated {
+ if err := db.doMigrate(ctx, cloudName, appRoot, dbMeta); err != nil {
+ // Only report an error if we asked to migrate or recreate.
+ // Otherwise we might fail to open a database shell when there
+ // is a migration issue.
+ if migrate || recreate {
+ return fmt.Errorf("migrate db %s: %v", cloudName, err)
+ }
}
}
+ return nil
+ }
+
+ // First set up the database with the application name.
+ if err := setupDB(db.ApplicationCloudName()); err != nil {
+ return err
}
+
+ if tmplName, ok := db.TemplateCloudName().Get(); ok {
+ // If we want a template database, rename the application database to the template name.
+ // We do it this way in case the migrations assume the database is named according to the application name.
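+ //
+ // Schematically: migrate "mydb", rename it to "mydb_template", then
+ // CREATE DATABASE "mydb" TEMPLATE "mydb_template".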
+
+ // Terminate the connections to the template database to prevent "database is being accessed by other users" errors.
+ _ = db.terminateConnectionsToDB(ctx, db.ApplicationCloudName())
+ if err := db.renameDB(ctx, db.ApplicationCloudName(), tmplName); err != nil {
+ return fmt.Errorf("rename db %s to %s: %v", db.ApplicationCloudName(), tmplName, err)
+ }
+
+ // Then create the application database based on the template
+ if err := db.doCreate(ctx, db.ApplicationCloudName(), option.Some(tmplName)); err != nil {
+ return errors.Wrapf(err, "create db %s: %v", db.ApplicationCloudName(), err)
+ }
+
+ // Ensure the application database has the right roles, too.
+ if err := db.ensureRoles(ctx, db.ApplicationCloudName(), db.Cluster.Roles...); err != nil {
+ return fmt.Errorf("ensure db roles %s: %v", db.ApplicationCloudName(), err)
+ }
+ }
+
return nil
}
-// Create creates the database in the cluster if it does not already exist.
-// It reports whether the database was initialized for the first time
-// in this process.
-func (db *DB) Create(ctx context.Context) error {
- adm, err := db.connectAdminDB(ctx)
+func (db *DB) doCreate(ctx context.Context, cloudName string, template option.Option[string]) error {
+ adm, err := db.connectSuperuser(ctx)
if err != nil {
return err
}
- defer adm.Close(context.Background())
+ defer func() { _ = adm.Close(context.Background()) }()
// Does it already exist?
var dummy int
- err = adm.QueryRow(ctx, "SELECT 1 FROM pg_database WHERE datname = $1", db.Name).Scan(&dummy)
- if err == pgx.ErrNoRows {
+ err = adm.QueryRow(ctx, "SELECT 1 FROM pg_database WHERE datname = $1", cloudName).Scan(&dummy)
+ owner, ok := db.Cluster.Roles.First(RoleAdmin, RoleSuperuser)
+ if !ok {
+ return errors.New("unable to find admin or superuser roles")
+ }
+
+ if errors.Is(err, pgx.ErrNoRows) {
db.log.Debug().Msg("creating database")
- name := (pgx.Identifier{db.Name}).Sanitize() // sanitize database name, to be safe
- _, err = adm.Exec(ctx, fmt.Sprintf("CREATE DATABASE %s OWNER encore;", name))
+ // Sanitize names since this query does not support query params
+ dbName := (pgx.Identifier{cloudName}).Sanitize()
+ ownerName := (pgx.Identifier{owner.Username}).Sanitize()
+
+ // Use the template if one is provided.
+ var tmplSnippet string
+ if tmplName, ok := template.Get(); ok {
+ tmplSnippet = fmt.Sprintf("WITH TEMPLATE %s", (pgx.Identifier{tmplName}).Sanitize())
+ }
+ _, err = adm.Exec(ctx, fmt.Sprintf("CREATE DATABASE %s %s OWNER %s;", dbName, tmplSnippet, ownerName))
}
if err != nil {
db.log.Error().Err(err).Msg("failed to create database")
@@ -101,8 +173,93 @@ func (db *DB) Create(ctx context.Context) error {
return err
}
+func (db *DB) renameDB(ctx context.Context, from, to string) error {
+ adm, err := db.connectSuperuser(ctx)
+ if err != nil {
+ return err
+ }
+ defer func() { _ = adm.Close(context.Background()) }()
+
+ _, err = adm.Exec(ctx, fmt.Sprintf("ALTER DATABASE %s RENAME TO %s",
+ (pgx.Identifier{from}).Sanitize(),
+ (pgx.Identifier{to}).Sanitize(),
+ ))
+ return err
+}
+
+// ensureRoles ensures the roles have been granted access to this database.
+func (db *DB) ensureRoles(ctx context.Context, cloudName string, roles ...Role) error {
+ adm, err := db.connectSuperuser(ctx)
+ if err != nil {
+ return err
+ }
+ defer func() { _ = adm.Close(context.Background()) }()
+
+ db.log.Debug().Msg("revoking public access")
+ safeDBName := (pgx.Identifier{cloudName}).Sanitize()
+ _, err = adm.Exec(ctx, "REVOKE ALL ON DATABASE "+safeDBName+" FROM public")
+ if err != nil {
+ return fmt.Errorf("revoke public: %v", err)
+ }
+
+ for _, role := range roles {
+ var stmt string
+ safeRoleName := (pgx.Identifier{role.Username}).Sanitize()
+ switch role.Type {
+ case RoleSuperuser:
+ // Already granted; nothing to do
+ continue
+ case RoleAdmin:
+ stmt = fmt.Sprintf("GRANT ALL ON DATABASE %s TO %s;", safeDBName, safeRoleName)
+ case RoleWrite:
+ stmt = fmt.Sprintf(`
+ GRANT TEMP, CONNECT ON DATABASE %s TO %s;
+ GRANT pg_read_all_data TO %s;
+ GRANT pg_write_all_data TO %s;
+ `, safeDBName, safeRoleName, safeRoleName, safeRoleName)
+ case RoleRead:
+ stmt = fmt.Sprintf(`
+ GRANT TEMP, CONNECT ON DATABASE %s TO %s;
+ GRANT pg_read_all_data TO %s;
+ `, safeDBName, safeRoleName, safeRoleName)
+ default:
+ return fmt.Errorf("unknown role type %q", role.Type)
+ }
+
+ db.log.Debug().Str("role", role.Username).Str("db", cloudName).Msg("granting access to role")
+
+ // We've observed race conditions in Postgres when granting access. Retry a few times.
+ {
+ var err error
+ for i := 0; i < 5; i++ {
+ _, err = adm.Exec(ctx, stmt)
+ if err == nil {
+ break
+ }
+ db.log.Debug().Str("role", role.Username).Str("db", cloudName).Err(err).Msg("error granting role, retrying")
+ time.Sleep(250 * time.Millisecond)
+ }
+ if err != nil {
+ return fmt.Errorf("grant %s role %s: %v", role.Type, role.Username, err)
+ }
+ }
+
+ db.log.Debug().Str("role", role.Username).Str("db", cloudName).Msg("successfully granted access")
+ }
+ return nil
+}
+
// Migrate migrates the database.
-func (db *DB) Migrate(ctx context.Context, appRoot string, svc *meta.Service) (err error) {
+func (db *DB) doMigrate(ctx context.Context, cloudName, appRoot string, dbMeta *meta.SQLDatabase) (err error) {
+ if db.Cluster.ID.Type == Shadow {
+ db.log.Debug().Msg("not applying migrations to shadow cluster")
+ return nil
+ }
+ if len(dbMeta.Migrations) == 0 || dbMeta.MigrationRelPath == nil {
+ db.log.Debug().Msg("no database migrations to run, skipping")
+ return nil
+ }
+
db.log.Debug().Msg("running database migrations")
defer func() {
if err != nil {
@@ -113,55 +270,177 @@ func (db *DB) Migrate(ctx context.Context, appRoot string, svc *meta.Service) (e
}
}()
- uri := fmt.Sprintf("postgresql://encore:%s@%s/%s?sslmode=disable", db.Cluster.ID, db.Cluster.HostPort, db.Name)
- conn, err := sql.Open("pgx", uri)
+ info, err := db.Cluster.Info(ctx)
if err != nil {
return err
+ } else if info.Status != Running {
+ return errors.New("cluster not running")
}
- defer conn.Close()
- instance, err := postgres.WithInstance(conn, &postgres.Config{})
+ admin, ok := info.Encore.First(RoleAdmin, RoleSuperuser)
+ if !ok {
+ return errors.New("unable to find superuser or admin roles")
+ }
+ uri := info.ConnURI(cloudName, admin)
+ db.log.Debug().Str("uri", uri).Msg("running migrations")
+ pool, err := sql.Open("pgx", uri)
if err != nil {
return err
}
+ defer fns.CloseIgnore(pool)
- s := &src{
- appRoot: appRoot,
- svcRelPath: svc.RelPath,
- migrations: svc.Migrations,
- }
- m, err := migrate.NewWithInstance("src", s, db.Name, instance)
+ path := filepath.Join(appRoot, *dbMeta.MigrationRelPath)
+ mdSrc := NewMetadataSource(NewOsMigrationReader(path), dbMeta.Migrations)
+ conn, err := pool.Conn(ctx)
if err != nil {
- return err
+ return errors.Wrap(err, "failed to connect to postgres")
}
+ err = RunMigration(ctx, cloudName, dbMeta.AllowNonSequentialMigrations, conn, mdSrc)
- if err := m.Up(); err == migrate.ErrNoChange {
- db.log.Debug().Msg("database already up to date")
+ // If we have removed a migration that failed to apply we can get an ErrNoChange error
+ // after forcing the migration down to the previous version.
+ if errors.Is(err, migrate.ErrNoChange) {
+ db.log.Info().Msg("database already up to date")
return nil
} else if err != nil {
+ return fmt.Errorf("could not migrate database %s: %v", cloudName, err)
+ }
+ db.log.Info().Msg("migration completed")
+ return nil
+}
+
+func (db *DB) ListAppliedMigrations(ctx context.Context) (map[uint64]bool, error) {
+ conn, err := db.connectToDB(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer fns.CloseIgnore(conn)
+ return LoadAppliedVersions(ctx, conn, "public", "schema_migrations")
+}
+
+func RunMigration(ctx context.Context, dbName string, allowNonSeq bool, conn *sql.Conn, mdSrc *MetadataSource) (err error) {
+ var (
+ dbDriver database.Driver
+ srcDriver source.Driver
+ )
+ if allowNonSeq {
+ dbDriver, srcDriver, err = NonSequentialMigrator(ctx, conn, mdSrc)
+ if err != nil {
+ return errors.Wrap(err, "failed to connect to postgres")
+ }
+ } else {
+ dbDriver, err = postgres.WithConnection(ctx, conn, &postgres.Config{})
+ if err != nil {
+ return errors.Wrap(err, "failed to connect to postgres")
+ }
+ srcDriver = mdSrc
+ }
+
+ curVersion, _, err := dbDriver.Version()
+ if err != nil {
+ return errors.Wrap(err, "failed to get current version")
+ } else if curVersion < -1 {
+ return errors.Newf("invalid current version (%d) for db %s", curVersion, dbName)
+ }
+
+ m, err := migrate.NewWithInstance("src", srcDriver, "postgres", dbDriver)
+ if err != nil {
+ return errors.Wrap(err, "failed to create migration instance")
+ }
+
+ err = m.Up()
+ if errors.Is(err, migrate.ErrNoChange) {
+ return err
+ }
+
+ // If we have a dirty migration, reset the dirty flag and try again.
+ // This is safe since all migrations run inside transactions.
+ var dirty migrate.ErrDirty
+ if errors.As(err, &dirty) {
+ // Find the version that preceded the dirty version so
+ // we can force the migration to that version and then
+ // re-apply the migration.
+ var prevVer uint
+ prevVer, err = srcDriver.Prev(uint(dirty.Version))
+ targetVer := int(prevVer)
+ if errors.Is(err, fs.ErrNotExist) {
+ // If Prev returns ErrNotExist, the original migration might
+ // have been deleted. In that case, search for the closest
+ // lower version, starting from the first version.
+ targetVer, err = findClosestLowerVersion(srcDriver.First, dirty.Version, srcDriver.Next)
+ if err != nil {
+ return errors.Wrapf(err, "could not automatically reset the schema_migrations "+
+ "dirty flag for database %s. Please reset it manually by connecting "+
+ "to the database modify the schema_migrations table", dbName)
+ }
+ } else if err != nil {
+ return errors.Wrap(err, "failed to find previous version")
+ }
+
+ if err = m.Force(targetVer); err == nil {
+ err = m.Up()
+ }
+ }
+ return errors.Wrap(err, "failed to migrate database")
+}
+
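+// findClosestLowerVersion walks the versions from first upwards and returns
+// the greatest version strictly below dirtyVer, or database.NilVersion (-1)
+// if none exists. For example, with versions {1, 2, 4} and dirtyVer 3 it
+// returns 2.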
+func findClosestLowerVersion(first func() (uint, error), dirtyVer int, next func(i uint) (uint, error)) (int, error) {
+ firstVer, err := first()
+ // If the first version doesn't exist, we can't reset the dirty flag
+ // and we'll need to return an error.
+ if err != nil {
+ return 0, errors.Wrapf(err, "failed to find first version")
+ }
+ // Otherwise, walk the versions to find the closest one below dirtyVer.
+ rtn := database.NilVersion
+ for nextVer := firstVer; err == nil && nextVer < uint(dirtyVer); nextVer, err = next(nextVer) {
+ rtn = int(nextVer)
+ }
+ return rtn, nil
+}
+
+func (db *DB) drop(ctx context.Context) error {
+ if err := db.doDrop(ctx, db.ApplicationCloudName()); err != nil {
+ return errors.Wrapf(err, "drop database %s", db.ApplicationCloudName())
+ }
+ if name, ok := db.TemplateCloudName().Get(); ok {
+ if err := db.doDrop(ctx, name); err != nil {
+ return errors.Wrapf(err, "drop database %s", name)
+ }
+ }
+ return nil
+}
+
+func (db *DB) terminateConnectionsToDB(ctx context.Context, cloudName string) error {
+ adm, err := db.connectSuperuser(ctx)
+ if err != nil {
return err
}
+ defer func() { _ = adm.Close(context.Background()) }()
+
+ // Drop all connections to prevent "database is being accessed by other users" errors.
+ _, _ = adm.Exec(ctx, "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = $1", cloudName)
return nil
}
-// Drop drops the database in the cluster if it exists.
-func (db *DB) Drop(ctx context.Context) error {
- adm, err := db.connectAdminDB(ctx)
+func (db *DB) doDrop(ctx context.Context, cloudName string) error {
+ adm, err := db.connectSuperuser(ctx)
if err != nil {
return err
}
- defer adm.Close(context.Background())
+ defer func() { _ = adm.Close(context.Background()) }()
var dummy int
- err = adm.QueryRow(ctx, "SELECT 1 FROM pg_database WHERE datname = $1", db.Name).Scan(&dummy)
+ err = adm.QueryRow(ctx, "SELECT 1 FROM pg_database WHERE datname = $1", cloudName).Scan(&dummy)
if err == nil {
// Drop all connections to prevent "database is being accessed by other users" errors.
- adm.Exec(ctx, "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = $1", db.Name)
+ _, _ = adm.Exec(ctx, "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = $1", cloudName)
- name := (pgx.Identifier{db.Name}).Sanitize() // sanitize database name, to be safe
+ name := (pgx.Identifier{cloudName}).Sanitize() // sanitize database name, to be safe
_, err = adm.Exec(ctx, fmt.Sprintf("DROP DATABASE %s;", name))
db.log.Debug().Err(err).Msgf("dropped database")
- } else if err == pgx.ErrNoRows {
+ } else if errors.Is(err, pgx.ErrNoRows) {
return nil
}
@@ -177,9 +456,9 @@ func (db *DB) CloseConns() {
db.cancel()
}
-// connectAdminDB creates a connection to the admin database for the cluster.
+// connectSuperuser creates a superuser connection to the root database for the cluster.
// On success the returned conn must be closed by the caller.
-func (db *DB) connectAdminDB(ctx context.Context) (*pgx.Conn, error) {
+func (db *DB) connectSuperuser(ctx context.Context) (*pgx.Conn, error) {
// Wait for the cluster to be setup
select {
case <-ctx.Done():
@@ -187,27 +466,52 @@ func (db *DB) connectAdminDB(ctx context.Context) (*pgx.Conn, error) {
case <-db.Cluster.started:
}
- hostPort := db.Cluster.HostPort
- if hostPort == "" {
- return nil, fmt.Errorf("internal error: missing HostPort for cluster %s", db.Cluster.ID)
+ info, err := db.Cluster.Info(ctx)
+ if err != nil {
+ return nil, err
+ } else if info.Status != Running {
+ return nil, fmt.Errorf("cluster not running")
}
+ uri := info.ConnURI(info.Config.RootDatabase, info.Config.Superuser)
+
// Wait for the connection to be established; this might take a little bit
// when we're racing with spinning up a Docker container.
- var err error
for i := 0; i < 40; i++ {
var conn *pgx.Conn
- conn, err = pgx.Connect(ctx, "postgresql://encore:"+db.Cluster.ID+"@"+hostPort+"/postgres?sslmode=disable")
+ conn, err = pgx.Connect(ctx, uri)
if err == nil {
return conn, nil
} else if ctx.Err() != nil {
// We'll never succeed once the context has been canceled.
// Give up straight away.
- db.log.Debug().Err(err).Msgf("failed to connect to admin db")
+ db.log.Debug().Err(err).Msgf("failed to connect to superuser db")
return nil, err
}
time.Sleep(250 * time.Millisecond)
}
db.log.Debug().Err(err).Msgf("failed to connect to admin db")
- return nil, fmt.Errorf("failed to connect to admin database: %v", err)
+ return nil, fmt.Errorf("failed to connect to superuser database: %v", err)
+}
+
+// connectToDB connects to the database as the superuser.
+// It fails fast if the cluster is not running yet.
+// On success the returned conn must be closed by the caller.
+func (db *DB) connectToDB(ctx context.Context) (*sql.Conn, error) {
+ info, err := db.Cluster.Info(ctx)
+ if err != nil {
+ return nil, err
+ }
+ uri := info.ConnURI(db.EncoreName, info.Config.Superuser)
+ pool, err := sql.Open("pgx", uri)
+ if err != nil {
+ return nil, err
+ }
+ defer fns.CloseIgnore(pool)
+
+ conn, err := pool.Conn(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return conn, nil
}
diff --git a/cli/daemon/sqldb/db_test.go b/cli/daemon/sqldb/db_test.go
new file mode 100644
index 0000000000..4ed7449c72
--- /dev/null
+++ b/cli/daemon/sqldb/db_test.go
@@ -0,0 +1,73 @@
+package sqldb
+
+import (
+ "io/fs"
+ "testing"
+
+ qt "github.com/frankban/quicktest"
+ _ "github.com/golang-migrate/migrate/v4/source/file" // for running migrations from the filesystem
+)
+
+func TestFindClosestVersion(t *testing.T) {
+ c := qt.New(t)
+ testCases := map[string]struct {
+ versions []uint
+ dirty int
+ expected int
+ expectedErr bool
+ }{
+ "first": {
+ versions: []uint{1, 2, 3},
+ dirty: 1,
+ expected: -1,
+ },
+ "middle": {
+ versions: []uint{1, 2, 3},
+ dirty: 2,
+ expected: 1,
+ },
+ "last": {
+ versions: []uint{1, 2, 3},
+ dirty: 3,
+ expected: 2,
+ },
+ "deleted": {
+ versions: []uint{1, 2, 4},
+ dirty: 3,
+ expected: 2,
+ },
+ "deleted_first": {
+ versions: []uint{2, 3, 4},
+ dirty: 1,
+ expected: -1,
+ },
+ "empty": {
+ dirty: 5,
+ expectedErr: true,
+ },
+ }
+
+ for name, tc := range testCases {
+ c.Run(name, func(c *qt.C) {
+ result, err := findClosestLowerVersion(func() (uint, error) {
+ if len(tc.versions) == 0 {
+ return 0, fs.ErrNotExist
+ }
+ return tc.versions[0], nil
+ }, tc.dirty, func(version uint) (uint, error) {
+ for _, v := range tc.versions {
+ if v > version {
+ return v, nil
+ }
+ }
+ return 0, fs.ErrNotExist
+ })
+ if tc.expectedErr {
+ c.Assert(err, qt.IsNotNil)
+ } else {
+ c.Assert(err, qt.IsNil)
+ c.Assert(result, qt.Equals, tc.expected)
+ }
+ })
+ }
+}
diff --git a/cli/daemon/sqldb/docker/docker.go b/cli/daemon/sqldb/docker/docker.go
new file mode 100644
index 0000000000..2ffa8c56af
--- /dev/null
+++ b/cli/daemon/sqldb/docker/docker.go
@@ -0,0 +1,403 @@
+package docker
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "strings"
+ "time"
+
+ "github.com/cockroachdb/errors"
+ "github.com/jackc/pgx/v5"
+ "github.com/rs/zerolog"
+
+ "encr.dev/cli/daemon/namespace"
+ "encr.dev/cli/daemon/sqldb"
+ "encr.dev/pkg/idents"
+)
+
+type Driver struct{}
+
+var _ sqldb.Driver = (*Driver)(nil)
+
+const (
+ DefaultSuperuserUsername = "postgres"
+ DefaultSuperuserPassword = "postgres"
+ DefaultRootDatabase = "postgres"
+ defaultDataDir = "/var/lib/postgresql/data"
+)
+
+func (d *Driver) CreateCluster(ctx context.Context, p *sqldb.CreateParams, log zerolog.Logger) (status *sqldb.ClusterStatus, err error) {
+ // Ensure the docker image exists first.
+ {
+ checkExistsCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+ defer cancel()
+ if ok, err := ImageExists(checkExistsCtx); err != nil {
+ return nil, errors.Wrap(err, "check docker image")
+ } else if !ok {
+ log.Debug().Msg("PostgreSQL image does not exist, pulling")
+ pullOp := p.Tracker.Add("Pulling PostgreSQL docker image", time.Now())
+ if err := PullImage(context.Background()); err != nil {
+ log.Error().Err(err).Msg("failed to pull PostgreSQL image")
+ p.Tracker.Fail(pullOp, err)
+ return nil, errors.Wrap(err, "pull docker image")
+ } else {
+ p.Tracker.Done(pullOp, 0)
+ log.Info().Msg("successfully pulled sqldb image")
+ }
+ }
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+ defer cancel()
+
+ // If cluster creation succeeds, wait until we can connect before returning.
+ defer func() {
+ if err != nil {
+ return
+ }
+ // Wait for the database to come up; this might take a little bit
+ // when we're racing with spinning up a Docker container.
+ uri := status.ConnURI(status.Config.RootDatabase, status.Config.Superuser)
+
+ const sleepTime = 250 * time.Millisecond
+ const maxLoops = (30 * time.Second) / sleepTime
+ for i := 0; i < int(maxLoops); i++ {
+ var conn *pgx.Conn
+ connCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
+ conn, err = pgx.Connect(connCtx, uri)
+ cancel()
+
+ if err == nil {
+ _ = conn.Close(ctx)
+ return
+ } else if ctx.Err() != nil {
+ // We'll never succeed once the context has been canceled.
+ // Give up straight away.
+ log.Debug().Err(err).Msgf("failed to connect to db")
+ err = errors.Wrap(err, "database did not come up")
+ } else if errors.Is(err, io.ErrUnexpectedEOF) {
+ // This is a transient error that can happen when the database first initialises
+ err = errors.Wrap(err, "database is not ready yet")
+ } else {
+ err = errors.WithStack(err)
+ }
+ time.Sleep(sleepTime)
+ }
+ }()
+
+ cid := p.ClusterID
+ cnames := containerNames(cid)
+ status, existingContainerName, err := d.clusterStatus(ctx, cid)
+ if err != nil {
+ log.Error().Err(err).Msg("failed to get container status")
+ return nil, errors.WithStack(err)
+ }
+
+ // waitForPort waits for the port to become available before returning.
+ waitForPort := func() (*sqldb.ClusterStatus, error) {
+ for i := 0; i < 20; i++ {
+ status, err = d.ClusterStatus(ctx, cid)
+ if err != nil {
+ return nil, errors.Wrap(err, "unable to wait for port")
+ }
+ if status.Config.Host != "" {
+ log.Debug().Str("hostport", status.Config.Host).Msg("cluster started")
+ return status, nil
+ }
+ time.Sleep(500 * time.Millisecond)
+ }
+ return nil, errors.New("timed out waiting for cluster to start")
+ }
+
+ switch status.Status {
+ case sqldb.Running:
+ log.Debug().Str("hostport", status.Config.Host).Msg("cluster already running")
+ return status, nil
+
+ case sqldb.Stopped:
+ log.Debug().Msg("cluster stopped, restarting")
+
+ if out, err := exec.CommandContext(ctx, "docker", "start", existingContainerName).CombinedOutput(); err != nil {
+ return nil, errors.Wrapf(err, "could not start sqldb container: %s", string(out))
+ }
+ return waitForPort()
+
+ case sqldb.NotFound:
+ log.Debug().Msg("cluster not found, creating")
+ args := []string{
+ "run",
+ "-d",
+ "-p", "5432",
+ "--shm-size=1gb",
+ "-e", "POSTGRES_USER=" + DefaultSuperuserUsername,
+ "-e", "POSTGRES_PASSWORD=" + DefaultSuperuserPassword,
+ "-e", "POSTGRES_DB=" + DefaultRootDatabase,
+ "-e", "PGDATA=" + defaultDataDir,
+ "--name", cnames[0],
+ }
+ if p.Memfs {
+ args = append(args,
+ "--mount", "type=tmpfs,destination="+defaultDataDir,
+ Image,
+ "-c", "fsync=off",
+ )
+ } else {
+ volumeName := clusterVolumeNames(p.ClusterID.NS)[0] // guaranteed to be non-empty
+ if err := d.createVolumeIfNeeded(ctx, volumeName); err != nil {
+ return nil, errors.Wrap(err, "create data volume")
+ }
+ args = append(args,
+ "-v", fmt.Sprintf("%s:%s", volumeName, defaultDataDir),
+ Image)
+ }
+
+ cmd := exec.CommandContext(ctx, "docker", args...)
+ if out, err := cmd.CombinedOutput(); err != nil {
+ return nil, errors.Wrapf(err, "could not start sql database as docker container: %s", out)
+ }
+
+ log.Debug().Msg("cluster created")
+ return waitForPort()
+
+ default:
+ return nil, errors.Newf("unknown cluster status %q", status.Status)
+ }
+}
+
+func (d *Driver) ClusterStatus(ctx context.Context, id sqldb.ClusterID) (*sqldb.ClusterStatus, error) {
+ status, _, err := d.clusterStatus(ctx, id)
+ return status, errors.WithStack(err)
+}
+
+func (d *Driver) CheckRequirements(ctx context.Context) error {
+ if _, err := exec.LookPath("docker"); err != nil {
+ return errors.New("This application requires docker to run since it uses an SQL database. Install docker first.")
+ } else if !isDockerRunning(ctx) {
+ return errors.New("The docker daemon is not running. Start it first.")
+ }
+ return nil
+}
+
+// clusterStatus reports both the standard ClusterStatus but also the container name we actually resolved to.
+func (d *Driver) clusterStatus(ctx context.Context, id sqldb.ClusterID) (status *sqldb.ClusterStatus, containerName string, err error) {
+ var output []byte
+
+ // Try the candidate container names in order.
+ cnames := containerNames(id)
+ for _, cname := range cnames {
+ var err error
+ out, err := exec.CommandContext(ctx, "docker", "container", "inspect", cname).CombinedOutput()
+ if errors.Is(err, exec.ErrNotFound) {
+ return nil, "", errors.New("docker not found: is it installed and in your PATH?")
+ } else if err != nil {
+ // Docker returns a non-zero exit code if the container does not exist.
+ // Try to tell this apart from an error by parsing the output.
+ if bytes.Contains(out, []byte("No such container")) {
+ continue
+ }
+ // Podman has slightly different output when a container is not found.
+ if bytes.Contains(out, []byte("no such container")) {
+ continue
+ }
+ return nil, "", errors.Wrapf(err, "docker container inspect failed: %s", out)
+ } else {
+ // Found our container; use it.
+ output, containerName = out, cname
+ break
+ }
+ }
+ if output == nil {
+ return &sqldb.ClusterStatus{Status: sqldb.NotFound}, containerName, nil
+ }
+
+ var resp []struct {
+ Name string
+ State struct {
+ Running bool
+ }
+ Config struct {
+ Env []string
+ }
+ NetworkSettings struct {
+ Ports map[string][]struct {
+ HostIP string
+ HostPort string
+ }
+ }
+ }
+ if err := json.Unmarshal(output, &resp); err != nil {
+ return nil, "", errors.Wrap(err, "parse `docker container inspect` response")
+ }
+ for _, c := range resp {
+ // Docker prefixes `/` to the container name, Podman doesn't.
+ if c.Name == "/"+containerName || c.Name == containerName {
+ status := &sqldb.ClusterStatus{Status: sqldb.Stopped, Config: &sqldb.ConnConfig{
+ // Defaults if we don't find anything else configured.
+ Superuser: sqldb.Role{
+ Type: sqldb.RoleSuperuser,
+ Username: DefaultSuperuserUsername,
+ Password: DefaultSuperuserPassword,
+ },
+ RootDatabase: DefaultRootDatabase,
+ }}
+ if c.State.Running {
+ status.Status = sqldb.Running
+ }
+ ports := c.NetworkSettings.Ports["5432/tcp"]
+ if len(ports) > 0 {
+ hostIP := ports[0].HostIP
+
+ // Podman can keep HostIP empty or 0.0.0.0.
+ // https://github.com/containers/podman/issues/17780
+ if hostIP == "" || hostIP == "0.0.0.0" {
+ hostIP = "127.0.0.1"
+ }
+
+ status.Config.Host = hostIP + ":" + ports[0].HostPort
+ }
+
+ // Read the Postgres config from the docker container's environment.
+ for _, env := range c.Config.Env {
+ if name, value, ok := strings.Cut(env, "="); ok {
+ switch name {
+ case "POSTGRES_USER":
+ status.Config.Superuser.Username = value
+ case "POSTGRES_PASSWORD":
+ status.Config.Superuser.Password = value
+ case "POSTGRES_DB":
+ status.Config.RootDatabase = value
+ }
+ }
+ }
+
+ return status, containerName, nil
+ }
+ }
+ return &sqldb.ClusterStatus{Status: sqldb.NotFound}, containerName, nil
+}
+
+func (d *Driver) CanDestroyCluster(ctx context.Context, id sqldb.ClusterID) error {
+ // Check that we can communicate with Docker.
+ if !isDockerRunning(ctx) {
+ return errors.New("cannot delete sql database: docker is not running")
+ }
+ return nil
+}
+
+func (d *Driver) DestroyCluster(ctx context.Context, id sqldb.ClusterID) error {
+ cnames := containerNames(id)
+ for _, cname := range cnames {
+ out, err := exec.CommandContext(ctx, "docker", "rm", "-f", cname).CombinedOutput()
+ if err != nil {
+ if bytes.Contains(out, []byte("No such container")) {
+ continue
+ }
+ return errors.Wrapf(err, "could not delete cluster: %s", out)
+ }
+ }
+ return nil
+}
+
+func (d *Driver) DestroyNamespaceData(ctx context.Context, ns *namespace.Namespace) error {
+ candidates := clusterVolumeNames(ns)
+ for _, c := range candidates {
+ if err := exec.CommandContext(ctx, "docker", "volume", "rm", "-f", c).Run(); err != nil {
+ if strings.Contains(strings.ToLower(err.Error()), "no such volume") {
+ continue
+ }
+ return errors.Wrapf(err, "could not delete volume %s", c)
+ }
+ }
+ return nil
+}
+
+func (d *Driver) createVolumeIfNeeded(ctx context.Context, name string) error {
+ if err := exec.CommandContext(ctx, "docker", "volume", "inspect", name).Run(); err == nil {
+ return nil
+ }
+ out, err := exec.CommandContext(ctx, "docker", "volume", "create", name).CombinedOutput()
+ return errors.Wrapf(err, "create volume %s: %s", name, out)
+}
+
+func (d *Driver) Meta() sqldb.DriverMeta {
+ return sqldb.DriverMeta{ClusterIsolation: true}
+}
+
+// containerNames computes the container name candidates for a given clusterID.
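+// For example, a test cluster for an app with local ID "my-app" in the
+// default namespace (ID "abc1") yields
+// ["sqldb-my-app-test-default-abc1", "sqldb-my-app-test"].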
+func containerNames(id sqldb.ClusterID) []string {
+ // candidates returns possible candidate names for a given app id.
+ candidates := func(appID string) (names []string) {
+ base := "sqldb-" + appID
+
+ if id.Type != sqldb.Run {
+ base += "-" + string(id.Type)
+ }
+
+ // Convert the namespace to kebab case to remove invalid characters like ':'.
+ nsName := idents.Convert(string(id.NS.Name), idents.KebabCase)
+
+ names = []string{base + "-" + nsName + "-" + string(id.NS.ID)}
+ // If this is the default namespace look up the container without
+ // the namespace suffix as well, for backwards compatibility.
+ if id.NS.Name == "default" {
+ names = append(names, base)
+ }
+ return names
+ }
+
+ var names []string
+ if pid := id.NS.App.PlatformID(); pid != "" {
+ names = append(names, candidates(pid)...)
+ }
+ names = append(names, candidates(id.NS.App.LocalID())...)
+ return names
+}
+
+// ImageExists reports whether the docker image exists.
+func ImageExists(ctx context.Context) (ok bool, err error) {
+ out, err := exec.CommandContext(ctx, "docker", "image", "inspect", Image).CombinedOutput()
+ switch {
+ case err == nil:
+ return true, nil
+ case bytes.Contains(out, []byte("No such image")):
+ return false, nil
+ // Podman has a different error message.
+ case bytes.Contains(out, []byte("failed to find image")):
+ return false, nil
+ default:
+ return false, errors.Wrapf(err, "docker image inspect failed: %s", Image)
+ }
+}
+
+// PullImage pulls the image.
+func PullImage(ctx context.Context) error {
+ cmd := exec.CommandContext(ctx, "docker", "pull", Image)
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ return cmd.Run()
+}
+
+const Image = "encoredotdev/postgres:15"
+
+func isDockerRunning(ctx context.Context) bool {
+ err := exec.CommandContext(ctx, "docker", "info").Run()
+ return err == nil
+}
+
+// clusterVolumeNames reports the candidate names for the docker volume.
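+// For example, an app with local ID "my-app" and namespace "default"
+// (ID "abc1") yields "sqldb-my-app-abc1-default".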
+func clusterVolumeNames(ns *namespace.Namespace) (candidates []string) {
+ nsName := idents.Convert(string(ns.Name), idents.KebabCase)
+ suffix := fmt.Sprintf("%s-%s", ns.ID, nsName)
+
+ for _, id := range [...]string{ns.App.PlatformID(), ns.App.LocalID()} {
+ if id != "" {
+ candidates = append(candidates, fmt.Sprintf("sqldb-%s-%s", id, suffix))
+ }
+ }
+ return candidates
+}
diff --git a/cli/daemon/sqldb/driver.go b/cli/daemon/sqldb/driver.go
new file mode 100644
index 0000000000..0d06159806
--- /dev/null
+++ b/cli/daemon/sqldb/driver.go
@@ -0,0 +1,113 @@
+package sqldb
+
+import (
+ "context"
+ "errors"
+
+ "github.com/rs/zerolog"
+
+ "encr.dev/cli/daemon/namespace"
+ "encr.dev/internal/optracker"
+)
+
+var ErrUnsupported = errors.New("unsupported operation")
+
+// A Driver abstracts away how a cluster is actually operated.
+type Driver interface {
+ // CreateCluster creates (if necessary) and starts (if necessary) a new cluster using the driver,
+ // and returns its status.
+ // err is non-nil if and only if the cluster could not be started.
+ CreateCluster(ctx context.Context, p *CreateParams, log zerolog.Logger) (*ClusterStatus, error)
+
+ // CanDestroyCluster reports whether the cluster could be destroyed, if desired.
+ // If a Driver doesn't support destroying the cluster it reports ErrUnsupported.
+ CanDestroyCluster(ctx context.Context, id ClusterID) error
+
+ // DestroyCluster destroys a cluster with the given id.
+ // If a Driver doesn't support destroying the cluster it reports ErrUnsupported.
+ DestroyCluster(ctx context.Context, id ClusterID) error
+
+ // DestroyNamespaceData destroys the data associated with a namespace.
+ // If a Driver doesn't support destroying data it reports ErrUnsupported.
+ DestroyNamespaceData(ctx context.Context, ns *namespace.Namespace) error
+
+ // ClusterStatus reports the current status of a cluster.
+ ClusterStatus(ctx context.Context, id ClusterID) (*ClusterStatus, error)
+
+ // CheckRequirements checks whether all the requirements are met
+ // to use the driver.
+ CheckRequirements(ctx context.Context) error
+
+ // Meta reports driver metadata.
+ Meta() DriverMeta
+}
+
+type DriverMeta struct {
+ // ClusterIsolation reports whether clusters are isolated by the driver.
+ // If false, database names will be prefixed with the cluster id.
+ ClusterIsolation bool
+}
+
+type ConnConfig struct {
+ // Host is the host address to connect to the database.
+ // It is only set when Status == Running.
+ Host string
+
+ // Superuser is the role to use to connect as the superuser,
+ // for creating and managing Encore databases.
+ Superuser Role
+ RootDatabase string // root database to connect to
+}
+
+type ClusterType string
+
+const (
+ Run ClusterType = "run"
+ Shadow ClusterType = "shadow"
+ Test ClusterType = "test"
+)
+
+func (ct ClusterType) Memfs() bool {
+ switch ct {
+ case Run:
+ return false
+ case Shadow, Test:
+ return true
+ default:
+ return false
+ }
+}
+
+// CreateParams are the params to (*ClusterManager).Create.
+type CreateParams struct {
+ ClusterID ClusterID
+
+ // Memfs, if true, configures the database container to use an
+ // in-memory filesystem as opposed to persisting the database to disk.
+ Memfs bool
+
+ // Tracker allows tracking the progress of the operation.
+ Tracker *optracker.OpTracker
+}
+
+// Status represents the status of a container.
+type Status string
+
+const (
+ // Running indicates the cluster is running.
+ Running Status = "running"
+ // Stopped indicates the container exists but is not running.
+ Stopped Status = "stopped"
+ // NotFound indicates the container does not exist.
+ NotFound Status = "notfound"
+)
+
+// ClusterStatus represents the status of a database cluster.
+type ClusterStatus struct {
+ // Status is the status of the underlying container.
+ Status Status
+
+ // Config is how to connect to the cluster.
+ // It is non-nil if Status == Running.
+ Config *ConnConfig
+}
diff --git a/cli/daemon/sqldb/external/external.go b/cli/daemon/sqldb/external/external.go
new file mode 100644
index 0000000000..47e95316b7
--- /dev/null
+++ b/cli/daemon/sqldb/external/external.go
@@ -0,0 +1,68 @@
+// Package external implements a cluster driver for an external cluster.
+package external
+
+import (
+ "context"
+
+ "github.com/rs/zerolog"
+
+ "encr.dev/cli/daemon/namespace"
+ "encr.dev/cli/daemon/sqldb"
+)
+
+type Driver struct {
+ Host string // "host", "host:port", "/path/to/unix.socket",
+ Database string // database name
+ SuperuserUsername string
+ SuperuserPassword string
+}
+
+var _ sqldb.Driver = (*Driver)(nil)
+
+func (d *Driver) CreateCluster(ctx context.Context, p *sqldb.CreateParams, log zerolog.Logger) (*sqldb.ClusterStatus, error) {
+ // The external driver does not actually create the cluster; just return the status.
+ return d.ClusterStatus(ctx, p.ClusterID)
+}
+
+func (d *Driver) ClusterStatus(ctx context.Context, id sqldb.ClusterID) (*sqldb.ClusterStatus, error) {
+ st := &sqldb.ClusterStatus{
+ Status: sqldb.Running,
+ Config: &sqldb.ConnConfig{
+ Host: d.Host,
+ Superuser: sqldb.Role{
+ Type: sqldb.RoleSuperuser,
+ Username: def(d.SuperuserUsername, "postgres"),
+ Password: def(d.SuperuserPassword, "postgres"),
+ },
+ RootDatabase: def(d.Database, "postgres"),
+ },
+ }
+ return st, nil
+}
+
+func (d *Driver) CanDestroyCluster(ctx context.Context, id sqldb.ClusterID) error {
+ return sqldb.ErrUnsupported
+}
+
+func (d *Driver) DestroyCluster(ctx context.Context, id sqldb.ClusterID) error {
+ return sqldb.ErrUnsupported
+}
+
+func (d *Driver) DestroyNamespaceData(ctx context.Context, ns *namespace.Namespace) error {
+ return sqldb.ErrUnsupported
+}
+
+func (d *Driver) CheckRequirements(ctx context.Context) error {
+ return nil
+}
+
+func (d *Driver) Meta() sqldb.DriverMeta {
+ return sqldb.DriverMeta{ClusterIsolation: false}
+}
+
+func def(val, orDefault string) string {
+ if val == "" {
+ val = orDefault
+ }
+ return val
+}
diff --git a/cli/daemon/sqldb/manager.go b/cli/daemon/sqldb/manager.go
index cf994e76cf..e447e85683 100644
--- a/cli/daemon/sqldb/manager.go
+++ b/cli/daemon/sqldb/manager.go
@@ -1,103 +1,204 @@
-// Package sqldb runs and manages connections for Encore applications.
package sqldb
import (
- "bytes"
"context"
+ "crypto/rand"
+ "encoding/base64"
"fmt"
- "os/exec"
"sync"
- meta "encr.dev/proto/encore/parser/meta/v1"
+ "github.com/cockroachdb/errors"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"golang.org/x/sync/singleflight"
+
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/cli/daemon/namespace"
+ "encr.dev/cli/daemon/secret"
)
// NewClusterManager creates a new ClusterManager.
-func NewClusterManager() *ClusterManager {
+func NewClusterManager(driver Driver, apps *apps.Manager, ns *namespace.Manager, secretMgr *secret.Manager) *ClusterManager {
log := log.Logger
return &ClusterManager{
log: log,
- clusters: make(map[string]*Cluster),
+ driver: driver,
+ apps: apps,
+ ns: ns,
+ clusters: make(map[clusterKey]*Cluster),
backendKeyData: make(map[uint32]*Cluster),
+ secretMgr: secretMgr,
}
}
// A ClusterManager manages running local sqldb clusters.
type ClusterManager struct {
log zerolog.Logger
+ driver Driver
+ apps *apps.Manager
+ ns *namespace.Manager
startGroup singleflight.Group
+ secretMgr *secret.Manager
mu sync.Mutex
- clusters map[string]*Cluster // cluster id -> cluster
+ clusters map[clusterKey]*Cluster
// backendKeyData maps the secret data to a cluster,
// for forwarding cancel requests to the right cluster.
// Access is guarded by mu.
backendKeyData map[uint32]*Cluster
}
-// InitParams are the params to (*ClusterManager).Init.
-type InitParams struct {
- // ClusterID is the unique id of the cluster.
- ClusterID string
+// ClusterID uniquely identifies a cluster.
+type ClusterID struct {
+ NS *namespace.Namespace
+ Type ClusterType
+}
- // Meta is the metadata used to initialize databases.
- // If nil no databases are initialized.
- Meta *meta.Data
+// clusterKey is the key to use to store a cluster in the cluster map.
+type clusterKey string
- // Memfs, if true, configures the database container to use an
- // in-memory filesystem as opposed to persisting the database to disk.
- Memfs bool
+func (id ClusterID) clusterKey() clusterKey {
+ return clusterKey(fmt.Sprintf("%s-%s", id.NS.ID, id.Type))
+}
+
+func GetClusterID(app *apps.Instance, typ ClusterType, ns *namespace.Namespace) ClusterID {
+ return ClusterID{ns, typ}
+}
- // Reinit forces all databases to be reinitialized, even if they already exist.
- Reinit bool
+// Ready reports whether the cluster manager is ready and all requirements are met.
+func (cm *ClusterManager) Ready() error {
+ return cm.driver.CheckRequirements(context.Background())
}
-// Init initializes a database cluster but does not start it.
+// Create creates a database cluster but does not start it.
// If the cluster already exists it is returned.
// It does not perform any database migrations.
-func (cm *ClusterManager) Init(ctx context.Context, params *InitParams) *Cluster {
- cid := params.ClusterID
+func (cm *ClusterManager) Create(ctx context.Context, params *CreateParams) *Cluster {
cm.mu.Lock()
defer cm.mu.Unlock()
- c, ok := cm.clusters[cid]
+
+ c, ok := cm.get(params.ClusterID)
+ if ok {
+ if status, err := c.Status(ctx); err != nil || status.Status != Running {
+ // The cluster is no longer running; recreate it to clear our cached state.
+ c.cancel()
+ ok = false
+ }
+ }
+
if !ok {
ctx, cancel := context.WithCancel(context.Background())
+ key := params.ClusterID.clusterKey()
+ passwd := genPassword()
+ secretLoader := cm.secretMgr.Load(params.ClusterID.NS.App)
+
c = &Cluster{
- ID: params.ClusterID,
- Memfs: params.Memfs,
- Ctx: ctx,
- cancel: cancel,
- started: make(chan struct{}),
- log: cm.log.With().Str("cluster", params.ClusterID).Logger(),
- dbs: make(map[string]*DB),
+ ID: params.ClusterID,
+ Memfs: params.Memfs,
+ Password: passwd,
+ Ctx: ctx,
+ driver: cm.driver,
+ cancel: cancel,
+ started: make(chan struct{}),
+ log: cm.log.With().Interface("cluster", params.ClusterID).Logger(),
+ dbs: make(map[string]*DB),
+ isExternal: func(name string) bool {
+ // Don't use external databases for Memfs clusters (tests/shadows).
+ if params.Memfs {
+ return false
+ }
+ secrets, err := secretLoader.Get(ctx, nil)
+ if err != nil {
+ c.log.Error().Err(err).Msg("failed to load secrets for external database check")
+ return false
+ }
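+ // A secret named "sqldb::<dbname>" marks that database as external.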
+ _, ok := secrets.Values["sqldb::"+name]
+ return ok
+ },
}
- cm.clusters[cid] = c
+
+ cm.clusters[key] = c
}
- c.initDBs(params.Meta, params.Reinit)
+
return c
}
+// LookupPassword looks up a cluster based on its password.
+func (cm *ClusterManager) LookupPassword(password string) (*Cluster, bool) {
+ cm.mu.Lock()
+ defer cm.mu.Unlock()
+
+ for _, c := range cm.clusters {
+ if c.Password == password {
+ return c, true
+ }
+ }
+ return nil, false
+}
+
// Get retrieves the cluster keyed by id.
-func (cm *ClusterManager) Get(clusterID string) (*Cluster, bool) {
+func (cm *ClusterManager) Get(id ClusterID) (*Cluster, bool) {
cm.mu.Lock()
- c, ok := cm.clusters[clusterID]
- cm.mu.Unlock()
+ defer cm.mu.Unlock()
+ return cm.get(id)
+}
+
+// get retrieves the cluster keyed by id.
+// cm.mu must be held.
+func (cm *ClusterManager) get(id ClusterID) (*Cluster, bool) {
+ c, ok := cm.clusters[id.clusterKey()]
return c, ok
}
-// Delete forcibly deletes the cluster.
-func (cm *ClusterManager) Delete(ctx context.Context, clusterID string) error {
- cname := containerName(clusterID)
- out, err := exec.CommandContext(ctx, "docker", "rm", "-f", cname).CombinedOutput()
- if err != nil {
- if bytes.Contains(out, []byte("No such container")) {
- return nil
- }
- return fmt.Errorf("could not delete cluster: %s (%v)", out, err)
+// CanDeleteNamespace implements namespace.DeletionHandler.
+func (cm *ClusterManager) CanDeleteNamespace(ctx context.Context, app *apps.Instance, ns *namespace.Namespace) error {
+ c, ok := cm.Get(GetClusterID(app, Run, ns))
+ if !ok {
+ return nil
+ }
+
+ err := c.driver.CanDestroyCluster(ctx, c.ID)
+ if errors.Is(err, ErrUnsupported) {
+ err = nil
}
- return nil
+ return err
}
-const dockerImage = "postgres:11-alpine"
+// DeleteNamespace implements namespace.DeletionHandler.
+func (cm *ClusterManager) DeleteNamespace(ctx context.Context, app *apps.Instance, ns *namespace.Namespace) error {
+ // Find all clusters matching this namespace.
+ // Use a closure for the lock to avoid holding it while we destroy the clusters.
+ var clusters []*Cluster
+ (func() {
+ cm.mu.Lock()
+ defer cm.mu.Unlock()
+ for _, c := range cm.clusters {
+ if c.ID.NS.ID == ns.ID {
+ clusters = append(clusters, c)
+ }
+ }
+ })()
+
+ // Destroy the clusters.
+ for _, c := range clusters {
+ if err := c.driver.DestroyCluster(ctx, c.ID); err != nil && !errors.Is(err, ErrUnsupported) {
+ return errors.Wrapf(err, "destroy cluster %s", c.ID)
+ }
+ c.cancel()
+ }
+
+ // If that succeeded, destroy the namespace data.
+ err := cm.driver.DestroyNamespaceData(ctx, ns)
+ if errors.Is(err, ErrUnsupported) {
+ err = nil
+ }
+ return err
+}
+
+func genPassword() string {
+ var data [8]byte
+ if _, err := rand.Read(data[:]); err != nil {
+ log.Fatal().Err(err).Msg("unable to generate random data")
+ }
+ return base64.RawURLEncoding.EncodeToString(data[:])
+}
diff --git a/cli/daemon/sqldb/migrate.go b/cli/daemon/sqldb/migrate.go
index 2610b1db40..f0fdad4050 100644
--- a/cli/daemon/sqldb/migrate.go
+++ b/cli/daemon/sqldb/migrate.go
@@ -2,71 +2,346 @@ package sqldb
import (
"bytes"
+ "context"
+ "database/sql"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
+ "slices"
+ "strings"
- meta "encr.dev/proto/encore/parser/meta/v1"
+ "github.com/cockroachdb/errors"
+ "github.com/golang-migrate/migrate/v4/database"
+ "github.com/golang-migrate/migrate/v4/database/postgres"
"github.com/golang-migrate/migrate/v4/source"
+ "github.com/hashicorp/go-multierror"
+ "github.com/lib/pq"
+
+ meta "encr.dev/proto/encore/parser/meta/v1"
)
-type src struct {
- appRoot string
- svcRelPath string
+// MigrationReader is an interface for reading migration files. It has two main
+// implementations: OsMigrationReader and ZipFSMigrationReader.
+type MigrationReader interface {
+ Read(*meta.DBMigration) (r io.ReadCloser, err error)
+}
+
+// NewOsMigrationReader returns an OsMigrationReader that reads migrations
+// from the local filesystem.
+func NewOsMigrationReader(path string) *OsMigrationReader {
+ return &OsMigrationReader{path: path}
+}
+
+type OsMigrationReader struct {
+ path string
+}
+
+func (src *OsMigrationReader) Read(m *meta.DBMigration) (r io.ReadCloser, err error) {
+ fpath := filepath.Join(src.path, m.Filename)
+ data, err := os.ReadFile(fpath)
+ if err != nil {
+ return nil, err
+ }
+ return io.NopCloser(bytes.NewReader(data)), nil
+}
+
+// MultiReadCloser is a helper that extends io.MultiReader to also close any
+// underlying readers that implement io.Closer. The MetadataSource uses it to
+// append a statement that marks a migration as successful.
+func MultiReadCloser(r ...io.Reader) io.ReadCloser {
+ return &multiReadCloser{
+ readers: r,
+ multiReader: io.MultiReader(r...),
+ }
+}
+
+type multiReadCloser struct {
+ readers []io.Reader
+ multiReader io.Reader
+}
+
+func (m multiReadCloser) Read(p []byte) (n int, err error) {
+ return m.multiReader.Read(p)
+}
+
+func (m multiReadCloser) Close() error {
+ var errs []error
+ for _, r := range m.readers {
+ if c, ok := r.(io.Closer); ok {
+ if err := c.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ }
+ return errors.Join(errs...)
+}
+
+var _ io.ReadCloser = (*multiReadCloser)(nil)
+
+// NewMetadataSource creates a new MetadataSource instance.
+func NewMetadataSource(reader MigrationReader, migrations []*meta.DBMigration) *MetadataSource {
+ src := &MetadataSource{
+ MigrationReader: reader,
+ migrations: migrations,
+ }
+ src.validate()
+ return src
+}
+
+func (src *MetadataSource) validate() {
+ if src.err != nil {
+ return
+ }
+ seen := make(map[uint64]struct{})
+ for _, m := range src.migrations {
+ if _, ok := seen[m.Number]; ok {
+ src.err = fmt.Errorf("duplicate migration identifier %q", m.Filename)
+ return
+ }
+ seen[m.Number] = struct{}{}
+ }
+}
+
+// MetadataSource is a source.Driver implementation that keeps a list of migrations retrieved from
+// the Encore metadata. It relies on a MigrationReader to read the migration files.
+type MetadataSource struct {
+ MigrationReader
migrations []*meta.DBMigration
+ err error
+}
+
+func (src *MetadataSource) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) {
+ m, err := src.migration(version, 0)
+ if err != nil {
+ return nil, "", err
+ }
+ r, err = src.Read(m)
+ if err != nil {
+ return nil, "", err
+ }
+ // This ensures a migration is marked successful in the same statement that
+ // runs it. Otherwise we may end up with a finished migration that is marked
+ // dirty because SetVersion runs as a separate statement.
+ statement := fmt.Sprintf(
+ ";\ninsert into schema_migrations (version, dirty) values (%d, false) ON CONFLICT (version) DO UPDATE SET dirty = false;",
+ version)
+ return MultiReadCloser(
+ r,
+ strings.NewReader(statement),
+ ), m.Description, nil
}
-func (src *src) Open(url string) (source.Driver, error) {
+func (src *MetadataSource) Open(url string) (source.Driver, error) {
return nil, fmt.Errorf("driver.Open is not implemented")
}
-func (src *src) Close() error {
+func (src *MetadataSource) Close() error {
return nil
}
-func (src *src) First() (version uint, err error) {
+func (src *MetadataSource) First() (version uint, err error) {
if len(src.migrations) == 0 {
return 0, os.ErrNotExist
}
return uint(src.migrations[0].Number), nil
}
-func (src *src) Prev(version uint) (prevVersion uint, err error) {
- idx := src.verIdx(version, -1)
- if idx < 0 || idx >= len(src.migrations) {
- return 0, os.ErrNotExist
+func (src *MetadataSource) Prev(version uint) (prevVersion uint, err error) {
+ m, err := src.migration(version, -1)
+ if err != nil {
+ return 0, err
}
- return uint(src.migrations[idx].Number), nil
+ return uint(m.Number), nil
}
-func (src *src) Next(version uint) (nextVersion uint, err error) {
- idx := src.verIdx(version, +1)
- if idx >= len(src.migrations) {
- return 0, os.ErrNotExist
+func (src *MetadataSource) Next(version uint) (nextVersion uint, err error) {
+ m, err := src.migration(version, +1)
+ if err != nil {
+ return 0, err
}
- return uint(src.migrations[idx].Number), nil
+ return uint(m.Number), nil
+}
+
+func (src *MetadataSource) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) {
+ return nil, "", os.ErrNotExist
}
-func (src *src) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) {
- idx := src.verIdx(version, 0)
+func (src *MetadataSource) migration(version uint, offset int) (*meta.DBMigration, error) {
+ if src.err != nil {
+ return nil, src.err
+ }
+ idx := slices.IndexFunc(src.migrations, func(m *meta.DBMigration) bool {
+ return m.Number == uint64(version)
+ })
+ if idx < 0 {
+ return nil, os.ErrNotExist
+ }
+ idx += offset
if idx < 0 || idx >= len(src.migrations) {
- return nil, "", os.ErrNotExist
+ return nil, os.ErrNotExist
}
- m := src.migrations[idx]
- filepath := filepath.Join(src.appRoot, src.svcRelPath, "migrations", m.Filename)
- data, err := os.ReadFile(filepath)
+ return src.migrations[idx], nil
+}
+
+type nonSequentialDbDriver struct {
+ *postgres.Postgres
+ source *nonSequentialSource
+ schemaName string
+ migrationsTable string
+ conn *sql.Conn
+ appliedVersions map[uint64]bool
+}
+
+type nonSequentialSource struct {
+ *MetadataSource
+ dbDriver *nonSequentialDbDriver
+}
+
+// NonSequentialMigrator creates a new migrator that doesn't require migrations to be sequential.
+// It does this by keeping track of applied migrations in a table and using that to determine the
+// current version and which migrations need to be applied. It effectively extends the logic of
+// the go-migrate library to support non-sequential migrations, and is semi-compatible with it
+// since it uses the same underlying table.
+func NonSequentialMigrator(ctx context.Context, conn *sql.Conn, mdSource *MetadataSource) (database.Driver, source.Driver, error) {
+ src := &nonSequentialSource{
+ MetadataSource: mdSource,
+ }
+ db := &nonSequentialDbDriver{
+ conn: conn,
+ migrationsTable: "schema_migrations",
+ source: src,
+ }
+ src.dbDriver = db
+ query := `SELECT CURRENT_SCHEMA()`
+ if err := conn.QueryRowContext(ctx, query).Scan(&db.schemaName); err != nil {
+ return nil, nil, &database.Error{OrigErr: err, Query: []byte(query)}
+ }
+
+ if len(db.schemaName) == 0 {
+ return nil, nil, postgres.ErrNoSchema
+ }
+
+ p, err := postgres.WithConnection(ctx, conn, &postgres.Config{
+ MigrationsTable: db.migrationsTable,
+ SchemaName: db.schemaName,
+ })
if err != nil {
- return nil, "", err
+ return nil, nil, errors.Wrap(err, "failed to create migration instance")
+ }
+ db.Postgres = p
+ if err := db.loadAppliedVersions(); err != nil {
+ return nil, nil, errors.Wrap(err, "failed to load applied versions")
}
- return ioutil.NopCloser(bytes.NewReader(data)), m.Description, nil
+ return db, src, nil
}
-func (src *src) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) {
- return nil, "", os.ErrNotExist
+func (p *nonSequentialDbDriver) Version() (version int, dirty bool, err error) {
+ if len(p.appliedVersions) == 0 {
+ return database.NilVersion, false, nil
+ }
+ var ok bool
+ prevVersion := database.NilVersion
+ for _, mg := range p.source.migrations {
+ dirty, ok = p.appliedVersions[mg.Number]
+ if !ok {
+ return prevVersion, false, nil
+ } else if dirty {
+ return int(mg.Number), true, nil
+ }
+ prevVersion = int(mg.Number)
+ }
+ return prevVersion, false, nil
+}
+
+func (p *nonSequentialDbDriver) SetVersion(version int, dirty bool) error {
+ // In PostgreSQL, all migrations are applied within the same statement/transaction.
+ // If the migration fails to apply, it is automatically rolled back.
+ // Therefore, we don't need to worry about marking a migration as dirty.
+ if dirty {
+ return nil
+ }
+ tx, err := p.conn.BeginTx(context.Background(), &sql.TxOptions{})
+ if err != nil {
+ return &database.Error{OrigErr: err, Err: "transaction start failed"}
+ }
+
+ if version >= 0 {
+ query := `INSERT INTO ` + pq.QuoteIdentifier(p.schemaName) + `.` + pq.QuoteIdentifier(p.migrationsTable) + ` (version, dirty) VALUES ($1, $2) ON CONFLICT (version) DO UPDATE SET dirty = $2`
+ if _, err := tx.Exec(query, version, dirty); err != nil {
+ if errRollback := tx.Rollback(); errRollback != nil {
+ err = multierror.Append(err, errRollback)
+ }
+ return &database.Error{OrigErr: err, Query: []byte(query)}
+ }
+ }
+
+ if err := tx.Commit(); err != nil {
+ return &database.Error{OrigErr: err, Err: "transaction commit failed"}
+ }
+
+ return nil
+}
+
+func LoadAppliedVersions(ctx context.Context, conn *sql.Conn, schemaName, migrationsTable string) (map[uint64]bool, error) {
+ appliedVersions := map[uint64]bool{}
+
+ query := `SELECT version, dirty FROM ` + pq.QuoteIdentifier(schemaName) + `.` + pq.QuoteIdentifier(migrationsTable) + ` ORDER BY version`
+ rows, err := conn.QueryContext(ctx, query)
+ if err != nil {
+ if e, ok := err.(*pq.Error); ok {
+ if e.Code.Name() == "undefined_table" {
+ return appliedVersions, nil
+ }
+ }
+ return nil, &database.Error{OrigErr: err, Query: []byte(query)}
+ }
+ defer rows.Close()
+ var version uint64
+ var dirty bool
+ for rows.Next() {
+ err := rows.Scan(&version, &dirty)
+ if err != nil {
+ return nil, &database.Error{OrigErr: err, Query: []byte(query)}
+ }
+ appliedVersions[version] = dirty
+ }
+ return appliedVersions, nil
+}
+
+func (p *nonSequentialDbDriver) loadAppliedVersions() error {
+ if p.appliedVersions != nil {
+ return nil
+ }
+ applied, err := LoadAppliedVersions(context.Background(), p.conn, p.schemaName, p.migrationsTable)
+ if err != nil {
+ return err
+ }
+ p.appliedVersions = applied
+ return nil
+}
+
+func (src *nonSequentialSource) Prev(version uint) (prevVersion uint, err error) {
+ m, err := src.migration(version, -1)
+ if err != nil {
+ return 0, err
+ }
+ // If the migration is applied, return this version
+ if _, ok := src.dbDriver.appliedVersions[m.Number]; ok {
+ return uint(m.Number), nil
+ }
+ // Otherwise skip to the previous version
+ return src.Prev(uint(m.Number))
}
-func (src) verIdx(version uint, offset int) int {
- return int(version) - 1 + offset
+func (src *nonSequentialSource) Next(version uint) (nextVersion uint, err error) {
+ m, err := src.migration(version, +1)
+ if err != nil {
+ return 0, err
+ }
+ // If the migration is applied, return the next version
+ if _, ok := src.dbDriver.appliedVersions[m.Number]; ok {
+ return src.Next(uint(m.Number))
+ }
+ // Otherwise, return this version
+ return uint(m.Number), nil
}
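
The heart of the non-sequential migrator is the Version computation above: walk the migrations in metadata order and report the last one in an unbroken applied prefix; the first gap is the next migration to run. A self-contained sketch of that logic, with plain integers standing in for the metadata protos:

package main

import "fmt"

const nilVersion = -1

// currentVersion mirrors nonSequentialDbDriver.Version: ordered is the full
// migration list, applied maps version -> dirty for rows in schema_migrations.
func currentVersion(ordered []uint64, applied map[uint64]bool) (version int, dirty bool) {
	if len(applied) == 0 {
		return nilVersion, false
	}
	prev := nilVersion
	for _, num := range ordered {
		d, ok := applied[num]
		if !ok {
			// First gap: everything before it is the applied prefix.
			return prev, false
		}
		if d {
			return int(num), true
		}
		prev = int(num)
	}
	return prev, false
}

func main() {
	// Migrations 1 and 3 have been applied; 2 was merged later out of order.
	v, dirty := currentVersion([]uint64{1, 2, 3}, map[uint64]bool{1: false, 3: false})
	fmt.Println(v, dirty) // 1 false: migration 2 is the next to run
}
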
diff --git a/cli/daemon/sqldb/proxy.go b/cli/daemon/sqldb/proxy.go
index 63d7116f1a..3c0d957308 100644
--- a/cli/daemon/sqldb/proxy.go
+++ b/cli/daemon/sqldb/proxy.go
@@ -2,14 +2,19 @@ package sqldb
import (
"context"
+ "crypto/tls"
"fmt"
"io"
"net"
+ "strings"
"time"
- "encr.dev/pkg/pgproxy"
"github.com/jackc/pgproto3/v2"
"github.com/rs/zerolog/log"
+
+ "encr.dev/cli/daemon/namespace"
+ "encr.dev/pkg/fns"
+ "encr.dev/pkg/pgproxy"
)
// ServeProxy serves the database proxy using the given listener.
@@ -46,140 +51,270 @@ func (cm *ClusterManager) ServeProxy(ln net.Listener) error {
// database cluster and database.
// If waitForSetup is true, it will wait for initial setup to complete
// before proxying the connection.
-func (cm *ClusterManager) ProxyConn(frontend net.Conn, waitForSetup bool) error {
- defer frontend.Close()
- var proxy pgproxy.Proxy
-
- data, err := proxy.FrontendAuth(frontend, nil, true)
+func (cm *ClusterManager) ProxyConn(client net.Conn, waitForSetup bool) error {
+ defer fns.CloseIgnore(client)
+ cl, err := pgproxy.SetupClient(client, &pgproxy.ClientConfig{
+ TLS: nil,
+ WantPassword: true,
+ })
if err != nil {
return err
}
- if cancel, ok := data.Startup.(*pgproxy.CancelRequest); ok {
- cm.cancelRequest(frontend, cancel)
+ if cancel, ok := cl.Hello.(*pgproxy.CancelData); ok {
+ cm.cancelRequest(client, cancel)
return nil
}
+ startup := cl.Hello.(*pgproxy.StartupData)
- clusterID := data.Password
- cluster, ok := cm.Get(clusterID)
- if !ok {
- cm.log.Error().Str("cluster", clusterID).Msg("dbproxy: could not find cluster")
- writeMsg(frontend, &pgproto3.ErrorResponse{
- Severity: "FATAL",
- Code: "08006",
- Message: "database cluster not running",
- })
- return nil
- }
+ // If the username is "encore" we're connecting to a database cluster
+ // which may not be local
+ var cluster *Cluster
+ if startup.Username == "encore" {
+ password := startup.Password
+ found, ok := cm.LookupPassword(password)
+ if !ok {
+ cm.log.Error().Msg("dbproxy: could not find cluster")
+ _ = cl.Backend.Send(&pgproto3.ErrorResponse{
+ Severity: "FATAL",
+ Code: "08006",
+ Message: "database cluster not found or invalid connection string",
+ })
+ return nil
+ }
+ cluster = found
+ } else {
+ // The username is the app slug we want to connect to
+ app, err := cm.apps.FindLatestByPlatformOrLocalID(startup.Username)
+ if err != nil {
+ cm.log.Error().Err(err).Msg("dbproxy: could not find app")
+ _ = cl.Backend.Send(&pgproto3.ErrorResponse{
+ Severity: "FATAL",
+ Code: "08006",
+ Message: "unknown app ID",
+ })
+ return nil
+ }
- db, ok := cluster.GetDB(data.Database)
- if !ok {
- writeMsg(frontend, &pgproto3.ErrorResponse{
- Severity: "FATAL",
- Code: "08006",
- Message: "database not found",
+ ctx := context.Background()
+
+ clusterType, nsID, ok := strings.Cut(startup.Password, "-")
+
+ // Look up the namespace to use.
+ var ns *namespace.Namespace
+ if !ok {
+ ns, err = cm.ns.GetActive(ctx, app)
+ } else {
+ ns, err = cm.ns.GetByID(ctx, app, namespace.ID(nsID))
+ }
+ if err != nil {
+ cm.log.Error().Err(err).Msg("dbproxy: could not find infra namespace")
+ _ = cl.Backend.Send(&pgproto3.ErrorResponse{
+ Severity: "FATAL",
+ Code: "08006",
+ Message: "unknown active infra namespace",
+ })
+ return nil
+ }
+
+ // Resolve the cluster type.
+ var ct ClusterType
+ switch clusterType {
+ case "local":
+ ct = Run
+ case "test":
+ ct = Test
+ case "shadow":
+ ct = Shadow
+ default:
+ cm.log.Error().Str("password", startup.Password).Msg("dbproxy: invalid password for connection URI")
+ _ = cl.Backend.Send(&pgproto3.ErrorResponse{
+ Severity: "FATAL",
+ Code: "28P01", // 28P01 = invalid password
+ Message: "if connecting with an app slug as the username, the only accepted passwords are 'local' or 'test' to route to those instances on your local system",
+ })
+ return nil
+ }
+
+ // Create the cluster if it doesn't exist in memory yet.
+ // This can happen when the daemon is running but hasn't yet done anything
+ // with the app in question on this run.
+ cluster = cm.Create(context.Background(), &CreateParams{
+ ClusterID: GetClusterID(app, ct, ns),
+ Memfs: ct.Memfs(),
})
- return nil
+
+ // Ensure the cluster is started
+ _, err = cluster.Start(context.Background(), nil)
+ if err != nil {
+ cm.log.Error().Err(err).Msg("dbproxy: could not start cluster")
+ _ = cl.Backend.Send(&pgproto3.ErrorResponse{
+ Severity: "FATAL",
+ Code: "08006",
+ Message: "could not start database cluster",
+ })
+ return nil
+ }
}
- var ready <-chan struct{}
- if waitForSetup {
- ready = db.Ready()
- } else {
- s := make(chan struct{})
- close(s)
- ready = s
+ // If Encore knows about the database, check that it's ready.
+ // If the cluster doesn't know about the database, skip this check.
+ //
+ // That is safe because either:
+ // 1. The database exists and we can connect to it, or
+ // 2. The database does not exist, and the remote server will return a "database doesn't exist" error.
+ dbname := startup.Database
+ db, ok := cluster.GetDB(dbname)
+ if ok {
+ var ready <-chan struct{}
+ if waitForSetup {
+ ready = db.Ready()
+ } else {
+ s := make(chan struct{})
+ close(s)
+ ready = s
+ }
+
+ // Wait for up to 60s for the cluster and database to come online.
+ select {
+ case <-db.Ctx.Done():
+ _ = cl.Backend.Send(&pgproto3.ErrorResponse{
+ Severity: "FATAL",
+ Code: "08006",
+ Message: "db is shutting down",
+ })
+ return nil
+ case <-time.After(60 * time.Second):
+ cm.log.Error().Str("db", db.ApplicationCloudName()).Msg("dbproxy: timed out waiting for database to come online")
+ _ = cl.Backend.Send(&pgproto3.ErrorResponse{
+ Severity: "FATAL",
+ Code: "08006",
+ Message: "timed out waiting for db to complete setup",
+ })
+ return nil
+
+ case <-ready:
+ // Continue connecting to backend, below
+ }
}
- // Wait for up to 60s for the cluster and database to come online.
- select {
- case <-db.Ctx.Done():
- writeMsg(frontend, &pgproto3.ErrorResponse{
+ info, err := cluster.Info(context.Background())
+ if err != nil {
+ _ = cl.Backend.Send(&pgproto3.ErrorResponse{
Severity: "FATAL",
Code: "08006",
- Message: "db is shutting down",
+ Message: "cluster not running: " + err.Error(),
})
return nil
- case <-time.After(60 * time.Second):
- cm.log.Error().Str("db", data.Database).Msg("dbproxy: timed out waiting for database to come online")
- writeMsg(frontend, &pgproto3.ErrorResponse{
+ }
+
+ server, err := net.Dial("tcp", info.Config.Host)
+ if err != nil {
+ _ = cl.Backend.Send(&pgproto3.ErrorResponse{
Severity: "FATAL",
Code: "08006",
- Message: "timed out waiting for db to complete setup",
+ Message: "database not running: " + err.Error(),
})
return nil
-
- case <-ready:
- // Continue connecting to backend, below
}
+ defer fns.CloseIgnore(server)
- backend, err := net.Dial("tcp", cluster.HostPort)
+ // Send a modified startup message to the backend
+ admin, _ := info.Encore.First(RoleAdmin, RoleSuperuser)
+ startup.Username = admin.Username
+ startup.Password = admin.Password
+ if db == nil {
+ // We don't know about this database, we'll use the requested name
+ // in case it does actually exist within the cluster.
+ //
+ // If it doesn't the cluster will return an SQL error to the client.
+ startup.Database = dbname
+ } else {
+ startup.Database = db.ApplicationCloudName()
+ }
+ fe, err := pgproxy.SetupServer(server, &pgproxy.ServerConfig{
+ TLS: nil,
+ Startup: startup,
+ })
if err != nil {
- writeMsg(frontend, &pgproto3.ErrorResponse{
+ _ = cl.Backend.Send(&pgproto3.ErrorResponse{
Severity: "FATAL",
Code: "08006",
- Message: "database not running: " + err.Error(),
+ Message: "could not connect: " + err.Error(),
})
return nil
}
- defer backend.Close()
+ log.Trace().Msg("backend connection established, notifying client")
+
+ if err := pgproxy.AuthenticateClient(cl.Backend); err != nil {
+ return err
+ }
- data.Username = "encore"
- data.Password = clusterID
- beData, err := proxy.BackendAuth(backend, nil, data)
+ keyData, err := pgproxy.FinalizeInitialHandshake(cl.Backend, fe)
if err != nil {
- writeMsg(frontend, &pgproto3.ErrorResponse{
+ _ = cl.Backend.Send(&pgproto3.ErrorResponse{
Severity: "FATAL",
Code: "08006",
- Message: "could not connect: " + err.Error(),
+ Message: "could not establish connection: " + err.Error(),
})
return nil
}
+ log.Trace().Msg("connection handshake completed, proxying steady-state data")
// Store the key data so we know where to route cancellation requests.
- if key := beData.KeyData; key != nil {
+ if keyData != nil {
cm.mu.Lock()
- cm.backendKeyData[key.SecretKey] = cluster
+ cm.backendKeyData[keyData.SecretKey] = cluster
cm.mu.Unlock()
defer func() {
cm.mu.Lock()
- delete(cm.backendKeyData, key.SecretKey)
+ delete(cm.backendKeyData, keyData.SecretKey)
cm.mu.Unlock()
}()
}
- return proxy.Data(db.Ctx)
+ return pgproxy.CopySteadyState(cl.Backend, fe)
}
// PreauthProxyConn is a pre-authenticated proxy conn directly specifically to the given cluster.
-func (cm *ClusterManager) PreauthProxyConn(frontend net.Conn, clusterID string) error {
- defer frontend.Close()
- var proxy pgproxy.Proxy
-
- data, err := proxy.FrontendAuth(frontend, nil, false)
+func (cm *ClusterManager) PreauthProxyConn(client net.Conn, id ClusterID) error {
+ defer fns.CloseIgnore(client)
+ cl, err := pgproxy.SetupClient(client, &pgproxy.ClientConfig{
+ TLS: &tls.Config{MinVersion: tls.VersionTLS12},
+ })
if err != nil {
+ log.Error().Err(err).Msg("failed to setup client")
return err
}
- if cancel, ok := data.Startup.(*pgproxy.CancelRequest); ok {
- cm.cancelRequest(frontend, cancel)
+ if cancel, ok := cl.Hello.(*pgproxy.CancelData); ok {
+ cm.cancelRequest(client, cancel)
return nil
}
+ startup := cl.Hello.(*pgproxy.StartupData)
- cluster, ok := cm.Get(clusterID)
+ cluster, ok := cm.Get(id)
if !ok {
- cm.log.Error().Str("cluster", clusterID).Msg("dbproxy: could not find cluster")
- writeMsg(frontend, &pgproto3.ErrorResponse{
+ cm.log.Error().Interface("cluster", id).Msg("dbproxy: could not find cluster")
+ _ = cl.Backend.Send(&pgproto3.ErrorResponse{
Severity: "FATAL",
Code: "08006",
Message: "database cluster not running",
})
return nil
}
-
- db, ok := cluster.GetDB(data.Database)
+ if cluster.IsExternalDB(startup.Database) {
+ cm.log.Error().Str("db", startup.Database).Msg("dbproxy: cannot proxy external database")
+ _ = cl.Backend.Send(&pgproto3.ErrorResponse{
+ Severity: "FATAL",
+ Code: "08006",
+ Message: "proxy to external databases is disabled",
+ })
+ return nil
+ }
+ db, ok := cluster.GetDB(startup.Database)
if !ok {
- writeMsg(frontend, &pgproto3.ErrorResponse{
+ _ = cl.Backend.Send(&pgproto3.ErrorResponse{
Severity: "FATAL",
Code: "08006",
Message: "database not found",
@@ -190,15 +325,15 @@ func (cm *ClusterManager) PreauthProxyConn(frontend net.Conn, clusterID string)
// Wait for up to 60s for the cluster to come online.
select {
case <-db.Ctx.Done():
- writeMsg(frontend, &pgproto3.ErrorResponse{
+ _ = cl.Backend.Send(&pgproto3.ErrorResponse{
Severity: "FATAL",
Code: "08006",
Message: "db is shutting down",
})
return nil
case <-time.After(60 * time.Second):
- cm.log.Error().Str("db", data.Database).Msg("dbproxy: timed out waiting for database to come online")
- writeMsg(frontend, &pgproto3.ErrorResponse{
+ cm.log.Error().Str("db", startup.Database).Msg("dbproxy: timed out waiting for database to come online")
+ _ = cl.Backend.Send(&pgproto3.ErrorResponse{
Severity: "FATAL",
Code: "08006",
Message: "timed out waiting for db to complete setup",
@@ -209,22 +344,37 @@ func (cm *ClusterManager) PreauthProxyConn(frontend net.Conn, clusterID string)
// Continue connecting to backend, below
}
- backend, err := net.Dial("tcp", cluster.HostPort)
+ info, err := cluster.Info(context.Background())
if err != nil {
- writeMsg(frontend, &pgproto3.ErrorResponse{
+ _ = cl.Backend.Send(&pgproto3.ErrorResponse{
+ Severity: "FATAL",
+ Code: "08006",
+ Message: "cluster not running: " + err.Error(),
+ })
+ return nil
+ }
+
+ server, err := net.Dial("tcp", info.Config.Host)
+ if err != nil {
+ _ = cl.Backend.Send(&pgproto3.ErrorResponse{
Severity: "FATAL",
Code: "08006",
Message: "database not running: " + err.Error(),
})
return nil
}
- defer backend.Close()
+ defer fns.CloseIgnore(server)
- data.Username = "encore"
- data.Password = clusterID
- beData, err := proxy.BackendAuth(backend, nil, data)
+ admin, _ := info.Encore.First(RoleAdmin, RoleSuperuser)
+ startup.Username = admin.Username
+ startup.Password = admin.Password
+ startup.Database = db.ApplicationCloudName()
+ fe, err := pgproxy.SetupServer(server, &pgproxy.ServerConfig{
+ TLS: nil,
+ Startup: startup,
+ })
if err != nil {
- writeMsg(frontend, &pgproto3.ErrorResponse{
+ _ = cl.Backend.Send(&pgproto3.ErrorResponse{
Severity: "FATAL",
Code: "08006",
Message: "could not connect: " + err.Error(),
@@ -232,44 +382,77 @@ func (cm *ClusterManager) PreauthProxyConn(frontend net.Conn, clusterID string)
return nil
}
+ if err := pgproxy.AuthenticateClient(cl.Backend); err != nil {
+ return err
+ }
+
+ keyData, err := pgproxy.FinalizeInitialHandshake(cl.Backend, fe)
+ if err != nil {
+ _ = cl.Backend.Send(&pgproto3.ErrorResponse{
+ Severity: "FATAL",
+ Code: "08006",
+ Message: "could not establish connection: " + err.Error(),
+ })
+ return nil
+ }
+
// Store the key data so we know where to route cancellation requests.
- if key := beData.KeyData; key != nil {
+ if keyData != nil {
cm.mu.Lock()
- cm.backendKeyData[key.SecretKey] = cluster
+ cm.backendKeyData[keyData.SecretKey] = cluster
cm.mu.Unlock()
defer func() {
cm.mu.Lock()
- delete(cm.backendKeyData, key.SecretKey)
+ delete(cm.backendKeyData, keyData.SecretKey)
cm.mu.Unlock()
}()
}
- return proxy.Data(db.Ctx)
+ log.Trace().Msg("successfully completed handshake, copying data back and forth")
+ return pgproxy.CopySteadyState(cl.Backend, fe)
}
// cancelRequest handles a cancel request.
-func (cm *ClusterManager) cancelRequest(frontend io.ReadWriter, req *pgproxy.CancelRequest) {
+func (cm *ClusterManager) cancelRequest(client io.Writer, req *pgproxy.CancelData) {
cm.mu.Lock()
- cluster, ok := cm.backendKeyData[req.SecretKey]
+ cluster, ok := cm.backendKeyData[req.Raw.SecretKey]
cm.mu.Unlock()
if !ok {
return
}
- backend, err := net.Dial("tcp", cluster.HostPort)
+ info, err := cluster.Info(context.Background())
if err != nil {
- writeMsg(frontend, &pgproto3.ErrorResponse{
+ msg := &pgproto3.ErrorResponse{
Severity: "FATAL",
Code: "08006",
Message: "database cluster not running",
- })
+ }
+ encode, _ := msg.Encode(nil)
+ _, _ = client.Write(encode)
return
}
- writeMsg(backend, req)
- backend.Close()
+
+ backend, err := net.Dial("tcp", info.Config.Host)
+ if err != nil {
+ msg := &pgproto3.ErrorResponse{
+ Severity: "FATAL",
+ Code: "08006",
+ Message: "database cluster not running",
+ }
+ encode, _ := msg.Encode(nil)
+ _, _ = client.Write(encode)
+ return
+ }
+ defer fns.CloseIgnore(backend)
+ _ = pgproxy.SendCancelRequest(backend, req.Raw)
}
func writeMsg(w io.Writer, msg pgproto3.Message) error {
- _, err := w.Write(msg.Encode(nil))
+ encode, err := msg.Encode(nil)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(encode)
return err
}
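
Both proxy paths above register the backend's secret key so that a later CancelRequest, which arrives on a brand-new connection, can be routed to the cluster that owns the in-flight query. A minimal sketch of that bookkeeping, with a simplified stand-in Cluster type:

package main

import (
	"fmt"
	"sync"
)

type cluster struct{ ID string }

type keyRouter struct {
	mu   sync.Mutex
	keys map[uint32]*cluster // mirrors cm.backendKeyData
}

// register records which cluster a secret key belongs to and returns the
// cleanup function that the proxy defers until the connection closes.
func (r *keyRouter) register(secretKey uint32, c *cluster) (unregister func()) {
	r.mu.Lock()
	r.keys[secretKey] = c
	r.mu.Unlock()
	return func() {
		r.mu.Lock()
		delete(r.keys, secretKey)
		r.mu.Unlock()
	}
}

// lookup resolves a cancel request's secret key, as cancelRequest does.
func (r *keyRouter) lookup(secretKey uint32) (*cluster, bool) {
	r.mu.Lock()
	defer r.mu.Unlock()
	c, ok := r.keys[secretKey]
	return c, ok
}

func main() {
	r := &keyRouter{keys: make(map[uint32]*cluster)}
	unregister := r.register(42, &cluster{ID: "my-app"})
	defer unregister()
	if c, ok := r.lookup(42); ok {
		fmt.Println("forwarding cancel request to", c.ID)
	}
}
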
diff --git a/cli/daemon/sqldb/remote.go b/cli/daemon/sqldb/remote.go
index f416946766..7f6181b692 100644
--- a/cli/daemon/sqldb/remote.go
+++ b/cli/daemon/sqldb/remote.go
@@ -4,21 +4,22 @@ import (
"context"
"crypto/rand"
"encoding/base64"
+ "errors"
"fmt"
"net"
"time"
- "encr.dev/pkg/pgproxy"
- "encr.dev/proto/encore/server/remote"
- "github.com/jackc/pgproto3/v2"
+ "github.com/gorilla/websocket"
"github.com/rs/zerolog/log"
- "google.golang.org/grpc/metadata"
+
+ "encr.dev/cli/internal/platform"
+ "encr.dev/pkg/pgproxy"
)
// OneshotProxy listens on a random port for a single connection, and proxies that connection to a remote db.
// It reports the one-time password and port to use.
// Once a connection has been established, it stops listening.
-func OneshotProxy(rc remote.RemoteClient, appSlug, envSlug string) (port int, passwd string, err error) {
+func OneshotProxy(appSlug, envSlug string, role RoleType) (port int, passwd string, err error) {
ln, err := net.Listen("tcp", "localhost:0")
if err != nil {
return 0, "", err
@@ -29,147 +30,93 @@ func OneshotProxy(rc remote.RemoteClient, appSlug, envSlug string) (port int, pa
}
passwd = base64.RawURLEncoding.EncodeToString(passwdBytes[:])
- go oneshotServer(context.Background(), rc, ln, passwd, appSlug, envSlug)
+ go oneshotServer(context.Background(), ln, passwd, appSlug, envSlug, role)
return ln.Addr().(*net.TCPAddr).Port, passwd, nil
}
-func oneshotServer(ctx context.Context, rc remote.RemoteClient, ln net.Listener, passwd, appSlug, envSlug string) error {
- defer ln.Close()
-
- gotMainConn := make(chan struct{}) // closed when accepted
- go func() {
- // Wait for the first conn at most 60s before giving up
- select {
- case <-gotMainConn:
- case <-time.After(60 * time.Second):
- ln.Close()
- case <-ctx.Done():
- ln.Close()
- }
- }()
-
- var tempDelay time.Duration // how long to sleep on accept failure
- first := true
- for {
- frontend, e := ln.Accept()
- if e != nil {
- if ne, ok := e.(net.Error); ok && ne.Temporary() {
- if tempDelay == 0 {
- tempDelay = 5 * time.Millisecond
- } else {
- tempDelay *= 2
- }
- if max := 1 * time.Second; tempDelay > max {
- tempDelay = max
+func oneshotServer(ctx context.Context, ln net.Listener, passwd, appSlug, envSlug string, role RoleType) error {
+ proxy := &pgproxy.SingleBackendProxy{
+ RequirePassword: passwd != "",
+ FrontendTLS: nil,
+ DialBackend: func(ctx context.Context, startup *pgproxy.StartupData) (pgproxy.LogicalConn, error) {
+ if startup.Password != passwd {
+ return nil, fmt.Errorf("bad password")
+ }
+ startupData, err := startup.Raw.Encode(nil)
+ if err != nil {
+ return nil, err
+ }
+ ws, err := platform.DBConnect(ctx, appSlug, envSlug, startup.Database, role.String(), startupData)
+ if err != nil {
+ var e platform.Error
+ if errors.As(err, &e) && e.HTTPCode == 404 {
+ return nil, pgproxy.DatabaseNotFoundError{Database: startup.Database}
}
- log.Printf("sqldb: accept error: %v; retrying in %v", e, tempDelay)
- time.Sleep(tempDelay)
- continue
+ return nil, err
}
- return fmt.Errorf("sqldb: could not accept: %v", e)
- }
-
- tempDelay = 0
-
- if first {
- // If this is the first connection, treat it as the main connection
- // and close the listener when it exits.
- first = false
- close(gotMainConn)
- go ProxyRemoteConn(ctx, rc, frontend, passwd, appSlug, envSlug)
- } else {
- go ProxyRemoteConn(ctx, rc, frontend, passwd, appSlug, envSlug)
- }
- }
-}
-
-// ProxyRemoteConn proxies a frontend to the remote database pointed at by appSlug and envSlug.
-// The passwd is what we expect the frontend to provide to authenticate the connection.
-func ProxyRemoteConn(ctx context.Context, rc remote.RemoteClient, frontend net.Conn, passwd, appSlug, envSlug string) {
- defer frontend.Close()
- var proxy pgproxy.Proxy
- data, err := proxy.FrontendAuth(frontend, nil, passwd != "")
- if err != nil {
- log.Printf("sqldb: proxy handshake error: %v", err)
- return
- }
-
- // If we are setting up a real connection (as opposed to issuing a cancel request, which
- // does not use password based auth), make sure the password matches.
- if _, ok := data.Startup.(*pgproxy.StartupMessage); ok && data.Password != passwd {
- writeMsg(frontend, &pgproto3.ErrorResponse{
- Severity: "FATAL",
- Code: "08006",
- Message: "invalid password",
- })
- return
+ conn := &WebsocketLogicalConn{Conn: ws}
+ return conn, nil
+ },
}
- ctx = metadata.AppendToOutgoingContext(ctx, "appSlug", appSlug, "envSlug", envSlug)
- ctx, cancel := context.WithCancel(ctx)
- defer cancel()
- stream, err := rc.DBConnect(ctx)
- if err != nil {
- log.Printf("sqldb: proxy: could not connect to remote db: %v", err)
- return
- }
-
- sw := dbConnectWriter{stream: stream}
- sr := dbConnectReader{stream: stream}
- backend := &struct {
- dbConnectWriter
- dbConnectReader
- }{sw, sr}
-
- data.Username = "encore"
- data.Password = ""
- if _, err := proxy.BackendAuth(backend, nil, data); err != nil {
- log.Printf("sqldb: proxy: could not connect to remote db: %v", err)
- writeMsg(frontend, &pgproto3.ErrorResponse{
- Severity: "FATAL",
- Code: "08006",
- Message: "could not connect to remote db: " + err.Error(),
- })
- return
- }
-
- proxy.Data(ctx)
+ return proxy.Serve(ctx, ln)
}
-type dbConnectWriter struct {
- stream remote.Remote_DBConnectClient
+type WebsocketLogicalConn struct {
+ *websocket.Conn
+ buf []byte
}
-func (w *dbConnectWriter) Write(p []byte) (int, error) {
- err := w.stream.Send(&remote.Data{Data: p})
+var _ pgproxy.LogicalConn = (*WebsocketLogicalConn)(nil)
+
+func (c *WebsocketLogicalConn) Write(p []byte) (int, error) {
+ err := c.Conn.WriteMessage(websocket.BinaryMessage, p)
if err != nil {
return 0, err
}
return len(p), nil
}
-type dbConnectReader struct {
- stream remote.Remote_DBConnectClient
- buf []byte
-}
-
-func (r *dbConnectReader) Read(p []byte) (int, error) {
+func (c *WebsocketLogicalConn) Read(p []byte) (int, error) {
// If we have remaining data from the previous message we received
// from the stream, simply return that.
- if len(r.buf) > 0 {
- n := copy(p, r.buf)
- r.buf = r.buf[n:]
+ if len(c.buf) > 0 {
+ n := copy(p, c.buf)
+ c.buf = c.buf[n:]
return n, nil
}
// No more buffered data, wait for a new message from the stream.
- msg, err := r.stream.Recv()
+ for {
+ typ, data, err := c.Conn.ReadMessage()
+ if err != nil {
+ return 0, err
+ } else if typ != websocket.BinaryMessage {
+ continue
+ }
+
+ // Read as much data as possible directly to the waiting caller.
+ // Anything remaining beyond that gets buffered until the next Read call.
+ n := copy(p, data)
+ c.buf = data[n:]
+ return n, nil
+ }
+}
+
+func (c *WebsocketLogicalConn) Cancel(req *pgproxy.CancelData) error {
+ enc := base64.StdEncoding
+ data, err := req.Raw.Encode(nil)
if err != nil {
- return 0, err
+ return err
}
- // Read as much data as possible directly to the waiting caller.
- // Anything remaining beyond that gets buffered until the next Read call.
- n := copy(p, msg.Data)
- r.buf = msg.Data[n:]
- return n, nil
+ encoded := make([]byte, enc.EncodedLen(len(data)))
+ enc.Encode(encoded, data)
+ log.Info().Msgf("sending cancel request %x", data)
+ return c.Conn.WriteMessage(websocket.TextMessage, encoded)
+}
+
+func (c *WebsocketLogicalConn) SetDeadline(t time.Time) error {
+ _ = c.Conn.SetReadDeadline(t)
+ err := c.Conn.SetWriteDeadline(t)
+ return err
}
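
WebsocketLogicalConn.Read above adapts a message-oriented transport to io.Reader semantics: it copies as much of each frame as fits into the caller's slice and buffers the remainder for the next call. The same buffering in isolation, with a channel of byte slices standing in for the websocket:

package main

import (
	"fmt"
	"io"
)

type msgReader struct {
	msgs <-chan []byte
	buf  []byte // leftover bytes from the previous message
}

func (r *msgReader) Read(p []byte) (int, error) {
	// Serve leftover bytes from the previous message first.
	if len(r.buf) > 0 {
		n := copy(p, r.buf)
		r.buf = r.buf[n:]
		return n, nil
	}
	msg, ok := <-r.msgs
	if !ok {
		return 0, io.EOF
	}
	// Copy what fits; stash the rest for the next Read call.
	n := copy(p, msg)
	r.buf = msg[n:]
	return n, nil
}

func main() {
	ch := make(chan []byte, 1)
	ch <- []byte("hello world")
	close(ch)
	r := &msgReader{msgs: ch}
	p := make([]byte, 5)
	for {
		n, err := r.Read(p)
		if err == io.EOF {
			break
		}
		fmt.Printf("%q\n", p[:n]) // "hello", " worl", "d"
	}
}
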
diff --git a/cli/daemon/sqldb/utils.go b/cli/daemon/sqldb/utils.go
new file mode 100644
index 0000000000..331f63de87
--- /dev/null
+++ b/cli/daemon/sqldb/utils.go
@@ -0,0 +1,38 @@
+package sqldb
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/jackc/pgx/v5"
+
+ meta "encr.dev/proto/encore/parser/meta/v1"
+)
+
+// WaitForConn waits for a successful connection to uri to be established.
+func WaitForConn(ctx context.Context, uri string) error {
+ var err error
+ for i := 0; i < 40; i++ {
+ var conn *pgx.Conn
+ conn, err = pgx.Connect(ctx, uri)
+ if err == nil {
+ err = conn.Ping(ctx)
+ _ = conn.Close(ctx)
+ if err == nil {
+ return nil
+ }
+ } else if ctx.Err() != nil {
+ // We'll never succeed once the context has been canceled.
+ // Give up straight away.
+ break
+ }
+ time.Sleep(250 * time.Millisecond)
+ }
+ return fmt.Errorf("database did not come up: %v", err)
+}
+
+// IsUsed reports whether the application uses SQL databases at all.
+func IsUsed(md *meta.Data) bool {
+ return len(md.SqlDatabases) > 0
+}
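
WaitForConn above is a bounded retry loop: up to 40 attempts spaced 250ms apart (roughly ten seconds total), giving up immediately once the context is canceled since no later attempt can succeed. The same shape generalized over an arbitrary probe function, as a sketch rather than part of the package:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func waitFor(ctx context.Context, attempts int, delay time.Duration, probe func(context.Context) error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = probe(ctx); err == nil {
			return nil
		}
		if ctx.Err() != nil {
			// The context is canceled; further retries can never succeed.
			break
		}
		time.Sleep(delay)
	}
	return fmt.Errorf("did not come up: %v", err)
}

func main() {
	calls := 0
	err := waitFor(context.Background(), 40, time.Millisecond, func(context.Context) error {
		calls++
		if calls < 3 {
			return errors.New("not ready")
		}
		return nil
	})
	fmt.Println(err, "after", calls, "attempts") // <nil> after 3 attempts
}
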
diff --git a/cli/daemon/telemetry.go b/cli/daemon/telemetry.go
new file mode 100644
index 0000000000..8b18134a0c
--- /dev/null
+++ b/cli/daemon/telemetry.go
@@ -0,0 +1,15 @@
+package daemon
+
+import (
+ "context"
+
+ "google.golang.org/protobuf/types/known/emptypb"
+
+ "encr.dev/cli/internal/telemetry"
+ daemonpb "encr.dev/proto/encore/daemon"
+)
+
+func (s *Server) Telemetry(ctx context.Context, req *daemonpb.TelemetryConfig) (*emptypb.Empty, error) {
+ telemetry.UpdateConfig(req.AnonId, req.Enabled, req.Debug)
+ return new(emptypb.Empty), nil
+}
diff --git a/cli/daemon/test.go b/cli/daemon/test.go
new file mode 100644
index 0000000000..628cfa6abd
--- /dev/null
+++ b/cli/daemon/test.go
@@ -0,0 +1,152 @@
+package daemon
+
+import (
+ "context"
+ "fmt"
+ "runtime/debug"
+
+ "github.com/cockroachdb/errors"
+ "github.com/rs/zerolog/log"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
+ "encr.dev/cli/daemon/run"
+ "encr.dev/pkg/builder"
+ "encr.dev/pkg/fns"
+ daemonpb "encr.dev/proto/encore/daemon"
+)
+
+// Test runs tests.
+func (s *Server) Test(req *daemonpb.TestRequest, stream daemonpb.Daemon_TestServer) error {
+ ctx := stream.Context()
+ slog := &streamLog{stream: stream, buffered: false}
+ stderr := slog.Stderr(false)
+ sendErr := func(err error) {
+ stderr.Write([]byte(err.Error() + "\n"))
+ streamExit(stream, 1)
+ }
+
+ ctx, tracer, err := s.beginTracing(ctx, req.AppRoot, req.WorkingDir, req.TraceFile)
+ if err != nil {
+ sendErr(err)
+ return nil
+ }
+ defer tracer.Close()
+
+ app, err := s.apps.Track(req.AppRoot)
+ if err != nil {
+ sendErr(err)
+ return nil
+ }
+
+ ns, err := s.namespaceOrActive(ctx, app, nil /* tests don't support different namespaces */)
+ if err != nil {
+ sendErr(err)
+ return nil
+ }
+
+ secrets := s.sm.Load(app)
+
+ testCtx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ testResults := make(chan error, 1)
+ go func() {
+ defer func() {
+ if recovered := recover(); recovered != nil {
+ var err error
+ switch recovered := recovered.(type) {
+ case error:
+ err = recovered
+ default:
+ err = fmt.Errorf("%+v", recovered)
+ }
+ stack := debug.Stack()
+ log.Err(err).Msgf("panic during test run:\n%s", stack)
+ testResults <- fmt.Errorf("panic occured within Encore during test run: %v\n%s\n", recovered, stack)
+ }
+ }()
+
+ testEnv := append([]string{"ENCORE_RUNTIME_LOG=error"}, req.Environ...)
+
+ tp := run.TestParams{
+ TestSpecParams: &run.TestSpecParams{
+ App: app,
+ NS: ns,
+ WorkingDir: req.WorkingDir,
+ Environ: testEnv,
+ Args: req.Args,
+ Secrets: secrets,
+ CodegenDebug: req.CodegenDebug,
+ },
+ Stdout: slog.Stdout(false),
+ Stderr: slog.Stderr(false),
+ }
+ testResults <- s.mgr.Test(testCtx, tp)
+ }()
+
+ if err := <-testResults; err != nil {
+ sendErr(err)
+ } else {
+ streamExit(stream, 0)
+ }
+ return nil
+}
+
+// TestSpec returns the specification for how to run the app's tests, without executing them.
+func (s *Server) TestSpec(ctx context.Context, req *daemonpb.TestSpecRequest) (resp *daemonpb.TestSpecResponse, err error) {
+ ctx, tracer, err := s.beginTracing(ctx, req.AppRoot, req.WorkingDir, nil)
+ if err != nil {
+ return nil, errors.Wrap(err, "unable to begin tracing")
+ }
+ defer fns.CloseIgnore(tracer)
+
+ app, err := s.apps.Track(req.AppRoot)
+ if err != nil {
+ return nil, errors.Wrap(err, "unable to track app")
+ }
+
+ ns, err := s.namespaceOrActive(ctx, app, nil /* tests don't support different namespaces */)
+ if err != nil {
+ return nil, errors.Wrap(err, "unable to get namespace")
+ }
+
+ secrets := s.sm.Load(app)
+
+ defer func() {
+ if recovered := recover(); recovered != nil {
+ var panicErr error
+ switch recovered := recovered.(type) {
+ case error:
+ panicErr = recovered
+ default:
+ panicErr = fmt.Errorf("%+v", recovered)
+ }
+ stack := debug.Stack()
+ log.Err(panicErr).Msgf("panic during test run:\n%s", stack)
+ err = fmt.Errorf("panic during test run: %v", panicErr)
+ }
+ }()
+
+ testEnv := append([]string{"ENCORE_RUNTIME_LOG=error"}, req.Environ...)
+
+ spec, err := s.mgr.TestSpec(ctx, run.TestSpecParams{
+ App: app,
+ NS: ns,
+ WorkingDir: req.WorkingDir,
+ Environ: testEnv,
+ Args: req.Args,
+ Secrets: secrets,
+ })
+ if errors.Is(err, builder.ErrNoTests) {
+ return nil, status.Error(codes.NotFound, "no tests defined")
+ } else if err != nil {
+ return nil, err
+ }
+
+ return &daemonpb.TestSpecResponse{
+ Command: spec.Command,
+ Args: spec.Args,
+ Environ: spec.Environ,
+ }, nil
+}
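
The Test handler above runs the build-and-test pipeline in a goroutine and converts any panic into an error on the results channel, so a crash inside Encore surfaces as an ordinary stream error instead of taking down the daemon. The pattern in isolation:

package main

import (
	"fmt"
	"runtime/debug"
)

// runGuarded executes work in a goroutine; a panic is recovered and delivered
// on the same channel as a normal error result.
func runGuarded(work func() error) error {
	results := make(chan error, 1)
	go func() {
		defer func() {
			if recovered := recover(); recovered != nil {
				results <- fmt.Errorf("panic during run: %v\n%s", recovered, debug.Stack())
			}
		}()
		results <- work()
	}()
	return <-results
}

func main() {
	err := runGuarded(func() error { panic("boom") })
	fmt.Println(err != nil) // true: the panic became an error
}
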
diff --git a/cli/daemon/tracing.go b/cli/daemon/tracing.go
new file mode 100644
index 0000000000..6fd1334a3c
--- /dev/null
+++ b/cli/daemon/tracing.go
@@ -0,0 +1,22 @@
+package daemon
+
+import (
+ "context"
+ "path/filepath"
+
+ "encr.dev/internal/etrace"
+)
+
+func (s *Server) beginTracing(ctx context.Context, appRoot, workingDir string, traceFile *string) (context.Context, *etrace.Tracer, error) {
+ if traceFile == nil {
+ return ctx, nil, nil
+ }
+
+ var dst string
+ if filepath.IsAbs(*traceFile) {
+ dst = *traceFile
+ } else {
+ dst = filepath.Join(appRoot, workingDir, *traceFile)
+ }
+ return etrace.WithFileTracer(ctx, dst)
+}
diff --git a/cli/daemon/userfacing.go b/cli/daemon/userfacing.go
new file mode 100644
index 0000000000..35ca67d6b3
--- /dev/null
+++ b/cli/daemon/userfacing.go
@@ -0,0 +1,78 @@
+package daemon
+
+import (
+ "context"
+ "runtime"
+
+ "github.com/cockroachdb/errors"
+
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/internal/version"
+ "encr.dev/pkg/builder"
+ "encr.dev/pkg/builder/builderimpl"
+ "encr.dev/pkg/fns"
+ "encr.dev/pkg/vcs"
+ daemonpb "encr.dev/proto/encore/daemon"
+)
+
+// GenWrappers generates Encore wrappers.
+func (s *Server) GenWrappers(ctx context.Context, req *daemonpb.GenWrappersRequest) (*daemonpb.GenWrappersResponse, error) {
+ app, err := s.apps.Track(req.AppRoot)
+ if err != nil {
+ return nil, errors.Wrap(err, "resolve app")
+ }
+ if err := s.genUserFacing(ctx, app); err != nil {
+ return nil, err
+ }
+ return &daemonpb.GenWrappersResponse{}, nil
+}
+
+// genUserFacing generates user-facing wrappers.
+func (s *Server) genUserFacing(ctx context.Context, app *apps.Instance) error {
+ expSet, err := app.Experiments(nil)
+ if err != nil {
+ return errors.Wrap(err, "resolve experiments")
+ }
+
+ vcsRevision := vcs.GetRevision(app.Root())
+ buildInfo := builder.BuildInfo{
+ BuildTags: builder.LocalBuildTags,
+ CgoEnabled: true,
+ StaticLink: false,
+ DebugMode: builder.DebugModeDisabled,
+ Environ: nil,
+ GOOS: runtime.GOOS,
+ GOARCH: runtime.GOARCH,
+ KeepOutput: false,
+ Revision: vcsRevision.Revision,
+ UncommittedChanges: vcsRevision.Uncommitted,
+
+ // Use the local JS runtime if this is a development build.
+ UseLocalJSRuntime: version.Channel == version.DevBuild,
+ }
+
+ bld := builderimpl.Resolve(app.Lang(), expSet)
+ defer fns.CloseIgnore(bld)
+
+ parse, err := bld.Parse(ctx, builder.ParseParams{
+ Build: buildInfo,
+ App: app,
+ Experiments: expSet,
+ WorkingDir: ".",
+ ParseTests: false,
+ })
+ if err != nil {
+ return errors.Wrap(err, "parse app")
+ }
+
+ if err := app.CacheMetadata(parse.Meta); err != nil {
+ return errors.Wrap(err, "cache metadata")
+ }
+
+ err = bld.GenUserFacing(ctx, builder.GenUserFacingParams{
+ Build: buildInfo,
+ App: app,
+ Parse: parse,
+ })
+ return errors.Wrap(err, "generate wrappers")
+}
diff --git a/cli/daemon/watch.go b/cli/daemon/watch.go
new file mode 100644
index 0000000000..6f644f5da8
--- /dev/null
+++ b/cli/daemon/watch.go
@@ -0,0 +1,143 @@
+package daemon
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "github.com/bep/debounce"
+ "github.com/cockroachdb/errors"
+ "github.com/rs/zerolog/log"
+
+ "encr.dev/cli/daemon/apps"
+ "encr.dev/cli/daemon/run"
+ "encr.dev/pkg/watcher"
+ "encr.dev/pkg/xos"
+)
+
+func (s *Server) watchApps() {
+ if os.Getenv("ENCORE_DAEMON_WATCH") == "0" {
+ return
+ }
+ s.apps.RegisterAppListener(func(i *apps.Instance) {
+ s.regenerateUserCode(context.Background(), i)
+ if err := s.updateGitIgnore(i); err != nil {
+ log.Error().Err(err).Msg("unable to update app gitignore")
+ }
+ })
+ if err := s.apps.WatchAll(s.onWatchEvent); err != nil {
+ log.Error().Err(err).Msg("unable to set up app watchers")
+ } else {
+ log.Info().Msg("successfully set up file watchers")
+ }
+}
+
+func (s *Server) onWatchEvent(i *apps.Instance, events []watcher.Event) {
+ if run.IgnoreEvents(events) {
+ return
+ }
+
+ // Use debounce to avoid calling this on every single change.
+ s.appDebounceMu.Lock()
+ deb := s.appDebouncers[i]
+ if deb == nil {
+ deb = &regenerateCodeDebouncer{
+ debounce: debounce.New(100 * time.Millisecond),
+ doRun: func() { s.regenerateUserCode(context.Background(), i) },
+ }
+ s.appDebouncers[i] = deb
+ }
+ s.appDebounceMu.Unlock()
+
+ deb.ChangeEvent()
+}
+
+type regenerateCodeDebouncer struct {
+ debounce func(func())
+ mu sync.Mutex
+ running bool
+ runAfter bool
+
+ doRun func()
+}
+
+func (g *regenerateCodeDebouncer) ChangeEvent() {
+ g.debounce(func() {
+ g.mu.Lock()
+
+ // If we're already running, mark to run again when complete.
+ if g.running {
+ g.runAfter = true
+ g.mu.Unlock()
+ return
+ }
+
+ // Otherwise, keep re-running for as long as change events come in.
+ g.running = true
+ g.runAfter = true // to start us off, at least once.
+ for g.runAfter {
+ g.runAfter = false // reset for next time
+ g.mu.Unlock()
+ g.doRun() // actually run
+ g.mu.Lock()
+ }
+
+ // If we get here, g.runAfter is false: nobody requested another run,
+ // so we can stop.
+ g.running = false
+
+ g.mu.Unlock()
+ })
+}
+
+func (s *Server) regenerateUserCode(ctx context.Context, app *apps.Instance) {
+ if err := s.genUserFacing(ctx, app); err != nil {
+ log.Error().Err(err).Str("app", app.PlatformOrLocalID()).Msg("failed to regenerate app")
+ } else {
+ log.Info().Str("app", app.PlatformOrLocalID()).Msg("successfully generated user code")
+ }
+}
+
+// updateGitIgnore updates the gitignore file to include Encore directives, if needed.
+func (s *Server) updateGitIgnore(i *apps.Instance) error {
+ dst := filepath.Join(i.Root(), ".gitignore")
+ data, err := os.ReadFile(dst)
+ if err != nil && !errors.Is(err, fs.ErrNotExist) {
+ return errors.Wrap(err, "read .gitignore")
+ }
+
+ // Find which directives are already present
+ directives := []string{"encore.gen.go", "encore.gen.cue", "/.encore", "/encore.gen"}
+ found := make([]bool, len(directives))
+ scanner := bufio.NewScanner(bytes.NewReader(data))
+ for scanner.Scan() {
+ ln := scanner.Text()
+ for i, directive := range directives {
+ if ln == directive {
+ found[i] = true
+ }
+ }
+ }
+
+ // Add the ones that are missing
+ updated := false
+ for i, directive := range directives {
+ if !found[i] {
+ if len(data) > 0 && !bytes.HasSuffix(data, []byte("\n")) {
+ data = append(data, '\n')
+ }
+ data = append(data, directive+"\n"...)
+ updated = true
+ }
+ }
+
+ // Write the file back if there were any changes
+ if updated {
+ return xos.WriteFile(dst, data, 0644)
+ }
+ return nil
+}
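
regenerateCodeDebouncer above coalesces bursts of file events: if a regeneration is already in flight when another event lands, it only sets runAfter, and the in-flight loop picks that up once the current run finishes, so at most one regeneration runs at a time. The same loop extracted into a self-contained sketch:

package main

import (
	"fmt"
	"sync"
)

type coalescer struct {
	mu       sync.Mutex
	running  bool
	runAfter bool
	doRun    func()
}

func (c *coalescer) Trigger() {
	c.mu.Lock()
	if c.running {
		// An in-flight run will loop once more when it finishes.
		c.runAfter = true
		c.mu.Unlock()
		return
	}
	c.running = true
	c.runAfter = true // run at least once
	for c.runAfter {
		c.runAfter = false
		c.mu.Unlock()
		c.doRun() // never holds the lock while running
		c.mu.Lock()
	}
	c.running = false
	c.mu.Unlock()
}

func main() {
	runs := 0
	c := &coalescer{doRun: func() { runs++ }}
	c.Trigger()
	c.Trigger()
	fmt.Println(runs) // 2: sequential triggers each ran; concurrent ones coalesce
}
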
diff --git a/cli/internal/browser/browser.go b/cli/internal/browser/browser.go
index 576e936e67..2091a690c8 100644
--- a/cli/internal/browser/browser.go
+++ b/cli/internal/browser/browser.go
@@ -42,6 +42,17 @@ func Commands() [][]string {
return cmds
}
+// CanOpen reports whether it's likely that Open will succeed.
+func CanOpen() bool {
+ cmds := Commands()
+ for _, cmd := range cmds {
+ if _, err := exec.LookPath(cmd[0]); err == nil {
+ return true
+ }
+ }
+ return false
+}
+
// Open tries to open url in a browser and reports whether it succeeded.
func Open(url string) bool {
for _, args := range Commands() {
diff --git a/cli/internal/bubbles/checklist/checklist.go b/cli/internal/bubbles/checklist/checklist.go
new file mode 100644
index 0000000000..6035a7816b
--- /dev/null
+++ b/cli/internal/bubbles/checklist/checklist.go
@@ -0,0 +1,288 @@
+package checklist
+
+import (
+ "strconv"
+ "strings"
+
+ tea "github.com/charmbracelet/bubbletea"
+ "github.com/mattn/go-runewidth"
+)
+
+type Item interface {
+ Render(selected, checked bool) string
+}
+
+type Model[I Item] struct {
+ Data []I
+ PerPage int
+ Initial []bool
+
+ // init indicates whether the data model has completed initialization
+ init bool
+
+ checked []bool // len(checked) == len(Data)
+
+ // index global real time index
+ index int
+ // maxIndex global max index
+ maxIndex int
+ // pageIndex real time index of current page
+ pageIndex int
+ // pageMaxIndex current page max index
+ pageMaxIndex int
+
+ // pageData data set rendered in real time on the current page
+ pageData []I
+}
+
+func (m Model[I]) Selected() (I, bool) {
+ idx := m.pageIndex
+ if idx >= 0 && idx < len(m.pageData) {
+ return m.pageData[idx], true
+ }
+ var zero I
+ return zero, false
+}
+
+func (m Model[I]) Checked() []I {
+ indices := m.CheckedIndices()
+ items := make([]I, len(indices))
+ for i, idx := range indices {
+ // checked is parallel to Data, so index into the full data set.
+ items[i] = m.Data[idx]
+ }
+ return items
+}
+
+func (m Model[I]) CheckedIndices() []int {
+ var indices []int
+ for i, checked := range m.checked {
+ if checked {
+ indices = append(indices, i)
+ }
+ }
+ return indices
+}
+
+func (m Model[I]) View() string {
+ var out strings.Builder
+ cursor := "»" // TODO color etc
+ pageStart, _ := m.pageIndexInfo()
+ for i, obj := range m.pageData {
+ selected := i == m.pageIndex
+ // checked is parallel to Data, so offset page-local rows by the page start.
+ checked := m.checked[pageStart+i]
+ if selected {
+ out.WriteString(cursor)
+ out.WriteString(" ")
+ } else {
+ out.WriteString(strings.Repeat(" ", runewidth.StringWidth(cursor)+1))
+ }
+
+ if checked {
+ out.WriteString("[x] ")
+ } else {
+ out.WriteString("[ ] ")
+ }
+
+ out.WriteString(obj.Render(selected, checked))
+ out.WriteString("\n")
+ }
+
+ return out.String()
+}
+
+// Update method responds to various events and modifies the data model
+// according to the corresponding events
+func (m Model[I]) Update(msg tea.Msg) (Model[I], tea.Cmd) {
+ if !m.init {
+ m.initData()
+ return m, nil
+ }
+
+ switch msg := msg.(type) {
+ case tea.KeyMsg:
+ switch strings.ToLower(msg.String()) {
+ case "down":
+ m.moveDown()
+ case "up":
+ m.moveUp()
+ case "right", "pgdown", "l", "k":
+ m.nextPage()
+ case "left", "pgup", "h", "j":
+ m.prePage()
+ case "1", "2", "3", "4", "5", "6", "7", "8", "9":
+ num, _ := strconv.Atoi(msg.String())
+ idx := num - 1
+ m.forward(idx)
+
+ case "x", " ":
+ m.toggle()
+ }
+ }
+ return m, nil
+}
+
+func (m *Model[I]) toggle() {
+ // checked is parallel to Data, so toggle at the global index.
+ idx := m.index
+ if idx >= 0 && idx < len(m.checked) {
+ m.checked[idx] = !m.checked[idx]
+ }
+}
+
+// moveDown executes the downward movement of the cursor,
+// while adjusting the internal index and refreshing the data area
+func (m *Model[I]) moveDown() {
+ // the page index has not reached the maximum value, and the page
+ // data area does not need to be updated
+ if m.pageIndex < m.pageMaxIndex {
+ m.pageIndex++
+ // check whether the global index reaches the maximum value before sliding
+ if m.index < m.maxIndex {
+ m.index++
+ }
+ return
+ }
+
+ // the page index reaches the maximum value, slide the page data area window,
+ // the page index maintains the maximum value
+ if m.pageIndex == m.pageMaxIndex {
+ // check whether the global index reaches the maximum value before sliding
+ if m.index < m.maxIndex {
+ // global index increment
+ m.index++
+ // window slide down one data
+ m.pageData = m.Data[m.index+1-m.PerPage : m.index+1]
+ return
+ }
+ }
+}
+
+// moveUp performs an upward movement of the cursor,
+// while adjusting the internal index and refreshing the data area
+func (m *Model[I]) moveUp() {
+ // the page index has not reached the minimum value, and the page
+ // data area does not need to be updated
+ if m.pageIndex > 0 {
+ m.pageIndex--
+ // check whether the global index reaches the minimum before sliding
+ if m.index > 0 {
+ m.index--
+ }
+ return
+ }
+
+ // the page index reaches the minimum value, slide the page data window,
+ // and the page index maintains the minimum value
+ if m.pageIndex == 0 {
+ // check whether the global index reaches the minimum before sliding
+ if m.index > 0 {
+ // window slide up one data
+ m.pageData = m.Data[m.index-1 : m.index-1+m.PerPage]
+ // global index decrement
+ m.index--
+ return
+ }
+ }
+}
+
+// nextPage triggers the page-down action, and does not change
+// the real-time page index(pageIndex)
+func (m *Model[I]) nextPage() {
+ // Get the start and end position of the page data area slice: m.Data[start:end]
+ //
+ // note: the slice is closed left and opened right: `[start,end)`
+ // assuming that the global data area has unlimited length,
+ // end should always be the actual page `length+1`,
+ // the maximum value of end should be equal to `len(m.Data)`
+ // under limited length
+ pageStart, pageEnd := m.pageIndexInfo()
+ // there are two cases when `end` does not reach the maximum value
+ if pageEnd < len(m.Data) {
+ // the `end` value is at least one page length away from the global maximum index
+ if len(m.Data)-pageEnd >= m.PerPage {
+ // slide back one page in the page data area
+ m.pageData = m.Data[pageStart+m.PerPage : pageEnd+m.PerPage]
+ // Global real-time index increases by one page length
+ m.index += m.PerPage
+ } else { // `end` is less than a page length from the global maximum index
+ // slide the page data area directly to the end
+ m.pageData = m.Data[len(m.Data)-m.PerPage : len(m.Data)]
+ // `sliding distance` = `position after sliding` - `position before sliding`
+ // the global real-time index should also synchronize the same sliding distance
+ m.index += len(m.Data) - pageEnd
+ }
+ }
+}
+
+// prePage triggers the page-up action, and does not change
+// the real-time page index(pageIndex)
+func (m *Model[I]) prePage() {
+ // Get the start and end position of the page data area slice: m.Data[start:end]
+ //
+ // note: the slice is closed left and opened right: `[start,end)`
+ // assuming that the global data area has unlimited length,
+ // end should always be the actual page `length+1`,
+ // the maximum value of end should be equal to `len(m.Data)`
+ // under limited length
+ pageStart, pageEnd := m.pageIndexInfo()
+ // there are two cases when `start` does not reach the minimum value
+ if pageStart > 0 {
+ // `start` is at least one page length from the minimum
+ if pageStart >= m.PerPage {
+ // slide the page data area forward one page
+ m.pageData = m.Data[pageStart-m.PerPage : pageEnd-m.PerPage]
+ // Global real-time index reduces the length of one page
+ m.index -= m.PerPage
+ } else { // `start` to the minimum value less than one page length
+ // slide the page data area directly to the start
+ m.pageData = m.Data[:m.PerPage]
+ // `sliding distance` = `position before sliding` - `minimum value(0)`
+ // the global real-time index should also synchronize the same sliding distance
+ m.index -= pageStart - 0
+ }
+ }
+}
+
+// forward triggers a fast jump action, if the pageIndex
+// is invalid, keep it as it is
+func (m *Model[I]) forward(pageIndex int) {
+ // pageIndex has exceeded the maximum index of the page, ignore
+ if pageIndex > m.pageMaxIndex {
+ return
+ }
+
+ // calculate the distance moved to pageIndex
+ l := pageIndex - m.pageIndex
+ // update the global real time index
+ m.index += l
+ // update the page real time index
+ m.pageIndex = pageIndex
+}
+
+// initData initialize the data model, set the default value and
+// fix the wrong parameter settings during initialization
+func (m *Model[I]) initData() {
+ if m.PerPage > len(m.Data) || m.PerPage < 1 {
+ m.PerPage = len(m.Data)
+ m.pageData = m.Data
+ } else {
+ m.pageData = m.Data[:m.PerPage]
+ }
+
+ m.pageIndex = 0
+ m.pageMaxIndex = m.PerPage - 1
+ m.index = 0
+ m.maxIndex = len(m.Data) - 1
+ m.checked = make([]bool, len(m.Data))
+ copy(m.checked, m.Initial)
+ m.init = true
+}
+
+// pageIndexInfo return the start and end positions of the slice of the
+// page data area corresponding to the global data area
+func (m *Model[I]) pageIndexInfo() (start, end int) {
+ // `Global real-time index` - `page real-time index` = `start index of page data area`
+ start = m.index - m.pageIndex
+ // `Page data area start index` + `single page size` = `page data area end index`
+ end = start + m.PerPage
+ return
+}
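
The paging model above keeps two cursors, a global index into the full data set and a page-local index, and pageIndexInfo recovers the visible window from them. A tiny worked example of that arithmetic:

package main

import "fmt"

// pageWindow mirrors pageIndexInfo: the window starts at the global index
// minus the page-local index and spans perPage items, half-open [start, end).
func pageWindow(index, pageIndex, perPage int) (start, end int) {
	start = index - pageIndex
	end = start + perPage
	return start, end
}

func main() {
	data := []string{"a", "b", "c", "d", "e", "f"}
	// The global cursor is on item 4 ("e"), shown as row 1 of a 3-item page.
	start, end := pageWindow(4, 1, 3)
	fmt.Println(data[start:end]) // [d e f]
}
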
diff --git a/cli/internal/bubbles/selector/selector.go b/cli/internal/bubbles/selector/selector.go
new file mode 100644
index 0000000000..e7856071c8
--- /dev/null
+++ b/cli/internal/bubbles/selector/selector.go
@@ -0,0 +1,263 @@
+package selector
+
+import (
+ "strconv"
+ "strings"
+
+ tea "github.com/charmbracelet/bubbletea"
+ "github.com/mattn/go-runewidth"
+)
+
+type Item interface {
+ Render(selected bool) string
+}
+
+func New[I Item](data []I, perPage int) Model[I] {
+ m := Model[I]{data: data, perPage: perPage}
+ m.initData()
+ return m
+}
+
+type Model[I Item] struct {
+ data []I
+ pageData []I
+ perPage int
+
+ focused bool
+
+ // init indicates whether the data model has completed initialization
+ init bool
+ // index global real time index
+ index int
+ // maxIndex global max index
+ maxIndex int
+ // pageIndex real time index of current page
+ pageIndex int
+ // pageMaxIndex current page max index
+ pageMaxIndex int
+}
+
+func (m Model[I]) Selected() (I, bool) {
+ idx := m.index
+ if idx >= 0 && idx < len(m.data) {
+ return m.data[idx], true
+ }
+ var zero I
+ return zero, false
+}
+
+func (m Model[I]) View() string {
+ var out strings.Builder
+ cursor := "»" // TODO color etc
+ for i, obj := range m.pageData {
+ selected := i == m.pageIndex
+ if selected {
+ out.WriteString(cursor)
+ out.WriteString(" ")
+ } else {
+ out.WriteString(strings.Repeat(" ", runewidth.StringWidth(cursor)+1))
+ }
+ out.WriteString(obj.Render(selected))
+ out.WriteString("\n")
+ }
+
+ return out.String()
+}
+
+func (m *Model[I]) Focus() tea.Cmd {
+ m.focused = true
+ return nil
+}
+
+func (m *Model[I]) Blur() tea.Cmd {
+ m.focused = false
+ return nil
+}
+
+// Update method responds to various events and modifies the data model
+// according to the corresponding events
+func (m Model[I]) Update(msg tea.Msg) (Model[I], tea.Cmd) {
+ if !m.init {
+ m.initData()
+ return m, nil
+ } else if !m.focused {
+ return m, nil
+ }
+
+ switch msg := msg.(type) {
+ case tea.KeyMsg:
+ switch strings.ToLower(msg.String()) {
+ case "down":
+ m.moveDown()
+ case "up":
+ m.moveUp()
+ case "right", "pgdown", "l", "k":
+ m.nextPage()
+ case "left", "pgup", "h", "j":
+ m.prePage()
+ case "1", "2", "3", "4", "5", "6", "7", "8", "9":
+ num, _ := strconv.Atoi(msg.String())
+ idx := num - 1
+ m.forward(idx)
+ }
+ }
+ return m, nil
+}
+
+// moveDown moves the cursor down one entry, adjusting the internal
+// indexes and refreshing the page data area as needed
+func (m *Model[I]) moveDown() {
+ // the page index has not reached the maximum value, and the page
+ // data area does not need to be updated
+ if m.pageIndex < m.pageMaxIndex {
+ m.pageIndex++
+ // check whether the global index reaches the maximum value before sliding
+ if m.index < m.maxIndex {
+ m.index++
+ }
+ return
+ }
+
+ // the page index has reached the maximum value: slide the page data
+ // window down while the page index stays at its maximum
+ if m.pageIndex == m.pageMaxIndex {
+ // check whether the global index reaches the maximum value before sliding
+ if m.index < m.maxIndex {
+ // global index increment
+ m.index++
+ // slide the window down by one entry
+ m.pageData = m.data[m.index+1-m.perPage : m.index+1]
+ return
+ }
+ }
+}
+
+// moveUp moves the cursor up one entry, adjusting the internal
+// indexes and refreshing the page data area as needed
+func (m *Model[I]) moveUp() {
+ // the page index has not reached the minimum value, and the page
+ // data area does not need to be updated
+ if m.pageIndex > 0 {
+ m.pageIndex--
+ // check whether the global index reaches the minimum before sliding
+ if m.index > 0 {
+ m.index--
+ }
+ return
+ }
+
+ // the page index has reached the minimum value: slide the page data
+ // window up while the page index stays at its minimum
+ if m.pageIndex == 0 {
+ // check whether the global index reaches the minimum before sliding
+ if m.index > 0 {
+ // slide the window up by one entry
+ m.pageData = m.data[m.index-1 : m.index-1+m.perPage]
+ // global index decrement
+ m.index--
+ return
+ }
+ }
+}
+
+// nextPage pages down without changing the real-time page
+// index (pageIndex)
+func (m *Model[I]) nextPage() {
+ // Get the start and end positions of the page data area slice: m.data[start:end]
+ //
+ // note: the slice is half-open, `[start, end)`, so `end` is always one
+ // past the last visible element; with a finite data area its maximum
+ // value is `len(m.data)`
+ pageStart, pageEnd := m.pageIndexInfo()
+ // there are two cases when `end` does not reach the maximum value
+ if pageEnd < len(m.data) {
+ // the `end` value is at least one page length away from the global maximum index
+ if len(m.data)-pageEnd >= m.perPage {
+ // slide back one page in the page data area
+ m.pageData = m.data[pageStart+m.perPage : pageEnd+m.perPage]
+ // Global real-time index increases by one page length
+ m.index += m.perPage
+ } else { // `end` is less than a page length from the global maximum index
+ // slide the page data area directly to the end
+ m.pageData = m.data[len(m.data)-m.perPage : len(m.data)]
+ // `sliding distance` = `position after sliding` - `position before sliding`
+ // the global real-time index should also synchronize the same sliding distance
+ m.index += len(m.data) - pageEnd
+ }
+ }
+}
+
+// prePage pages up without changing the real-time page
+// index (pageIndex)
+func (m *Model[I]) prePage() {
+ // Get the start and end positions of the page data area slice: m.data[start:end]
+ //
+ // note: the slice is half-open, `[start, end)`, so `end` is always one
+ // past the last visible element; with a finite data area its maximum
+ // value is `len(m.data)`
+ pageStart, pageEnd := m.pageIndexInfo()
+ // there are two cases when `start` does not reach the minimum value
+ if pageStart > 0 {
+ // `start` is at least one page length from the minimum
+ if pageStart >= m.perPage {
+ // slide the page data area forward one page
+ m.pageData = m.data[pageStart-m.perPage : pageEnd-m.perPage]
+ // Global real-time index reduces the length of one page
+ m.index -= m.perPage
+ } else { // `start` is less than one page length from the minimum
+ // slide the page data area directly to the start
+ m.pageData = m.data[:m.perPage]
+ // `sliding distance` = `position before sliding` - `minimum value (0)`,
+ // so the global real-time index slides back by the same distance
+ m.index -= pageStart
+ }
+ }
+}
+
+// forward jumps the cursor directly to the given page index;
+// if pageIndex is invalid, the state is left unchanged
+func (m *Model[I]) forward(pageIndex int) {
+ // ignore page indexes outside the valid range
+ if pageIndex < 0 || pageIndex > m.pageMaxIndex {
+ return
+ }
+
+ // calculate the distance moved to pageIndex
+ l := pageIndex - m.pageIndex
+ // update the global real time index
+ m.index += l
+ // update the page real time index
+ m.pageIndex = pageIndex
+}
+
+// initData initializes the data model, setting default values and
+// correcting invalid parameter settings supplied at initialization
+func (m *Model[I]) initData() {
+ if m.perPage > len(m.data) || m.perPage < 1 {
+ m.perPage = len(m.data)
+ m.pageData = m.data
+ } else {
+ m.pageData = m.data[:m.perPage]
+ }
+
+ m.pageIndex = 0
+ m.pageMaxIndex = m.perPage - 1
+ m.index = 0
+ m.maxIndex = len(m.data) - 1
+ m.init = true
+}
+
+// pageIndexInfo returns the start and end positions of the page data
+// area's slice within the global data area.
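+// For example, with perPage = 3, index = 7 and pageIndex = 1,
+// the window is data[6:9].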
+func (m *Model[I]) pageIndexInfo() (start, end int) {
+ // `Global real-time index` - `page real-time index` = `start index of page data area`
+ start = m.index - m.pageIndex
+ // `Page data area start index` + `single page size` = `page data area end index`
+ end = start + m.perPage
+ return
+}
diff --git a/cli/internal/codegen/client.go b/cli/internal/codegen/client.go
deleted file mode 100644
index ece60a520e..0000000000
--- a/cli/internal/codegen/client.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Package codegen generates code for use with Encore apps.
-package codegen
-
-import (
- "bytes"
- "errors"
- "fmt"
- "path/filepath"
- "strings"
-
- meta "encr.dev/proto/encore/parser/meta/v1"
-)
-
-// Lang represents a programming language or dialect that we support generating code for.
-type Lang string
-
-// These constants represent supported languages.
-const (
- Unknown Lang = ""
- TypeScript Lang = "typescript"
-)
-
-type generator interface {
- Generate(buf *bytes.Buffer, appSlug string, md *meta.Data) error
-}
-
-// ErrUnknownLang is reported by Generate when the language is not known.
-var ErrUnknownLang = errors.New("unknown language")
-
-// Detect attempts to detect the language from the given filename.
-func Detect(path string) (lang Lang, ok bool) {
- suffix := strings.ToLower(filepath.Ext(path))
- switch suffix {
- case ".ts":
- return TypeScript, true
- default:
- return Unknown, false
- }
-}
-
-// Client generates an API client based on the given app metadata.
-func Client(lang Lang, appSlug string, md *meta.Data) (code []byte, err error) {
- defer func() {
- if e := recover(); e != nil {
- err = fmt.Errorf("codegen.Client %s %s panicked: %v", lang, appSlug, e)
- }
- }()
-
- var gen generator
- switch lang {
- case TypeScript:
- gen = &ts{}
- default:
- return nil, ErrUnknownLang
- }
-
- var buf bytes.Buffer
- if err := gen.Generate(&buf, appSlug, md); err != nil {
- return nil, fmt.Errorf("genclient.Generate %s %s: %v", lang, appSlug, err)
- }
- return buf.Bytes(), nil
-}
diff --git a/cli/internal/codegen/ts.go b/cli/internal/codegen/ts.go
deleted file mode 100644
index 8ed5ea8995..0000000000
--- a/cli/internal/codegen/ts.go
+++ /dev/null
@@ -1,437 +0,0 @@
-package codegen
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "sort"
- "strings"
-
- meta "encr.dev/proto/encore/parser/meta/v1"
- schema "encr.dev/proto/encore/parser/schema/v1"
-)
-
-/* The TypeScript generator generates code that looks like this:
-export namespace task {
- export interface AddParams {
- description: string
- }
-
- export class ServiceClient {
- public Add(params: task_AddParams): Promise<void> {
- // ...
- }
- }
-}
-
-*/
-
-type ts struct {
- *bytes.Buffer
- md *meta.Data
- appSlug string
- typs *typeRegistry
- currDecl *schema.Decl
-}
-
-func (ts *ts) Generate(buf *bytes.Buffer, appSlug string, md *meta.Data) (err error) {
- defer ts.handleBailout(&err)
-
- ts.Buffer = buf
- ts.md = md
- ts.appSlug = appSlug
- ts.typs = getNamedTypes(md)
-
- nss := ts.typs.Namespaces()
- seenNs := make(map[string]bool)
- ts.writeClient()
- for _, svc := range md.Svcs {
- ts.writeService(svc)
- seenNs[svc.Name] = true
- }
- for _, ns := range nss {
- if !seenNs[ns] {
- ts.writeNamespace(ns)
- }
- }
- ts.writeBaseClient()
-
- return nil
-}
-
-func (ts *ts) hasPublicRPC(svc *meta.Service) bool {
- for _, rpc := range svc.Rpcs {
- if rpc.AccessType != meta.RPC_PRIVATE {
- return true
- }
- }
- return false
-}
-
-func (ts *ts) writeService(svc *meta.Service) {
- // Determine if we have anything worth exposing.
- // Either a public RPC or a named type.
- publicRPC := ts.hasPublicRPC(svc)
- decls := ts.typs.Decls(svc.Name)
- if !publicRPC && len(decls) == 0 {
- return
- }
-
- ns := svc.Name
- fmt.Fprintf(ts, "export namespace %s {\n", ns)
-
- sort.Slice(decls, func(i, j int) bool {
- return decls[i].Name < decls[j].Name
- })
- for i, d := range decls {
- if i > 0 {
- ts.WriteString("\n")
- }
- ts.writeDeclDef(ns, d)
- }
-
- if !publicRPC {
- ts.WriteString("}\n\n")
- return
- }
- ts.WriteString("\n")
-
- numIndent := 1
- indent := func() {
- ts.WriteString(strings.Repeat(" ", numIndent))
- }
-
- indent()
- fmt.Fprint(ts, "export class ServiceClient {\n")
- numIndent++
-
- // Constructor
- indent()
- ts.WriteString("private baseClient: BaseClient\n\n")
- indent()
- ts.WriteString("constructor(baseClient: BaseClient) {\n")
- numIndent++
- indent()
- ts.WriteString("this.baseClient = baseClient\n")
- numIndent--
- indent()
- ts.WriteString("}\n")
-
- // RPCs
- for _, rpc := range svc.Rpcs {
- if rpc.AccessType == meta.RPC_PRIVATE {
- continue
- }
-
- ts.WriteByte('\n')
-
- // Doc string
- if rpc.Doc != "" {
- scanner := bufio.NewScanner(strings.NewReader(rpc.Doc))
- indent()
- ts.WriteString("/**\n")
- for scanner.Scan() {
- indent()
- ts.WriteString(" * ")
- ts.WriteString(scanner.Text())
- ts.WriteByte('\n')
- }
- indent()
- ts.WriteString(" */\n")
- }
-
- // Signature
- indent()
- fmt.Fprintf(ts, "public %s(", rpc.Name)
- if rpc.RequestSchema != nil {
- ts.WriteString("params: ")
- ts.writeDecl(ns, rpc.RequestSchema)
- }
-
- ts.WriteString("): Promise<")
- if rpc.ResponseSchema != nil {
- ts.writeDecl(ns, rpc.ResponseSchema)
- } else {
- ts.WriteString("void")
- }
- ts.WriteString("> {\n")
-
- // Body
- numIndent++
- indent()
- if rpc.ResponseSchema == nil {
- fmt.Fprintf(ts, `return this.baseClient.doVoid("%s.%s"`, svc.Name, rpc.Name)
- } else {
- ts.WriteString("return this.baseClient.do<")
- ts.writeDecl(svc.Name, rpc.ResponseSchema)
- fmt.Fprintf(ts, `>("%s.%s"`, svc.Name, rpc.Name)
- }
- if rpc.RequestSchema != nil {
- ts.WriteString(", params")
- }
- ts.WriteString(")\n")
- numIndent--
- indent()
- ts.WriteString("}\n")
- }
- numIndent--
- indent()
- ts.WriteString("}\n}\n\n")
-}
-
-func (ts *ts) writeNamespace(ns string) {
- decls := ts.typs.Decls(ns)
- if len(decls) == 0 {
- return
- }
-
- fmt.Fprintf(ts, "export namespace %s {\n", ns)
- sort.Slice(decls, func(i, j int) bool {
- return decls[i].Name < decls[j].Name
- })
- for i, d := range decls {
- if i > 0 {
- ts.WriteString("\n")
- }
- ts.writeDeclDef(ns, d)
- }
- ts.WriteString("}\n\n")
-}
-
-func (ts *ts) writeDeclDef(ns string, decl *schema.Decl) {
- if decl.Doc != "" {
- scanner := bufio.NewScanner(strings.NewReader(decl.Doc))
- ts.WriteString(" /**\n")
- for scanner.Scan() {
- ts.WriteString(" * ")
- ts.WriteString(scanner.Text())
- ts.WriteByte('\n')
- }
- ts.WriteString(" */\n")
- }
-
- // If it's a struct type, expose it as an interface;
- // other types should be type aliases.
- if st := decl.Type.GetStruct(); st != nil {
- fmt.Fprintf(ts, " export interface %s ", decl.Name)
- } else {
- fmt.Fprintf(ts, " export type %s = ", decl.Name)
- }
- ts.currDecl = decl
- ts.writeTyp(ns, decl.Type, 1)
- ts.WriteString("\n")
-}
-
-func (ts *ts) writeClient() {
- ts.WriteString("export default class Client {\n")
-
- numIndent := 1
- indent := func() {
- ts.WriteString(strings.Repeat(" ", numIndent))
- }
-
- for _, svc := range ts.md.Svcs {
- if ts.hasPublicRPC(svc) {
- indent()
- fmt.Fprintf(ts, "%s: %s.ServiceClient\n", svc.Name, svc.Name)
- }
- }
- ts.WriteByte('\n')
-
- indent()
- ts.WriteString("constructor(environment: string = \"production\", token?: string) {\n")
- numIndent++
-
- indent()
- ts.WriteString("const base = new BaseClient(environment, token)\n")
- for _, svc := range ts.md.Svcs {
- if ts.hasPublicRPC(svc) {
- indent()
- fmt.Fprintf(ts, "this.%s = new %s.ServiceClient(base)\n", svc.Name, svc.Name)
- }
- }
-
- numIndent--
- indent()
- fmt.Fprint(ts, "}\n}\n\n")
-}
-
-func (ts *ts) writeBaseClient() {
- ts.WriteString(`class BaseClient {
- baseURL: string
- headers: {[key: string]: string}
-
- constructor(environment: string, token?: string) {
- this.headers = {"Content-Type": "application/json"}
- if (token !== undefined) {
- this.headers["Authorization"] = "Bearer " + token
- }
- if (environment === "local") {
- this.baseURL = "http://localhost:4060/"
- } else {
- this.baseURL = ` + "`https://" + ts.appSlug + ".encoreapi.com/${environment}/`" + `
- }
- }
-
- public async do<T>(endpoint: string, req?: any): Promise<T> {
- let response = await fetch(this.baseURL + endpoint, {
- method: "POST",
- headers: this.headers,
- body: JSON.stringify(req || {})
- })
- if (!response.ok) {
- let body = await response.text()
- throw new Error("request failed: " + body)
- }
- return (await response.json())
- }
-
- public async doVoid(endpoint: string, req?: any): Promise<void> {
- let response = await fetch(this.baseURL + endpoint, {
- method: "POST",
- headers: this.headers,
- body: JSON.stringify(req || {})
- })
- if (!response.ok) {
- let body = await response.text()
- throw new Error("request failed: " + body)
- }
- await response.text()
- }
-}
-`)
-}
-
-func (ts *ts) writeDecl(ns string, decl *schema.Decl) {
- if decl.Loc.PkgName != ns {
- ts.WriteString(decl.Loc.PkgName + ".")
- }
- ts.WriteString(decl.Name)
-}
-
-func (ts *ts) writeTyp(ns string, typ *schema.Type, numIndents int) {
- switch typ := typ.Typ.(type) {
- case *schema.Type_Named:
- decl := ts.md.Decls[typ.Named.Id]
- ts.writeDecl(ns, decl)
- case *schema.Type_List:
- elem := typ.List.Elem
- ts.writeTyp(ns, elem, numIndents)
- ts.WriteString("[]")
-
- case *schema.Type_Map:
- ts.WriteString("{ [key: ")
- ts.writeTyp(ns, typ.Map.Key, numIndents)
- ts.WriteString("]: ")
- ts.writeTyp(ns, typ.Map.Value, numIndents)
- ts.WriteString(" }")
-
- case *schema.Type_Builtin:
- t := ""
- switch typ.Builtin {
- case schema.Builtin_ANY:
- t = "any"
- case schema.Builtin_BOOL:
- t = "boolean"
- case schema.Builtin_INT8:
- t = "number"
- case schema.Builtin_INT16:
- t = "number"
- case schema.Builtin_INT32:
- t = "number"
- case schema.Builtin_INT64:
- t = "number"
- case schema.Builtin_UINT8:
- t = "number"
- case schema.Builtin_UINT16:
- t = "number"
- case schema.Builtin_UINT32:
- t = "number"
- case schema.Builtin_UINT64:
- t = "number"
- case schema.Builtin_FLOAT32:
- t = "number"
- case schema.Builtin_FLOAT64:
- t = "number"
- case schema.Builtin_STRING:
- t = "string"
- case schema.Builtin_BYTES:
- t = "string" // TODO
- case schema.Builtin_TIME:
- t = "string" // TODO
- case schema.Builtin_JSON:
- t = "object"
- case schema.Builtin_UUID:
- t = "string"
- case schema.Builtin_USER_ID:
- t = "string"
- default:
- ts.errorf("unknown builtin type %v", typ.Builtin)
- }
- ts.WriteString(t)
-
- case *schema.Type_Struct:
- indent := func() {
- ts.WriteString(strings.Repeat(" ", numIndents+1))
- }
- ts.WriteString("{\n")
- for i, field := range typ.Struct.Fields {
- if field.Doc != "" {
- scanner := bufio.NewScanner(strings.NewReader(field.Doc))
- indent()
- ts.WriteString("/**\n")
- for scanner.Scan() {
- indent()
- ts.WriteString(" * ")
- ts.WriteString(scanner.Text())
- ts.WriteByte('\n')
- }
- indent()
- ts.WriteString(" */\n")
- }
-
- indent()
- name := field.Name
- if js := field.JsonName; js != "" {
- name = js
- }
- ts.WriteString(name)
-
- // Treat recursively seen types as if they are optional
- recursiveType := false
- if n := field.Typ.GetNamed(); n != nil {
- recursiveType = ts.typs.IsRecursiveRef(ts.currDecl.Id, n.Id)
- }
- if field.Optional || recursiveType {
- ts.WriteString("?")
- }
- ts.WriteString(": ")
- ts.writeTyp(ns, field.Typ, numIndents+1)
- ts.WriteString("\n")
-
- // Add another empty line if we have a doc comment
- // and this was not the last field.
- if field.Doc != "" && i < len(typ.Struct.Fields)-1 {
- ts.WriteByte('\n')
- }
- }
- ts.WriteString(strings.Repeat(" ", numIndents))
- ts.WriteByte('}')
- }
-}
-
-type bailout struct{ err error }
-
-func (ts *ts) errorf(format string, args ...interface{}) {
- panic(bailout{fmt.Errorf(format, args...)})
-}
-
-func (ts *ts) handleBailout(dst *error) {
- if err := recover(); err != nil {
- if bail, ok := err.(bailout); ok {
- *dst = bail.err
- } else {
- panic(err)
- }
- }
-}
diff --git a/cli/internal/codegen/ts_test.go b/cli/internal/codegen/ts_test.go
deleted file mode 100644
index 2afae608ab..0000000000
--- a/cli/internal/codegen/ts_test.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package codegen
-
-import (
- "strings"
- "testing"
-
- "encr.dev/parser"
- qt "github.com/frankban/quicktest"
- "github.com/rogpeppe/go-internal/txtar"
-)
-
-func TestTypeScript(t *testing.T) {
- c := qt.New(t)
-
- const code = `
--- go.mod --
-module app
-
--- encore.app --
-{"id": ""}
-
--- svc/svc.go --
-package svc
-
-type Request struct {
- Foo Foo
-}
-
-type Foo int
-
--- svc/api.go --
-package svc
-
-import "context"
-
-//encore:api public
-func DummyAPI(ctx context.Context, req *Request) error {
- return nil
-}
-`
-
- ar := txtar.Parse([]byte(code))
- base := t.TempDir()
- err := txtar.Write(ar, base)
- c.Assert(err, qt.IsNil)
-
- res, err := parser.Parse(&parser.Config{
- AppRoot: base,
- ModulePath: "app",
- })
- c.Assert(err, qt.IsNil)
-
- ts, err := Client(TypeScript, "app", res.Meta)
- c.Assert(err, qt.IsNil)
- expect := `export default class Client {
- svc: svc.ServiceClient
-
- constructor(environment: string = "production", token?: string) {
- const base = new BaseClient(environment, token)
- this.svc = new svc.ServiceClient(base)
- }
-}
-
-export namespace svc {
- export type Foo = number
-
- export interface Request {
- Foo: Foo
- }
-
- export class ServiceClient {
- private baseClient: BaseClient
-
- constructor(baseClient: BaseClient) {
- this.baseClient = baseClient
- }
-
- public DummyAPI(params: Request): Promise<void> {
- return this.baseClient.doVoid("svc.DummyAPI", params)
- }
- }
-}
-
-class BaseClient {
- baseURL: string
- headers: {[key: string]: string}
-
- constructor(environment: string, token?: string) {
- this.headers = {"Content-Type": "application/json"}
- if (token !== undefined) {
- this.headers["Authorization"] = "Bearer " + token
- }
- if (environment === "local") {
- this.baseURL = "http://localhost:4060/"
- } else {
- this.baseURL = ` + "`" + `https://app.encoreapi.com/${environment}/` + "`" + `
- }
- }
-
- public async do<T>(endpoint: string, req?: any): Promise<T> {
- let response = await fetch(this.baseURL + endpoint, {
- method: "POST",
- headers: this.headers,
- body: JSON.stringify(req || {})
- })
- if (!response.ok) {
- let body = await response.text()
- throw new Error("request failed: " + body)
- }
- return (await response.json())
- }
-
- public async doVoid(endpoint: string, req?: any): Promise<void> {
- let response = await fetch(this.baseURL + endpoint, {
- method: "POST",
- headers: this.headers,
- body: JSON.stringify(req || {})
- })
- if (!response.ok) {
- let body = await response.text()
- throw new Error("request failed: " + body)
- }
- await response.text()
- }
-}
-`
-
- c.Assert(strings.Split(string(ts), "\n"), qt.DeepEquals, strings.Split(expect, "\n"))
-}
diff --git a/cli/internal/codegen/types.go b/cli/internal/codegen/types.go
deleted file mode 100644
index 3c0fe6557e..0000000000
--- a/cli/internal/codegen/types.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package codegen
-
-import (
- "fmt"
- "sort"
-
- meta "encr.dev/proto/encore/parser/meta/v1"
- schema "encr.dev/proto/encore/parser/schema/v1"
-)
-
-func getNamedTypes(md *meta.Data) *typeRegistry {
- r := &typeRegistry{
- md: md,
- namespaces: make(map[string][]*schema.Decl),
- seenDecls: make(map[uint32]bool),
- declRefs: make(map[uint32]map[uint32]bool),
- }
- for _, svc := range md.Svcs {
- for _, rpc := range svc.Rpcs {
- if rpc.AccessType != meta.RPC_PRIVATE {
- r.VisitDecl(rpc.RequestSchema)
- r.VisitDecl(rpc.ResponseSchema)
- }
- }
- }
- return r
-}
-
-// typeRegistry computes the visible set of type declarations
-// and how to group them into namespaces.
-type typeRegistry struct {
- md *meta.Data
- namespaces map[string][]*schema.Decl
- seenDecls map[uint32]bool
- declRefs map[uint32]map[uint32]bool // tracks which decls reference which other decls
- currDecl *schema.Decl // may be nil
-}
-
-type namedType struct {
- pkg string
- name string
-}
-
-func (v *typeRegistry) Decls(name string) []*schema.Decl {
- return v.namespaces[name]
-}
-
-func (v *typeRegistry) Namespaces() []string {
- nss := make([]string, 0, len(v.namespaces))
- for ns := range v.namespaces {
- nss = append(nss, ns)
- }
- sort.Strings(nss)
- return nss
-}
-
-func (v *typeRegistry) Visit(typ *schema.Type) {
- if typ == nil {
- return
- }
- switch t := typ.Typ.(type) {
- case *schema.Type_Named:
- v.visitNamed(t.Named)
- case *schema.Type_List:
- v.Visit(t.List.Elem)
- case *schema.Type_Map:
- v.Visit(t.Map.Key)
- v.Visit(t.Map.Value)
- case *schema.Type_Struct:
- for _, f := range t.Struct.Fields {
- v.Visit(f.Typ)
- }
- case *schema.Type_Builtin:
- // do nothing
- default:
- panic(fmt.Sprintf("unhandled type: %T", typ))
- }
-}
-
-func (v *typeRegistry) VisitDecl(decl *schema.Decl) {
- if decl == nil {
- return
- }
-
- if !v.seenDecls[decl.Id] {
- v.seenDecls[decl.Id] = true
- ns := decl.Loc.PkgName
- v.namespaces[ns] = append(v.namespaces[ns], decl)
-
- // Set currDecl when processing this and then reset it
- prev := v.currDecl
- v.currDecl = decl
- v.Visit(decl.Type)
- v.currDecl = prev
- }
-}
-
-func (v *typeRegistry) visitNamed(n *schema.Named) {
- to := n.Id
- curr := v.currDecl
- if curr != nil {
- from := curr.Id
- if _, ok := v.declRefs[from]; !ok {
- v.declRefs[from] = make(map[uint32]bool)
- }
- v.declRefs[from][to] = true
- }
-
- decl := v.md.Decls[to]
- v.VisitDecl(decl)
-
- // Add transitive refs
- if curr != nil {
- from := curr.Id
- for to2 := range v.declRefs[to] {
- v.declRefs[from][to2] = true
- }
- }
-}
-
-func (v *typeRegistry) IsRecursiveRef(from, to uint32) bool {
- return v.declRefs[from][to] && v.declRefs[to][from]
-}
diff --git a/cli/internal/conf/conf.go b/cli/internal/conf/conf.go
deleted file mode 100644
index 41b0821264..0000000000
--- a/cli/internal/conf/conf.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Package conf writes and reads the Encore configuration file for the user.
-package conf
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "io/ioutil"
- "net/http"
- "os"
- "path/filepath"
- "runtime"
-
- "go4.org/syncutil"
- "golang.org/x/oauth2"
-)
-
-// Config represents the stored Encore configuration.
-type Config struct {
- oauth2.Token
- Email string `json:"email"`
- WireGuard struct {
- PublicKey string `json:"pub,omitempty"`
- PrivateKey string `json:"priv,omitempty"`
- } `json:"wg,omitempty"`
-}
-
-// Write persists the configuration for the user.
-func Write(cfg *Config) (err error) {
- defer func() {
- if err != nil {
- err = fmt.Errorf("conf.Write: %v", err)
- }
- }()
-
- dir, err := os.UserConfigDir()
- if err != nil {
- return err
- }
- path := filepath.Join(dir, "encore", ".auth_token")
- if data, err := json.Marshal(cfg); err != nil {
- return err
- } else if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
- return err
- } else if err := ioutil.WriteFile(path, data, 0600); err != nil {
- return err
- }
- return nil
-}
-
-func Logout() error {
- dir, err := os.UserConfigDir()
- if err != nil {
- return err
- }
- path := filepath.Join(dir, "encore", ".auth_token")
- if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
- return err
- }
- return nil
-}
-
-func CurrentUser() (*Config, error) {
- dir, err := os.UserConfigDir()
- if err != nil {
- return nil, fmt.Errorf("conf.CurrentUser: %w", err)
- }
- conf, err := readConf(dir)
- if err != nil {
- return nil, fmt.Errorf("conf.CurrentUser: %w", err)
- }
- return conf, nil
-}
-
-func OriginalUser(configDir string) (cfg *Config, err error) {
- if runtime.GOOS == "windows" {
- // Windows does not have the notion of a root user, so just use CurrentUser
- return CurrentUser()
- }
-
- if configDir == "" {
- var err error
- configDir, err = os.UserConfigDir()
- if err != nil {
- return nil, err
- }
- }
-
- return readConf(configDir)
-}
-
-func readConf(configDir string) (*Config, error) {
- path := filepath.Join(configDir, "encore", ".auth_token")
- data, err := ioutil.ReadFile(path)
- if err != nil {
- return nil, err
- }
- var conf Config
- if err := json.Unmarshal(data, &conf); err != nil {
- return nil, err
- }
- return &conf, nil
-}
-
-// TokenSource implements oauth2.TokenSource by looking up the
-// current logged in user's API Token.
-// The zero value is ready to be used.
-type TokenSource struct {
- setup syncutil.Once
- ts oauth2.TokenSource
-}
-
-// Token implements oauth2.TokenSource.
-func (ts *TokenSource) Token() (*oauth2.Token, error) {
- err := ts.setup.Do(func() error {
- cfg, err := CurrentUser()
- if errors.Is(err, os.ErrNotExist) {
- return fmt.Errorf("not logged in: run 'encore auth login' first")
- } else if err != nil {
- return fmt.Errorf("could not get Encore auth token: %v", err)
- }
-
- oauth2Cfg := &oauth2.Config{
- Endpoint: oauth2.Endpoint{
- TokenURL: "https://api.encore.dev/login/oauth:refresh-token",
- },
- }
- ts.ts = oauth2Cfg.TokenSource(context.Background(), &cfg.Token)
- return nil
- })
- if err != nil {
- return nil, err
- }
- return ts.ts.Token()
-}
-
-// AuthClient creates an *http.Client that authenticates requests
-// using the logged-in user.
-func AuthClient() *http.Client {
- return oauth2.NewClient(nil, &TokenSource{})
-}
diff --git a/cli/internal/env/env.go b/cli/internal/env/env.go
deleted file mode 100644
index 470589b7bb..0000000000
--- a/cli/internal/env/env.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Package env answers where Encore tools and resources are located.
-package env
-
-import (
- "fmt"
- "os"
- "path/filepath"
-)
-
-// EncoreRuntimePath reports the path to the Encore runtime.
-// It can be overridden by setting ENCORE_RUNTIME_PATH.
-func EncoreRuntimePath() string {
- if p := os.Getenv("ENCORE_RUNTIME_PATH"); p != "" {
- return p
- }
- root, ok := determineRoot()
- if !ok {
- fmt.Fprintln(os.Stderr, "fatal: could not determine Encore install root.\n"+
- "You can specify the path to the Encore runtime manually by setting the ENCORE_RUNTIME_PATH environment variable.")
- os.Exit(1)
- }
- return filepath.Join(root, "runtime")
-}
-
-// EncoreGoRoot reports the path to the Encore Go root.
-// It can be overridden by setting ENCORE_GOROOT.
-func EncoreGoRoot() string {
- if p := os.Getenv("ENCORE_GOROOT"); p != "" {
- return p
- }
- root, ok := determineRoot()
- if !ok {
- fmt.Fprintln(os.Stderr, "fatal: could not determine Encore install root.\n"+
- "You can specify the path to the Encore GOROOT manually by setting the ENCORE_GOROOT environment variable.")
- os.Exit(1)
- }
- return filepath.Join(root, "encore-go")
-}
-
-// determineRoot determines encore root by checking the location relative
-// to the executable, to enable relocatable installs.
-func determineRoot() (root string, ok bool) {
- exe, err := os.Executable()
- if err == nil {
- // Homebrew uses a lot of symlinks, so we need to get back to the actual location
- // to be able to use the heuristic below.
- if sym, err := filepath.EvalSymlinks(exe); err == nil {
- exe = sym
- }
-
- root := filepath.Dir(filepath.Dir(exe))
- // Heuristic: check if "encore-go" and "runtime" dirs exist in this location.
- _, err1 := os.Stat(filepath.Join(root, "encore-go"))
- _, err2 := os.Stat(filepath.Join(root, "runtime"))
- if err1 == nil && err2 == nil {
- return root, true
- }
- }
- return "", false
-}
diff --git a/cli/internal/gosym/pclntab.go b/cli/internal/gosym/pclntab.go
new file mode 100644
index 0000000000..fafad1c81a
--- /dev/null
+++ b/cli/internal/gosym/pclntab.go
@@ -0,0 +1,561 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Line tables
+ */
+
+package gosym
+
+import (
+ "bytes"
+ "encoding/binary"
+ "sync"
+)
+
+// version of the pclntab
+type version int
+
+const (
+ verUnknown version = iota
+ ver11
+ ver12
+ ver116
+)
+
+// A LineTable is a data structure mapping program counters to line numbers.
+//
+// In Go 1.1 and earlier, each function (represented by a Func) had its own LineTable,
+// and the line number corresponded to a numbering of all source lines in the
+// program, across all files. That absolute line number would then have to be
+// converted separately to a file name and line number within the file.
+//
+// In Go 1.2, the format of the data changed so that there is a single LineTable
+// for the entire program, shared by all Funcs, and there are no absolute line
+// numbers, just line numbers within specific files.
+//
+// For the most part, LineTable's methods should be treated as an internal
+// detail of the package; callers should use the methods on Table instead.
+type LineTable struct {
+ Data []byte
+ PC uint64
+ Line int
+
+ // This mutex is used to keep parsing of pclntab synchronous.
+ mu sync.Mutex
+
+ // Contains the version of the pclntab section.
+ version version
+
+ // Go 1.2/1.16 state
+ binary binary.ByteOrder
+ quantum uint32
+ ptrsize uint32
+ funcnametab []byte
+ cutab []byte
+ funcdata []byte
+ functab []byte
+ nfunctab uint32
+ filetab []byte
+ pctab []byte // points to the pctables.
+ nfiletab uint32
+ funcNames map[uint32]string // cache the function names
+ strings map[uint32]string // interned substrings of Data, keyed by offset
+ // fileMap varies depending on the version of the object file.
+ // For ver12, it maps the name to the index in the file table.
+ // For ver116, it maps the name to the offset in filetab.
+ fileMap map[string]uint32
+}
+
+// NOTE(rsc): This is wrong for GOARCH=arm, which uses a quantum of 4,
+// but we have no idea whether we're using arm or not. This only
+// matters in the old (pre-Go 1.2) symbol table format, so it's not worth
+// fixing.
+const oldQuantum = 1
+
+func (t *LineTable) parse(targetPC uint64, targetLine int) (b []byte, pc uint64, line int) {
+ // The PC/line table can be thought of as a sequence of
+ //  <pc update>* <line update>
+ // batches. Each update batch results in a (pc, line) pair,
+ // where line applies to every PC from pc up to but not
+ // including the pc of the next pair.
+ //
+ // Here we process each update individually, which simplifies
+ // the code, but makes the corner cases more confusing.
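+ //
+ // Each byte encodes one update, mirroring the switch below:
+ // 0 is followed by a 4-byte big-endian line delta; 1..64 adds the
+ // code to the line; 65..128 subtracts (code-64) from the line; each
+ // of those then advances pc by one quantum. Codes above 128 only
+ // advance pc, by quantum*(code-128).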
+ b, pc, line = t.Data, t.PC, t.Line
+ for pc <= targetPC && line != targetLine && len(b) > 0 {
+ code := b[0]
+ b = b[1:]
+ switch {
+ case code == 0:
+ if len(b) < 4 {
+ b = b[0:0]
+ break
+ }
+ val := binary.BigEndian.Uint32(b)
+ b = b[4:]
+ line += int(val)
+ case code <= 64:
+ line += int(code)
+ case code <= 128:
+ line -= int(code - 64)
+ default:
+ pc += oldQuantum * uint64(code-128)
+ continue
+ }
+ pc += oldQuantum
+ }
+ return b, pc, line
+}
+
+func (t *LineTable) slice(pc uint64) *LineTable {
+ data, pc, line := t.parse(pc, -1)
+ return &LineTable{Data: data, PC: pc, Line: line}
+}
+
+// PCToLine returns the line number for the given program counter.
+//
+// Deprecated: Use Table's PCToLine method instead.
+func (t *LineTable) PCToLine(pc uint64) int {
+ if t.isGo12() {
+ return t.go12PCToLine(pc, nil)
+ }
+ _, _, line := t.parse(pc, -1)
+ return line
+}
+
+// LineToPC returns the program counter for the given line number,
+// considering only program counters before maxpc.
+//
+// Deprecated: Use Table's LineToPC method instead.
+func (t *LineTable) LineToPC(line int, maxpc uint64) uint64 {
+ if t.isGo12() {
+ return 0
+ }
+ _, pc, line1 := t.parse(maxpc, line)
+ if line1 != line {
+ return 0
+ }
+ // Subtract quantum from PC to account for post-line increment
+ return pc - oldQuantum
+}
+
+// NewLineTable returns a new PC/line table
+// corresponding to the encoded data.
+// Text must be the start address of the
+// corresponding text segment.
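+//
+// Illustrative usage only (a sketch; assumes an ELF binary whose
+// .gopclntab and .text sections are present):
+//
+//	f, _ := elf.Open("/path/to/binary")
+//	pclndat, _ := f.Section(".gopclntab").Data()
+//	pcln := NewLineTable(pclndat, f.Section(".text").Addr)
+//	tab, _ := NewTable(nil, pcln)
+//	file, line, fn := tab.PCToLine(0x401000)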
+func NewLineTable(data []byte, text uint64) *LineTable {
+ return &LineTable{Data: data, PC: text, Line: 0, funcNames: make(map[uint32]string), strings: make(map[uint32]string)}
+}
+
+// Go 1.2 symbol table format.
+// See golang.org/s/go12symtab.
+//
+// A general note about the methods here: rather than try to avoid
+// index out of bounds errors, we trust Go to detect them, and then
+// we recover from the panics and treat them as indicative of a malformed
+// or incomplete table.
+//
+// The methods called by symtab.go, which begin with "go12" prefixes,
+// are expected to have that recovery logic.
+
+// isGo12 reports whether this is a Go 1.2 (or later) symbol table.
+func (t *LineTable) isGo12() bool {
+ t.parsePclnTab()
+ return t.version >= ver12
+}
+
+const go12magic = 0xfffffffb
+const go116magic = 0xfffffffa
+
+// uintptr returns the pointer-sized value encoded at b.
+// The pointer size is dictated by the table being read.
+func (t *LineTable) uintptr(b []byte) uint64 {
+ if t.ptrsize == 4 {
+ return uint64(t.binary.Uint32(b))
+ }
+ return t.binary.Uint64(b)
+}
+
+// parsePclnTab parses the pclntab, setting the version.
+func (t *LineTable) parsePclnTab() {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.version != verUnknown {
+ return
+ }
+
+ // Note that during this function, setting the version is the last thing we do.
+ // If we set the version too early, and parsing failed (likely as a panic on
+ // slice lookups), we'd have a mistaken version.
+ //
+ // Error paths through this code will default the version to 1.1.
+ t.version = ver11
+
+ defer func() {
+ // If we panic parsing, assume it's a Go 1.1 pclntab.
+ recover()
+ }()
+
+ // Check header: 4-byte magic, two zeros, pc quantum, pointer size.
+ if len(t.Data) < 16 || t.Data[4] != 0 || t.Data[5] != 0 ||
+ (t.Data[6] != 1 && t.Data[6] != 2 && t.Data[6] != 4) || // pc quantum
+ (t.Data[7] != 4 && t.Data[7] != 8) { // pointer size
+ return
+ }
+
+ var possibleVersion version
+ leMagic := binary.LittleEndian.Uint32(t.Data)
+ beMagic := binary.BigEndian.Uint32(t.Data)
+ switch {
+ case leMagic == go12magic:
+ t.binary, possibleVersion = binary.LittleEndian, ver12
+ case beMagic == go12magic:
+ t.binary, possibleVersion = binary.BigEndian, ver12
+ case leMagic == go116magic:
+ t.binary, possibleVersion = binary.LittleEndian, ver116
+ case beMagic == go116magic:
+ t.binary, possibleVersion = binary.BigEndian, ver116
+ default:
+ return
+ }
+
+ // quantum and ptrSize are the same between 1.2 and 1.16
+ t.quantum = uint32(t.Data[6])
+ t.ptrsize = uint32(t.Data[7])
+
+ switch possibleVersion {
+ case ver116:
+ t.nfunctab = uint32(t.uintptr(t.Data[8:]))
+ t.nfiletab = uint32(t.uintptr(t.Data[8+t.ptrsize:]))
+ offset := t.uintptr(t.Data[8+2*t.ptrsize:])
+ t.funcnametab = t.Data[offset:]
+ offset = t.uintptr(t.Data[8+3*t.ptrsize:])
+ t.cutab = t.Data[offset:]
+ offset = t.uintptr(t.Data[8+4*t.ptrsize:])
+ t.filetab = t.Data[offset:]
+ offset = t.uintptr(t.Data[8+5*t.ptrsize:])
+ t.pctab = t.Data[offset:]
+ offset = t.uintptr(t.Data[8+6*t.ptrsize:])
+ t.funcdata = t.Data[offset:]
+ t.functab = t.Data[offset:]
+ functabsize := t.nfunctab*2*t.ptrsize + t.ptrsize
+ t.functab = t.functab[:functabsize]
+ case ver12:
+ t.nfunctab = uint32(t.uintptr(t.Data[8:]))
+ t.funcdata = t.Data
+ t.funcnametab = t.Data
+ t.functab = t.Data[8+t.ptrsize:]
+ t.pctab = t.Data
+ functabsize := t.nfunctab*2*t.ptrsize + t.ptrsize
+ fileoff := t.binary.Uint32(t.functab[functabsize:])
+ t.functab = t.functab[:functabsize]
+ t.filetab = t.Data[fileoff:]
+ t.nfiletab = t.binary.Uint32(t.filetab)
+ t.filetab = t.filetab[:t.nfiletab*4]
+ default:
+ panic("unreachable")
+ }
+ t.version = possibleVersion
+}
+
+// go12Funcs returns a slice of Funcs derived from the Go 1.2 pcln table.
+func (t *LineTable) go12Funcs() []Func {
+ // Assume it is malformed and return nil on error.
+ defer func() {
+ recover()
+ }()
+
+ n := len(t.functab) / int(t.ptrsize) / 2
+ funcs := make([]Func, n)
+ for i := range funcs {
+ f := &funcs[i]
+ f.Entry = t.uintptr(t.functab[2*i*int(t.ptrsize):])
+ f.End = t.uintptr(t.functab[(2*i+2)*int(t.ptrsize):])
+ info := t.funcdata[t.uintptr(t.functab[(2*i+1)*int(t.ptrsize):]):]
+ f.LineTable = t
+ f.FrameSize = int(t.binary.Uint32(info[t.ptrsize+2*4:]))
+ f.Sym = &Sym{
+ Value: f.Entry,
+ Type: 'T',
+ Name: t.funcName(t.binary.Uint32(info[t.ptrsize:])),
+ GoType: 0,
+ Func: f,
+ }
+ }
+ return funcs
+}
+
+// findFunc returns the func corresponding to the given program counter.
+func (t *LineTable) findFunc(pc uint64) []byte {
+ if pc < t.uintptr(t.functab) || pc >= t.uintptr(t.functab[len(t.functab)-int(t.ptrsize):]) {
+ return nil
+ }
+
+ // The function table is a list of 2*nfunctab+1 uintptrs,
+ // alternating program counters and offsets to func structures.
+ f := t.functab
+ nf := t.nfunctab
+ for nf > 0 {
+ m := nf / 2
+ fm := f[2*t.ptrsize*m:]
+ if t.uintptr(fm) <= pc && pc < t.uintptr(fm[2*t.ptrsize:]) {
+ return t.funcdata[t.uintptr(fm[t.ptrsize:]):]
+ } else if pc < t.uintptr(fm) {
+ nf = m
+ } else {
+ f = f[(m+1)*2*t.ptrsize:]
+ nf -= m + 1
+ }
+ }
+ return nil
+}
+
+// readvarint reads, removes, and returns a varint from *pp.
+func (t *LineTable) readvarint(pp *[]byte) uint32 {
+ var v, shift uint32
+ p := *pp
+ for shift = 0; ; shift += 7 {
+ b := p[0]
+ p = p[1:]
+ v |= (uint32(b) & 0x7F) << shift
+ if b&0x80 == 0 {
+ break
+ }
+ }
+ *pp = p
+ return v
+}
+
+// funcName returns the name of the function found at off.
+func (t *LineTable) funcName(off uint32) string {
+ if s, ok := t.funcNames[off]; ok {
+ return s
+ }
+ i := bytes.IndexByte(t.funcnametab[off:], 0)
+ s := string(t.funcnametab[off : off+uint32(i)])
+ t.funcNames[off] = s
+ return s
+}
+
+// stringFrom returns the NUL-terminated Go string at offset off within arr.
+func (t *LineTable) stringFrom(arr []byte, off uint32) string {
+ if s, ok := t.strings[off]; ok {
+ return s
+ }
+ i := bytes.IndexByte(arr[off:], 0)
+ s := string(arr[off : off+uint32(i)])
+ t.strings[off] = s
+ return s
+}
+
+// string returns a Go string found at off.
+func (t *LineTable) string(off uint32) string {
+ return t.stringFrom(t.funcdata, off)
+}
+
+// step advances to the next pc, value pair in the encoded table.
+func (t *LineTable) step(p *[]byte, pc *uint64, val *int32, first bool) bool {
+ uvdelta := t.readvarint(p)
+ if uvdelta == 0 && !first {
+ return false
+ }
+ pcdelta := t.readvarint(p) * t.quantum
+ *pc += uint64(pcdelta)
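+ // uvdelta is zig-zag encoded: even values are non-negative deltas
+ // (0, 1, 2, ...) and odd values are negative (1 -> -1, 3 -> -2, ...)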
+ *val += int32(-(uvdelta & 1) ^ (uvdelta >> 1))
+ return true
+}
+
+// pcvalue reports the value associated with the target pc.
+// off is the offset to the beginning of the pc-value table,
+// and entry is the start PC for the corresponding function.
+func (t *LineTable) pcvalue(off uint32, entry, targetpc uint64, fn *Func) int32 {
+ p := t.pctab[off:]
+
+ val := int32(-1)
+ pc := entry
+ for t.step(&p, &pc, &val, pc == entry) {
+ if targetpc < pc {
+ return val
+ }
+ }
+ return -1
+}
+
+// findFileLine scans one function in the binary looking for a
+// program counter in the given file on the given line.
+// It does so by running the pc-value tables mapping program counter
+// to file number. Since most functions come from a single file, these
+// are usually short and quick to scan. If a file match is found, then the
+// code goes to the expense of looking for a simultaneous line number match.
+func (t *LineTable) findFileLine(entry uint64, filetab, linetab uint32, filenum, line int32, cutab []byte) uint64 {
+ if filetab == 0 || linetab == 0 {
+ return 0
+ }
+
+ fp := t.pctab[filetab:]
+ fl := t.pctab[linetab:]
+ fileVal := int32(-1)
+ filePC := entry
+ lineVal := int32(-1)
+ linePC := entry
+ fileStartPC := filePC
+ for t.step(&fp, &filePC, &fileVal, filePC == entry) {
+ fileIndex := fileVal
+ if t.version == ver116 {
+ fileIndex = int32(t.binary.Uint32(cutab[fileVal*4:]))
+ }
+ if fileIndex == filenum && fileStartPC < filePC {
+ // fileIndex is in effect starting at fileStartPC up to
+ // but not including filePC, and it's the file we want.
+ // Run the PC table looking for a matching line number
+ // or until we reach filePC.
+ lineStartPC := linePC
+ for linePC < filePC && t.step(&fl, &linePC, &lineVal, linePC == entry) {
+ // lineVal is in effect until linePC, and lineStartPC < filePC.
+ if lineVal == line {
+ if fileStartPC <= lineStartPC {
+ return lineStartPC
+ }
+ if fileStartPC < linePC {
+ return fileStartPC
+ }
+ }
+ lineStartPC = linePC
+ }
+ }
+ fileStartPC = filePC
+ }
+ return 0
+}
+
+// go12PCToLine maps program counter to line number for the Go 1.2 pcln table.
+func (t *LineTable) go12PCToLine(pc uint64, fn *Func) (line int) {
+ defer func() {
+ if recover() != nil {
+ line = -1
+ }
+ }()
+
+ f := t.findFunc(pc)
+ if f == nil {
+ return -1
+ }
+ entry := t.uintptr(f)
+ if pc > entry {
+ pc--
+ }
+ linetab := t.binary.Uint32(f[t.ptrsize+5*4:])
+ return int(t.pcvalue(linetab, entry, pc, fn))
+}
+
+// go12PCToFile maps program counter to file name for the Go 1.2 pcln table.
+func (t *LineTable) go12PCToFile(pc uint64, fn *Func) (file string) {
+ defer func() {
+ if recover() != nil {
+ file = ""
+ }
+ }()
+
+ f := t.findFunc(pc)
+ if f == nil {
+ return ""
+ }
+ entry := t.uintptr(f)
+ if pc > entry {
+ pc--
+ }
+ filetab := t.binary.Uint32(f[t.ptrsize+4*4:])
+ fno := t.pcvalue(filetab, entry, pc, fn)
+ if t.version == ver12 {
+ if fno <= 0 {
+ return ""
+ }
+ return t.string(t.binary.Uint32(t.filetab[4*fno:]))
+ }
+ // Go ≥ 1.16
+ if fno < 0 { // 0 is valid for ≥ 1.16
+ return ""
+ }
+ cuoff := t.binary.Uint32(f[t.ptrsize+7*4:])
+ if fnoff := t.binary.Uint32(t.cutab[(cuoff+uint32(fno))*4:]); fnoff != ^uint32(0) {
+ return t.stringFrom(t.filetab, fnoff)
+ }
+ return ""
+}
+
+// go12LineToPC maps a (file, line) pair to a program counter for the Go 1.2/1.16 pcln table.
+func (t *LineTable) go12LineToPC(file string, line int) (pc uint64) {
+ defer func() {
+ if recover() != nil {
+ pc = 0
+ }
+ }()
+
+ t.initFileMap()
+ filenum, ok := t.fileMap[file]
+ if !ok {
+ return 0
+ }
+
+ // Scan all functions.
+ // If this turns out to be a bottleneck, we could build a map[int32][]int32
+ // mapping file number to a list of functions with code from that file.
+ var cutab []byte
+ for i := uint32(0); i < t.nfunctab; i++ {
+ f := t.funcdata[t.uintptr(t.functab[2*t.ptrsize*i+t.ptrsize:]):]
+ entry := t.uintptr(f)
+ filetab := t.binary.Uint32(f[t.ptrsize+4*4:])
+ linetab := t.binary.Uint32(f[t.ptrsize+5*4:])
+ if t.version == ver116 {
+ cuoff := t.binary.Uint32(f[t.ptrsize+7*4:]) * 4
+ cutab = t.cutab[cuoff:]
+ }
+ pc := t.findFileLine(entry, filetab, linetab, int32(filenum), int32(line), cutab)
+ if pc != 0 {
+ return pc
+ }
+ }
+ return 0
+}
+
+// initFileMap initializes the map from file name to file number.
+func (t *LineTable) initFileMap() {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ if t.fileMap != nil {
+ return
+ }
+ m := make(map[string]uint32)
+
+ if t.version == ver12 {
+ for i := uint32(1); i < t.nfiletab; i++ {
+ s := t.string(t.binary.Uint32(t.filetab[4*i:]))
+ m[s] = i
+ }
+ } else {
+ var pos uint32
+ for i := uint32(0); i < t.nfiletab; i++ {
+ s := t.stringFrom(t.filetab, pos)
+ m[s] = pos
+ pos += uint32(len(s) + 1)
+ }
+ }
+ t.fileMap = m
+}
+
+// go12MapFiles adds to m a key for every file in the Go 1.2 LineTable.
+// Every key maps to obj. That's not a very interesting map, but it provides
+// a way for callers to obtain the list of files in the program.
+func (t *LineTable) go12MapFiles(m map[string]*Obj, obj *Obj) {
+ defer func() {
+ recover()
+ }()
+
+ t.initFileMap()
+ for file := range t.fileMap {
+ m[file] = obj
+ }
+}
diff --git a/cli/internal/gosym/symtab.go b/cli/internal/gosym/symtab.go
new file mode 100644
index 0000000000..e0d4a9b8dc
--- /dev/null
+++ b/cli/internal/gosym/symtab.go
@@ -0,0 +1,723 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gosym implements access to the Go symbol
+// and line number tables embedded in Go binaries generated
+// by the gc compilers.
+package gosym
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+/*
+ * Symbols
+ */
+
+// A Sym represents a single symbol table entry.
+type Sym struct {
+ Value uint64
+ Type byte
+ Name string
+ GoType uint64
+ // If this symbol is a function symbol, the corresponding Func
+ Func *Func
+}
+
+// Static reports whether this symbol is static (not visible outside its file).
+func (s *Sym) Static() bool { return s.Type >= 'a' }
+
+// PackageName returns the package part of the symbol name,
+// or the empty string if there is none.
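+// For example, "encr.dev/cli/daemon.New" yields "encr.dev/cli/daemon".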
+func (s *Sym) PackageName() string {
+ name := s.Name
+
+ // A prefix of "type." and "go." is a compiler-generated symbol that doesn't belong to any package.
+ // See variable reservedimports in cmd/compile/internal/gc/subr.go
+ if strings.HasPrefix(name, "go.") || strings.HasPrefix(name, "type.") {
+ return ""
+ }
+
+ pathend := strings.LastIndex(name, "/")
+ if pathend < 0 {
+ pathend = 0
+ }
+
+ if i := strings.Index(name[pathend:], "."); i != -1 {
+ return name[:pathend+i]
+ }
+ return ""
+}
+
+// ReceiverName returns the receiver type name of this symbol,
+// or the empty string if there is none.
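+// For example, "encr.dev/cli/daemon.(*Server).GenClient" yields "(*Server)".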
+func (s *Sym) ReceiverName() string {
+ pathend := strings.LastIndex(s.Name, "/")
+ if pathend < 0 {
+ pathend = 0
+ }
+ l := strings.Index(s.Name[pathend:], ".")
+ r := strings.LastIndex(s.Name[pathend:], ".")
+ if l == -1 || r == -1 || l == r {
+ return ""
+ }
+ return s.Name[pathend+l+1 : pathend+r]
+}
+
+// BaseName returns the symbol name without the package or receiver name.
+func (s *Sym) BaseName() string {
+ if i := strings.LastIndex(s.Name, "."); i != -1 {
+ return s.Name[i+1:]
+ }
+ return s.Name
+}
+
+// A Func collects information about a single function.
+type Func struct {
+ Entry uint64
+ *Sym
+ End uint64
+ Params []*Sym // nil for Go 1.3 and later binaries
+ Locals []*Sym // nil for Go 1.3 and later binaries
+ FrameSize int
+ LineTable *LineTable
+ Obj *Obj
+}
+
+// An Obj represents a collection of functions in a symbol table.
+//
+// The exact method of division of a binary into separate Objs is an internal detail
+// of the symbol table format.
+//
+// In early versions of Go each source file became a different Obj.
+//
+// In Go 1 and Go 1.1, each package produced one Obj for all Go sources
+// and one Obj per C source file.
+//
+// In Go 1.2, there is a single Obj for the entire program.
+type Obj struct {
+ // Funcs is a list of functions in the Obj.
+ Funcs []Func
+
+ // In Go 1.1 and earlier, Paths is a list of symbols corresponding
+ // to the source file names that produced the Obj.
+ // In Go 1.2, Paths is nil.
+ // Use the keys of Table.Files to obtain a list of source files.
+ Paths []Sym // meta
+}
+
+/*
+ * Symbol tables
+ */
+
+// Table represents a Go symbol table. It stores all of the
+// symbols decoded from the program and provides methods to translate
+// between symbols, names, and addresses.
+type Table struct {
+ Syms []Sym // nil for Go 1.3 and later binaries
+ Funcs []Func
+ Files map[string]*Obj // for Go 1.2 and later all files map to one Obj
+ Objs []Obj // for Go 1.2 and later only one Obj in slice
+
+ go12line *LineTable // Go 1.2 line number table
+}
+
+type sym struct {
+ value uint64
+ gotype uint64
+ typ byte
+ name []byte
+}
+
+var (
+ littleEndianSymtab = []byte{0xFD, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00}
+ bigEndianSymtab = []byte{0xFF, 0xFF, 0xFF, 0xFD, 0x00, 0x00, 0x00}
+ oldLittleEndianSymtab = []byte{0xFE, 0xFF, 0xFF, 0xFF, 0x00, 0x00}
+)
+
+func walksymtab(data []byte, fn func(sym) error) error {
+ if len(data) == 0 { // missing symtab is okay
+ return nil
+ }
+ var order binary.ByteOrder = binary.BigEndian
+ newTable := false
+ switch {
+ case bytes.HasPrefix(data, oldLittleEndianSymtab):
+ // Same as Go 1.0, but little endian.
+ // Format was used during interim development between Go 1.0 and Go 1.1.
+ // Should not be widespread, but easy to support.
+ data = data[6:]
+ order = binary.LittleEndian
+ case bytes.HasPrefix(data, bigEndianSymtab):
+ newTable = true
+ case bytes.HasPrefix(data, littleEndianSymtab):
+ newTable = true
+ order = binary.LittleEndian
+ }
+ var ptrsz int
+ if newTable {
+ if len(data) < 8 {
+ return &DecodingError{len(data), "unexpected EOF", nil}
+ }
+ ptrsz = int(data[7])
+ if ptrsz != 4 && ptrsz != 8 {
+ return &DecodingError{7, "invalid pointer size", ptrsz}
+ }
+ data = data[8:]
+ }
+ var s sym
+ p := data
+ for len(p) >= 4 {
+ var typ byte
+ if newTable {
+ // Symbol type, value, Go type.
+ typ = p[0] & 0x3F
+ wideValue := p[0]&0x40 != 0
+ goType := p[0]&0x80 != 0
+ if typ < 26 {
+ typ += 'A'
+ } else {
+ typ += 'a' - 26
+ }
+ s.typ = typ
+ p = p[1:]
+ if wideValue {
+ if len(p) < ptrsz {
+ return &DecodingError{len(data), "unexpected EOF", nil}
+ }
+ // fixed-width value
+ if ptrsz == 8 {
+ s.value = order.Uint64(p[0:8])
+ p = p[8:]
+ } else {
+ s.value = uint64(order.Uint32(p[0:4]))
+ p = p[4:]
+ }
+ } else {
+ // varint value
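+ // (base-128, least-significant group first; the high bit of
+ // each byte marks a continuation)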
+ s.value = 0
+ shift := uint(0)
+ for len(p) > 0 && p[0]&0x80 != 0 {
+ s.value |= uint64(p[0]&0x7F) << shift
+ shift += 7
+ p = p[1:]
+ }
+ if len(p) == 0 {
+ return &DecodingError{len(data), "unexpected EOF", nil}
+ }
+ s.value |= uint64(p[0]) << shift
+ p = p[1:]
+ }
+ if goType {
+ if len(p) < ptrsz {
+ return &DecodingError{len(data), "unexpected EOF", nil}
+ }
+ // fixed-width go type
+ if ptrsz == 8 {
+ s.gotype = order.Uint64(p[0:8])
+ p = p[8:]
+ } else {
+ s.gotype = uint64(order.Uint32(p[0:4]))
+ p = p[4:]
+ }
+ }
+ } else {
+ // Value, symbol type.
+ s.value = uint64(order.Uint32(p[0:4]))
+ if len(p) < 5 {
+ return &DecodingError{len(data), "unexpected EOF", nil}
+ }
+ typ = p[4]
+ if typ&0x80 == 0 {
+ return &DecodingError{len(data) - len(p) + 4, "bad symbol type", typ}
+ }
+ typ &^= 0x80
+ s.typ = typ
+ p = p[5:]
+ }
+
+ // Name.
+ var i int
+ var nnul int
+ for i = 0; i < len(p); i++ {
+ if p[i] == 0 {
+ nnul = 1
+ break
+ }
+ }
+ switch typ {
+ case 'z', 'Z':
+ p = p[i+nnul:]
+ for i = 0; i+2 <= len(p); i += 2 {
+ if p[i] == 0 && p[i+1] == 0 {
+ nnul = 2
+ break
+ }
+ }
+ }
+ if len(p) < i+nnul {
+ return &DecodingError{len(data), "unexpected EOF", nil}
+ }
+ s.name = p[0:i]
+ i += nnul
+ p = p[i:]
+
+ if !newTable {
+ if len(p) < 4 {
+ return &DecodingError{len(data), "unexpected EOF", nil}
+ }
+ // Go type.
+ s.gotype = uint64(order.Uint32(p[:4]))
+ p = p[4:]
+ }
+ fn(s)
+ }
+ return nil
+}
+
+// NewTable decodes the Go symbol table (the ".gosymtab" section in ELF),
+// returning an in-memory representation.
+// Starting with Go 1.3, the Go symbol table no longer includes symbol data.
+func NewTable(symtab []byte, pcln *LineTable) (*Table, error) {
+ var n int
+ err := walksymtab(symtab, func(s sym) error {
+ n++
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ var t Table
+ if pcln.isGo12() {
+ t.go12line = pcln
+ }
+ fname := make(map[uint16]string)
+ t.Syms = make([]Sym, 0, n)
+ nf := 0
+ nz := 0
+ lasttyp := uint8(0)
+ err = walksymtab(symtab, func(s sym) error {
+ n := len(t.Syms)
+ t.Syms = t.Syms[0 : n+1]
+ ts := &t.Syms[n]
+ ts.Type = s.typ
+ ts.Value = s.value
+ ts.GoType = s.gotype
+ switch s.typ {
+ default:
+ // rewrite name to use . instead of · (c2 b7)
+ w := 0
+ b := s.name
+ for i := 0; i < len(b); i++ {
+ if b[i] == 0xc2 && i+1 < len(b) && b[i+1] == 0xb7 {
+ i++
+ b[i] = '.'
+ }
+ b[w] = b[i]
+ w++
+ }
+ ts.Name = string(s.name[0:w])
+ case 'z', 'Z':
+ if lasttyp != 'z' && lasttyp != 'Z' {
+ nz++
+ }
+ for i := 0; i < len(s.name); i += 2 {
+ eltIdx := binary.BigEndian.Uint16(s.name[i : i+2])
+ elt, ok := fname[eltIdx]
+ if !ok {
+ return &DecodingError{-1, "bad filename code", eltIdx}
+ }
+ if n := len(ts.Name); n > 0 && ts.Name[n-1] != '/' {
+ ts.Name += "/"
+ }
+ ts.Name += elt
+ }
+ }
+ switch s.typ {
+ case 'T', 't', 'L', 'l':
+ nf++
+ case 'f':
+ fname[uint16(s.value)] = ts.Name
+ }
+ lasttyp = s.typ
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ t.Funcs = make([]Func, 0, nf)
+ t.Files = make(map[string]*Obj)
+
+ var obj *Obj
+ if t.go12line != nil {
+ // Put all functions into one Obj.
+ t.Objs = make([]Obj, 1)
+ obj = &t.Objs[0]
+ t.go12line.go12MapFiles(t.Files, obj)
+ } else {
+ t.Objs = make([]Obj, 0, nz)
+ }
+
+ // Count text symbols and attach frame sizes, parameters, and
+ // locals to them. Also, find object file boundaries.
+ lastf := 0
+ for i := 0; i < len(t.Syms); i++ {
+ sym := &t.Syms[i]
+ switch sym.Type {
+ case 'Z', 'z': // path symbol
+ if t.go12line != nil {
+ // Go 1.2 binaries have the file information elsewhere. Ignore.
+ break
+ }
+ // Finish the current object
+ if obj != nil {
+ obj.Funcs = t.Funcs[lastf:]
+ }
+ lastf = len(t.Funcs)
+
+ // Start new object
+ n := len(t.Objs)
+ t.Objs = t.Objs[0 : n+1]
+ obj = &t.Objs[n]
+
+ // Count & copy path symbols
+ var end int
+ for end = i + 1; end < len(t.Syms); end++ {
+ if c := t.Syms[end].Type; c != 'Z' && c != 'z' {
+ break
+ }
+ }
+ obj.Paths = t.Syms[i:end]
+ i = end - 1 // loop will i++
+
+ // Record file names
+ depth := 0
+ for j := range obj.Paths {
+ s := &obj.Paths[j]
+ if s.Name == "" {
+ depth--
+ } else {
+ if depth == 0 {
+ t.Files[s.Name] = obj
+ }
+ depth++
+ }
+ }
+
+ case 'T', 't', 'L', 'l': // text symbol
+ if n := len(t.Funcs); n > 0 {
+ t.Funcs[n-1].End = sym.Value
+ }
+ if sym.Name == "runtime.etext" || sym.Name == "etext" {
+ continue
+ }
+
+ // Count parameter and local (auto) syms
+ var np, na int
+ var end int
+ countloop:
+ for end = i + 1; end < len(t.Syms); end++ {
+ switch t.Syms[end].Type {
+ case 'T', 't', 'L', 'l', 'Z', 'z':
+ break countloop
+ case 'p':
+ np++
+ case 'a':
+ na++
+ }
+ }
+
+ // Fill in the function symbol
+ n := len(t.Funcs)
+ t.Funcs = t.Funcs[0 : n+1]
+ fn := &t.Funcs[n]
+ sym.Func = fn
+ fn.Params = make([]*Sym, 0, np)
+ fn.Locals = make([]*Sym, 0, na)
+ fn.Sym = sym
+ fn.Entry = sym.Value
+ fn.Obj = obj
+ if t.go12line != nil {
+ // All functions share the same line table.
+ // It knows how to narrow down to a specific
+ // function quickly.
+ fn.LineTable = t.go12line
+ } else if pcln != nil {
+ fn.LineTable = pcln.slice(fn.Entry)
+ pcln = fn.LineTable
+ }
+ for j := i; j < end; j++ {
+ s := &t.Syms[j]
+ switch s.Type {
+ case 'm':
+ fn.FrameSize = int(s.Value)
+ case 'p':
+ n := len(fn.Params)
+ fn.Params = fn.Params[0 : n+1]
+ fn.Params[n] = s
+ case 'a':
+ n := len(fn.Locals)
+ fn.Locals = fn.Locals[0 : n+1]
+ fn.Locals[n] = s
+ }
+ }
+ i = end - 1 // loop will i++
+ }
+ }
+
+ if t.go12line != nil && nf == 0 {
+ t.Funcs = t.go12line.go12Funcs()
+ }
+ if obj != nil {
+ obj.Funcs = t.Funcs[lastf:]
+ }
+ return &t, nil
+}
+
+// PCToFunc returns the function containing the program counter pc,
+// or nil if there is no such function.
+func (t *Table) PCToFunc(pc uint64) *Func {
+ funcs := t.Funcs
+ for len(funcs) > 0 {
+ m := len(funcs) / 2
+ fn := &funcs[m]
+ switch {
+ case pc < fn.Entry:
+ funcs = funcs[0:m]
+ case fn.Entry <= pc && pc < fn.End:
+ return fn
+ default:
+ funcs = funcs[m+1:]
+ }
+ }
+ return nil
+}
+
+// PCToLine looks up line number information for a program counter.
+// If there is no information, it returns fn == nil.
+func (t *Table) PCToLine(pc uint64) (file string, line int, fn *Func) {
+ if fn = t.PCToFunc(pc); fn == nil {
+ return
+ }
+ if t.go12line != nil {
+ file = t.go12line.go12PCToFile(pc, fn)
+ line = t.go12line.go12PCToLine(pc, fn)
+ } else {
+ file, line = fn.Obj.lineFromAline(fn.LineTable.PCToLine(pc))
+ }
+ return
+}
+
+// LineToPC looks up the first program counter on the given line in
+// the named file. It returns UnknownPathError or UnknownLineError if
+// there is an error looking up this line.
+func (t *Table) LineToPC(file string, line int) (pc uint64, fn *Func, err error) {
+ obj, ok := t.Files[file]
+ if !ok {
+ return 0, nil, UnknownFileError(file)
+ }
+
+ if t.go12line != nil {
+ pc := t.go12line.go12LineToPC(file, line)
+ if pc == 0 {
+ return 0, nil, &UnknownLineError{file, line}
+ }
+ return pc, t.PCToFunc(pc), nil
+ }
+
+ abs, err := obj.alineFromLine(file, line)
+ if err != nil {
+ return
+ }
+ for i := range obj.Funcs {
+ f := &obj.Funcs[i]
+ pc := f.LineTable.LineToPC(abs, f.End)
+ if pc != 0 {
+ return pc, f, nil
+ }
+ }
+ return 0, nil, &UnknownLineError{file, line}
+}
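+
+// Illustrative usage sketch (hypothetical file name and line number):
+//
+//    pc, fn, err := tbl.LineToPC("main.go", 42)
+//    if err != nil {
+//        // err is an UnknownFileError or a *UnknownLineError.
+//    }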
+
+// LookupSym returns the text, data, or bss symbol with the given name,
+// or nil if no such symbol is found.
+func (t *Table) LookupSym(name string) *Sym {
+ // TODO(austin) Maybe make a map
+ for i := range t.Syms {
+ s := &t.Syms[i]
+ switch s.Type {
+ case 'T', 't', 'L', 'l', 'D', 'd', 'B', 'b':
+ if s.Name == name {
+ return s
+ }
+ }
+ }
+ return nil
+}
+
+// LookupFunc returns the function with the given name,
+// or nil if no such function is found.
+func (t *Table) LookupFunc(name string) *Func {
+ for i := range t.Funcs {
+ f := &t.Funcs[i]
+ if f.Sym.Name == name {
+ return f
+ }
+ }
+ return nil
+}
+
+// SymByAddr returns the text, data, or bss symbol starting at the given address.
+func (t *Table) SymByAddr(addr uint64) *Sym {
+ for i := range t.Syms {
+ s := &t.Syms[i]
+ switch s.Type {
+ case 'T', 't', 'L', 'l', 'D', 'd', 'B', 'b':
+ if s.Value == addr {
+ return s
+ }
+ }
+ }
+ return nil
+}
+
+/*
+ * Object files
+ */
+
+// This is legacy code for Go 1.1 and earlier, which used the
+// Plan 9 format for pc-line tables. This code was never quite
+// correct. It's probably very close, and it's usually correct, but
+// we never quite found all the corner cases.
+//
+// Go 1.2 and later use a simpler format, documented at golang.org/s/go12symtab.
+
+func (o *Obj) lineFromAline(aline int) (string, int) {
+ type stackEnt struct {
+ path string
+ start int
+ offset int
+ prev *stackEnt
+ }
+
+ noPath := &stackEnt{"", 0, 0, nil}
+ tos := noPath
+
+pathloop:
+ for _, s := range o.Paths {
+ val := int(s.Value)
+ switch {
+ case val > aline:
+ break pathloop
+
+ case val == 1:
+ // Start a new stack
+ tos = &stackEnt{s.Name, val, 0, noPath}
+
+ case s.Name == "":
+ // Pop
+ if tos == noPath {
+ return "", 0
+ }
+ tos.prev.offset += val - tos.start
+ tos = tos.prev
+
+ default:
+ // Push
+ tos = &stackEnt{s.Name, val, 0, tos}
+ }
+ }
+
+ if tos == noPath {
+ return "", 0
+ }
+ return tos.path, aline - tos.start - tos.offset + 1
+}
+
+func (o *Obj) alineFromLine(path string, line int) (int, error) {
+ if line < 1 {
+ return 0, &UnknownLineError{path, line}
+ }
+
+ for i, s := range o.Paths {
+ // Find this path
+ if s.Name != path {
+ continue
+ }
+
+ // Find this line at this stack level
+ depth := 0
+ var incstart int
+ line += int(s.Value)
+ pathloop:
+ for _, s := range o.Paths[i:] {
+ val := int(s.Value)
+ switch {
+ case depth == 1 && val >= line:
+ return line - 1, nil
+
+ case s.Name == "":
+ depth--
+ if depth == 0 {
+ break pathloop
+ } else if depth == 1 {
+ line += val - incstart
+ }
+
+ default:
+ if depth == 1 {
+ incstart = val
+ }
+ depth++
+ }
+ }
+ return 0, &UnknownLineError{path, line}
+ }
+ return 0, UnknownFileError(path)
+}
+
+/*
+ * Errors
+ */
+
+// UnknownFileError represents a failure to find the specific file in
+// the symbol table.
+type UnknownFileError string
+
+func (e UnknownFileError) Error() string { return "unknown file: " + string(e) }
+
+// UnknownLineError represents a failure to map a line to a program
+// counter, either because the line is beyond the bounds of the file
+// or because there is no code on the given line.
+type UnknownLineError struct {
+ File string
+ Line int
+}
+
+func (e *UnknownLineError) Error() string {
+ return "no code at " + e.File + ":" + strconv.Itoa(e.Line)
+}
+
+// DecodingError represents an error during the decoding of
+// the symbol table.
+type DecodingError struct {
+ off int
+ msg string
+ val interface{}
+}
+
+func (e *DecodingError) Error() string {
+ msg := e.msg
+ if e.val != nil {
+ msg += fmt.Sprintf(" '%v'", e.val)
+ }
+ msg += fmt.Sprintf(" at byte %#x", e.off)
+ return msg
+}
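+
+// Illustrative sketch of telling these error types apart, assuming an err
+// returned by LineToPC:
+//
+//    var lineErr *UnknownLineError
+//    if errors.As(err, &lineErr) {
+//        // No code exists at the requested file:line.
+//    }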
diff --git a/cli/internal/gosym/symtab_test.go b/cli/internal/gosym/symtab_test.go
new file mode 100644
index 0000000000..b6ed8f554c
--- /dev/null
+++ b/cli/internal/gosym/symtab_test.go
@@ -0,0 +1,58 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gosym
+
+import (
+ "fmt"
+ "testing"
+)
+
+func assertString(t *testing.T, dsc, out, tgt string) {
+ if out != tgt {
+ t.Fatalf("Expected: %q Actual: %q for %s", tgt, out, dsc)
+ }
+}
+
+func TestStandardLibPackage(t *testing.T) {
+ s1 := Sym{Name: "io.(*LimitedReader).Read"}
+ s2 := Sym{Name: "io.NewSectionReader"}
+ assertString(t, fmt.Sprintf("package of %q", s1.Name), s1.PackageName(), "io")
+ assertString(t, fmt.Sprintf("package of %q", s2.Name), s2.PackageName(), "io")
+ assertString(t, fmt.Sprintf("receiver of %q", s1.Name), s1.ReceiverName(), "(*LimitedReader)")
+ assertString(t, fmt.Sprintf("receiver of %q", s2.Name), s2.ReceiverName(), "")
+}
+
+func TestStandardLibPathPackage(t *testing.T) {
+ s1 := Sym{Name: "debug/gosym.(*LineTable).PCToLine"}
+ s2 := Sym{Name: "debug/gosym.NewTable"}
+ assertString(t, fmt.Sprintf("package of %q", s1.Name), s1.PackageName(), "debug/gosym")
+ assertString(t, fmt.Sprintf("package of %q", s2.Name), s2.PackageName(), "debug/gosym")
+ assertString(t, fmt.Sprintf("receiver of %q", s1.Name), s1.ReceiverName(), "(*LineTable)")
+ assertString(t, fmt.Sprintf("receiver of %q", s2.Name), s2.ReceiverName(), "")
+}
+
+func TestRemotePackage(t *testing.T) {
+ s1 := Sym{Name: "github.com/docker/doc.ker/pkg/mflag.(*FlagSet).PrintDefaults"}
+ s2 := Sym{Name: "github.com/docker/doc.ker/pkg/mflag.PrintDefaults"}
+ assertString(t, fmt.Sprintf("package of %q", s1.Name), s1.PackageName(), "github.com/docker/doc.ker/pkg/mflag")
+ assertString(t, fmt.Sprintf("package of %q", s2.Name), s2.PackageName(), "github.com/docker/doc.ker/pkg/mflag")
+ assertString(t, fmt.Sprintf("receiver of %q", s1.Name), s1.ReceiverName(), "(*FlagSet)")
+ assertString(t, fmt.Sprintf("receiver of %q", s2.Name), s2.ReceiverName(), "")
+}
+
+func TestIssue29551(t *testing.T) {
+ symNames := []string{
+ "type..eq.[9]debug/elf.intName",
+ "type..hash.debug/elf.ProgHeader",
+ "type..eq.runtime._panic",
+ "type..hash.struct { runtime.gList; runtime.n int32 }",
+ "go.(*struct { sync.Mutex; math/big.table [64]math/big",
+ }
+
+ for _, symName := range symNames {
+ s := Sym{Name: symName}
+ assertString(t, fmt.Sprintf("package of %q", s.Name), s.PackageName(), "")
+ }
+}
diff --git a/cli/internal/gosym/testdata/main.go b/cli/internal/gosym/testdata/main.go
new file mode 100644
index 0000000000..b7702184cd
--- /dev/null
+++ b/cli/internal/gosym/testdata/main.go
@@ -0,0 +1,10 @@
+package main
+
+func linefrompc()
+func pcfromline()
+
+func main() {
+ // Prevent GC of our test symbols
+ linefrompc()
+ pcfromline()
+}
diff --git a/cli/internal/gosym/testdata/pclinetest.h b/cli/internal/gosym/testdata/pclinetest.h
new file mode 100644
index 0000000000..156c0b87b0
--- /dev/null
+++ b/cli/internal/gosym/testdata/pclinetest.h
@@ -0,0 +1,9 @@
+// +build ignore
+
+// Empty include file to generate z symbols
+
+
+
+
+
+// EOF
diff --git a/cli/internal/gosym/testdata/pclinetest.s b/cli/internal/gosym/testdata/pclinetest.s
new file mode 100644
index 0000000000..53461cdfc1
--- /dev/null
+++ b/cli/internal/gosym/testdata/pclinetest.s
@@ -0,0 +1,48 @@
+TEXT ·linefrompc(SB),4,$0 // Each byte stores its line delta
+BYTE $2;
+BYTE $1;
+BYTE $1; BYTE $0;
+BYTE $1; BYTE $0; BYTE $0;
+BYTE $1; BYTE $0; BYTE $0; BYTE $0; BYTE $0;
+BYTE $1; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0;
+BYTE $1; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0;
+BYTE $1; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0;
+BYTE $1; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0;
+BYTE $1; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0;
+BYTE $1; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0;
+BYTE $1; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0;
+BYTE $1; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0;
+BYTE $1; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0;
+BYTE $1;
+BYTE $1;
+BYTE $1; BYTE $0;
+BYTE $1; BYTE $0; BYTE $0;
+BYTE $1; BYTE $0; BYTE $0; BYTE $0; BYTE $0;
+BYTE $1; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0;
+BYTE $1; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0;
+BYTE $1; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0;
+BYTE $1; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0; BYTE $0;
+#include "pclinetest.h"
+BYTE $2;
+#include "pclinetest.h"
+BYTE $2;
+BYTE $255;
+
+TEXT ·pcfromline(SB),4,$0 // Each record stores its line delta, then n, then n more bytes
+BYTE $32; BYTE $0;
+BYTE $1; BYTE $1; BYTE $0;
+BYTE $1; BYTE $0;
+
+BYTE $2; BYTE $4; BYTE $0; BYTE $0; BYTE $0; BYTE $0;
+
+
+#include "pclinetest.h"
+BYTE $4; BYTE $0;
+
+
+BYTE $3; BYTE $3; BYTE $0; BYTE $0; BYTE $0;
+#include "pclinetest.h"
+
+
+BYTE $4; BYTE $3; BYTE $0; BYTE $0; BYTE $0;
+BYTE $255;
diff --git a/cli/internal/gosym/testdata/pcln115.gz b/cli/internal/gosym/testdata/pcln115.gz
new file mode 100644
index 0000000000..db5c3d48ad
Binary files /dev/null and b/cli/internal/gosym/testdata/pcln115.gz differ
diff --git a/cli/internal/jsonrpc2/wire.go b/cli/internal/jsonrpc2/wire.go
index d805f57937..0c985cad60 100644
--- a/cli/internal/jsonrpc2/wire.go
+++ b/cli/internal/jsonrpc2/wire.go
@@ -33,7 +33,7 @@ var (
ErrServerOverloaded = NewError(-32000, "JSON RPC overloaded")
)
-// wireRequest is sent to a server to represent a Call or Notify operaton.
+// wireRequest is sent to a server to represent a Call or Notify operation.
type wireRequest struct {
// VersionTag is always encoded as the string "2.0"
VersionTag wireVersionTag `json:"jsonrpc"`
@@ -137,9 +137,9 @@ func (id ID) Format(f fmt.State, r rune) {
}
switch {
case id.name != "":
- fmt.Fprintf(f, strF, id.name)
+ _, _ = fmt.Fprintf(f, strF, id.name)
default:
- fmt.Fprintf(f, numF, id.number)
+ _, _ = fmt.Fprintf(f, numF, id.number)
}
}
diff --git a/cli/internal/login/deviceauth.go b/cli/internal/login/deviceauth.go
new file mode 100644
index 0000000000..a9ff68acf3
--- /dev/null
+++ b/cli/internal/login/deviceauth.go
@@ -0,0 +1,182 @@
+package login
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/base64"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/briandowns/spinner"
+ "github.com/fatih/color"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/cli/internal/browser"
+ "encr.dev/cli/internal/platform"
+ "encr.dev/internal/conf"
+ "encr.dev/internal/env"
+)
+
+// DeviceAuth logs in the user with the device auth flow.
+func DeviceAuth() (*conf.Config, error) {
+ // Generate PKCE challenge.
+ randData, err := genRandData()
+ if err != nil {
+ return nil, fmt.Errorf("could not generate random data: %v", err)
+ }
+ codeVerifier := base64.RawURLEncoding.EncodeToString([]byte(randData))
+ challengeHash := sha256.Sum256([]byte(codeVerifier))
+ codeChallenge := base64.RawURLEncoding.EncodeToString(challengeHash[:])
+
+ resp, err := platform.BeginDeviceAuthFlow(context.Background(), platform.BeginAuthorizationFlowParams{
+ CodeChallenge: codeChallenge,
+ ClientID: "encore_cli",
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ var (
+ bold = color.New(color.Bold)
+ faint = color.New(color.Faint)
+ )
+
+ fmt.Printf("Your pairing code is %s\n", bold.Sprint(resp.UserCode))
+ faint.Println("This pairing code verifies your authentication with Encore.")
+
+ inputCh := make(chan struct{}, 1)
+
+ spin := spinner.New(spinner.CharSets[14], 100*time.Millisecond)
+ spin.Prefix = "Waiting for login confirmation..."
+
+ if !env.IsSSH() && browser.CanOpen() {
+ fmt.Fprintf(os.Stdout, "Press Enter to open the browser or visit %s (^C to quit)\n",
+ resp.VerificationURI)
+
+ // Asynchronously wait for input.
+ w := waitForEnterPress()
+ defer w.Stop()
+ go func() {
+ select {
+ case <-w.pressed:
+ inputCh <- struct{}{}
+ case <-w.quit:
+ }
+ }()
+
+ } else {
+ // On Windows we need a proper \r\n newline to ensure the URL detection doesn't extend to the next line.
+ // fmt.Fprintln and family prints just a simple \n, so don't use that.
+ fmt.Fprintf(os.Stdout, "To authenticate with Encore, please go to: %s%s", resp.VerificationURI, cmdutil.Newline)
+ spin.Start()
+ }
+
+ resultCh := make(chan deviceAuthResult, 1)
+ go pollForDeviceAuthResult(codeVerifier, resp, resultCh)
+
+ for {
+ select {
+ case <-inputCh:
+ // The user hit Enter; show a spinner and try to open the browser.
+ spin.Start()
+ if !browser.Open(resp.VerificationURI) {
+ spin.FinalMSG = fmt.Sprintf("Failed to open browser, please go to %s manually.", resp.VerificationURI)
+ spin.Stop()
+
+ // Create a new spinner so the message above stays around.
+ spin = spinner.New(spinner.CharSets[14], 100*time.Millisecond)
+ spin.Prefix = "Waiting for login confirmation..."
+ spin.Start()
+ }
+
+ case res := <-resultCh:
+ if res.err != nil {
+ spin.FinalMSG = fmt.Sprintf("Failed to log in: %v", res.err)
+ spin.Stop()
+ return nil, res.err
+ }
+
+ spin.Stop()
+ return res.cfg, nil
+ }
+ }
+}
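+
+// Illustrative usage sketch from a caller's perspective:
+//
+//    cfg, err := login.DeviceAuth()
+//    if err != nil {
+//        return err
+//    }
+//    fmt.Println("logged in as", cfg.Email)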
+
+type deviceAuthResult struct {
+ cfg *conf.Config
+ err error
+}
+
+func pollForDeviceAuthResult(codeVerifier string, data *platform.BeginAuthorizationFlowResponse, resultCh chan<- deviceAuthResult) {
+PollLoop:
+ for {
+ interval := data.Interval
+ if interval <= 0 {
+ interval = 5
+ }
+ time.Sleep(time.Duration(interval) * time.Second)
+
+ resp, err := platform.PollDeviceAuthFlow(context.Background(), platform.PollDeviceAuthFlowParams{
+ DeviceCode: data.DeviceCode,
+ CodeVerifier: codeVerifier,
+ })
+ if err != nil {
+ if e, ok := err.(platform.Error); ok {
+ switch e.Code {
+ case "auth_pending":
+ // Not yet authorized, continue polling.
+ continue PollLoop
+
+ case "rate_limited":
+ // Spurious error; sleep a bit extra before retrying to be safe.
+ time.Sleep(5 * time.Second)
+ continue PollLoop
+ }
+ }
+ resultCh <- deviceAuthResult{err: err}
+ return
+ }
+
+ cfg := &conf.Config{Token: *resp.Token, Actor: resp.Actor, Email: resp.Email, AppSlug: resp.AppSlug}
+ resultCh <- deviceAuthResult{cfg: cfg}
+ return
+ }
+}
+
+type enterPressWaiter struct {
+ quit chan struct{} // close to abort the waiter
+ pressed chan struct{} // closed when enter has been pressed
+ runDone chan struct{} // closed when the run goroutine has exited
+}
+
+func waitForEnterPress() *enterPressWaiter {
+ w := &enterPressWaiter{
+ quit: make(chan struct{}),
+ pressed: make(chan struct{}, 1),
+ runDone: make(chan struct{}),
+ }
+ go w.run()
+ return w
+}
+
+func (w *enterPressWaiter) run() {
+ defer close(w.runDone)
+ _, _ = fmt.Fscanln(os.Stdin)
+ select {
+ case w.pressed <- struct{}{}:
+ case <-w.quit:
+ }
+}
+
+func (w *enterPressWaiter) Stop() {
+ close(w.quit)
+ _ = os.Stdin.SetReadDeadline(time.Now()) // interrupt the pending read
+
+ // Asynchronously wait for the run goroutine to exit before
+ // we reset the read deadline.
+ go func() {
+ <-w.runDone
+ _ = os.Stdin.SetReadDeadline(time.Time{}) // reset read deadline
+ }()
+}
diff --git a/cli/internal/login/interactive.go b/cli/internal/login/interactive.go
new file mode 100644
index 0000000000..7da93251bb
--- /dev/null
+++ b/cli/internal/login/interactive.go
@@ -0,0 +1,125 @@
+package login
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "os"
+ "time"
+
+ "github.com/briandowns/spinner"
+
+ "encr.dev/cli/cmd/encore/cmdutil"
+ "encr.dev/cli/internal/browser"
+ "encr.dev/cli/internal/platform"
+ "encr.dev/internal/conf"
+ "encr.dev/internal/env"
+)
+
+// interactive keeps the state of an ongoing login flow.
+type interactive struct {
+ result chan *conf.Config // Successful logins are sent on this
+
+ state string
+ challenge string
+ pubKey, privKey string
+ srv *http.Server
+ ln net.Listener
+}
+
+// Interactive begins an interactive login attempt.
+func Interactive() (*conf.Config, error) {
+ // Generate initial request state
+ state, err1 := genRandData()
+ challenge, err2 := genRandData()
+ if err1 != nil || err2 != nil {
+ return nil, fmt.Errorf("could not generate random data: %v/%v", err1, err2)
+ }
+
+ challengeHash := sha256.Sum256([]byte(challenge))
+ encodedChallenge := base64.RawURLEncoding.EncodeToString(challengeHash[:])
+
+ ln, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return nil, err
+ }
+ defer ln.Close()
+ addr := ln.Addr().(*net.TCPAddr)
+ url := fmt.Sprintf("http://localhost:%d/oauth", addr.Port)
+
+ req := &platform.CreateOAuthSessionParams{
+ Challenge: encodedChallenge,
+ State: state,
+ RedirectURL: url,
+ }
+ authURL, err := platform.CreateOAuthSession(context.Background(), req)
+ if err != nil {
+ return nil, err
+ }
+
+ flow := &interactive{
+ result: make(chan *conf.Config),
+
+ state: state,
+ challenge: challenge,
+ }
+ flow.srv = &http.Server{Handler: http.HandlerFunc(flow.oauthHandler)}
+ go flow.srv.Serve(ln)
+
+ spin := spinner.New(spinner.CharSets[14], 100*time.Millisecond)
+
+ if env.IsSSH() || !browser.Open(authURL) {
+ // On Windows we need a proper \r\n newline to ensure the URL detection doesn't extend to the next line.
+ // fmt.Fprintln and family prints just a simple \n, so don't use that.
+ fmt.Fprint(os.Stdout, "Log in to Encore using your browser here: ", authURL, cmdutil.Newline)
+ } else {
+ spin.Prefix = "Waiting for login to complete "
+ spin.Start()
+ defer spin.Stop()
+ }
+
+ select {
+ case res := <-flow.result:
+ return res, nil
+ case <-time.After(10 * time.Minute):
+ return nil, errors.New("Timed out waiting for login confirmation")
+ }
+}
+
+func (f *interactive) oauthHandler(w http.ResponseWriter, req *http.Request) {
+ if req.URL.Path != "/oauth" {
+ http.Error(w, "Not Found", http.StatusNotFound)
+ return
+ }
+ code := req.FormValue("code")
+ reqState := req.FormValue("state")
+ if code == "" || reqState != f.state {
+ http.Error(w, "Bad Request (bad code or state)", http.StatusBadRequest)
+ return
+ }
+
+ params := &platform.ExchangeOAuthTokenParams{
+ Challenge: f.challenge,
+ Code: code,
+ }
+ resp, err := platform.ExchangeOAuthToken(req.Context(), params)
+ if err != nil {
+ http.Error(w, "Could not exchange token: "+err.Error(), http.StatusBadGateway)
+ return
+ } else if resp.Token == nil {
+ http.Error(w, "Invalid response: missing token", http.StatusBadGateway)
+ return
+ }
+
+ conf := &conf.Config{Token: *resp.Token, Actor: resp.Actor, Email: resp.Email, AppSlug: resp.AppSlug}
+ select {
+ case f.result <- conf:
+ http.Redirect(w, req, "https://www.encore.dev/auth/success", http.StatusFound)
+ default:
+ http.Error(w, "Unexpected request", http.StatusBadRequest)
+ }
+}
diff --git a/cli/internal/login/login.go b/cli/internal/login/login.go
index e110611a43..85a5c152d2 100644
--- a/cli/internal/login/login.go
+++ b/cli/internal/login/login.go
@@ -2,146 +2,39 @@
package login
import (
- "bytes"
+ "context"
"crypto/rand"
- "crypto/sha256"
"encoding/base64"
- "encoding/json"
"fmt"
- "io"
- "net"
- "net/http"
- "encr.dev/cli/internal/conf"
- "encr.dev/cli/internal/wgtunnel"
- "golang.org/x/oauth2"
+ "encr.dev/cli/internal/browser"
+ "encr.dev/cli/internal/platform"
+ "encr.dev/internal/conf"
+ "encr.dev/internal/env"
)
-// Flow keeps the state of an ongoing login flow.
-type Flow struct {
- URL string // Local URL the flow is listening on
- LoginCh chan *conf.Config // Successful logins are sent on this
-
- state string
- challenge string
- pubKey, privKey string
- srv *http.Server
- ln net.Listener
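+// DecideFlow picks the appropriate login flow: the device auth flow when
+// running over SSH or when no browser can be opened, and the interactive
+// browser-based flow otherwise.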
+func DecideFlow() (*conf.Config, error) {
+ if env.IsSSH() || !browser.CanOpen() {
+ return DeviceAuth()
+ }
+ return Interactive()
}
-// Begin begins a new login attempt.
-func Begin() (f *Flow, err error) {
- // Generate initial request state
- state, err1 := genRandData()
- challenge, err2 := genRandData()
- if err1 != nil || err2 != nil {
- return nil, fmt.Errorf("could not generate random data: %v/%v", err1, err2)
+func WithAuthKey(authKey string) (*conf.Config, error) {
+ params := &platform.ExchangeAuthKeyParams{
+ AuthKey: authKey,
}
-
- challengeHash := sha256.Sum256([]byte(challenge))
- encodedChallenge := base64.RawURLEncoding.EncodeToString(challengeHash[:])
-
- ln, err := net.Listen("tcp", "127.0.0.1:0")
+ resp, err := platform.ExchangeAuthKey(context.Background(), params)
if err != nil {
return nil, err
- }
- defer func() {
- if err != nil {
- ln.Close()
- }
- }()
- addr := ln.Addr().(*net.TCPAddr)
- url := fmt.Sprintf("http://localhost:%d/oauth", addr.Port)
-
- req := map[string]string{
- "challenge": encodedChallenge,
- "state": state,
- "redirect_url": url,
- }
- var resp struct {
- OK bool
- Error struct {
- Code string
- Detail interface{}
- }
- Data struct {
- AuthURL string `json:"auth_url"`
- }
- }
- if err := apiReq("/login/oauth:create-session", req, &resp); err != nil {
- return nil, fmt.Errorf("could not contact auth server: %v", err)
- } else if !resp.OK {
- return nil, fmt.Errorf("auth failure: code: %s", resp.Error.Code)
- }
-
- flow := &Flow{
- URL: resp.Data.AuthURL,
- LoginCh: make(chan *conf.Config),
-
- state: state,
- challenge: challenge,
- }
- flow.srv = &http.Server{Handler: http.HandlerFunc(flow.oauthHandler)}
- go flow.srv.Serve(ln)
- return flow, nil
-}
-
-// Close closes the login flow.
-func (f *Flow) Close() {
- f.srv.Close()
-}
-
-func (f *Flow) oauthHandler(w http.ResponseWriter, req *http.Request) {
- if req.URL.Path != "/oauth" {
- http.Error(w, "Not Found", http.StatusNotFound)
- return
- }
- code := req.FormValue("code")
- reqState := req.FormValue("state")
- if code == "" || reqState != f.state {
- http.Error(w, "Bad Request (bad code or state)", http.StatusBadRequest)
- return
+ } else if resp.Token == nil {
+ return nil, fmt.Errorf("invalid response: missing token")
}
- reqData := map[string]string{
- "challenge": f.challenge,
- "code": code,
- }
- var resp struct {
- OK bool
- Error struct {
- Code string
- Detail interface{}
- }
- Data struct {
- Email string
- Token *oauth2.Token
- }
- }
- if err := apiReq("/login/oauth:exchange-token", reqData, &resp); err != nil {
- http.Error(w, "Could not exchange token: "+err.Error(), http.StatusBadGateway)
- return
- } else if !resp.OK {
- http.Error(w, "Could not exchange token: "+resp.Error.Code, http.StatusBadGateway)
- return
- } else if resp.Data.Token == nil {
- http.Error(w, "Invalid response: missing token", http.StatusBadGateway)
- return
- }
+ tok := resp.Token
+ conf := &conf.Config{Token: *tok, Actor: resp.Actor, AppSlug: resp.AppSlug}
- tok := resp.Data.Token
- conf := &conf.Config{Token: *tok, Email: resp.Data.Email}
- pub, priv, err := wgtunnel.GenKey()
- if err == nil {
- conf.WireGuard.PublicKey = pub.String()
- conf.WireGuard.PrivateKey = priv.String()
- }
- select {
- case f.LoginCh <- conf:
- http.Redirect(w, req, "https://www.encore.dev/auth/success", http.StatusFound)
- default:
- http.Error(w, "Unexpected request", http.StatusBadRequest)
- }
+ return conf, nil
}
func genRandData() (string, error) {
@@ -152,26 +45,3 @@ func genRandData() (string, error) {
}
return base64.RawURLEncoding.EncodeToString(data), nil
}
-
-func apiReq(endpoint string, reqParams, respParams interface{}) error {
- var body io.Reader
- if reqParams != nil {
- reqData, err := json.Marshal(reqParams)
- if err != nil {
- return err
- }
- body = bytes.NewReader(reqData)
- }
-
- resp, err := http.Post("https://api.encore.dev"+endpoint, "application/json", body)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
-
- if respParams != nil {
- err := json.NewDecoder(resp.Body).Decode(respParams)
- return err
- }
- return nil
-}
diff --git a/cli/internal/manifest/manifest.go b/cli/internal/manifest/manifest.go
new file mode 100644
index 0000000000..9c52d81c5a
--- /dev/null
+++ b/cli/internal/manifest/manifest.go
@@ -0,0 +1,128 @@
+// Package manifest reads and writes Encore app manifests.
+package manifest
+
+import (
+ "crypto/rand"
+ "encoding/base32"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+
+ "encr.dev/pkg/xos"
+)
+
+// Manifest represents the persisted manifest for
+// an Encore application. It is not intended to be committed to
+// source control.
+type Manifest struct {
+ // AppID is a unique identifier for the app.
+ // It uses the encore.dev app slug if the app
+ // is linked, and is otherwise a randomly generated id.
+ AppID string `json:"appID,omitempty"`
+
+ // LocalID is a unique id for the app that's only used locally.
+ // It is randomly generated on first use.
+ LocalID string `json:"local_id"`
+
+ // Tutorial is set to the name of the tutorial the user is currently on or empty.
+ Tutorial string `json:"tutorial"`
+}
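+
+// An on-disk manifest might look like the following (illustrative values;
+// an empty AppID is omitted entirely):
+//
+//    {"local_id":"7kq2m","tutorial":""}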
+
+// SetTutorial sets the tutorial field on the app manifest
+func SetTutorial(appRoot string, tutorial string) (err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("read/create manifest: %v", err)
+ }
+ }()
+
+ var man Manifest
+
+ // Use the existing manifest if we have one.
+ cfgPath := filepath.Join(appRoot, ".encore", "manifest.json")
+ if data, err := os.ReadFile(cfgPath); err != nil && !errors.Is(err, fs.ErrNotExist) {
+ return err
+ } else if err == nil {
+ err = json.Unmarshal(data, &man)
+ if err != nil {
+ return err
+ }
+ }
+
+ man.Tutorial = tutorial
+
+ // Write it back.
+ out, _ := json.Marshal(&man)
+ if err := os.MkdirAll(filepath.Dir(cfgPath), 0755); err != nil {
+ return err
+ } else if err := xos.WriteFile(cfgPath, out, 0644); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ReadOrCreate reads the manifest for the app rooted at appRoot.
+// If it doesn't exist it creates it first.
+func ReadOrCreate(appRoot string) (mf *Manifest, err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("read/create manifest: %v", err)
+ }
+ }()
+
+ var man Manifest
+
+ // Use the existing manifest if we have one.
+ cfgPath := filepath.Join(appRoot, ".encore", "manifest.json")
+ if data, err := os.ReadFile(cfgPath); err != nil && !errors.Is(err, fs.ErrNotExist) {
+ return nil, err
+ } else if err == nil {
+ err = json.Unmarshal(data, &man)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Generate a local ID if we don't have one.
+ if man.LocalID == "" {
+ // If we have a legacy AppID, migrate that over to the local id.
+ if man.AppID != "" {
+ man.LocalID = man.AppID
+ man.AppID = ""
+ } else {
+ id, err := genID()
+ if err != nil {
+ return nil, err
+ }
+ man.LocalID = id
+ }
+ }
+
+ // Write it back.
+ out, _ := json.Marshal(&man)
+ if err := os.MkdirAll(filepath.Dir(cfgPath), 0755); err != nil {
+ return nil, err
+ } else if err := xos.WriteFile(cfgPath, out, 0644); err != nil {
+ return nil, err
+ }
+ return &man, nil
+}
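+
+// Illustrative usage sketch (hypothetical appRoot):
+//
+//    man, err := manifest.ReadOrCreate(appRoot)
+//    if err != nil {
+//        return err
+//    }
+//    log.Printf("app local id: %s", man.LocalID)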
+
+const encodeStr = "23456789abcdefghikmnopqrstuvwxyz"
+
+var encoding = base32.NewEncoding(encodeStr).WithPadding(base32.NoPadding)
+
+// genID generates a random local ID.
+//
+// Note: the generated ID deliberately contains no hyphen; the absence of a
+// hyphen is what identifies a local ID as opposed to a platform ID.
+func genID() (string, error) {
+ var data [3]byte
+ if _, err := rand.Read(data[:]); err != nil {
+ return "", err
+ }
+ return encoding.EncodeToString(data[:]), nil
+}
diff --git a/cli/internal/onboarding/onboarding.go b/cli/internal/onboarding/onboarding.go
new file mode 100644
index 0000000000..46d0a8f0af
--- /dev/null
+++ b/cli/internal/onboarding/onboarding.go
@@ -0,0 +1,92 @@
+package onboarding
+
+import (
+ "encoding/json"
+ "errors"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "time"
+
+ "encr.dev/pkg/xos"
+)
+
+type Event struct {
+ time.Time
+}
+
+type State struct {
+ FirstRun Event `json:"first_run"`
+ DeployHint Event `json:"deploy_hint"`
+ EventMap map[string]*Event `json:"carousel"`
+}
+
+func (e *State) Property(prop string) *Event {
+ if e.EventMap == nil {
+ e.EventMap = map[string]*Event{}
+ }
+ _, ok := e.EventMap[prop]
+ if !ok {
+ e.EventMap[prop] = &Event{}
+ }
+ return e.EventMap[prop]
+}
+
+func (e *Event) IsSet() bool {
+ return !e.IsZero()
+}
+
+func (e *Event) Set() bool {
+ if !e.IsSet() {
+ e.Time = time.Now()
+ return true
+ }
+ return false
+}
+
+func Load() (*State, error) {
+ cfg := &State{EventMap: map[string]*Event{}}
+ path, err := configPath()
+ if err != nil {
+ return cfg, err
+ }
+
+ data, err := os.ReadFile(path)
+ if err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ err = nil
+ }
+ return cfg, err
+ }
+ err = json.Unmarshal(data, &cfg)
+ if err != nil {
+ return cfg, err
+ }
+
+ if cfg.FirstRun.IsSet() && time.Since(cfg.FirstRun.Time) > 14*24*time.Hour {
+ cfg.Property("carousel").Set()
+ }
+ return cfg, err
+}
+
+func (cfg *State) Write() error {
+ path, err := configPath()
+ if err != nil {
+ return err
+ }
+ data, err := json.Marshal(cfg)
+ if err != nil {
+ return err
+ } else if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ return err
+ }
+ return xos.WriteFile(path, data, 0644)
+}
+
+func configPath() (string, error) {
+ dir, err := os.UserConfigDir()
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(dir, "encore", "onboarding.json"), nil
+}
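+
+// Illustrative usage sketch:
+//
+//    state, _ := onboarding.Load()
+//    if state.DeployHint.Set() {
+//        // First time the deploy hint fires; persist the timestamp.
+//        _ = state.Write()
+//    }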
diff --git a/cli/internal/platform/api.go b/cli/internal/platform/api.go
new file mode 100644
index 0000000000..aa47fbd5cf
--- /dev/null
+++ b/cli/internal/platform/api.go
@@ -0,0 +1,180 @@
+package platform
+
+import (
+ "context"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "net/url"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/gorilla/websocket"
+
+ "encr.dev/pkg/fns"
+ metav1 "encr.dev/proto/encore/parser/meta/v1"
+)
+
+type CreateAppParams struct {
+ Name string
+ InitialSecrets map[string]string
+ AppRootDir string
+}
+
+type App struct {
+ ID string `json:"eid"`
+ LegacyID string `json:"id"`
+ Slug string `json:"slug"`
+ Name string `json:"name"`
+ Description string `json:"description"` // can be blank
+ MainBranch *string `json:"main_branch"` // nil if not set
+}
+
+type Rollout struct {
+ ID string `json:"id"`
+ EnvName string `json:"env_name"`
+}
+
+type Env struct {
+ ID string `json:"id"`
+ Slug string `json:"slug"`
+ Type string `json:"type"`
+ Cloud string `json:"cloud"`
+}
+
+func CreateApp(ctx context.Context, p *CreateAppParams) (*App, error) {
+ var resp App
+ err := call(ctx, "POST", "/apps", p, &resp, true)
+ return &resp, err
+}
+
+func Deploy(ctx context.Context, appSlug, env, sha, branch string) (*Rollout, error) {
+ var resp Rollout
+ err := call(
+ ctx,
+ "POST",
+ fmt.Sprintf(
+ "/apps/%s/envs/%s/rollouts",
+ url.PathEscape(appSlug),
+ url.PathEscape(env),
+ ), map[string]string{
+ "sha": sha,
+ "branch": branch,
+ },
+ &resp,
+ true)
+ return &resp, err
+}
+
+func ListApps(ctx context.Context) ([]*App, error) {
+ var resp []*App
+ err := call(ctx, "GET", "/user/apps", nil, &resp, true)
+ return resp, err
+}
+
+func GetApp(ctx context.Context, appSlug string) (*App, error) {
+ var resp App
+ err := call(ctx, "GET", "/apps/"+url.PathEscape(appSlug), nil, &resp, true)
+ return &resp, err
+}
+
+func ListEnvs(ctx context.Context, appSlug string) ([]*Env, error) {
+ var resp []*Env
+ err := call(ctx, "GET", "/apps/"+url.PathEscape(appSlug)+"/envs", nil, &resp, true)
+ return resp, err
+}
+
+type SecretKind string
+
+const (
+ DevelopmentSecrets SecretKind = "development"
+ ProductionSecrets SecretKind = "production"
+)
+
+func GetLocalSecretValues(ctx context.Context, appSlug string, poll bool) (secrets map[string]string, err error) {
+ url := "/apps/" + url.PathEscape(appSlug) + "/secrets:values?kind=development"
+ if poll {
+ url += "&poll=true"
+ }
+ err = call(ctx, "GET", url, nil, &secrets, true)
+ return secrets, err
+}
+
+type SecretVersion struct {
+ Number int `json:"number"`
+ Created time.Time `json:"created"`
+}
+
+func SetAppSecret(ctx context.Context, appSlug string, kind SecretKind, secretKey, value string) (*SecretVersion, error) {
+ params := struct {
+ Kind SecretKind
+ Value string
+ }{Kind: kind, Value: value}
+ url := fmt.Sprintf("/apps/%s/secrets/%s/versions",
+ url.PathEscape(appSlug),
+ url.PathEscape(secretKey),
+ )
+ var resp SecretVersion
+ err := call(ctx, "POST", url, ¶ms, &resp, true)
+ return &resp, err
+}
+
+func GetEnvMeta(ctx context.Context, appSlug, envName string) (*metav1.Data, error) {
+ url := "/apps/" + url.PathEscape(appSlug) + "/envs/" + url.PathEscape(envName) + "/meta"
+ body, err := rawCall(ctx, "GET", url, nil, true)
+ if err != nil {
+ return nil, err
+ }
+ defer fns.CloseIgnore(body)
+ data, err := io.ReadAll(body)
+ if err != nil {
+ return nil, fmt.Errorf("platform.GetEnvMeta: %v", err)
+ }
+ var md metav1.Data
+ if err := proto.Unmarshal(data, &md); err != nil {
+ return nil, fmt.Errorf("platform.GetEnvMeta: %v", err)
+ }
+ return &md, nil
+}
+
+func DBConnect(ctx context.Context, appSlug, envSlug, dbName, role string, startupData []byte) (*websocket.Conn, error) {
+ path := escapef("/apps/%s/envs/%s/sqldb-connect/%s", appSlug, envSlug, dbName)
+ if role != "" {
+ path += "?role=" + url.QueryEscape(role)
+ }
+ return wsDial(ctx, path, true, map[string]string{
+ "X-Startup-Message": base64.StdEncoding.EncodeToString(startupData),
+ })
+}
+
+func EnvLogs(ctx context.Context, appSlug, envSlug string) (*websocket.Conn, error) {
+ path := escapef("/apps/%s/envs/%s/log", appSlug, envSlug)
+ return wsDial(ctx, path, true, nil)
+}
+
+func KubernetesClusters(ctx context.Context, appSlug string, envName string) (string, string, []KubeCtlConfig, error) {
+ type K8SClusterConfigs struct {
+ AppSlug string `json:"app"`
+ EnvName string `json:"env"`
+ Clusters []KubeCtlConfig `json:"clusters"`
+ }
+
+ var resp K8SClusterConfigs
+ err := call(ctx, "GET", "/apps/"+url.PathEscape(appSlug)+"/envs/"+url.PathEscape(envName)+"/k8s-clusters", nil, &resp, true)
+ return resp.AppSlug, resp.EnvName, resp.Clusters, err
+}
+
+type KubeCtlConfig struct {
+ EnvID string `json:"env_id"` // The ID of the environment
+ ResID string `json:"res_id"` // The ID of the cluster
+ Name string `json:"name"` // The name of the cluster
+ DefaultNamespace string `json:"namespace,omitempty"` // The default namespace for the cluster (if any)
+}
+
+func escapef(format string, args ...string) string {
+ ifaces := make([]interface{}, len(args))
+ for i, arg := range args {
+ ifaces[i] = url.PathEscape(arg)
+ }
+ return fmt.Sprintf(format, ifaces...)
+}
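+
+// For example (illustrative):
+//
+//    escapef("/apps/%s/envs/%s", "my app", "prod") // "/apps/my%20app/envs/prod"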
diff --git a/cli/internal/platform/client.go b/cli/internal/platform/client.go
new file mode 100644
index 0000000000..6de6ee91b3
--- /dev/null
+++ b/cli/internal/platform/client.go
@@ -0,0 +1,270 @@
+package platform
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "runtime"
+
+ "github.com/gorilla/websocket"
+ jsoniter "github.com/json-iterator/go"
+ "github.com/rs/zerolog/log"
+
+ "encr.dev/cli/internal/platform/gql"
+ "encr.dev/internal/conf"
+ "encr.dev/internal/version"
+ "encr.dev/pkg/fns"
+)
+
+type Error struct {
+ HTTPStatus string `json:"-"`
+ HTTPCode int `json:"-"`
+ Code string
+ Detail json.RawMessage
+}
+
+type ValidationDetails struct {
+ Field string `json:"field"`
+ Type string `json:"type"`
+}
+
+func (e Error) Error() string {
+ if len(e.Detail) > 0 {
+ return fmt.Sprintf("http %s: code=%s detail=%s", e.HTTPStatus, e.Code, e.Detail)
+ }
+ return fmt.Sprintf("http %s: code=%s", e.HTTPStatus, e.Code)
+}
+
+// call makes a call to the API endpoint given by method and path.
+// If reqParams and respParams are non-nil they are JSON-marshalled/unmarshalled.
+func call(ctx context.Context, method, path string, reqParams, respParams interface{}, auth bool) (err error) {
+ log.Trace().Interface("request", reqParams).Msgf("-> %s %s", method, path)
+ defer func() {
+ if err != nil {
+ log.Trace().Err(err).Msgf("<- ERR %s %s", method, path)
+ } else {
+ log.Trace().Interface("response", respParams).Msgf("<- OK %s %s", method, path)
+ }
+ }()
+
+ resp, err := sendPlatformReq(ctx, method, path, reqParams, auth)
+ if err != nil {
+ return err
+ }
+ defer fns.CloseIgnore(resp.Body)
+
+ var respStruct struct {
+ OK bool
+ Error Error
+ Data json.RawMessage
+ }
+ if err := json.NewDecoder(resp.Body).Decode(&respStruct); err != nil {
+ return fmt.Errorf("decode response: %v", err)
+ } else if !respStruct.OK {
+ e := respStruct.Error
+ e.HTTPCode = resp.StatusCode
+ e.HTTPStatus = resp.Status
+ return e
+ }
+
+ if respParams != nil {
+ if err := json.Unmarshal([]byte(respStruct.Data), respParams); err != nil {
+ return fmt.Errorf("decode response data: %v", err)
+ }
+ }
+ return nil
+}
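+
+// Platform responses are wrapped in an envelope of roughly this shape
+// (illustrative):
+//
+//    {"ok": true, "data": {...}}
+//    {"ok": false, "error": {"code": "some_error", "detail": null}}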
+
+type graphqlRequest struct {
+ Query string `json:"query"`
+ Variables map[string]interface{} `json:"variables,omitempty"`
+ OperationName string `json:"operationName,omitempty"`
+ Extensions map[string]interface{} `json:"extensions,omitempty"`
+}
+
+var graphqlDecoder = (func() jsoniter.API {
+ enc := jsoniter.Config{}.Froze()
+ enc.RegisterExtension(NewInterfaceCodecExtension())
+ return enc
+})()
+
+// graphqlCall makes a GraphQL request.
+func graphqlCall(ctx context.Context, req graphqlRequest, respData any, auth bool) (err error) {
+ log.Trace().Msgf("-> graphql %s: %+v", req.OperationName, req.Variables)
+ httpResp, err := sendPlatformReq(ctx, "POST", "/graphql", req, auth)
+ if err != nil {
+ return err
+ }
+ defer fns.CloseIgnore(httpResp.Body)
+
+ var respStruct struct {
+ Data json.RawMessage
+ Errors gql.ErrorList
+ Extensions map[string]interface{}
+ }
+ defer func() {
+ if err != nil {
+ log.Trace().Msgf("<- ERR graphql %s: %v", req.OperationName, err)
+ } else {
+ log.Trace().Msgf("<- OK graphql %s: %s", req.OperationName, respStruct.Data)
+ }
+ }()
+
+ if err := json.NewDecoder(httpResp.Body).Decode(&respStruct); err != nil {
+ return fmt.Errorf("decode response: %v", err)
+ } else if len(respStruct.Errors) > 0 {
+ return fmt.Errorf("graphql request failed: %w", respStruct.Errors)
+ }
+ if respData != nil {
+ if err := graphqlDecoder.NewDecoder(bytes.NewReader(respStruct.Data)).Decode(respData); err != nil {
+ return fmt.Errorf("decode graphql data: %v", err)
+ }
+ }
+ return nil
+}
+
+// rawCall makes a call to the API endpoint given by method and path.
+// It returns the raw HTTP response body on success; it must be closed by the caller.
+func rawCall(ctx context.Context, method, path string, reqParams interface{}, auth bool) (respBody io.ReadCloser, err error) {
+ log.Trace().Msgf("-> %s %s: %+v", method, path, reqParams)
+ defer func() {
+ if err != nil {
+ log.Trace().Msgf("<- ERR %s %s: %v", method, path, err)
+ } else {
+ log.Trace().Msgf("<- OK %s %s", method, path)
+ }
+ }()
+
+ resp, err := sendPlatformReq(ctx, method, path, reqParams, auth)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err != nil {
+ _ = resp.Body.Close()
+ }
+ }()
+
+ if resp.StatusCode >= 400 {
+ return nil, decodeErrorResponse(resp)
+ }
+
+ return resp.Body, nil
+}
+
+func sendPlatformReq(ctx context.Context, method, path string, reqParams any, auth bool) (httpResp *http.Response, err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("%s %s: %w", method, path, err)
+ }
+ }()
+
+ var body io.Reader
+ if reqParams != nil {
+ reqData, err := json.Marshal(reqParams)
+ if err != nil {
+ return nil, fmt.Errorf("marshal request: %v", err)
+ }
+ body = bytes.NewReader(reqData)
+ }
+
+ req, err := http.NewRequestWithContext(ctx, method, conf.APIBaseURL+path, body)
+ if err != nil {
+ return nil, err
+ }
+ if reqParams != nil {
+ req.Header.Set("Content-Type", "application/json")
+ }
+
+ return doPlatformReq(req, auth)
+}
+
+func doPlatformReq(req *http.Request, auth bool) (httpResp *http.Response, err error) {
+ // Add a very limited amount of information for diagnostics
+ req.Header.Set("User-Agent", "EncoreCLI/"+version.Version)
+ req.Header.Set("X-Encore-Version", version.Version)
+ req.Header.Set("X-Encore-GOOS", runtime.GOOS)
+ req.Header.Set("X-Encore-GOARCH", runtime.GOARCH)
+
+ client := http.DefaultClient
+ if auth {
+ client = conf.AuthClient
+ }
+ return client.Do(req)
+}
+
+// wsDial sets up a WebSocket connection to the API endpoint given by path.
+func wsDial(ctx context.Context, path string, auth bool, extraHeaders map[string]string) (ws *websocket.Conn, err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("WS %s: %w", path, err)
+ }
+ }()
+
+ // Add a very limited amount of information for diagnostics
+ header := make(http.Header)
+ header.Set("User-Agent", "EncoreCLI/"+version.Version)
+ header.Set("X-Encore-Version", version.Version)
+ header.Set("X-Encore-GOOS", runtime.GOOS)
+ header.Set("X-Encore-GOARCH", runtime.GOARCH)
+ header.Set("Origin", "http://encore-cli.local")
+ for k, v := range extraHeaders {
+ header.Set(k, v)
+ }
+
+ log.Trace().Msgf("-> %s %s: %+v", "WS", path, extraHeaders)
+ defer func() {
+ if err != nil {
+ log.Trace().Msgf("<- ERR %s %s: %v", "WS", path, err)
+ } else {
+ log.Trace().Msgf("<- OK %s %s", "WS", path)
+ }
+ }()
+
+ if auth {
+ tok, err := conf.DefaultTokenSource.Token()
+ if err != nil {
+ return nil, err
+ }
+ header.Set("Authorization", "Bearer "+tok.AccessToken)
+ }
+
+ url := conf.WSBaseURL + path
+ log.Trace().Msgf("-> %s %s: connecting to %s", "WS", path, url)
+ ws, httpResp, err := websocket.DefaultDialer.DialContext(ctx, url, header)
+ if httpResp != nil && httpResp.StatusCode >= 400 {
+ var respStruct struct {
+ OK bool
+ Error Error
+ Data json.RawMessage
+ }
+ if err := json.NewDecoder(httpResp.Body).Decode(&respStruct); err != nil {
+ return nil, fmt.Errorf("decode response: %v", err)
+ } else if !respStruct.OK {
+ e := respStruct.Error
+ e.HTTPCode = httpResp.StatusCode
+ e.HTTPStatus = httpResp.Status
+ return nil, e
+ }
+ }
+
+ return ws, err
+}
+
+func decodeErrorResponse(resp *http.Response) error {
+ var respStruct struct {
+ OK bool
+ Error Error
+ Data json.RawMessage
+ }
+ if err := json.NewDecoder(resp.Body).Decode(&respStruct); err != nil {
+ return fmt.Errorf("decode response: %v", err)
+ }
+ e := respStruct.Error
+ e.HTTPCode = resp.StatusCode
+ e.HTTPStatus = resp.Status
+ return e
+}
diff --git a/cli/internal/platform/gql/app.go b/cli/internal/platform/gql/app.go
new file mode 100644
index 0000000000..c41eae57df
--- /dev/null
+++ b/cli/internal/platform/gql/app.go
@@ -0,0 +1,32 @@
+package gql
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+type App struct {
+ ID string
+ Slug string
+}
+
+type Error struct {
+ Message string `json:"message"`
+ Path []string `json:"path"`
+ Extensions map[string]json.RawMessage `json:"extensions"`
+}
+
+func (e *Error) Error() string {
+ return e.Message
+}
+
+type ErrorList []*Error
+
+func (err ErrorList) Error() string {
+ if len(err) == 0 {
+ return "no errors"
+ } else if len(err) == 1 {
+ return err[0].Error()
+ }
+ return fmt.Sprintf("%s (and %d more errors)", err[0].Error(), len(err)-1)
+}
diff --git a/cli/internal/platform/gql/env.go b/cli/internal/platform/gql/env.go
new file mode 100644
index 0000000000..1706bc4c34
--- /dev/null
+++ b/cli/internal/platform/gql/env.go
@@ -0,0 +1,7 @@
+package gql
+
+type Env struct {
+ ID string
+ App *App
+ Name string
+}
diff --git a/cli/internal/platform/gql/secrets.go b/cli/internal/platform/gql/secrets.go
new file mode 100644
index 0000000000..15bc0369cb
--- /dev/null
+++ b/cli/internal/platform/gql/secrets.go
@@ -0,0 +1,58 @@
+package gql
+
+import (
+ "time"
+
+ "github.com/modern-go/reflect2"
+)
+
+type Secret struct {
+ Key string
+ Groups []*SecretGroup
+}
+
+type SecretGroup struct {
+ ID string
+ Key string
+ Selector []SecretSelector
+ Description string
+ Etag string
+ ArchivedAt *time.Time
+}
+
+type SecretSelector interface {
+ secretSelector()
+ String() string
+}
+
+type SecretSelectorEnvType struct {
+ Kind string
+}
+
+func (SecretSelectorEnvType) secretSelector() {}
+func (s *SecretSelectorEnvType) String() string { return "type:" + s.Kind }
+
+type SecretSelectorSpecificEnv struct {
+ Env *Env
+}
+
+func (s *SecretSelectorSpecificEnv) String() string { return "id:" + s.Env.ID }
+func (SecretSelectorSpecificEnv) secretSelector() {}
+
+type ConflictError struct {
+ AppID string
+ Key string
+ Conflicts []GroupConflict
+}
+
+type GroupConflict struct {
+ GroupID string
+ Conflicts []string
+}
+
+// TypeRegistry contains all the types that are used in the graphql schema,
+// in order to ensure they are not dead-code eliminated.
+var TypeRegistry = []reflect2.Type{
+ reflect2.TypeOf((*SecretSelectorEnvType)(nil)),
+ reflect2.TypeOf((*SecretSelectorSpecificEnv)(nil)),
+}
diff --git a/cli/internal/platform/jsoniter_ext.go b/cli/internal/platform/jsoniter_ext.go
new file mode 100644
index 0000000000..f881001831
--- /dev/null
+++ b/cli/internal/platform/jsoniter_ext.go
@@ -0,0 +1,75 @@
+package platform
+
+import (
+ "reflect"
+ "unsafe"
+
+ jsoniter "github.com/json-iterator/go"
+ "github.com/modern-go/reflect2"
+)
+
+// InterfaceCodecExtension decodes interface-typed fields by reading the
+// "__typename" field of the JSON object to determine the concrete type.
+type InterfaceCodecExtension struct {
+ jsoniter.DummyExtension
+}
+
+func NewInterfaceCodecExtension() *InterfaceCodecExtension {
+ return &InterfaceCodecExtension{}
+}
+
+func (e *InterfaceCodecExtension) DecorateDecoder(typ reflect2.Type, decoder jsoniter.ValDecoder) jsoniter.ValDecoder {
+ if typ.Kind() == reflect.Interface {
+ return &interfaceCodec{typ: typ, decoder: decoder}
+ }
+ return decoder
+}
+
+const gqlPackage = "encr.dev/cli/internal/platform/gql"
+
+type interfaceCodec struct {
+ typ reflect2.Type
+ decoder jsoniter.ValDecoder
+}
+
+// Decode decodes an interface value from an iterator.
+func (codec *interfaceCodec) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+ // If it's not an object value, fall back to the default decoder.
+ if iter.WhatIsNext() != jsoniter.ObjectValue {
+ codec.decoder.Decode(ptr, iter)
+ return
+ }
+
+ // Otherwise, resolve the concrete type from the "__typename" field.
+ val := iter.ReadAny()
+ typeName := val.Get("__typename").ToString()
+ if typeName == "" {
+ iter.ReportError("InterfaceCodecExtension", "missing __typename field")
+ return
+ }
+
+ // try to instantiate the type
+ t := reflect2.TypeByPackageName(gqlPackage, typeName)
+ if t == nil {
+ iter.ReportError("InterfaceCodecExtension", "cannot find type "+typeName+" in package "+gqlPackage)
+ return
+ }
+
+ // Create a pointer to a pointer of the type so that the decoded value
+ // can replace the placeholder value.
+ item := reflect2.PtrTo(reflect2.PtrTo(t)).New()
+ val.ToVal(item)
+ if err := val.LastError(); err != nil {
+ iter.ReportError("decode", err.Error())
+ return
+ }
+
+ n := reflect.New(codec.typ.Type1())
+ n.Elem().Set(reflect.ValueOf(item).Elem().Elem())
+ codec.typ.UnsafeSet(ptr, n.UnsafePointer())
+}
+
+// IsEmpty checks if a ptr is empty/nil
+func (codec *interfaceCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return codec.typ.UnsafeIsNil(ptr)
+}
diff --git a/cli/internal/platform/jsoniter_ext_test.go b/cli/internal/platform/jsoniter_ext_test.go
new file mode 100644
index 0000000000..a851dfe3b5
--- /dev/null
+++ b/cli/internal/platform/jsoniter_ext_test.go
@@ -0,0 +1,35 @@
+package platform
+
+import (
+ "testing"
+
+ qt "github.com/frankban/quicktest"
+ jsoniter "github.com/json-iterator/go"
+
+ "encr.dev/cli/internal/platform/gql"
+)
+
+func TestInterfaceDecoder(t *testing.T) {
+ c := qt.New(t)
+ enc := jsoniter.Config{}.Froze()
+ enc.RegisterExtension(NewInterfaceCodecExtension())
+
+ data := []byte(`{
+ "key": "test",
+ "selector": [
+ {"__typename": "SecretSelectorEnvType", "kind": "type:production"},
+ {"__typename": "SecretSelectorSpecificEnv", "env": {"name": "test"}}
+ ]
+}`)
+
+ var group *gql.SecretGroup
+ err := enc.Unmarshal(data, &group)
+ c.Assert(err, qt.IsNil)
+ c.Assert(group, qt.DeepEquals, &gql.SecretGroup{
+ Key: "test",
+ Selector: []gql.SecretSelector{
+ &gql.SecretSelectorEnvType{Kind: "type:production"},
+ &gql.SecretSelectorSpecificEnv{Env: &gql.Env{Name: "test"}},
+ },
+ })
+}
diff --git a/cli/internal/platform/login.go b/cli/internal/platform/login.go
new file mode 100644
index 0000000000..bae2a015da
--- /dev/null
+++ b/cli/internal/platform/login.go
@@ -0,0 +1,151 @@
+package platform
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "golang.org/x/oauth2"
+
+ "encr.dev/internal/conf"
+)
+
+type CreateOAuthSessionParams struct {
+ Challenge string `json:"challenge"`
+ State string `json:"state"`
+ RedirectURL string `json:"redirect_url"`
+}
+
+func CreateOAuthSession(ctx context.Context, p *CreateOAuthSessionParams) (authURL string, err error) {
+ var resp struct {
+ AuthURL string `json:"auth_url"`
+ }
+ err = call(ctx, "POST", "/login/oauth:create-session", p, &resp, false)
+ return resp.AuthURL, err
+}
+
+type BeginAuthorizationFlowParams struct {
+ CodeChallenge string
+ ClientID string
+}
+
+type BeginAuthorizationFlowResponse struct {
+ // DeviceCode is the device verification code.
+ DeviceCode string `json:"device_code"`
+
+ // UserCode is the end-user verification code.
+ UserCode string `json:"user_code"`
+
+ // VerificationURI is the URL the end user should visit to log in.
+ VerificationURI string `json:"verification_uri"`
+
+ // ExpiresIn is the lifetime in seconds of the device code and user code.
+ ExpiresIn int `json:"expires_in"`
+
+ // Interval is the number of seconds to wait between polling requests.
+ // If not provided, defaults to 5.
+ Interval int `json:"interval,omitempty"`
+}
+
+func BeginDeviceAuthFlow(ctx context.Context, p BeginAuthorizationFlowParams) (*BeginAuthorizationFlowResponse, error) {
+ vals := url.Values{}
+ vals.Set("code_challenge", p.CodeChallenge)
+ vals.Set("client_id", p.ClientID)
+ body := strings.NewReader(vals.Encode())
+
+ req, err := http.NewRequestWithContext(ctx, "POST", conf.APIBaseURL+"/oauth/device-auth", body)
+ if err != nil {
+ return nil, err
+ }
+
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+ resp, err := doPlatformReq(req, false)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= 400 {
+ return nil, decodeErrorResponse(resp)
+ }
+ var respData BeginAuthorizationFlowResponse
+ if err := json.NewDecoder(resp.Body).Decode(&respData); err != nil {
+ return nil, fmt.Errorf("decoding response body: %w", err)
+ }
+ return &respData, nil
+}
+
+type PollDeviceAuthFlowParams struct {
+ DeviceCode string
+ CodeVerifier string
+}
+
+type OAuthToken struct {
+ *oauth2.Token
+ Actor string `json:"actor,omitempty"` // The ID of the user or app that authorized the token.
+ Email string `json:"email"` // empty if logging in as an app
+ AppSlug string `json:"app_slug"` // empty if logging in as a user
+}
+
+func PollDeviceAuthFlow(ctx context.Context, p PollDeviceAuthFlowParams) (*OAuthToken, error) {
+ vals := url.Values{}
+ vals.Set("grant_type", "urn:ietf:params:oauth:grant-type:device_code")
+ vals.Set("device_code", p.DeviceCode)
+ vals.Set("code_verifier", p.CodeVerifier)
+ body := strings.NewReader(vals.Encode())
+
+ req, err := http.NewRequestWithContext(ctx, "POST", conf.APIBaseURL+"/oauth/token", body)
+ if err != nil {
+ return nil, err
+ }
+
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+ resp, err := doPlatformReq(req, false)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= 400 {
+ return nil, decodeErrorResponse(resp)
+ }
+
+ var tok OAuthToken
+ if err := json.NewDecoder(resp.Body).Decode(&tok); err != nil {
+ return nil, fmt.Errorf("decoding response body: %w", err)
+ }
+ return &tok, nil
+}
+
+type ExchangeOAuthTokenParams struct {
+ Challenge string `json:"challenge"`
+ Code string `json:"code"`
+}
+
+type OAuthData struct {
+ Token *oauth2.Token `json:"token"`
+ Actor string `json:"actor,omitempty"` // The ID of the user or app that authorized the token.
+ Email string `json:"email"` // empty if logging in as an app
+ AppSlug string `json:"app_slug"` // empty if logging in as a user
+}
+
+func ExchangeOAuthToken(ctx context.Context, p *ExchangeOAuthTokenParams) (*OAuthData, error) {
+ var resp OAuthData
+ err := call(ctx, "POST", "/login/oauth:exchange-token", p, &resp, false)
+ return &resp, err
+}
+
+type ExchangeAuthKeyParams struct {
+ AuthKey string `json:"auth_key"`
+}
+
+func ExchangeAuthKey(ctx context.Context, p *ExchangeAuthKeyParams) (*OAuthData, error) {
+ var resp OAuthData
+ err := call(ctx, "POST", "/login/auth-key", p, &resp, false)
+ return &resp, err
+}
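
For orientation, here is a minimal sketch (not part of the patch) of how a caller might drive the device-auth endpoints above. The PKCE challenge/verifier pair and the client ID are placeholder assumptions, and the loop simplifies error handling by treating every poll error as "authorization pending"; a real caller would distinguish error kinds and honor ExpiresIn.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"encr.dev/cli/internal/platform"
)

func main() {
	ctx := context.Background()

	// Assumption: challenge/verifier are a PKCE pair generated elsewhere.
	flow, err := platform.BeginDeviceAuthFlow(ctx, platform.BeginAuthorizationFlowParams{
		CodeChallenge: "<pkce-challenge>",
		ClientID:      "<client-id>",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Visit %s and enter code %s\n", flow.VerificationURI, flow.UserCode)

	// Poll at the server-suggested interval (defaulting to 5s per the response docs).
	interval := flow.Interval
	if interval == 0 {
		interval = 5
	}
	for {
		tok, err := platform.PollDeviceAuthFlow(ctx, platform.PollDeviceAuthFlowParams{
			DeviceCode:   flow.DeviceCode,
			CodeVerifier: "<pkce-verifier>",
		})
		if err == nil {
			fmt.Println("logged in as", tok.Email)
			return
		}
		// Simplification: treats every error as "authorization pending".
		time.Sleep(time.Duration(interval) * time.Second)
	}
}
```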
diff --git a/cli/internal/platform/secrets.go b/cli/internal/platform/secrets.go
new file mode 100644
index 0000000000..5cb67dcdc1
--- /dev/null
+++ b/cli/internal/platform/secrets.go
@@ -0,0 +1,159 @@
+package platform
+
+import (
+ "context"
+
+ "github.com/cockroachdb/errors"
+
+ "encr.dev/cli/internal/platform/gql"
+)
+
+func ListSecretGroups(ctx context.Context, appSlug string, keys []string) ([]*gql.Secret, error) {
+ query := `
+query ListSecretGroups($appSlug: String!, $keys: [String!]) {
+ app(slug: $appSlug) {
+ secrets(keys: $keys) {
+ key
+ groups {
+ id, etag, description, archivedAt
+ selector {
+ __typename
+ ...on SecretSelectorEnvType {
+ kind
+ }
+ ...on SecretSelectorSpecificEnv {
+ env { id, name }
+ }
+ }
+ versions { id }
+ }
+ }
+ }
+}`
+ var out struct {
+ App struct {
+ *gql.App
+ Secrets []*gql.Secret
+ }
+ }
+
+ in := graphqlRequest{Query: query, Variables: map[string]any{"appSlug": appSlug, "keys": keys}}
+ if err := graphqlCall(ctx, in, &out, true); err != nil {
+ return nil, err
+ }
+ return out.App.Secrets, nil
+}
+
+type CreateSecretGroupParams struct {
+ AppID string
+ Key string
+ PlaintextValue string
+ Description string
+ Selector []gql.SecretSelector
+}
+
+func CreateSecretGroup(ctx context.Context, p CreateSecretGroupParams) error {
+ query := `
+mutation CreateSecretGroup($input: CreateSecretGroups!) {
+ createSecretGroups(input: $input) { id }
+}`
+ envTypes, envIDs, err := mapSecretSelector(p.Selector)
+ if err != nil {
+ return err
+ }
+
+ in := graphqlRequest{Query: query, Variables: map[string]any{"input": map[string]any{
+ "appID": p.AppID,
+ "key": p.Key,
+ "entries": []map[string]any{
+ {
+ "plaintextValue": p.PlaintextValue,
+ "envTypes": envTypes,
+ "envIDs": envIDs,
+ "description": p.Description,
+ },
+ },
+ }}}
+ if err := graphqlCall(ctx, in, nil, true); err != nil {
+ return errors.Wrap(err, "create secret group")
+ }
+ return nil
+}
+
+type CreateSecretVersionParams struct {
+ GroupID string
+ PlaintextValue string
+ Etag string
+}
+
+func CreateSecretVersion(ctx context.Context, p CreateSecretVersionParams) error {
+ query := `
+mutation CreateSecretVersion($input: CreateSecretVersion!) {
+ createSecretVersion(input: $input) { id }
+}`
+ in := graphqlRequest{Query: query, Variables: map[string]any{"input": map[string]any{
+ "groupID": p.GroupID,
+ "plaintextValue": p.PlaintextValue,
+ "etag": p.Etag,
+ }}}
+ if err := graphqlCall(ctx, in, nil, true); err != nil {
+ return errors.Wrap(err, "create secret version")
+ }
+ return nil
+}
+
+type UpdateSecretGroupParams struct {
+ ID string
+ Etag *string
+
+ // A nil value for each field here means it's kept unchanged.
+ Selector []gql.SecretSelector // nil means no changes
+ Archived *bool
+ Description *string
+}
+
+func UpdateSecretGroup(ctx context.Context, p UpdateSecretGroupParams) error {
+ query := `
+mutation UpdateSecretGroup($input: UpdateSecretGroup!) {
+ updateSecretGroup(input: $input) { id }
+}`
+
+ var selector map[string]any
+ if p.Selector != nil {
+ envTypes, envIDs, err := mapSecretSelector(p.Selector)
+ if err != nil {
+ return err
+ }
+ selector = map[string]any{
+ "envTypes": envTypes,
+ "envIDs": envIDs,
+ }
+ }
+
+ in := graphqlRequest{Query: query, Variables: map[string]any{"input": map[string]any{
+ "id": p.ID,
+ "etag": p.Etag,
+ "selector": selector,
+ "archived": p.Archived,
+ "description": p.Description,
+ }}}
+ if err := graphqlCall(ctx, in, nil, true); err != nil {
+ return errors.Wrap(err, "update secret group")
+ }
+ return nil
+}
+
+func mapSecretSelector(selector []gql.SecretSelector) (envTypes, envIDs []string, err error) {
+ envTypes, envIDs = []string{}, []string{}
+ for _, sel := range selector {
+ switch s := sel.(type) {
+ case *gql.SecretSelectorEnvType:
+ envTypes = append(envTypes, s.Kind)
+ case *gql.SecretSelectorSpecificEnv:
+ envIDs = append(envIDs, s.Env.ID)
+ default:
+ return nil, nil, errors.Newf("unknown secret selector type %T", s)
+ }
+ }
+ return envTypes, envIDs, nil
+}
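
A hedged usage sketch for the helpers above; the app ID, app slug, and the selector kind value are illustrative assumptions (the platform schema defines the accepted kinds):

```go
package main

import (
	"context"
	"log"

	"encr.dev/cli/internal/platform"
	"encr.dev/cli/internal/platform/gql"
)

func main() {
	ctx := context.Background()

	// Create a secret group scoped to an environment type.
	err := platform.CreateSecretGroup(ctx, platform.CreateSecretGroupParams{
		AppID:          "<app-id>",
		Key:            "DATABASE_PASSWORD",
		PlaintextValue: "hunter2",
		Description:    "primary database password",
		Selector: []gql.SecretSelector{
			&gql.SecretSelectorEnvType{Kind: "production"}, // assumed kind value
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// List the groups back, filtered by key.
	secrets, err := platform.ListSecretGroups(ctx, "<app-slug>", []string{"DATABASE_PASSWORD"})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("fetched %d secret(s)", len(secrets))
}
```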
diff --git a/cli/internal/telemetry/telemetry.go b/cli/internal/telemetry/telemetry.go
new file mode 100644
index 0000000000..36d483878f
--- /dev/null
+++ b/cli/internal/telemetry/telemetry.go
@@ -0,0 +1,206 @@
+package telemetry
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/hasura/go-graphql-client"
+ "github.com/rs/zerolog/log"
+
+ "encore.dev/types/uuid"
+ "encr.dev/internal/conf"
+ "encr.dev/pkg/fns"
+ "encr.dev/pkg/xos"
+)
+
+var singleton = func() *telemetry {
+ t := &telemetry{
+ client: graphql.NewClient(conf.APIBaseURL+"/graphql", conf.DefaultClient),
+ }
+ path, err := configPath()
+ if err != nil {
+ return t
+ }
+ data, err := os.ReadFile(path)
+ if err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ // If the file does not exist, telemetry is enabled by default
+ t.cfg.Enabled = true
+ t.cfg.AnonID = uuid.Must(uuid.NewV4()).String()
+ t.cfg.SentEvents = make(map[string]struct{})
+ _ = t.saveConfig()
+ }
+ return t
+ }
+ err = json.Unmarshal(data, &t.cfg)
+ if err != nil {
+ log.Debug().Err(err).Msg("failed to unmarshal telemetry config")
+ }
+ return t
+}()
+
+type telemetry struct {
+ mu sync.Mutex
+ cfg telemetryCfg
+ client *graphql.Client
+}
+
+type telemetryCfg struct {
+ Enabled bool `json:"enabled"`
+ AnonID string `json:"anon_id"`
+ SentEvents map[string]struct{} `json:"sent_events"`
+ ShownWarning bool `json:"shown_warning"`
+ Debug bool `json:"debug"`
+}
+
+type TelemetryMessage struct {
+ Event string `json:"event"`
+ AnonymousId string `json:"anonymousId"`
+ Properties map[string]any `json:"properties,omitempty"`
+}
+
+func (t *telemetry) sendOnce(event string, props ...map[string]any) {
+ t.mu.Lock()
+ if _, ok := t.cfg.SentEvents[event]; ok {
+ t.mu.Unlock()
+ return
+ }
+ t.cfg.SentEvents[event] = struct{}{}
+ if err := t.saveConfig(); err != nil {
+ log.Debug().Err(err).Msg("failed to save telemetry config")
+ }
+ t.mu.Unlock()
+ if err := t.send(event, props...); err != nil {
+ log.Debug().Err(err).Msg("failed to send telemetry message")
+ t.mu.Lock()
+ delete(t.cfg.SentEvents, event)
+ t.mu.Unlock()
+ }
+}
+
+func (t *telemetry) send(event string, props ...map[string]any) error {
+ var m struct {
+ Result bool `graphql:"telemetry(msg: $msg)"`
+ }
+ message := TelemetryMessage{
+ Event: event,
+ AnonymousId: t.cfg.AnonID,
+ Properties: fns.MergeMaps(props...),
+ }
+ if t.cfg.Debug {
+ data, err := json.Marshal(message)
+ if err != nil {
+ log.Info().Err(err).Msg("[telemetry] failed to marshal message")
+ } else {
+ log.Info().Msgf("[telemetry] %s", string(data))
+ }
+ }
+ err := t.client.Mutate(context.Background(), &m, map[string]any{
+ "msg": message})
+ if err != nil {
+ return err
+ }
+ if !m.Result {
+ return errors.New("failed to send telemetry message")
+ }
+ return nil
+}
+
+func (t *telemetry) trySend(event string, props ...map[string]any) {
+ if err := t.send(event, props...); err != nil {
+ log.Debug().Err(err).Msg("failed to send telemetry message")
+ }
+}
+
+func (t *telemetry) saveConfig() error {
+ // Write the telemetry configuration to a file
+ path, err := configPath()
+ if err != nil {
+ return err
+ }
+ data, err := json.Marshal(t.cfg)
+ if err != nil {
+ return err
+ }
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ return err
+ }
+ return xos.WriteFile(path, data, 0644)
+}
+
+func IsEnabled() bool {
+ return singleton.cfg.Enabled
+}
+
+func SetEnabled(enabled bool) bool {
+ return UpdateConfig(singleton.cfg.AnonID, enabled, singleton.cfg.Debug)
+}
+
+func SetDebug(debug bool) bool {
+ return UpdateConfig(singleton.cfg.AnonID, singleton.cfg.Enabled, debug)
+}
+
+func UpdateConfig(anonID string, enabled, debug bool) (changed bool) {
+ changed = singleton.cfg.Enabled != enabled ||
+ singleton.cfg.Debug != debug ||
+ singleton.cfg.AnonID != anonID
+ singleton.cfg.AnonID = anonID
+ singleton.cfg.Enabled = enabled
+ singleton.cfg.Debug = debug
+ return changed
+}
+
+func ShouldShowWarning() bool {
+ return !singleton.cfg.ShownWarning && IsEnabled()
+}
+
+func SetShownWarning() {
+ singleton.cfg.ShownWarning = true
+ if err := singleton.saveConfig(); err != nil {
+ log.Debug().Err(err).Msg("failed to save telemetry config")
+ }
+}
+
+func SaveConfig() error {
+ return singleton.saveConfig()
+}
+
+func SendOnce(event string, props ...map[string]any) {
+ if !IsEnabled() {
+ return
+ }
+ go singleton.sendOnce(event, props...)
+}
+
+func Send(event string, props ...map[string]any) {
+ if !IsEnabled() {
+ return
+ }
+ go singleton.trySend(event, props...)
+}
+
+func SendSync(event string, props ...map[string]any) {
+ if !IsEnabled() {
+ return
+ }
+ singleton.trySend(event, props...)
+}
+
+func configPath() (string, error) {
+ dir, err := os.UserConfigDir()
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(dir, "encore", "telemetry.json"), nil
+}
+
+func GetAnonID() string {
+ return singleton.cfg.AnonID
+}
+
+func IsDebug() bool {
+ return singleton.cfg.Debug
+}
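
Hypothetical call sites for the exported helpers above; the event names and properties are made up for illustration:

```go
package main

import "encr.dev/cli/internal/telemetry"

func main() {
	// Fire-and-forget event, deduplicated across runs via the sent_events
	// set persisted in telemetry.json.
	telemetry.SendOnce("cli.first_run")

	// Asynchronous event with properties.
	telemetry.Send("cli.command", map[string]any{"name": "run"})

	// Synchronous variant, e.g. right before the process exits.
	telemetry.SendSync("cli.exit")
}
```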
diff --git a/cli/internal/update/update.go b/cli/internal/update/update.go
new file mode 100644
index 0000000000..5630d4e100
--- /dev/null
+++ b/cli/internal/update/update.go
@@ -0,0 +1,287 @@
+package update
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+
+ "golang.org/x/mod/semver"
+
+ "encr.dev/internal/conf"
+ "encr.dev/internal/version"
+)
+
+var ErrUnknownVersion = errors.New("unknown version")
+
+// Check checks for the latest Encore version.
+// It reports ErrUnknownVersion if it cannot determine the version.
+func Check(ctx context.Context) (latestVersion *LatestVersion, err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("update.Check: %w", err)
+ }
+ }()
+
+ releaseAPI, err := url.Parse("https://encore.dev/api/releases")
+ if err != nil {
+ return nil, fmt.Errorf("parse release api url: %w", err)
+ }
+
+ // Filter the request down to the release for the current version.
+ qry := releaseAPI.Query()
+
+ // These three are used to determine the latest release for the given channel, OS, and architecture.
+ qry.Set("channel", string(version.Channel))
+ qry.Set("os", runtime.GOOS)
+ qry.Set("arch", runtime.GOARCH)
+
+ // This is used to determine if the returned release contains security updates not present
+ // in the currently running version of Encore, as well as if we need to force an upgrade
+ // on the user due to a critical security issue.
+ qry.Set("current", version.Version)
+
+ // For specific app IDs or user IDs we can provide pre-releases.
+ // Mainly used if they've encountered a bug and we need to get them a fix ASAP for testing.
+ if cfg, err := conf.CurrentUser(); err == nil && cfg != nil {
+ qry.Set("actor", cfg.Actor)
+ }
+
+ releaseAPI.RawQuery = qry.Encode()
+
+ req, err := http.NewRequestWithContext(ctx, "GET", releaseAPI.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer func() { _ = resp.Body.Close() }()
+ if resp.StatusCode != http.StatusOK {
+ body, _ := io.ReadAll(resp.Body)
+ return nil, fmt.Errorf("GET %s: responded with %s: %s", releaseAPI, resp.Status, body)
+ }
+
+ latestVersion = &LatestVersion{}
+ if err := json.NewDecoder(resp.Body).Decode(latestVersion); err != nil {
+ return nil, fmt.Errorf("GET %s: invalid json: %v", releaseAPI, err)
+ }
+
+ if !latestVersion.Supported && latestVersion.Channel != version.DevBuild {
+ return nil, ErrUnknownVersion
+ }
+
+ return latestVersion, nil
+}
+
+// LatestVersion contains the parsed response from the update server
+type LatestVersion struct {
+ // The channel the release is from
+ Channel version.ReleaseChannel `json:"channel"`
+
+ // Whether the requested target is supported or not
+ Supported bool `json:"supported"`
+
+ // The latest version available
+ // Access via Version() to ensure the version is prefixed with "v" for GA releases
+ RawVersion string `json:"version"`
+
+ // The URL for that version (if supported)
+ URL string `json:"url,omitempty"`
+
+ // Whether the version contains a security fix from the current version running
+ SecurityUpdate bool `json:"security_update"`
+
+ // Optional notes about what the security update fixes and why the user should install it
+ SecurityNotes string `json:"security_notes,omitempty"`
+
+ // If we need to force an upgrade. This is only used for security updates and only for
+ // the most urgent ones, i.e. we should never use it unless the world is on fire.
+ ForceUpgrade bool `json:"force_upgrade,omitempty"`
+}
+
+// Version returns the version string referenced by the LatestVersion,
+// ensuring that it is prefixed with "v" for GA releases.
+func (lv *LatestVersion) Version() string {
+ // Server side doesn't include the "v" in nightly versions.
+ if lv.Channel == version.GA {
+ // Note: this TrimPrefix is future-proofing in case we decide to start returning
+ // versions that include the "v" prefix.
+ return "v" + strings.TrimPrefix(lv.RawVersion, "v")
+ }
+
+ return lv.RawVersion
+}
+
+// IsNewer reports whether the LatestVersion is newer than current.
+//
+// It is safe to call on a nil LatestVersion.
+func (lv *LatestVersion) IsNewer(current string) bool {
+ if lv == nil {
+ return false
+ }
+
+ switch lv.Channel {
+ case version.GA:
+ return semver.Compare(lv.Version(), current) > 0
+ case version.Nightly:
+ return nightlyToNumber(lv.Version()) > nightlyToNumber(current)
+ }
+
+ return false
+}
+
+// DoUpgrade upgrades Encore.
+//
+// Adapted from flyctl: https://github.com/superfly/flyctl
+func (lv *LatestVersion) DoUpgrade(stdout, stderr io.Writer) error {
+ // What shell do we need to run?
+ arg := "-c"
+ shell, ok := os.LookupEnv("SHELL")
+ if !ok {
+ //goland:noinspection GoBoolExpressions
+ if runtime.GOOS == "windows" {
+ shell = "powershell.exe"
+ arg = "-Command"
+ } else {
+ shell = "/bin/bash"
+ }
+ }
+
+ // Base script for *nix systems
+ script := "curl -L \"https://encore.dev/install.sh\" | sh"
+
+ brewManaged := false
+
+ // Script overrides for windows and systems with homebrew installed
+ switch runtime.GOOS {
+ case "windows":
+ script = "iwr https://encore.dev/install.ps1 -useb | iex"
+ case "darwin", "linux":
+ // Upgrade via homebrew if we can
+ if wasInstalledViaHomebrew(shell, arg, lv.Channel) {
+ brewManaged = true
+ script = "brew upgrade encore --fetch-head"
+ }
+ }
+
+ // Sanity check that we can perform the update.
+ switch lv.Channel {
+ case version.GA:
+ // no-op
+ case version.Nightly:
+ if brewManaged {
+ script = "brew upgrade encore-nightly --fetch-head"
+ } else {
+ return errors.New("nightly can not be automatically updated without homebrew")
+ }
+ case version.Beta:
+ if brewManaged {
+ script = "brew upgrade encore-beta --fetch-head"
+ } else {
+ return errors.New("beta can not be automatically updated without homebrew")
+ }
+ case version.DevBuild:
+ return errors.New("dev builds can not be automatically updated")
+ default:
+ return fmt.Errorf("unknown release channel %s", lv.Channel)
+ }
+
+ fmt.Println("Running update [" + script + "]")
+
+ if brewManaged {
+ updateBrewTap(stdout, stderr)
+ }
+
+ // nosemgrep
+ cmd := exec.Command(shell, arg, script)
+
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ cmd.Stdin = os.Stdin
+ return cmd.Run()
+}
+
+func nightlyToNumber(version string) int64 {
+ // version looks like: nightly-20221010
+ if !strings.HasPrefix(version, "nightly-") || len(version) != 16 {
+ return 0
+ }
+
+ // version[8:] strips the "nightly-" prefix
+ date, err := strconv.ParseInt(version[8:], 10, 64)
+ if err != nil {
+ return 0
+ }
+
+ return date
+}
+
+func wasInstalledViaHomebrew(shell string, arg string, channel version.ReleaseChannel) bool {
+ if _, err := exec.LookPath("brew"); err != nil {
+ return false
+ }
+
+ formulaName := "encore"
+ if channel == version.Nightly {
+ formulaName = "encore-nightly"
+ } else if channel == version.Beta {
+ formulaName = "encore-beta"
+ }
+
+ buf := new(bytes.Buffer)
+ // nosemgrep
+ cmd := exec.Command(shell, arg, fmt.Sprintf("brew list %s -1", formulaName))
+ cmd.Stdout = buf
+ cmd.Stderr = buf
+ cmd.Stdin = os.Stdin
+
+ // No error means it was installed via homebrew; an error means homebrew
+ // doesn't know about it or isn't installed.
+ return cmd.Run() == nil
+}
+
+func updateBrewTap(stdout, stderr io.Writer) {
+ // Attempt to update the tap if it exists.
+ var outBuf bytes.Buffer
+ cmd := exec.Command("brew", "--prefix")
+ cmd.Stdout = &outBuf
+ if err := cmd.Run(); err == nil {
+ gitDir := filepath.Join(strings.TrimSpace(outBuf.String()), "Library", "Taps", "encoredev", "homebrew-tap")
+ if _, err := os.Stat(gitDir); err == nil {
+ // Get the current branch
+ branchName := "main"
+ {
+ outBuf.Reset()
+ cmd := exec.Command("git", "rev-parse", "--abbrev-ref", "HEAD")
+ cmd.Stdout = &outBuf
+ cmd.Stderr = stderr
+ cmd.Dir = gitDir
+ if err := cmd.Run(); err == nil {
+ branchName = strings.TrimSpace(outBuf.String())
+ }
+ }
+
+ // Only update if we're on the main branch.
+ if branchName == "main" {
+ cmd := exec.Command("git", "pull", "--rebase", "origin", "main")
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ cmd.Dir = gitDir
+ _ = cmd.Run()
+ }
+ }
+ }
+}
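
A minimal sketch wiring Check, IsNewer, and DoUpgrade together (error handling simplified; version.Version mirrors the import used above):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"encr.dev/cli/internal/update"
	"encr.dev/internal/version"
)

func main() {
	latest, err := update.Check(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// IsNewer is documented as nil-safe, so a nil latest is also fine here.
	if latest.IsNewer(version.Version) {
		fmt.Printf("new version %s available (current %s)\n", latest.Version(), version.Version)
		if err := latest.DoUpgrade(os.Stdout, os.Stderr); err != nil {
			log.Fatal(err)
		}
	}
}
```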
diff --git a/cli/internal/wgtunnel/wgtunnel.go b/cli/internal/wgtunnel/wgtunnel.go
deleted file mode 100644
index ef372918f7..0000000000
--- a/cli/internal/wgtunnel/wgtunnel.go
+++ /dev/null
@@ -1,184 +0,0 @@
-// Package wgtunnel sets up and configures Encore's WireGuard tunnel for
-// authenticating against private environments.
-package wgtunnel
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net"
- "net/http"
- "time"
-
- "encr.dev/cli/internal/conf"
- "golang.org/x/oauth2"
- "golang.zx2c4.com/wireguard/wgctrl"
- "golang.zx2c4.com/wireguard/wgctrl/wgtypes"
-)
-
-// GenKey generates a public/private key pair for the WireGuard tunnel.
-func GenKey() (pub, priv wgtypes.Key, err error) {
- priv, err = wgtypes.GeneratePrivateKey()
- if err == nil {
- pub = priv.PublicKey()
- }
- return
-}
-
-// RegisterDevice registers the public key with Encore
-// and returns the allocated IP address for use with WireGuard.
-func RegisterDevice(ctx context.Context, pubKey wgtypes.Key) (ip string, err error) {
- defer func() {
- if err != nil {
- err = fmt.Errorf("wgtunnel.RegisterDevice: %v", err)
- }
- }()
-
- reqData, _ := json.Marshal(map[string]string{"public_key": pubKey.String()})
- url := "https://api.encore.dev/user/devices:register"
- req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(reqData))
- if err != nil {
- return "", err
- }
- req.Header.Set("Content-Type", "application/json")
-
- client := oauth2.NewClient(ctx, &conf.TokenSource{})
- resp, err := client.Do(req)
- if err != nil {
- return "", err
- }
- defer resp.Body.Close()
-
- if resp.StatusCode != http.StatusOK {
- body, _ := ioutil.ReadAll(resp.Body)
- return "", fmt.Errorf("request failed: %s: %s", resp.Status, body)
- }
-
- var respData struct {
- OK bool
- Data struct {
- IPAddress string `json:"ip_address"`
- } `json:"data"`
- Error struct {
- Code string `json:"code"`
- } `json:"error"`
- }
- if err := json.NewDecoder(resp.Body).Decode(&respData); err != nil {
- return "", err
- } else if !respData.OK {
- return "", fmt.Errorf("request failed with code: %v", respData.Error.Code)
- }
- return respData.Data.IPAddress, nil
-}
-
-// ClientConfig is the configuration necessary to setup WireGuard.
-type ClientConfig struct {
- // Addr is our WireGuard address.
- Addr string
- // PrivKey is our private key.
- PrivKey wgtypes.Key
-}
-
-// ServerPeer is the required configuration to configure a WireGuard peer.
-type ServerPeer struct {
- // Endpoint is the WireGuard endpoint for the server.
- Endpoint net.UDPAddr
- // PublicKey is the server's public key.
- PublicKey wgtypes.Key
- // Subnets are the network subnet that should be routed
- // through WireGuard.
- Subnets []net.IPNet
-}
-
-// ServerConfig is the configuration to set up WireGuard peers.
-type ServerConfig struct {
- Peers []ServerPeer
-}
-
-// DefaultServerConfig is the well-known default configuration of Encore's API Gateway.
-var DefaultServerConfig = &ServerConfig{
- Peers: []ServerPeer{
- {
- Endpoint: net.UDPAddr{
- IP: net.ParseIP("159.65.210.129"),
- Port: 51820,
- },
- PublicKey: mustParseKey("mQzDYCJufL+rNqbS1fBtxx3vxLX/4VaKKUDNS/yhQBs="),
- Subnets: []net.IPNet{
- {
- IP: net.ParseIP("100.26.25.109"),
- Mask: net.IPv4Mask(255, 255, 255, 255),
- },
- {
- IP: net.ParseIP("18.214.237.181"),
- Mask: net.IPv4Mask(255, 255, 255, 255),
- },
- {
- IP: net.ParseIP("54.170.142.107"),
- Mask: net.IPv4Mask(255, 255, 255, 255),
- },
- {
- IP: net.ParseIP("54.74.172.84"),
- Mask: net.IPv4Mask(255, 255, 255, 255),
- },
- },
- },
- },
-}
-
-// Start starts the WireGuard tunnel in the background.
-func Start(cc *ClientConfig, sc *ServerConfig) error {
- if sc == nil {
- sc = DefaultServerConfig
- }
- return start(cc, sc)
-}
-
-// Stop stops the WireGuard tunnel.
-func Stop() error {
- return stop()
-}
-
-// Status reports whether the tunnel is running.
-func Status() (running bool, err error) {
- return status()
-}
-
-func setConf(device string, cc *ClientConfig, sc *ServerConfig) error {
- cfg := wgtypes.Config{
- PrivateKey: &cc.PrivKey,
- ReplacePeers: true,
- }
- keepAlive := 25 * time.Second
- for _, r := range sc.Peers {
- cfg.Peers = append(cfg.Peers, wgtypes.PeerConfig{
- PublicKey: r.PublicKey,
- Endpoint: &r.Endpoint,
- ReplaceAllowedIPs: true,
- AllowedIPs: r.Subnets,
- PersistentKeepaliveInterval: &keepAlive,
- })
- }
-
- cl, err := wgctrl.New()
- if err != nil {
- return err
- }
- defer cl.Close()
- return cl.ConfigureDevice(device, cfg)
-}
-
-func mustParseKey(s string) wgtypes.Key {
- k, err := wgtypes.ParseKey(s)
- if err != nil {
- panic(err)
- }
- return k
-}
-
-// Run synchronously runs the tunnel.
-func Run() error {
- return run()
-}
diff --git a/cli/internal/wgtunnel/wgtunnel_darwin.go b/cli/internal/wgtunnel/wgtunnel_darwin.go
deleted file mode 100644
index 8f783e86c0..0000000000
--- a/cli/internal/wgtunnel/wgtunnel_darwin.go
+++ /dev/null
@@ -1,310 +0,0 @@
-// +build darwin
-
-package wgtunnel
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "net"
- "os"
- "os/exec"
- "os/signal"
- "path/filepath"
- "regexp"
- "strconv"
- "syscall"
- "time"
-
- "golang.zx2c4.com/wireguard/device"
- "golang.zx2c4.com/wireguard/ipc"
- "golang.zx2c4.com/wireguard/tun"
-)
-
-const (
- basePath = "/var/run/wireguard"
- tunnelName = "encore"
-)
-
-type tunnel struct {
- name string
- device string
- cmd *exec.Cmd
- stop func()
-
- quit chan struct{}
- err error // can be read after quit is closed
-}
-
-func start(cc *ClientConfig, sc *ServerConfig) error {
- iface, err := createIface()
- if err != nil {
- return err
- }
- if err := configure(iface, cc, sc); err != nil {
- delIface(iface)
- return err
- }
- return nil
-}
-
-func stop() error {
- device, err := getIface()
- if err != nil {
- return err
- } else if device != "" {
- return delIface(device)
- }
- return nil
-}
-
-func status() (bool, error) {
- device, err := getIface()
- return device != "", err
-}
-
-func createIface() (device string, err error) {
- if err := os.MkdirAll(basePath, 0755); err != nil {
- return "", err
- }
- namePath := filepath.Join(basePath, tunnelName+".name")
- exe, err := os.Executable()
- if err != nil {
- return "", err
- }
- cmd := exec.Command(exe, "vpn", "__run")
- cmd.Env = append(os.Environ(), "WG_TUN_NAME_FILE="+namePath)
- cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
-
- var output bytes.Buffer
- cmd.Stdout = &output
- cmd.Stderr = &output
-
- if err := cmd.Start(); err != nil {
- return "", err
- }
-
- tunnelErr := make(chan error, 1)
- go func() {
- err := cmd.Wait()
- tunnelErr <- fmt.Errorf("%s: %s", err, output.String())
- }()
-
- // Wait for the file to get populated
- ticker := time.NewTicker(100 * time.Millisecond)
- timeout := time.After(5 * time.Second)
- for {
- select {
- case <-ticker.C:
- if iface, err := ioutil.ReadFile(namePath); err == nil {
- device = string(bytes.TrimSpace(iface))
- return device, nil
- }
- case <-timeout:
- cmd.Process.Kill()
- return "", fmt.Errorf("could not determine device name after 5s")
- case err := <-tunnelErr:
- return "", fmt.Errorf("wireguard exited: %v", err)
- }
- }
-}
-
-func getIface() (device string, err error) {
- namePath := filepath.Join(basePath, tunnelName+".name")
- data, err := ioutil.ReadFile(namePath)
- if os.IsNotExist(err) {
- return "", nil
- } else if err != nil {
- return "", err
- }
- deviceName := string(bytes.TrimSpace(data))
- devicePath := filepath.Join(basePath, deviceName+".sock")
-
- nm, err1 := os.Stat(namePath)
- dev, err2 := os.Stat(devicePath)
- if os.IsNotExist(err1) || os.IsNotExist(err2) {
- return "", nil
- } else if err1 != nil {
- return "", err1
- } else if err2 != nil {
- return "", err2
- }
-
- diff := nm.ModTime().Sub(dev.ModTime())
- if diff < 2*time.Second && diff > -2*time.Second {
- return deviceName, nil
- }
- return "", nil
-}
-
-func delIface(device string) error {
- namePath := filepath.Join(basePath, tunnelName+".name")
- if err := os.Remove(namePath); err != nil && !os.IsNotExist(err) {
- return err
- }
- if device != "" {
- devicePath := filepath.Join(basePath, device+".sock")
- if err := os.Remove(devicePath); err != nil && !os.IsNotExist(err) {
- return err
- }
- }
- return nil
-}
-
-func configure(device string, cc *ClientConfig, sc *ServerConfig) error {
- ops := []func(string, *ClientConfig, *ServerConfig) error{
- setConf, addAddr, setMtus, enableIface, addPeers,
- }
- for _, op := range ops {
- if err := op(device, cc, sc); err != nil {
- return err
- }
- }
- return nil
-}
-
-func addAddr(device string, cc *ClientConfig, sc *ServerConfig) error {
- out, err := exec.Command("/sbin/ifconfig", device, "inet", cc.Addr, cc.Addr, "alias").CombinedOutput()
- if err != nil {
- return fmt.Errorf("could not add route: %v: %s", err, out)
- }
- return nil
-}
-
-var (
- routeRe = regexp.MustCompile(`(?m)interface: ([^ ]+)$`)
- mtuRe = regexp.MustCompile(`mtu ([0-9]+)`)
-)
-
-func addPeers(device string, cc *ClientConfig, sc *ServerConfig) error {
- for _, r := range sc.Peers {
- for _, s := range r.Subnets {
- if err := addRoute(device, cc, s); err != nil {
- return fmt.Errorf("adding route: %v", err)
- }
- }
- }
- return nil
-}
-
-func addRoute(device string, cc *ClientConfig, subnet net.IPNet) error {
- // Determine if this is already routed via this interface
- out, err := exec.Command("/sbin/route", "-n", "get", "-inet", subnet.String()).CombinedOutput()
- if err != nil {
- return fmt.Errorf("could get route: %v: %s", err, out)
- }
- m := routeRe.FindSubmatch(out)
- if m != nil && string(m[1]) == device {
- return nil
- }
-
- out, err = exec.Command("/sbin/route", "-n", "add", "-inet", subnet.String(), "-interface", device).CombinedOutput()
- if err != nil {
- return fmt.Errorf("could not add route: %v: %s", err, out)
- }
- return nil
-}
-
-func setMtus(device string, cc *ClientConfig, sc *ServerConfig) error {
- for _, r := range sc.Peers {
- if err := setMtu(device, cc, r); err != nil {
- return fmt.Errorf("setting mtu: %v", err)
- }
- }
- return nil
-}
-
-func setMtu(device string, cc *ClientConfig, r ServerPeer) error {
- // Parse the underlying interface that will actually send the packets
- // to our destination endpoint.
- out, err := exec.Command("/sbin/route", "-n", "get", "-inet", r.Endpoint.IP.String()).CombinedOutput()
- if err != nil {
- return fmt.Errorf("could get route: %v: %s", err, out)
- }
- m := routeRe.FindSubmatch(out)
- if m == nil {
- return fmt.Errorf("could not determine routing interface")
- }
- ifaceName := string(m[1])
-
- out, err = exec.Command("/sbin/ifconfig", ifaceName).CombinedOutput()
- if err != nil {
- return fmt.Errorf("could not get iface info: %v: %s", err, out)
- }
- m = mtuRe.FindSubmatch(out)
- if m == nil {
- return fmt.Errorf("could not determine MTU")
- }
- mtu, err := strconv.Atoi(string(m[1]))
- if err != nil {
- return fmt.Errorf("could not parse MTU: %v", err)
- }
- mtu = mtu - 80
-
- out, err = exec.Command("/sbin/ifconfig", device, "mtu", strconv.Itoa(mtu)).CombinedOutput()
- if err != nil {
- return fmt.Errorf("could not set MTU: %v: %s", err, out)
- }
- return nil
-}
-
-func enableIface(device string, cc *ClientConfig, sc *ServerConfig) error {
- out, err := exec.Command("/sbin/ifconfig", device, "up").CombinedOutput()
- if err != nil {
- return fmt.Errorf("could not enable device: %v: %s", err, out)
- }
- return nil
-}
-
-func run() error {
- tun, err := tun.CreateTUN("utun", device.DefaultMTU)
- if err != nil {
- return err
- }
- name, err := tun.Name()
- if err != nil {
- return err
- }
- fileUAPI, err := ipc.UAPIOpen(name)
- if err != nil {
- return fmt.Errorf("uapi open: %v", err)
- }
- uapi, err := ipc.UAPIListen(name, fileUAPI)
- if err != nil {
- return fmt.Errorf("failed to listen on uapi socket: %v", err)
- }
-
- logger := device.NewLogger(
- device.LogLevelError,
- "vpn: ",
- )
-
- device := device.NewDevice(tun, logger)
-
- term := make(chan os.Signal, 1)
- errs := make(chan error)
-
- signal.Notify(term, syscall.SIGTERM)
- signal.Notify(term, os.Interrupt)
-
- go func() {
- for {
- conn, err := uapi.Accept()
- if err != nil {
- errs <- err
- return
- }
- go device.IpcHandle(conn)
- }
- }()
-
- select {
- case <-term:
- case <-device.Wait():
- case err = <-errs:
- }
-
- uapi.Close()
- device.Close()
- return err
-}
diff --git a/cli/internal/wgtunnel/wgtunnel_linux.go b/cli/internal/wgtunnel/wgtunnel_linux.go
deleted file mode 100644
index e53f2d53a3..0000000000
--- a/cli/internal/wgtunnel/wgtunnel_linux.go
+++ /dev/null
@@ -1,290 +0,0 @@
-// +build linux
-
-package wgtunnel
-
-import (
- "bytes"
- "fmt"
- "net"
- "os"
- "os/exec"
- "os/signal"
- "path/filepath"
- "regexp"
- "strconv"
- "syscall"
- "time"
-
- "golang.zx2c4.com/wireguard/device"
- "golang.zx2c4.com/wireguard/ipc"
- "golang.zx2c4.com/wireguard/tun"
-)
-
-const (
- basePath = "/var/run/wireguard"
- iface = "encore"
-)
-
-type tunnel struct {
- name string
- device string
- cmd *exec.Cmd
- stop func()
-
- quit chan struct{}
- err error // can be read after quit is closed
-}
-
-func start(cc *ClientConfig, sc *ServerConfig) error {
- if hasIface() {
- if err := delIface(); err != nil {
- return err
- }
- }
-
- iface, err := createIface()
- if err != nil {
- return err
- }
-
- if err := configure(iface, cc, sc); err != nil {
- delIface()
- return err
- }
- return nil
-}
-
-func stop() error {
- return delIface()
-}
-
-func status() (bool, error) {
- return hasIface(), nil
-}
-
-func hasIface() bool {
- out, _ := exec.Command("ip", "link", "show", "dev", iface).Output()
- return len(out) != 0
-}
-
-func delIface() error {
- out, err := exec.Command("ip", "link", "delete", "dev", iface).CombinedOutput()
- if err == nil {
- return nil
- } else if bytes.Contains(out, []byte("Cannot find device")) {
- return nil
- }
- return fmt.Errorf("could not delete device: %s", out)
-}
-
-func createIface() (device string, err error) {
- if err := os.MkdirAll(basePath, 0755); err != nil {
- return "", err
- }
-
- // Use the kernel module if possible
- out, err := exec.Command("ip", "link", "add", iface, "type", "wireguard").CombinedOutput()
- if err == nil {
- return iface, nil
- }
- // Do we have the kernel module installed?
- if _, err := os.Stat("/sys/module/wireguard"); err == nil {
- return "", fmt.Errorf("could not setup WireGuard device: %s", out)
- }
- fmt.Println("encore: missing WireGuard kernel module. Falling back to slow userspace implementation.")
- fmt.Println("encore: Install WireGuard kernel module to hide this message.")
-
- namePath := filepath.Join(basePath, iface+".name")
- exe, err := os.Executable()
- if err != nil {
- return "", err
- }
-
- cmd := exec.Command(exe, "vpn", "__run")
- cmd.Env = append(os.Environ(), "WG_TUN_NAME_FILE="+namePath)
- cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
-
- var output bytes.Buffer
- cmd.Stdout = &output
- cmd.Stderr = &output
-
- if err := cmd.Start(); err != nil {
- return "", err
- }
-
- tunnelErr := make(chan error, 1)
- go func() {
- err := cmd.Wait()
- tunnelErr <- fmt.Errorf("%s: %s", err, output.String())
- }()
-
- // Wait for the file to get populated
- ticker := time.NewTicker(100 * time.Millisecond)
- timeout := time.After(5 * time.Second)
- for {
- select {
- case <-ticker.C:
- if iface, err := os.ReadFile(namePath); err == nil {
- device = string(bytes.TrimSpace(iface))
- return device, nil
- }
- case <-timeout:
- cmd.Process.Kill()
- return "", fmt.Errorf("could not determine device name after 5s")
- case err := <-tunnelErr:
- return "", fmt.Errorf("wireguard exited: %v", err)
- }
- }
-}
-
-func configure(device string, cc *ClientConfig, sc *ServerConfig) error {
- ops := []func(string, *ClientConfig, *ServerConfig) error{
- setConf, addAddr, setMtus, addPeers,
- }
- for _, op := range ops {
- if err := op(device, cc, sc); err != nil {
- return err
- }
- }
- return nil
-}
-
-func addAddr(device string, cc *ClientConfig, sc *ServerConfig) error {
- out, err := exec.Command("ip", "-4", "address", "add", cc.Addr, "dev", device).CombinedOutput()
- if err != nil {
- return fmt.Errorf("could not add address: %v: %s", err, out)
- }
- return nil
-}
-
-var (
- routeRe = regexp.MustCompile(`(?m)interface: ([^ ]+)$`)
- mtuRe = regexp.MustCompile(`mtu ([0-9]+)`)
- devRe = regexp.MustCompile(`dev ([^ ]+)`)
-)
-
-func addPeers(device string, cc *ClientConfig, sc *ServerConfig) error {
- for _, r := range sc.Peers {
- for _, s := range r.Subnets {
- if err := addRoute(device, cc, s); err != nil {
- return fmt.Errorf("adding route for %s: %v", s, err)
- }
- }
- }
- return nil
-}
-
-func addRoute(device string, cc *ClientConfig, subnet net.IPNet) error {
- // Determine if this is already routed via this interface
- out, err := exec.Command("ip", "-4", "route", "show", "dev", device, "match", subnet.String()).Output()
- if err == nil && len(out) > 0 {
- // Already routed
- return nil
- }
- out, err = exec.Command("ip", "-4", "route", "add", subnet.String(), "dev", device).CombinedOutput()
- if err != nil {
- return fmt.Errorf("could not add route: %v: %s", err, out)
- }
- return nil
-}
-
-func setMtus(device string, cc *ClientConfig, sc *ServerConfig) error {
- for _, r := range sc.Peers {
- if err := setMtu(device, cc, r); err != nil {
- return fmt.Errorf("setting mtu: %v", err)
- }
- }
- return nil
-}
-
-func setMtu(device string, cc *ClientConfig, r ServerPeer) error {
- var mtu int
- if out, err := exec.Command("ip", "route", "get", r.Endpoint.IP.String()).CombinedOutput(); err == nil {
- if m := mtuRe.FindSubmatch(out); m != nil {
- mtu, _ = strconv.Atoi(string(m[1]))
- }
- if mtu == 0 {
- // Try again by looking for the link device and looking up that
- if d := devRe.FindSubmatch(out); d != nil {
- if out, err := exec.Command("ip", "link", "show", "dev", string(d[1])).CombinedOutput(); err == nil {
- if m := mtuRe.FindSubmatch(out); m != nil {
- mtu, _ = strconv.Atoi(string(m[1]))
- }
- }
- }
- }
- }
-
- // If we still don't have an mtu, fall back to the default
- if mtu == 0 {
- if out, err := exec.Command("ip", "route", "show", "default").CombinedOutput(); err == nil {
- if m := mtuRe.FindSubmatch(out); m != nil {
- mtu, _ = strconv.Atoi(string(m[1]))
- }
- }
- }
-
- if mtu == 0 {
- mtu = 1500
- }
- mtu = mtu - 80
-
- out, err := exec.Command("ip", "link", "set", "mtu", strconv.Itoa(mtu), "up", "dev", device).CombinedOutput()
- if err != nil {
- return fmt.Errorf("could not set MTU: %v: %s", err, out)
- }
- return nil
-}
-
-func run() error {
- tun, err := tun.CreateTUN("utun", device.DefaultMTU)
- if err != nil {
- return err
- }
- name, err := tun.Name()
- if err != nil {
- return err
- }
- fileUAPI, err := ipc.UAPIOpen(name)
- if err != nil {
- return fmt.Errorf("uapi open: %v", err)
- }
- uapi, err := ipc.UAPIListen(name, fileUAPI)
- if err != nil {
- return fmt.Errorf("failed to listen on uapi socket: %v", err)
- }
-
- logger := device.NewLogger(
- device.LogLevelError,
- "vpn: ",
- )
-
- device := device.NewDevice(tun, logger)
-
- term := make(chan os.Signal, 1)
- errs := make(chan error)
-
- signal.Notify(term, syscall.SIGTERM)
- signal.Notify(term, os.Interrupt)
-
- go func() {
- for {
- conn, err := uapi.Accept()
- if err != nil {
- errs <- err
- return
- }
- go device.IpcHandle(conn)
- }
- }()
-
- select {
- case <-term:
- case <-device.Wait():
- case err = <-errs:
- }
-
- uapi.Close()
- device.Close()
- return err
-}
diff --git a/cli/internal/wgtunnel/wgtunnel_windows.go b/cli/internal/wgtunnel/wgtunnel_windows.go
deleted file mode 100644
index 1cb9d866c5..0000000000
--- a/cli/internal/wgtunnel/wgtunnel_windows.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// +build windows
-
-package wgtunnel
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
- "syscall"
-
- "golang.org/x/sys/windows"
- "golang.org/x/sys/windows/svc/mgr"
- "golang.zx2c4.com/wireguard/windows/services"
-)
-
-const tunnelName = "encore-wg"
-
-func start(cc *ClientConfig, sc *ServerConfig) error {
- configDir, err := os.UserConfigDir()
- if err != nil {
- return err
- }
- confPath := filepath.Join(configDir, "encore", tunnelName+".conf")
- if err := os.MkdirAll(filepath.Dir(confPath), 0755); err != nil {
- return err
- } else if err := writeConf(confPath, cc, sc); err != nil {
- return fmt.Errorf("cannot write WireGuard conf: %v", err)
- }
-
- err = installTunnel(confPath)
- if err != nil {
- return fmt.Errorf("could not install tunnel: %v", err)
- }
- return nil
-}
-
-func stop() error {
- if running, err := status(); err != nil || running {
- return uninstallTunnel(tunnelName)
- }
- return nil
-}
-
-func status() (bool, error) {
- serviceName, err := services.ServiceNameOfTunnel(tunnelName)
- if err != nil {
- return false, err
- }
-
- h, err := windows.OpenSCManager(nil, nil, windows.SC_MANAGER_ENUMERATE_SERVICE)
- if err != nil {
- return false, err
- }
- m := &mgr.Mgr{Handle: h}
- list, err := m.ListServices()
- if err != nil {
- return false, err
- }
- for _, svc := range list {
- if svc == serviceName {
- return true, nil
- }
- }
- return false, nil
-}
-
-func writeConf(confPath string, cc *ClientConfig, sc *ServerConfig) error {
- var peers []string
- for _, r := range sc.Peers {
- var subnets []string
- for _, s := range r.Subnets {
- subnets = append(subnets, s.String())
- }
- peer := fmt.Sprintf(`[Peer]
-PublicKey = %s
-AllowedIPs = %s
-Endpoint = %s
-PersistentKeepalive = 25
-`, r.PublicKey, strings.Join(subnets, ", "), r.Endpoint)
- peers = append(peers, peer)
- }
-
- cfg := fmt.Sprintf("[Interface]\nPrivateKey = %s\nAddress = %s\n\n", cc.PrivKey, cc.Addr) + strings.Join(peers, "\n\n")
-
- err := ioutil.WriteFile(confPath, []byte(cfg), 0600)
- if err != nil {
- return err
- }
- return nil
-}
-
-func installTunnel(configPath string) error {
- exe, err := os.Executable()
- if err != nil {
- return err
- }
- err = runElevatedShellCommand(exe, "vpn", "svc-install", configPath)
- if err != nil {
- return fmt.Errorf("could not install tunnel: %v", err)
- }
- return nil
-}
-
-func uninstallTunnel(name string) error {
- exe, err := os.Executable()
- if err != nil {
- return err
- }
- err = runElevatedShellCommand(exe, "vpn", "svc-uninstall", name)
- if err != nil {
- return fmt.Errorf("could not uninstall tunnel: %v", err)
- }
- return nil
-}
-
-func runElevatedShellCommand(cmd string, args ...string) error {
- verb := "runas"
- cwd, err := os.Getwd()
- if err != nil {
- return err
- }
-
- // Escape args if they contain spaces or quotes
- for i, arg := range args {
- args[i] = shellEscape(arg)
- }
- argStr := strings.Join(args, " ")
-
- verbPtr, _ := syscall.UTF16PtrFromString(verb)
- exePtr, _ := syscall.UTF16PtrFromString(cmd)
- cwdPtr, _ := syscall.UTF16PtrFromString(cwd)
- argPtr, _ := syscall.UTF16PtrFromString(argStr)
- var showCmd int32 = 0 // SW_NORMAL
- return windows.ShellExecute(0, verbPtr, exePtr, argPtr, cwdPtr, showCmd)
-}
-
-func shellEscape(arg string) string {
- if strings.ContainsAny(arg, `" `) {
- arg = "\"" + strings.Replace(arg, "\"", "\"\"", -1) + "\""
- }
- return arg
-}
-
-func run() error {
- return fmt.Errorf("Run() is not implemented on windows")
-}
diff --git a/cli/internal/winsvc/winsvc.go b/cli/internal/winsvc/winsvc.go
deleted file mode 100644
index 6fbf535415..0000000000
--- a/cli/internal/winsvc/winsvc.go
+++ /dev/null
@@ -1,150 +0,0 @@
-// +build windows
-
-package winsvc
-
-import (
- "errors"
- "os"
- "strings"
- "time"
-
- "golang.org/x/sys/windows"
- "golang.org/x/sys/windows/svc"
- "golang.org/x/sys/windows/svc/mgr"
- "golang.zx2c4.com/wireguard/windows/conf"
- "golang.zx2c4.com/wireguard/windows/services"
- "golang.zx2c4.com/wireguard/windows/tunnel"
-)
-
-var cachedServiceManager *mgr.Mgr
-
-func serviceManager() (*mgr.Mgr, error) {
- if cachedServiceManager != nil {
- return cachedServiceManager, nil
- }
- m, err := mgr.Connect()
- if err != nil {
- return nil, err
- }
- cachedServiceManager = m
- return cachedServiceManager, nil
-}
-
-func Status(name string) (installed bool, err error) {
- serviceName, err := services.ServiceNameOfTunnel(name)
- if err != nil {
- return false, err
- }
-
- h, err := windows.OpenSCManager(nil, nil, windows.SC_MANAGER_ENUMERATE_SERVICE)
- if err != nil {
- return false, err
- }
- m := &mgr.Mgr{Handle: h}
- list, err := m.ListServices()
- if err != nil {
- return false, err
- }
- for _, svc := range list {
- if svc == serviceName {
- return true, nil
- }
- }
- return false, nil
-}
-
-func Install(configPath string) error {
- m, err := serviceManager()
- if err != nil {
- return err
- }
- path, err := os.Executable()
- if err != nil {
- return nil
- }
-
- name, err := conf.NameFromPath(configPath)
- if err != nil {
- return err
- }
-
- serviceName, err := services.ServiceNameOfTunnel(name)
- if err != nil {
- return err
- }
- service, err := m.OpenService(serviceName)
- if err == nil {
- status, err := service.Query()
- if err != nil && err != windows.ERROR_SERVICE_MARKED_FOR_DELETE {
- service.Close()
- return err
- }
- if status.State != svc.Stopped && err != windows.ERROR_SERVICE_MARKED_FOR_DELETE {
- service.Close()
- return errors.New("Tunnel already installed and running")
- }
- err = service.Delete()
- service.Close()
- if err != nil && err != windows.ERROR_SERVICE_MARKED_FOR_DELETE {
- return err
- }
- for {
- service, err = m.OpenService(serviceName)
- if err != nil && err != windows.ERROR_SERVICE_MARKED_FOR_DELETE {
- break
- }
- service.Close()
- time.Sleep(time.Second / 3)
- }
- }
-
- config := mgr.Config{
- ServiceType: windows.SERVICE_WIN32_OWN_PROCESS,
- StartType: mgr.StartAutomatic,
- ErrorControl: mgr.ErrorNormal,
- Dependencies: []string{"Nsi"},
- DisplayName: "Encore WireGuard Tunnel: " + name,
- SidType: windows.SERVICE_SID_TYPE_UNRESTRICTED,
- }
- service, err = m.CreateService(serviceName, path, config, "vpn", "svc-run", configPath)
- if err != nil {
- return err
- }
-
- err = service.Start()
- // go trackTunnelService(name, service) // Pass off reference to handle.
- return err
-}
-
-func Uninstall(name string) error {
- m, err := serviceManager()
- if err != nil {
- return err
- }
- serviceName, err := services.ServiceNameOfTunnel(name)
- if err != nil {
- return err
- }
- service, err := m.OpenService(serviceName)
- if err != nil {
- return err
- }
- service.Control(svc.Stop)
- err = service.Delete()
- err2 := service.Close()
- if err != nil && err != windows.ERROR_SERVICE_MARKED_FOR_DELETE {
- return err
- }
- return err2
-}
-
-func Run(confPath string) error {
- return tunnel.Run(confPath)
-}
-
-func ShellEscape(arg string) string {
- if strings.ContainsAny(arg, `" `) {
- arg = "\"" + strings.Replace(arg, "\"", "\"\"", -1) + "\""
- }
- return arg
-}
diff --git a/cli/internal/winsvc/winsvc_test.go b/cli/internal/winsvc/winsvc_test.go
deleted file mode 100644
index 2e9ec17597..0000000000
--- a/cli/internal/winsvc/winsvc_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// +build windows
-
-package winsvc
-
-import "testing"
-
-func TestShellEscape(t *testing.T) {
- tests := []struct {
- Arg string
- Expected string
- }{
- {
- "foo",
- "foo",
- },
- {
- "foo bar",
- `"foo bar"`,
- },
- {
- `foo "bar" baz`,
- `"foo ""bar"" baz"`,
- },
- }
- for _, test := range tests {
- if got, want := ShellEscape(test.Arg), test.Expected; got != want {
- t.Errorf("ShellEscape(%q) = %q, want %q", test.Arg, got, want)
- }
- }
-}
diff --git a/cli/internal/xos/xos_unix.go b/cli/internal/xos/xos_unix.go
deleted file mode 100644
index ed1709c41b..0000000000
--- a/cli/internal/xos/xos_unix.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// +build !windows
-
-// Package xos provides cross-platform helper functions.
-package xos
-
-import (
- "os"
- "os/exec"
- "os/user"
- "syscall"
-)
-
-func CreateNewProcessGroup() *syscall.SysProcAttr {
- return &syscall.SysProcAttr{Setpgid: true}
-}
-
-func SocketStat(path string) (interface{}, error) {
- return os.Stat(path)
-}
-
-func SameSocket(a, b interface{}) bool {
- ai := a.(os.FileInfo)
- bi := b.(os.FileInfo)
- return os.SameFile(ai, bi)
-}
-
-func ArrangeExtraFiles(cmd *exec.Cmd, files ...*os.File) error {
- cmd.ExtraFiles = files
- return nil
-}
-
-func IsAdminUser() (bool, error) {
- usr, err := user.Current()
- if err != nil {
- return false, err
- }
- return usr.Gid == "0", nil
-}
diff --git a/cli/internal/xos/xos_windows.go b/cli/internal/xos/xos_windows.go
deleted file mode 100644
index b9feca93b8..0000000000
--- a/cli/internal/xos/xos_windows.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// +build windows
-
-package xos
-
-import (
- "fmt"
- "os"
- "os/exec"
- "strconv"
- "strings"
- "syscall"
-
- "golang.org/x/sys/windows"
-)
-
-func CreateNewProcessGroup() *syscall.SysProcAttr {
- return &syscall.SysProcAttr{
- CreationFlags: windows.CREATE_NEW_PROCESS_GROUP,
- }
-}
-
-func SocketStat(path string) (interface{}, error) {
- fd, err := windows.CreateFile(windows.StringToUTF16Ptr(path), windows.GENERIC_READ, 0, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_OPEN_REPARSE_POINT|windows.FILE_FLAG_BACKUP_SEMANTICS, 0)
- if err != nil {
- return nil, fmt.Errorf("CreateFile %s: %w", path, err)
- }
- defer windows.CloseHandle(fd)
-
- var d syscall.ByHandleFileInformation
- err = syscall.GetFileInformationByHandle(syscall.Handle(fd), &d)
- if err != nil {
- return nil, &os.PathError{"GetFileInformationByHandle", path, err}
- }
- return &d, nil
-}
-
-func SameSocket(a, b interface{}) bool {
- ai := a.(*syscall.ByHandleFileInformation)
- bi := b.(*syscall.ByHandleFileInformation)
- return ai.VolumeSerialNumber == bi.VolumeSerialNumber && ai.FileIndexHigh == bi.FileIndexHigh && ai.FileIndexLow == bi.FileIndexLow
-}
-
-func ArrangeExtraFiles(cmd *exec.Cmd, files ...*os.File) error {
- // Flag the files to bbe inherited by the child process
- var fds []string
- for _, f := range files {
- fd := f.Fd()
- fds = append(fds, strconv.FormatUint(uint64(fd), 10))
- err := windows.SetHandleInformation(windows.Handle(fd), windows.HANDLE_FLAG_INHERIT, 1)
- if err != nil {
- return fmt.Errorf("xos.ArrangeExtraFiles: SetHandleInformation: %v", err)
- }
- }
- // If the env hasn't been set, copy over this process' env so we preserve the cmd.Env semantics.
- if cmd.Env == nil {
- cmd.Env = os.Environ()
- }
- cmd.Env = append(cmd.Env, "ENCORE_EXTRA_FILES="+strings.Join(fds, ","))
- return nil
-}
-
-func IsAdminUser() (bool, error) {
- // For Windows we elevate permissions on demand, so pretend we are admin
- return true, nil
-}
diff --git a/clippy.toml b/clippy.toml
new file mode 100644
index 0000000000..87fed5b0e3
--- /dev/null
+++ b/clippy.toml
@@ -0,0 +1,2 @@
+ignore-interior-mutability = ["bytes::Bytes", "http::header::HeaderName"]
+
diff --git a/compiler/ast.go b/compiler/ast.go
deleted file mode 100644
index aba6d6f917..0000000000
--- a/compiler/ast.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package compiler
-
-import (
- "go/ast"
- "go/token"
- "strconv"
-)
-
-func usesImport(f *ast.File, pkgName, path string, exceptions map[*ast.SelectorExpr]bool) bool {
- // Find if the import has been given a different name
- for _, s := range f.Imports {
- if p, _ := strconv.Unquote(s.Path.Value); p == path {
- if s.Name != nil {
- pkgName = s.Name.Name
- }
- break
- }
- }
-
- var used bool
- ast.Walk(visitFn(func(n ast.Node) {
- sel, ok := n.(*ast.SelectorExpr)
- if ok && isTopName(sel.X, pkgName) && !exceptions[sel] {
- used = true
- }
- }), f)
- return used
-}
-
-func findImport(f *ast.File, path string) (*ast.ImportSpec, *ast.GenDecl, bool) {
- for _, decl := range f.Decls {
- gen, ok := decl.(*ast.GenDecl)
- if !ok || gen.Tok != token.IMPORT {
- continue
- }
- for _, spec := range gen.Specs {
- impspec := spec.(*ast.ImportSpec)
- if p, _ := strconv.Unquote(impspec.Path.Value); p == path {
- return impspec, gen, true
- }
- }
- }
- return nil, nil, false
-}
-
-type visitFn func(node ast.Node)
-
-func (fn visitFn) Visit(node ast.Node) ast.Visitor {
- fn(node)
- return fn
-}
-
-// isTopName returns true if n is a top-level unresolved identifier with the given name.
-func isTopName(n ast.Expr, name string) bool {
- id, ok := n.(*ast.Ident)
- return ok && id.Name == name && id.Obj == nil
-}
diff --git a/compiler/build.go b/compiler/build.go
deleted file mode 100644
index 9b6f2c1466..0000000000
--- a/compiler/build.go
+++ /dev/null
@@ -1,353 +0,0 @@
-package compiler
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
- "runtime"
-
- "encr.dev/parser"
- "golang.org/x/mod/modfile"
- "golang.org/x/mod/semver"
-)
-
-type Config struct {
- // Version specifies the app version to encode
- // into the app metadata.
- Version string
-
- // WorkingDir is the path relative to the app root from which the user
- // is running the build. It is used to resolve relative filenames.
- // If empty it defaults to "." which resolves to the app root.
- WorkingDir string
-
- // GOOS sets the GOOS to build for, if nonempty.
- GOOS string
-
- // CgoEnabled decides whether to build with cgo enabled.
- CgoEnabled bool
-
- // Debug specifies whether to compile in debug mode.
- Debug bool
-
- // EncoreRuntimePath if set, causes builds to introduce a temporary replace directive
- // that replaces the module path to the "encore.dev" module.
- // This lets us replace the implementation for building.
- EncoreRuntimePath string
-
- // EncoreGoRoot is the path to the Encore GOROOT.
- EncoreGoRoot string
-
- // Test is the specific settings for running tests.
- Test *TestConfig
-
- // If Parse is set, the build will skip parsing the app again
- // and use the information provided.
- Parse *parser.Result
-}
-
-// Validate validates the config.
-func (cfg *Config) Validate() error {
- if cfg.EncoreRuntimePath == "" {
- return errors.New("empty EncoreRuntimePath")
- } else if cfg.EncoreGoRoot == "" {
- return errors.New("empty EncoreGoRoot")
- }
- return nil
-}
-
-// Result is the combined results of a build.
-type Result struct {
- Dir string // absolute path to build temp dir
- Exe string // absolute path to the build executable
- Parse *parser.Result
-}
-
-// Build builds the application.
-//
-// On success, it is the caller's responsibility to delete the temp dir
-// returned in Result.Dir.
-func Build(appRoot string, cfg *Config) (*Result, error) {
- if err := cfg.Validate(); err != nil {
- return nil, err
- } else if appRoot, err = filepath.Abs(appRoot); err != nil {
- return nil, err
- }
-
- b := &builder{
- cfg: cfg,
- appRoot: appRoot,
- }
- return b.Build()
-}
-
-type builder struct {
- // inputs
- cfg *Config
- appRoot string
- parseTests bool
-
- workdir string
- modfile *modfile.File
- overlay map[string]string
-
- res *parser.Result
-}
-
-func (b *builder) Build() (res *Result, err error) {
- defer func() {
- if e := recover(); e != nil {
- if b, ok := e.(bailout); ok {
- err = b.err
- } else {
- panic(e)
- }
- }
- }()
-
- b.workdir, err = ioutil.TempDir("", "encore-build")
- if err != nil {
- return nil, err
- }
- defer func() {
- if err != nil {
- os.RemoveAll(b.workdir)
- }
- }()
-
- for _, fn := range []func() error{
- b.parseApp,
- b.writeModFile,
- b.writeSumFile,
- b.writePackages,
- b.writeMainPkg,
- b.buildMain,
- } {
- if err := fn(); err != nil {
- return nil, err
- }
- }
-
- return &Result{
- Dir: b.workdir,
- Exe: filepath.Join(b.workdir, binaryName+exe),
- Parse: b.res,
- }, nil
-}
-
-// parseApp parses the app situated at appRoot.
-func (b *builder) parseApp() error {
- modPath := filepath.Join(b.appRoot, "go.mod")
- modData, err := ioutil.ReadFile(modPath)
- if err != nil {
- return err
- }
- b.modfile, err = modfile.Parse(modPath, modData, nil)
- if err != nil {
- return err
- }
-
- if pc := b.cfg.Parse; pc != nil {
- b.res = pc
- return nil
- }
-
- cfg := &parser.Config{
- AppRoot: b.appRoot,
- Version: b.cfg.Version,
- ModulePath: b.modfile.Module.Mod.Path,
- WorkingDir: b.cfg.WorkingDir,
- ParseTests: b.parseTests,
- }
- b.res, err = parser.Parse(cfg)
- return err
-}
-
-func (b *builder) writeModFile() error {
- newPath := b.cfg.EncoreRuntimePath
- oldPath := "encore.dev"
- if err := b.modfile.AddRequire("encore.dev", "v0.0.0"); err != nil {
- return fmt.Errorf("could not add require encore.dev path: %v", err)
- }
- if err := b.modfile.AddReplace(oldPath, "", newPath, ""); err != nil {
- return fmt.Errorf("could not replace encore.dev path: %v", err)
- }
- b.modfile.Cleanup()
-
- runtimeModData, err := os.ReadFile(filepath.Join(newPath, "go.mod"))
- if err != nil {
- return err
- }
- runtimeModfile, err := modfile.Parse("encore-runtime/go.mod", runtimeModData, nil)
- if err != nil {
- return err
- }
- mergeModfiles(b.modfile, runtimeModfile)
-
- modBytes := modfile.Format(b.modfile.Syntax)
- dstGomod := filepath.Join(b.workdir, "go.mod")
- return ioutil.WriteFile(dstGomod, modBytes, 0644)
-}
-
-func (b *builder) writeSumFile() error {
- appSum, err := ioutil.ReadFile(filepath.Join(b.appRoot, "go.sum"))
- if err != nil && !os.IsNotExist(err) {
- return err
- }
- runtimeSum, err := ioutil.ReadFile(filepath.Join(b.cfg.EncoreRuntimePath, "go.sum"))
- if err != nil {
- return err
- }
- if !bytes.HasSuffix(appSum, []byte{'\n'}) {
- appSum = append(appSum, '\n')
- }
- data := append(appSum, runtimeSum...)
- dstGosum := filepath.Join(b.workdir, "go.sum")
- return ioutil.WriteFile(dstGosum, data, 0644)
-}
-
-func (b *builder) writePackages() error {
- // Copy all the packages into the workdir
- for _, pkg := range b.res.App.Packages {
- targetDir := filepath.Join(b.workdir, filepath.FromSlash(pkg.RelPath))
- if err := os.MkdirAll(targetDir, 0755); err != nil {
- return err
- } else if err := b.rewritePkg(pkg, targetDir); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (b *builder) buildMain() error {
- overlayData, _ := json.Marshal(map[string]interface{}{"Replace": b.overlay})
- overlayPath := filepath.Join(b.workdir, "overlay.json")
- if err := ioutil.WriteFile(overlayPath, overlayData, 0644); err != nil {
- return err
- }
-
- args := []string{
- "build",
- "-tags=encore",
- "-overlay=" + overlayPath,
- "-modfile=" + filepath.Join(b.workdir, "go.mod"),
- "-mod=mod",
- "-o=" + filepath.Join(b.workdir, "out"+exe),
- }
- args = append(args, "./"+mainPkgName)
- cmd := exec.Command(filepath.Join(b.cfg.EncoreGoRoot, "bin", "go"+exe), args...)
- env := []string{
- "GO111MODULE=on",
- "GOROOT=" + b.cfg.EncoreGoRoot,
- }
- if goos := b.cfg.GOOS; goos != "" {
- env = append(env, "GOOS="+goos)
- }
- if !b.cfg.CgoEnabled {
- env = append(env, "CGO_ENABLED=0")
- }
- cmd.Env = append(os.Environ(), env...)
- cmd.Dir = b.appRoot
- if out, err := cmd.CombinedOutput(); err != nil {
- if len(out) == 0 {
- out = []byte(err.Error())
- }
- out = makeErrsRelative(out, b.workdir, b.appRoot, b.cfg.WorkingDir)
- return &Error{Output: out}
- }
- return nil
-}
-
-func (b *builder) addOverlay(src, dst string) {
- if b.overlay == nil {
- b.overlay = make(map[string]string)
- }
- b.overlay[src] = dst
-}
-
-// mergeModfiles merges two modfiles, adding the require statements from the latter to the former.
-// If both files have the same module requirement, it keeps the one with the greater semver version.
-func mergeModfiles(src, add *modfile.File) {
- reqs := src.Require
- for _, a := range add.Require {
- found := false
- for _, r := range src.Require {
- if r.Mod.Path == a.Mod.Path {
- found = true
- // Update the version if the one to add is greater.
- if semver.Compare(a.Mod.Version, r.Mod.Version) > 0 {
- r.Mod.Version = a.Mod.Version
- }
- }
- }
- if !found {
- reqs = append(reqs, a)
- }
- }
- src.SetRequire(reqs)
- src.Cleanup()
-}
-
-type Error struct {
- Output []byte
-}
-
-func (err *Error) Error() string {
- return string(err.Output)
-}
-
-type bailout struct {
- err error
-}
-
-func (b *builder) err(msg string) {
- panic(bailout{errors.New(msg)})
-}
-
-func (b *builder) errf(format string, args ...interface{}) {
- b.err(fmt.Sprintf(format, args...))
-}
-
-const binaryName = "out"
-
-var exe string
-
-func init() {
- if runtime.GOOS == "windows" {
- exe = ".exe"
- }
-}
-
-// makeErrsRelative goes through the errors and tweaks the filename to be relative
-// to the relwd.
-func makeErrsRelative(out []byte, workdir, appRoot, relwd string) []byte {
- wdroot := filepath.Join(appRoot, relwd)
- lines := bytes.Split(out, []byte{'\n'})
- prefix := append([]byte(workdir), '/')
- modified := false
- for i, line := range lines {
- if !bytes.HasPrefix(line, prefix) {
- continue
- }
- idx := bytes.IndexByte(line, ':')
- if idx == -1 || idx < len(prefix) {
- continue
- }
- filename := line[:idx]
- appPath := filepath.Join(appRoot, string(filename[len(prefix):]))
- if rel, err := filepath.Rel(wdroot, appPath); err == nil {
- lines[i] = append([]byte(rel), line[idx:]...)
- modified = true
- }
- }
-
- if !modified {
- return out
- }
- return bytes.Join(lines, []byte{'\n'})
-}
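
For reference, the removed Build API was driven by populating a Config and passing the app root; a minimal sketch of a caller (paths and version are hypothetical, not taken from this repository):

package main

import (
	"fmt"
	"log"
	"os"

	"encr.dev/compiler"
)

func main() {
	// Hypothetical paths; Config.Validate rejects empty
	// EncoreRuntimePath and EncoreGoRoot.
	res, err := compiler.Build("/path/to/app", &compiler.Config{
		Version:           "v1.2.3",
		WorkingDir:        ".",
		CgoEnabled:        false,
		EncoreRuntimePath: "/path/to/encore/runtime",
		EncoreGoRoot:      "/path/to/encore-go",
	})
	if err != nil {
		log.Fatal(err)
	}
	// On success the caller owns the temp dir.
	defer os.RemoveAll(res.Dir)
	fmt.Println("compiled binary:", res.Exe)
}
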
diff --git a/compiler/build_test.go b/compiler/build_test.go
deleted file mode 100644
index 8f9614ed55..0000000000
--- a/compiler/build_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package compiler
-
-import (
- "testing"
-
- qt "github.com/frankban/quicktest"
- "golang.org/x/mod/modfile"
-)
-
-func TestMergeModfiles(t *testing.T) {
- c := qt.New(t)
- foo := `module foo
-
-require (
- a v1.2.0
- b v1.2.0
- c v1.2.0
-)
-`
- bar := `module bar
-
-require (
- a v1.3.0
- b v1.1.0
- d v1.5.0
-)
-`
- modFoo, err := modfile.Parse("foo", []byte(foo), nil)
- c.Assert(err, qt.IsNil)
- modBar, err := modfile.Parse("bar", []byte(bar), nil)
- c.Assert(err, qt.IsNil)
-
- mergeModfiles(modFoo, modBar)
- out := modfile.Format(modFoo.Syntax)
-
- c.Assert(string(out), qt.Equals, `module foo
-
-require (
- a v1.3.0
- b v1.2.0
- c v1.2.0
- d v1.5.0
-)
-`)
-}
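
The expected output follows from the keep-the-greater-version rule in mergeModfiles, which delegates the ordering to golang.org/x/mod/semver.Compare:

fmt.Println(semver.Compare("v1.3.0", "v1.2.0")) // 1: the merged file keeps a v1.3.0
fmt.Println(semver.Compare("v1.1.0", "v1.2.0")) // -1: the merged file keeps b v1.2.0
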
diff --git a/compiler/internal/rewrite/rewrite.go b/compiler/internal/rewrite/rewrite.go
deleted file mode 100644
index 9fa38a69b5..0000000000
--- a/compiler/internal/rewrite/rewrite.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package rewrite
-
-import (
- "bytes"
- "fmt"
- "go/token"
-)
-
-func New(data []byte, base int) *Rewriter {
- return &Rewriter{
-		base: base,
- segs: []segment{{
- data: data,
- start: base,
- end: base + len(data),
- }},
- }
-}
-
-type Rewriter struct {
- base int
- segs []segment
-}
-
-func (r *Rewriter) Replace(start, end token.Pos, data []byte) {
- si, so := r.seg(start)
- ei, eo := r.seg(end)
- r.replace(si, so, ei, eo, data)
-}
-
-func (r *Rewriter) Insert(start token.Pos, data []byte) {
- si, so := r.seg(start)
- r.replace(si, so, si, so, data)
-}
-
-func (r *Rewriter) Delete(start, end token.Pos) {
- si, so := r.seg(start)
- ei, eo := r.seg(end)
- r.replace(si, so, ei, eo, nil)
-}
-
-func (r *Rewriter) Data() []byte {
- var buf bytes.Buffer
- for _, seg := range r.segs {
- buf.Write(seg.data)
- }
- return buf.Bytes()
-}
-
-func (r *Rewriter) replace(si, so, ei, eo int, data []byte) {
- if si == ei {
- // Same segment; cut it into two
- start := r.segs[si]
- end := segment{
- start: start.start + eo,
- end: start.end,
- data: start.data[eo:],
- }
- start.data = start.data[:so]
- start.end = start.start + so
- mid := segment{
- start: start.end,
- end: end.start,
- data: data,
- }
- r.segs = append(r.segs[:si], append([]segment{start, mid, end}, r.segs[ei+1:]...)...)
- } else {
- // Already different segments; adjust start/end and replace segments in-between
- start := r.segs[si]
- end := r.segs[ei]
- start.end = start.start + so
- start.data = start.data[:so]
- end.start += eo
- end.data = end.data[eo:]
- mid := segment{
- start: start.end,
- end: end.start,
- data: data,
- }
- r.segs = append(r.segs[:si], append([]segment{start, mid, end}, r.segs[ei+1:]...)...)
- }
-}
-
-func (r *Rewriter) seg(pos token.Pos) (idx int, offset int) {
- p := int(pos)
- for i, seg := range r.segs {
- if seg.start <= p && p < seg.end {
- return i, int(p - seg.start)
- }
- }
- panic(fmt.Sprintf("original file does not contain pos %v", pos))
-}
-
-type segment struct {
- start, end int
- data []byte
-}
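
The positions handed to Replace, Insert, and Delete are token.Pos values anchored at base, so AST node positions from a parsed file map directly onto the buffer. A minimal sketch of the intended use (illustrative only; the package is internal to the compiler):

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"encr.dev/compiler/internal/rewrite"
)

func example() {
	fset := token.NewFileSet()
	src := []byte("package p\n\nfunc hello() {}\n")
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}

	// New anchors the buffer at the file's token base, so the parser's
	// token.Pos values can be used without manual offset arithmetic.
	rw := rewrite.New(src, fset.File(f.Pos()).Base())
	fn := f.Decls[0].(*ast.FuncDecl)
	rw.Replace(fn.Name.Pos(), fn.Name.End(), []byte("greet"))
	fmt.Printf("%s", rw.Data()) // package p\n\nfunc greet() {}
}
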
diff --git a/compiler/internal/rewrite/rewrite_test.go b/compiler/internal/rewrite/rewrite_test.go
deleted file mode 100644
index c346914366..0000000000
--- a/compiler/internal/rewrite/rewrite_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package rewrite
-
-import (
- "bytes"
- "testing"
-)
-
-func TestSplit(t *testing.T) {
- rw := New([]byte("test"), 1)
- rw.Replace(2, 4, []byte("ou")) // "tout"
- rw.Replace(1, 2, []byte("rea")) // "reaout"
- rw.Insert(4, []byte("h")) // "reaouht"
- rw.Insert(4, []byte("a")) // "reaouaht"
- if got, want := rw.Data(), []byte("reaouhat"); !bytes.Equal(got, want) {
- t.Errorf("got data %s, want %s", got, want)
- }
-}
-
-func TestReplaceAcrossSegments(t *testing.T) {
- rw := New([]byte("foo bar"), 1)
- rw.Replace(5, 6, []byte("l")) // "foo lar"
- rw.Replace(2, 7, []byte("hi")) // "fhir"
- if got, want := rw.Data(), []byte("fhir"); !bytes.Equal(got, want) {
- t.Errorf("got data %s, want %s", got, want)
- }
-}
-
-func TestReplaceTwice(t *testing.T) {
- rw := New([]byte("foo bar"), 1)
- rw.Replace(5, 6, []byte("l")) // "foo lar"
- rw.Replace(2, 7, []byte("hi")) // "fhir"
- rw.Replace(2, 7, []byte("hello")) // "fhellor"
- if got, want := rw.Data(), []byte("fhellor"); !bytes.Equal(got, want) {
- t.Errorf("got data %s, want %s", got, want)
- }
-}
-
-func TestDelete(t *testing.T) {
- rw := New([]byte("foo bar"), 1)
- rw.Replace(5, 6, []byte("l")) // "foo lar"
- rw.Delete(2, 7)
- if got, want := rw.Data(), []byte("fr"); !bytes.Equal(got, want) {
- t.Errorf("got data %s, want %s", got, want)
- }
-}
diff --git a/compiler/rewrite.go b/compiler/rewrite.go
deleted file mode 100644
index f0f13de311..0000000000
--- a/compiler/rewrite.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package compiler
-
-import (
- "bytes"
- "fmt"
- "go/ast"
- "io/ioutil"
- "path/filepath"
- "strconv"
-
- "encr.dev/compiler/internal/rewrite"
- "encr.dev/parser/est"
- "golang.org/x/tools/go/ast/astutil"
-)
-
-// rewritePkg writes out modified files to targetDir.
-func (b *builder) rewritePkg(pkg *est.Package, targetDir string) error {
- fset := b.res.FileSet
- seenWrappers := make(map[string]bool)
- var wrappers []*est.RPC
- nodes := b.res.Nodes[pkg]
- for _, file := range pkg.Files {
- if len(file.References) == 0 {
- // No references to other RPCs, we can skip it immediately
- continue
- }
-
- rewrittenPkgs := make(map[*est.Package]bool)
- rw := rewrite.New(file.Contents, file.Token.Base())
-
- useExceptions := make(map[*ast.SelectorExpr]bool)
- astutil.Apply(file.AST, func(c *astutil.Cursor) bool {
- node := c.Node()
- rewrite, ok := file.References[node]
- if !ok {
- return true
- }
-
- switch rewrite.Type {
- case est.SQLDBNode:
- call := c.Node().(*ast.CallExpr)
- tx := nodes[call]
- lp := fset.Position(call.Lparen)
- rw.Insert(call.Lparen+1, []byte(fmt.Sprintf("%d, %s,/*line :%d:%d*/",
- tx.Id, strconv.Quote(pkg.Service.Name), lp.Line, lp.Column+1)))
- return true
-
- case est.RLogNode:
- call := c.Node().(*ast.CallExpr)
- tx := nodes[call]
- lp := fset.Position(call.Lparen)
- rw.Insert(call.Lparen+1, []byte(fmt.Sprintf("%d,/*line :%d:%d*/",
- tx.Id, lp.Line, lp.Column+1)))
- return true
-
- case est.RPCCallNode:
- rpc := rewrite.RPC
- wrapperName := "encore_" + rpc.Svc.Name + "_" + rpc.Name
- call := c.Node().(*ast.CallExpr)
-
-			// Capture rewrites that should be ignored when determining whether an
-			// import is still in use. The call's Fun is generally a *ast.SelectorExpr,
-			// but a call to an API within the same package is a plain identifier
-			// and can safely be ignored.
- if sel, ok := call.Fun.(*ast.SelectorExpr); ok {
- useExceptions[sel] = true
- }
-
- rw.Replace(call.Fun.Pos(), call.Fun.End(), []byte(wrapperName))
- lp := fset.Position(call.Lparen)
- callTx := nodes[call]
- rpcTx := b.res.Nodes[rpc.Svc.Root][rpc.Func]
- rw.Insert(call.Lparen+1, []byte(fmt.Sprintf("%d, %d,/*line :%d:%d*/",
- callTx.Id, rpcTx.Id, lp.Line, lp.Column+1)))
- rewrittenPkgs[rpc.Svc.Root] = true
-
- if !seenWrappers[wrapperName] {
- wrappers = append(wrappers, rpc)
- seenWrappers[wrapperName] = true
- }
- return true
-
- case est.RPCDefNode:
- // Do nothing
- return true
-
- case est.SecretsNode:
- spec := c.Node().(*ast.ValueSpec)
-
- var buf bytes.Buffer
- buf.WriteString("{\n")
- for _, secret := range pkg.Secrets {
- fmt.Fprintf(&buf, "\t%s: __encore_runtime.LoadSecret(%s),\n", secret, strconv.Quote(secret))
- }
- ep := fset.Position(spec.End())
- fmt.Fprintf(&buf, "}/*line :%d:%d*/", ep.Line, ep.Column)
-
- rw.Insert(spec.Type.Pos(), []byte("= "))
- rw.Insert(spec.End(), buf.Bytes())
-
- decl := file.AST.Decls[0]
- ln := fset.Position(decl.Pos())
- rw.Insert(decl.Pos(), []byte(fmt.Sprintf("import __encore_runtime %s\n/*line :%d:%d*/", strconv.Quote("encore.dev/runtime"), ln.Line, ln.Column)))
- return true
-
- default:
- panic(fmt.Sprintf("unhandled rewrite type: %v", rewrite.Type))
- }
- }, nil)
-
- // Determine if we have some imports that are now unused that we should remove.
- for pkg := range rewrittenPkgs {
- if !usesImport(file.AST, pkg.Name, pkg.ImportPath, useExceptions) {
- spec, decl, ok := findImport(file.AST, pkg.ImportPath)
- if ok {
- // If the decl contains multiple imports, only delete the spec
- if len(decl.Specs) > 1 {
- rw.Delete(spec.Pos(), spec.End())
- } else {
- rw.Delete(decl.Pos(), decl.End())
- }
- }
- }
- }
-
- // Write out the file
- name := filepath.Base(file.Path)
- dst := filepath.Join(targetDir, name)
- if err := ioutil.WriteFile(dst, rw.Data(), 0644); err != nil {
- return err
- }
- b.addOverlay(file.Path, dst)
- }
-
- if len(wrappers) > 0 {
- name := "encore_rpc_wrappers.go"
- wrapperPath := filepath.Join(targetDir, name)
- if err := b.generateWrappers(pkg, wrappers, wrapperPath); err != nil {
- return err
- }
- b.addOverlay(filepath.Join(pkg.Dir, name), wrapperPath)
- }
-
- return nil
-}
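
To make the RPCCallNode branch concrete: a cross-service call such as

	resp, err := email.Send(ctx, params)

is rewritten (the indices 42 and 7 and the position are hypothetical) into

	resp, err := encore_email_Send(42, 7,/*line :15:24*/ctx, params)

where the two injected ids identify the call and endpoint trace expressions, and the line directive maps compiler output back to the original source position. The wrapper itself is emitted into encore_rpc_wrappers.go by generateWrappers.
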
diff --git a/compiler/runtime/beta/auth/auth.go b/compiler/runtime/beta/auth/auth.go
deleted file mode 100644
index 053c7ce3c3..0000000000
--- a/compiler/runtime/beta/auth/auth.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package auth
-
-import "encore.dev/runtime"
-
-// UID is a unique identifier representing a user (a user id).
-type UID = runtime.UID
-
-// UserID reports the uid of the user making the request.
-// The second result is true if there is a user and false
-// if the request was made without authentication details.
-func UserID() (UID, bool) {
- req, _, ok := runtime.CurrentRequest()
- if ok {
- return req.UID, req.UID != ""
- }
- return "", false
-}
-
-// Data returns the structured auth data for the request.
-// It returns nil if the request was made without authentication details,
-// and the API endpoint does not require them.
-//
-// Expected usage is to immediately cast it to the registered auth data type:
-//
-// usr, ok := auth.Data().(*user.Data)
-// if !ok { /* ... */ }
-//
-func Data() interface{} {
- req, _, ok := runtime.CurrentRequest()
- if ok {
- return req.AuthData
- }
- return nil
-}
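
Typical endpoint code consumes these helpers as sketched below; user.Data and its Email field are hypothetical stand-ins for whatever auth data type the app registered:

if uid, ok := auth.UserID(); ok {
	// The request carried valid authentication details.
	if data, ok := auth.Data().(*user.Data); ok { // hypothetical type
		log.Printf("authenticated call by %s (%s)", uid, data.Email)
	}
}
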
diff --git a/compiler/runtime/go.mod b/compiler/runtime/go.mod
deleted file mode 100644
index 934beb8ffa..0000000000
--- a/compiler/runtime/go.mod
+++ /dev/null
@@ -1,11 +0,0 @@
-module encore.dev
-
-require (
- github.com/felixge/httpsnoop v1.0.1
- github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce
- github.com/jackc/pgx/v4 v4.10.1
- github.com/json-iterator/go v1.1.10
- github.com/rs/zerolog v1.20.0
-)
-
-go 1.13
diff --git a/compiler/runtime/go.sum b/compiler/runtime/go.sum
deleted file mode 100644
index d104c40ff6..0000000000
--- a/compiler/runtime/go.sum
+++ /dev/null
@@ -1,182 +0,0 @@
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
-github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ=
-github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
-github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce h1:7UnVY3T/ZnHUrfviiAgIUjg2PXxsQfs5bphsG8F7Keo=
-github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
-github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0=
-github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
-github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
-github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
-github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
-github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
-github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
-github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
-github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk=
-github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
-github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
-github.com/jackc/pgconn v1.8.0 h1:FmjZ0rOyXTr1wfWs45i4a9vjnjWUAGpMuQLD9OSs+lw=
-github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
-github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
-github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
-github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 h1:JVX6jT/XfzNqIjye4717ITLaNwV9mWbJx0dLCpcRzdA=
-github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
-github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
-github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
-github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A=
-github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
-github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
-github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
-github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
-github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
-github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
-github.com/jackc/pgproto3/v2 v2.0.6 h1:b1105ZGEMFe7aCvrT1Cca3VoVb4ZFMaFJLJcg/3zD+8=
-github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
-github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
-github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg=
-github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
-github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
-github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
-github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
-github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0=
-github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po=
-github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ=
-github.com/jackc/pgtype v1.6.2 h1:b3pDeuhbbzBYcg5kwNmNDun4pFUD/0AAr1kLXZLeNt8=
-github.com/jackc/pgtype v1.6.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
-github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
-github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
-github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
-github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA=
-github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o=
-github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg=
-github.com/jackc/pgx/v4 v4.10.1 h1:/6Q3ye4myIj6AaplUm+eRcz4OhK9HAvFf4ePsG40LJY=
-github.com/jackc/pgx/v4 v4.10.1/go.mod h1:QlrWebbs3kqEZPHCTGyxecvzG6tvIsYu+A5b1raylkA=
-github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v1.1.3 h1:JnPg/5Q9xVJGfjsO5CPUOjnJps1JaRUm8I9FXVCFK94=
-github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU=
-github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
-github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
-github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
-github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
-github.com/rs/zerolog v1.20.0 h1:38k9hgtUBdxFwE34yS8rTHmHBa4eN16E4DJlv177LNs=
-github.com/rs/zerolog v1.20.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo=
-github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
-github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc h1:jUIKcSPO9MoMJBbEoyE/RJoE8vz7Mb8AjvifMMwSyvY=
-github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
-go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
-go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
diff --git a/compiler/runtime/internal/rlog/rlog.go b/compiler/runtime/internal/rlog/rlog.go
deleted file mode 100644
index 396ea96dec..0000000000
--- a/compiler/runtime/internal/rlog/rlog.go
+++ /dev/null
@@ -1,161 +0,0 @@
-package rlog
-
-import (
- "encoding/binary"
- "time"
-
- "encore.dev/runtime"
- "encore.dev/types/uuid"
- "github.com/rs/zerolog"
-)
-
-type Ctx struct {
- ctx zerolog.Context
-}
-
-func Debug(traceExpr int32, msg string, keysAndValues ...interface{}) {
- log := runtime.Logger()
- doLog(log.Debug(), msg, keysAndValues...)
-}
-
-func Info(traceExpr int32, msg string, keysAndValues ...interface{}) {
- log := runtime.Logger()
- doLog(log.Info(), msg, keysAndValues...)
-}
-
-func Error(traceExpr int32, msg string, keysAndValues ...interface{}) {
- log := runtime.Logger()
- doLog(log.Error(), msg, keysAndValues...)
-}
-
-func With(keysAndValues ...interface{}) Ctx {
- ctx := runtime.Logger().With()
- for i := 0; i < len(keysAndValues); i += 2 {
- key := keysAndValues[i].(string)
- val := keysAndValues[i+1]
- ctx = addContext(ctx, key, val)
- }
- return Ctx{ctx: ctx}
-}
-
-func Debugc(traceExpr int32, ctx Ctx, msg string, keysAndValues ...interface{}) {
- l := ctx.ctx.Logger()
- doLog(l.Debug(), msg, keysAndValues...)
-}
-
-func Infoc(traceExpr int32, ctx Ctx, msg string, keysAndValues ...interface{}) {
- l := ctx.ctx.Logger()
- doLog(l.Info(), msg, keysAndValues...)
-}
-
-func Errorc(traceExpr int32, ctx Ctx, msg string, keysAndValues ...interface{}) {
- l := ctx.ctx.Logger()
- doLog(l.Error(), msg, keysAndValues...)
-}
-
-func doLog(ev *zerolog.Event, msg string, keysAndValues ...interface{}) {
- for i := 0; i < len(keysAndValues); i += 2 {
- key := keysAndValues[i].(string)
- val := keysAndValues[i+1]
- addEventEntry(ev, key, val)
- }
- ev.Msg(msg)
-}
-
-func addEventEntry(ev *zerolog.Event, key string, val interface{}) {
- switch val := val.(type) {
- case error:
- ev.AnErr(key, val)
- case string:
- ev.Str(key, val)
- case bool:
- ev.Bool(key, val)
-
- case time.Time:
- ev.Time(key, val)
- case time.Duration:
- ev.Dur(key, val)
- case uuid.UUID:
- ev.Str(key, val.String())
-
-	case int8:
-		ev.Int8(key, val)
-	case int16:
-		ev.Int16(key, val)
-	case int32:
-		ev.Int32(key, val)
-	case int64:
-		ev.Int64(key, val)
-	case int:
-		ev.Int(key, val)
-
-	case uint8:
-		ev.Uint8(key, val)
-	case uint16:
-		ev.Uint16(key, val)
-	case uint32:
-		ev.Uint32(key, val)
-	case uint64:
-		ev.Uint64(key, val)
-	case uint:
-		ev.Uint(key, val)
-
-	case float32:
-		ev.Float32(key, val)
-	case float64:
-		ev.Float64(key, val)
-
-	default:
-		ev.Interface(key, val)
-	}
-}
-
-func addContext(ctx zerolog.Context, key string, val interface{}) zerolog.Context {
- switch val := val.(type) {
- case error:
- return ctx.AnErr(key, val)
- case string:
- return ctx.Str(key, val)
- case bool:
- return ctx.Bool(key, val)
-
- case time.Time:
- return ctx.Time(key, val)
- case time.Duration:
- return ctx.Dur(key, val)
- case uuid.UUID:
- return ctx.Str(key, val.String())
-
-	case int8:
-		return ctx.Int8(key, val)
-	case int16:
-		return ctx.Int16(key, val)
-	case int32:
-		return ctx.Int32(key, val)
-	case int64:
-		return ctx.Int64(key, val)
-	case int:
-		return ctx.Int(key, val)
-
-	case uint8:
-		return ctx.Uint8(key, val)
-	case uint16:
-		return ctx.Uint16(key, val)
-	case uint32:
-		return ctx.Uint32(key, val)
-	case uint64:
-		return ctx.Uint64(key, val)
-	case uint:
-		return ctx.Uint(key, val)
-
-	case float32:
-		return ctx.Float32(key, val)
-	case float64:
-		return ctx.Float64(key, val)
-
-	default:
-		return ctx.Interface(key, val)
-	}
-}
-
-var bin = binary.BigEndian
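
Note that the leading traceExpr argument is not written by application code: the RLogNode rewrite in compiler/rewrite.go injects it at each logging call site. Keys must be strings, since the type assertions in doLog and With panic otherwise. A sketch of the pattern the rewritten code ends up with (the trace-expression id 3 is hypothetical):

to := "user@example.com"
start := time.Now()

ctx := rlog.With("service", "email", "attempt", 2)
// The first argument stands in for the compiler-injected trace-expression id.
rlog.Infoc(3, ctx, "sending email",
	"to", to,                  // string value goes through ev.Str
	"took", time.Since(start), // time.Duration goes through ev.Dur
)
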
diff --git a/compiler/runtime/rlog/rlog.go b/compiler/runtime/rlog/rlog.go
deleted file mode 100644
index 0ad5f8ba5d..0000000000
--- a/compiler/runtime/rlog/rlog.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package rlog
-
-import (
- "encore.dev/internal/rlog"
-)
-
-type Ctx = rlog.Ctx
-
-func Debug(traceExpr int32, msg string, keysAndValues ...interface{}) {
- rlog.Debug(traceExpr, msg, keysAndValues...)
-}
-
-func Info(traceExpr int32, msg string, keysAndValues ...interface{}) {
- rlog.Info(traceExpr, msg, keysAndValues...)
-}
-
-func Error(traceExpr int32, msg string, keysAndValues ...interface{}) {
- rlog.Error(traceExpr, msg, keysAndValues...)
-}
-
-func With(keysAndValues ...interface{}) Ctx {
- return rlog.With(keysAndValues...)
-}
-
-func Debugc(traceExpr int32, ctx Ctx, msg string, keysAndValues ...interface{}) {
- rlog.Debugc(traceExpr, ctx, msg, keysAndValues...)
-}
-
-func Infoc(traceExpr int32, ctx Ctx, msg string, keysAndValues ...interface{}) {
- rlog.Infoc(traceExpr, ctx, msg, keysAndValues...)
-}
-
-func Errorc(traceExpr int32, ctx Ctx, msg string, keysAndValues ...interface{}) {
- rlog.Errorc(traceExpr, ctx, msg, keysAndValues...)
-}
diff --git a/compiler/runtime/runtime/config/config.go b/compiler/runtime/runtime/config/config.go
deleted file mode 100644
index 5769cd07ab..0000000000
--- a/compiler/runtime/runtime/config/config.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package config
-
-import (
- "net/http"
-)
-
-type ServerConfig struct {
- Testing bool
- Services []*Service
-}
-
-type Service struct {
- Name string
- RelPath string // relative path to service pkg (from app root)
- Endpoints []*Endpoint
- SQLDB bool // does the service use sqldb?
-}
-
-type Endpoint struct {
- Name string
- Raw bool
- Handler func(w http.ResponseWriter, req *http.Request)
-}
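
A sketch of how a generated main package might populate this configuration (service, endpoint, and handler names are hypothetical):

runtime.Config = &config.ServerConfig{
	Testing: false,
	Services: []*config.Service{{
		Name:    "email",
		RelPath: "email",
		SQLDB:   true,
		Endpoints: []*config.Endpoint{{
			Name:    "Send",
			Raw:     false,
			Handler: __encore_email_Send, // generated handler, hypothetical
		}},
	}},
}
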
diff --git a/compiler/runtime/runtime/http.go b/compiler/runtime/runtime/http.go
deleted file mode 100644
index 1f309a1066..0000000000
--- a/compiler/runtime/runtime/http.go
+++ /dev/null
@@ -1,359 +0,0 @@
-package runtime
-
-import (
- "context"
- "crypto/tls"
- "fmt"
- "io"
- "net/http"
- "net/http/httptrace"
- "net/textproto"
- "sync"
- "sync/atomic"
-)
-
-var httpReqIDCtr uint64
-
-type httpRoundTrip struct {
- ReqID uint64
- SpanID SpanID
-
- mu sync.Mutex
- events []httpEvent
-}
-
-func (rt *httpRoundTrip) getConn(hostPort string) {
- rt.addEvent(getConn, &getConnEvent{hostPort: hostPort})
-}
-
-func (rt *httpRoundTrip) gotConn(info httptrace.GotConnInfo) {
- rt.addEvent(gotConn, &gotConnEvent{info: info})
-}
-
-func (rt *httpRoundTrip) gotFirstResponseByte() {
- rt.addEvent(gotFirstResponseByte, nil)
-}
-
-func (rt *httpRoundTrip) got1xxResponse(code int, header textproto.MIMEHeader) error {
- rt.addEvent(got1xxResponse, &got1xxResponseEvent{code: code, header: header})
- return nil
-}
-
-func (rt *httpRoundTrip) dnsStart(info httptrace.DNSStartInfo) {
- rt.addEvent(dnsStart, &dnsStartEvent{info: info})
-}
-
-func (rt *httpRoundTrip) dnsDone(info httptrace.DNSDoneInfo) {
- rt.addEvent(dnsDone, &dnsDoneEvent{info: info})
-}
-
-func (rt *httpRoundTrip) connectStart(network, addr string) {
- rt.addEvent(connectStart, &connectStartEvent{network: network, addr: addr})
-}
-
-func (rt *httpRoundTrip) connectDone(network, addr string, err error) {
- rt.addEvent(connectDone, &connectDoneEvent{network: network, addr: addr, err: err})
-}
-
-func (rt *httpRoundTrip) tlsHandshakeStart() {
- rt.addEvent(tlsHandshakeStart, nil)
-}
-
-func (rt *httpRoundTrip) tlsHandshakeDone(state tls.ConnectionState, err error) {
- rt.addEvent(tlsHandshakeDone, &tlsHandshakeDoneEvent{info: state, err: err})
-}
-
-func (rt *httpRoundTrip) wroteHeaders() {
- rt.addEvent(wroteHeaders, nil)
-}
-
-func (rt *httpRoundTrip) wroteRequest(info httptrace.WroteRequestInfo) {
- rt.addEvent(wroteRequest, &wroteRequestEvent{info: info})
-}
-
-func (rt *httpRoundTrip) wait100Continue() {
- rt.addEvent(wait100Continue, nil)
-}
-
-func (rt *httpRoundTrip) addEvent(code httpEventCode, data httpEventData) {
- ts := nanotime()
- rt.mu.Lock()
- defer rt.mu.Unlock()
-
- rt.events = append(rt.events, httpEvent{
- code: code,
- ts: ts,
- data: data,
- })
-}
-
-func (rt *httpRoundTrip) encodeEvents(tb *TraceBuf) {
- rt.mu.Lock()
- n := len(rt.events)
- evs := rt.events[:]
- rt.mu.Unlock()
-
- tb.UVarint(uint64(n))
- for _, e := range evs {
- tb.Bytes([]byte{byte(e.code)})
- tb.Int64(e.ts)
- if e.data != nil {
- e.data.Encode(tb)
- }
- }
-}
-
-func httpBeginRoundTrip(req *http.Request) (context.Context, error) {
- g := encoreGetG()
- if g == nil || g.req == nil || !g.req.data.Traced {
- return req.Context(), nil
- } else if req.URL == nil {
- return nil, fmt.Errorf("http: nil Request.URL")
- }
-
- spanID, err := genSpanID()
- if err != nil {
- return nil, err
- }
-
- reqID := atomic.AddUint64(&httpReqIDCtr, 1)
-
- tb := NewTraceBuf(8 + 4 + 4 + 4 + len(req.Method) + 128)
- tb.UVarint(reqID)
- tb.Bytes(g.req.data.SpanID[:])
- tb.Bytes(spanID[:])
- tb.UVarint(uint64(g.goid))
- tb.String(req.Method)
- tb.String(req.URL.String())
-
- encoreTraceEvent(HTTPCallStart, tb.Buf())
-
- rt := &httpRoundTrip{
- ReqID: reqID,
- SpanID: spanID,
- }
- ctx := context.WithValue(req.Context(), rtKey, rt)
- tr := &httptrace.ClientTrace{
- GetConn: rt.getConn,
- GotConn: rt.gotConn,
- GotFirstResponseByte: rt.gotFirstResponseByte,
- Got1xxResponse: rt.got1xxResponse,
- DNSStart: rt.dnsStart,
- DNSDone: rt.dnsDone,
- ConnectStart: rt.connectStart,
- ConnectDone: rt.connectDone,
- TLSHandshakeStart: rt.tlsHandshakeStart,
- TLSHandshakeDone: rt.tlsHandshakeDone,
- WroteHeaders: rt.wroteHeaders,
- Wait100Continue: rt.wait100Continue,
- WroteRequest: rt.wroteRequest,
- }
- return httptrace.WithClientTrace(ctx, tr), nil
-}
-
-func httpCompleteRoundTrip(req *http.Request, resp *http.Response, err error) {
- rt, ok := req.Context().Value(rtKey).(*httpRoundTrip)
- if !ok {
- return
- }
-
- tb := NewTraceBuf(8 + 4 + 4 + 4)
- tb.UVarint(rt.ReqID)
- if err != nil {
- msg := err.Error()
- if msg == "" {
- msg = "unknown error"
- }
- tb.String(msg)
- tb.UVarint(0)
- } else {
- tb.String("")
- tb.UVarint(uint64(resp.StatusCode))
- }
- rt.encodeEvents(&tb)
- encoreTraceEvent(HTTPCallEnd, tb.Buf())
-
-	if err == nil && req.Method != "HEAD" {
-		resp.Body = wrapRespBody(resp.Body, rt)
-	}
-}
-
-func (rt *httpRoundTrip) ClosedBody(err error) {
- tb := NewTraceBuf(8 + 4)
- tb.UVarint(rt.ReqID)
- if err != nil {
- msg := err.Error()
- if msg == "" {
- msg = "unknown error"
- }
- tb.String(msg)
- } else {
- tb.String("")
- }
- encoreTraceEvent(HTTPCallBodyClosed, tb.Buf())
-}
-
-func wrapRespBody(body io.ReadCloser, rt *httpRoundTrip) io.ReadCloser {
-	if readWriteCloser, ok := body.(io.ReadWriteCloser); ok {
-		return writerCloseTracker{readWriteCloser, rt}
-	}
-	return closeTracker{body, rt}
-}
-
-type closeTracker struct {
- io.ReadCloser
- rt *httpRoundTrip
-}
-
-func (c closeTracker) Close() error {
- err := c.ReadCloser.Close()
- c.rt.ClosedBody(err)
- return err
-}
-
-type writerCloseTracker struct {
- io.ReadWriteCloser
- rt *httpRoundTrip
-}
-
-func (c writerCloseTracker) Close() error {
- err := c.ReadWriteCloser.Close()
- c.rt.ClosedBody(err)
- return err
-}
-
-type httpEvent struct {
- code httpEventCode
- ts int64
- data httpEventData // or nil
-}
-
-type httpEventData interface {
- Encode(tb *TraceBuf)
-}
-
-type httpEventCode byte
-
-const (
- getConn = 0x01
- gotConn = 0x02
- gotFirstResponseByte = 0x03
- got1xxResponse = 0x04
- dnsStart = 0x05
- dnsDone = 0x06
- connectStart = 0x07
- connectDone = 0x08
- tlsHandshakeStart = 0x09
- tlsHandshakeDone = 0x0A
- wroteHeaders = 0x0B
- wroteRequest = 0x0C
- wait100Continue = 0x0D
-)
-
-type getConnEvent struct {
- hostPort string
-}
-
-func (e *getConnEvent) Encode(tb *TraceBuf) {
- tb.String(e.hostPort)
-}
-
-type gotConnEvent struct {
- info httptrace.GotConnInfo
-}
-
-func (e *gotConnEvent) Encode(tb *TraceBuf) {
- tb.Bool(e.info.Reused)
- tb.Bool(e.info.WasIdle)
- tb.Int64(int64(e.info.IdleTime))
-}
-
-type got1xxResponseEvent struct {
- code int
- header textproto.MIMEHeader
-}
-
-func (e *got1xxResponseEvent) Encode(tb *TraceBuf) {
- tb.Varint(int64(e.code))
- // TODO: write header as well?
-}
-
-type dnsStartEvent struct {
- info httptrace.DNSStartInfo
-}
-
-func (e *dnsStartEvent) Encode(tb *TraceBuf) {
- tb.String(e.info.Host)
-}
-
-type dnsDoneEvent struct {
- info httptrace.DNSDoneInfo
-}
-
-func (e *dnsDoneEvent) Encode(tb *TraceBuf) {
- if err := e.info.Err; err != nil {
- msg := err.Error()
- if msg == "" {
- msg = "unknown error"
- }
- tb.String(msg)
- } else {
- tb.String("")
- }
- tb.UVarint(uint64(len(e.info.Addrs)))
- for _, a := range e.info.Addrs {
- tb.ByteString(a.IP)
- }
-}
-
-type connectStartEvent struct {
- network string
- addr string
-}
-
-func (e *connectStartEvent) Encode(tb *TraceBuf) {
- tb.String(e.network)
- tb.String(e.addr)
-}
-
-type connectDoneEvent struct {
- network string
- addr string
- err error
-}
-
-func (e *connectDoneEvent) Encode(tb *TraceBuf) {
- tb.String(e.network)
- tb.String(e.addr)
- tb.Err(e.err)
-}
-
-type tlsHandshakeDoneEvent struct {
- info tls.ConnectionState
- err error
-}
-
-func (e *tlsHandshakeDoneEvent) Encode(tb *TraceBuf) {
- tb.Err(e.err)
- tb.Uint32(uint32(e.info.Version))
- tb.Uint32(uint32(e.info.CipherSuite))
- tb.String(e.info.ServerName)
- tb.String(e.info.NegotiatedProtocol)
-}
-
-type wroteRequestEvent struct {
- info httptrace.WroteRequestInfo
-}
-
-func (e *wroteRequestEvent) Encode(tb *TraceBuf) {
- tb.Err(e.info.Err)
-}
-
-type contextKey int
-
-const (
- rtKey contextKey = iota
-)
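
The hooks wired up in httpBeginRoundTrip are the standard net/http/httptrace client hooks; the same mechanism can be observed in a freestanding program, which clarifies what each traced event captures:

trace := &httptrace.ClientTrace{
	DNSStart: func(i httptrace.DNSStartInfo) { log.Println("dns start:", i.Host) },
	GotConn:  func(i httptrace.GotConnInfo) { log.Println("conn reused:", i.Reused) },
	WroteRequest: func(i httptrace.WroteRequestInfo) {
		log.Println("request written, err:", i.Err)
	},
}
req, _ := http.NewRequest("GET", "https://example.com", nil)
req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
resp, err := http.DefaultClient.Do(req)
if err == nil {
	resp.Body.Close()
}
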
diff --git a/compiler/runtime/runtime/request.go b/compiler/runtime/runtime/request.go
deleted file mode 100644
index 027d788eb1..0000000000
--- a/compiler/runtime/runtime/request.go
+++ /dev/null
@@ -1,349 +0,0 @@
-package runtime
-
-import (
- "fmt"
- "sync/atomic"
- "time"
-
- "encore.dev/runtime/config"
- jsoniter "github.com/json-iterator/go"
- "github.com/rs/zerolog"
-
- // These imports are used only by the generated wrappers in the compiler,
- // but add them here so the 'go' command doesn't remove them from go.mod.
- _ "github.com/felixge/httpsnoop"
-)
-
-var (
- reqIDCtr uint32
- callIDCtr uint64
-)
-
-var (
- RootLogger *zerolog.Logger
- Config *config.ServerConfig
-)
-
-var json = jsoniter.Config{
- EscapeHTML: false,
- SortMapKeys: false,
- ValidateJsonRawMessage: true,
-}.Froze()
-
-type UID string
-
-func BeginOperation() {
- encoreBeginOp(true /* always trace */)
-}
-
-func FinishOperation() {
- encoreFinishOp()
-}
-
-type SpanID [8]byte
-
-type Type byte
-
-const (
- RPCCall Type = 0x01
- AuthHandler Type = 0x02
-)
-
-type Request struct {
- Type Type
- SpanID SpanID
- ParentID SpanID
- UID UID
- AuthData interface{}
-
- Service string
- Endpoint string
- Start time.Time
- Logger zerolog.Logger
- Traced bool
-}
-
-type RequestData struct {
- Type Type
- Service string
- Endpoint string
- CallExprIdx int32
- EndpointExprIdx int32
- Inputs [][]byte
- UID UID
- AuthData interface{}
-}
-
-func BeginRequest(data RequestData) error {
- spanID, err := genSpanID()
- if err != nil {
- return err
- }
- return beginReq(spanID, data)
-}
-
-func FinishRequest(status int, outputs [][]byte, err error) {
- finishReq(status, outputs, err)
-}
-
-type Call struct {
- CallID uint64
- SpanID SpanID
-}
-
-type CallParams struct {
- Service string
- Endpoint string
- CallExprIdx int32
- EndpointExprIdx int32
-}
-
-func BeginCall(params CallParams) (*Call, error) {
- spanID, err := genSpanID()
- if err != nil {
- return nil, err
- }
-
- callID := atomic.AddUint64(&callIDCtr, 1)
-
- if g := encoreGetG(); g != nil && g.req != nil && g.req.data.Traced {
- tb := NewTraceBuf(8 + 4 + 4 + 4)
- tb.UVarint(callID)
- tb.Bytes(g.req.data.SpanID[:])
- tb.Bytes(spanID[:])
- tb.UVarint(uint64(g.goid))
- tb.UVarint(uint64(params.CallExprIdx))
- tb.UVarint(uint64(params.EndpointExprIdx))
- encoreTraceEvent(CallStart, tb.Buf())
- }
-
- return &Call{
- CallID: callID,
- SpanID: spanID,
- }, nil
-}
-
-func (c *Call) Finish(err error) {
- if g := encoreGetG(); g != nil && g.req != nil && g.req.data.Traced {
- tb := NewTraceBuf(8 + 4 + 4 + 4)
- tb.UVarint(c.CallID)
- if err != nil {
- msg := err.Error()
- if msg == "" {
- msg = "unknown error"
- }
- tb.String(msg)
- } else {
- tb.String("")
- }
- encoreTraceEvent(CallEnd, tb.Buf())
- }
-}
-
-func (c *Call) BeginReq(data RequestData) error {
- return beginReq(c.SpanID, data)
-}
-
-func (c *Call) FinishReq(status int, outputs [][]byte, err error) {
- finishReq(status, outputs, err)
-}
-
-type AuthCall struct {
- SpanID SpanID
- CallID uint64
-}
-
-func BeginAuth(authHandlerExprIdx int32, token string) (*AuthCall, error) {
- spanID, err := genSpanID()
- if err != nil {
-		return nil, fmt.Errorf("could not generate span id: %v", err)
- }
- callID := atomic.AddUint64(&callIDCtr, 1)
-
- if g := encoreGetG(); g != nil && g.op.trace != nil {
- tb := NewTraceBuf(8 + 4 + 4 + 4)
- tb.UVarint(callID)
- tb.Bytes(spanID[:])
- tb.UVarint(uint64(g.goid))
- tb.UVarint(uint64(authHandlerExprIdx))
- encoreTraceEvent(AuthStart, tb.Buf())
- }
-
- return &AuthCall{
- SpanID: spanID,
- CallID: callID,
- }, nil
-}
-
-func (ac *AuthCall) Finish(uid UID, err error) {
- if g := encoreGetG(); g != nil && g.op.trace != nil {
- tb := NewTraceBuf(64)
- tb.UVarint(ac.CallID)
- tb.String(string(uid))
- if err != nil {
- msg := err.Error()
- if msg == "" {
- msg = "unknown error"
- }
- tb.String(msg)
- } else {
- tb.String("")
- }
- encoreTraceEvent(AuthEnd, tb.Buf())
- }
-}
-
-func (ac *AuthCall) BeginReq(data RequestData) error {
- return beginReq(ac.SpanID, data)
-}
-
-func (ac *AuthCall) FinishReq(status int, outputs [][]byte, err error) {
- finishReq(status, outputs, err)
-}
-
-func Logger() *zerolog.Logger {
- if req, _, ok := CurrentRequest(); ok {
- return &req.Logger
- }
- return RootLogger
-}
-
-func CurrentRequest() (*Request, uint32, bool) {
- return currentReq()
-}
-
-func currentReq() (*Request, uint32, bool) {
- if g := encoreGetG(); g != nil && g.req != nil {
- return g.req.data, g.goid, true
- }
- return nil, 0, false
-}
-
-func TraceLog(event TraceEvent, data []byte) {
- encoreTraceEvent(event, data)
-}
-
-func SerializeInputs(inputs ...interface{}) ([][]byte, error) {
- var res [][]byte
- for _, input := range inputs {
- data, err := json.Marshal(input)
- if err != nil {
- return nil, fmt.Errorf("could not serialize input %v: %v", input, err)
- }
- res = append(res, data)
- }
- return res, nil
-}
-
-func CopyInputs(inputs [][]byte, outputs []interface{}) error {
- if len(inputs) != len(outputs) {
- panic(fmt.Sprintf("encore.dev/runtime.CopyInputs: len(inputs) != len(outputs): %v != %v",
- len(inputs), len(outputs)))
- }
- for i, data := range inputs {
- if err := json.Unmarshal(data, outputs[i]); err != nil {
-			return fmt.Errorf("could not deserialize input #%d: %v", i, err)
- }
- }
- return nil
-}
-
-func beginReq(spanID SpanID, data RequestData) error {
- req := &Request{
- Type: data.Type,
- SpanID: spanID,
- Service: data.Service,
- Endpoint: data.Endpoint,
- Start: time.Now(),
- UID: data.UID,
- AuthData: data.AuthData,
- }
-
- if prev, _, ok := currentReq(); ok {
- req.UID = prev.UID
- req.AuthData = prev.AuthData
- req.ParentID = prev.SpanID
- encoreClearReq()
- }
-
- encoreBeginReq(spanID, req, true /* always trace */)
-
- ctx := RootLogger.With().
- Str("service", req.Service).
- Str("endpoint", req.Endpoint)
- if req.UID != "" {
- ctx = ctx.Str("uid", string(req.UID))
- }
- req.Logger = ctx.Logger()
-
- g := encoreGetG()
- req.Traced = g.op.trace != nil
- if req.Traced {
- tb := NewTraceBuf(1 + 8 + 8 + 8 + 8 + 8 + 8 + 64)
- tb.Bytes([]byte{byte(req.Type)})
- tb.Bytes(req.SpanID[:])
- tb.Bytes(req.ParentID[:])
- tb.Now()
- tb.UVarint(uint64(g.goid))
- tb.UVarint(uint64(data.CallExprIdx))
- tb.UVarint(uint64(data.EndpointExprIdx))
- tb.String(string(req.UID))
- tb.UVarint(uint64(len(data.Inputs)))
- for _, input := range data.Inputs {
- tb.UVarint(uint64(len(input)))
- tb.Bytes(input)
- }
- encoreTraceEvent(RequestStart, tb.Buf())
- }
-
- switch data.Type {
- case AuthHandler:
- req.Logger.Info().Msg("running auth handler")
- default:
- req.Logger.Info().Msg("starting request")
- }
- return nil
-}
-
-func finishReq(status int, outputs [][]byte, err error) {
- g := encoreGetG()
- if g == nil || g.req == nil {
- panic("encore: no current request running")
- }
-
- req := g.req.data
- if err != nil {
- switch req.Type {
- case AuthHandler:
- req.Logger.Error().Err(err).Msg("auth handler failed")
- default:
- req.Logger.Error().Err(err).Msg("request failed")
- }
- }
-
- if req.Traced {
- tb := NewTraceBuf(64)
- tb.Bytes(req.SpanID[:])
- if err == nil {
- tb.Bytes([]byte{0}) // no error
- tb.UVarint(uint64(len(outputs)))
- for _, output := range outputs {
- tb.UVarint(uint64(len(output)))
- tb.Bytes(output)
- }
- } else {
- tb.Bytes([]byte{1})
- tb.String(err.Error())
- }
- encoreTraceEvent(RequestEnd, tb.Buf())
- }
-
- dur := time.Since(req.Start)
- switch req.Type {
- case AuthHandler:
- req.Logger.Info().Dur("duration", dur).Msg("auth handler completed")
- default:
- req.Logger.Info().Dur("duration", dur).Int("status", status).Msg("request completed")
- }
- encoreCompleteReq()
-}
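
Taken together, a generated wrapper drives a traced call roughly as sketched below; the expression indices are hypothetical, and the caller and callee sides are condensed into one function for brevity:

func callEmailSend(params interface{}) error {
	call, err := runtime.BeginCall(runtime.CallParams{
		Service:         "email",
		Endpoint:        "Send",
		CallExprIdx:     42, // hypothetical trace-expression indices
		EndpointExprIdx: 7,
	})
	if err != nil {
		return err
	}
	inputs, err := runtime.SerializeInputs(params)
	if err != nil {
		return err
	}
	if err := call.BeginReq(runtime.RequestData{
		Type:            runtime.RPCCall,
		Service:         "email",
		Endpoint:        "Send",
		CallExprIdx:     42,
		EndpointExprIdx: 7,
		Inputs:          inputs,
	}); err != nil {
		return err
	}
	// ... invoke the actual handler, capturing outputs and handlerErr ...
	var outputs [][]byte
	var handlerErr error
	call.FinishReq(200, outputs, handlerErr)
	call.Finish(handlerErr)
	return handlerErr
}
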
diff --git a/compiler/runtime/runtime/rtc.go b/compiler/runtime/runtime/rtc.go
deleted file mode 100644
index cb50e2aeb3..0000000000
--- a/compiler/runtime/runtime/rtc.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package runtime
-
-import (
- "bytes"
- "context"
- "encoding/base64"
- "fmt"
- "io/ioutil"
- "log"
- "net/http"
- "os"
-)
-
-var (
- runtimeAddr string
- procID string
-)
-
-func RecordTrace(ctx context.Context, traceID [16]byte, data []byte) error {
- req, err := http.NewRequest("POST", runtimeAddr+"/trace", bytes.NewReader(data))
- if err != nil {
- return err
- }
- id := base64.RawURLEncoding.EncodeToString(traceID[:])
- req.Header.Set("Content-Type", "application/vnd.google.protobuf")
- req.Header.Set("X-Encore-Trace-Version", "v3")
- req.Header.Set("X-Encore-Trace-ID", id)
- req.Header.Set("X-Encore-Proc-ID", procID)
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
- if resp.StatusCode != http.StatusOK {
- body, _ := ioutil.ReadAll(resp.Body)
- return fmt.Errorf("could not record trace: HTTP %s: %s", resp.Status, body)
- }
- return nil
-}
-
-func init() {
- envs := []string{
- "ENCORE_RUNTIME_ADDRESS",
- "ENCORE_PROC_ID",
- }
- var vals []string
- for _, env := range envs {
- val := os.Getenv(env)
- if val == "" {
- log.Fatalf("encore: internal error: %s not set", env)
- }
- vals = append(vals, val)
- }
-
- runtimeAddr = "http://" + vals[0]
- procID = vals[1]
-}
diff --git a/compiler/runtime/runtime/runtime.go b/compiler/runtime/runtime/runtime.go
deleted file mode 100644
index 7d3bb05520..0000000000
--- a/compiler/runtime/runtime/runtime.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package runtime
-
-import (
- "context"
- "net/http"
- "unsafe"
-)
-
-// encoreG tracks per-goroutine Encore-specific data.
-// This must match the definition in the encore-go runtime.
-type encoreG struct {
- // op is the current operation the goroutine is a part of.
- op *encoreOp
-
- // req is request-specific data defined in the Encore runtime.
- req *encoreReq
-
- // goid is the per-op goroutine id.
- goid uint32
-}
-
-// encoreGetG gets the encore data for the current g, or nil.
-//go:linkname encoreGetG runtime.encoreGetG
-func encoreGetG() *encoreG
-
-// encoreOp represents an Encore operation.
-// This must match the definition in the encore-go runtime.
-type encoreOp struct {
- // start is the start time of the operation
- start int64 // start time of trace from nanotime()
-
- // trace is the trace log; it is nil if the op is not traced.
- trace unsafe.Pointer
-
-	// refs is the op refcount: 1 plus the number of requests
-	// that reference this op. It is accessed atomically.
- refs int32
-
- // goidCtr is a per-operation goroutine counter, for telling
- // apart goroutines participating in the operation.
- goidCtr uint32
-}
-
-// encoreReq represents an Encore API request.
-type encoreReq struct {
- // spanID is the request span id.
- spanID SpanID
- // data is request-specific data defined in the Encore runtime.
- data *Request
-}
-
-// encoreBeginOp begins a new Encore operation.
-// The trace parameter determines whether tracing is enabled.
-//
-// It tags the current goroutine with the op.
-// It panics if the goroutine is already part of an op.
-//go:linkname encoreBeginOp runtime.encoreBeginOp
-func encoreBeginOp(trace bool) *encoreOp
-
-// encoreFinishOp marks an operation as finished.
-// It must be part of an operation.
-//go:linkname encoreFinishOp runtime.encoreFinishOp
-func encoreFinishOp()
-
-// encoreTraceEvent adds the event to the trace.
-// The g must already be part of an operation.
-//go:linkname encoreTraceEvent runtime.encoreTraceEvent
-func encoreTraceEvent(event TraceEvent, data []byte)
-
-// encoreBeginReq sets the request data for the current g,
-// and increases the ref count on the operation.
-// It must already be part of an operation.
-//go:linkname encoreBeginReq runtime.encoreBeginReq
-func encoreBeginReq(spanID SpanID, req *Request, trace bool)
-
-// encoreCompleteReq completes the request and decreases the
-// ref count on the operation.
-// The g must be processing a request.
-//go:linkname encoreCompleteReq runtime.encoreCompleteReq
-func encoreCompleteReq()
-
-// encoreClearReq clears request data from the running g
-// without decrementing the ref count.
-// The g must be processing a request.
-//go:linkname encoreClearReq runtime.encoreClearReq
-func encoreClearReq()
-
-// encoreTraceID represents an Encore trace id.
-type encoreTraceID [16]byte
-
-// encoreSendTrace is called by Encore's go runtime to send a trace.
-//go:linkname encoreSendTrace runtime.encoreSendTrace
-func encoreSendTrace(data []byte) {
- go asyncSendTrace(data)
-}
-
-//go:linkname encoreBeginRoundTrip net/http.encoreBeginRoundTrip
-func encoreBeginRoundTrip(req *http.Request) (context.Context, error) {
- return httpBeginRoundTrip(req)
-}
-
-//go:linkname encoreFinishRoundTrip net/http.encoreFinishRoundTrip
-func encoreFinishRoundTrip(req *http.Request, resp *http.Response, err error) {
- httpCompleteRoundTrip(req, resp, err)
-}
-
-//go:linkname nanotime runtime.nanotime
-func nanotime() int64
diff --git a/compiler/runtime/runtime/secrets.go b/compiler/runtime/runtime/secrets.go
deleted file mode 100644
index e6c74ba877..0000000000
--- a/compiler/runtime/runtime/secrets.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package runtime
-
-import (
- "encoding/base64"
- "fmt"
- "os"
- "strings"
-)
-
-func LoadSecret(key string) string {
- val, ok := secrets[key]
- if !ok {
- fmt.Fprintln(os.Stderr, "encore: could not find secret", key)
- os.Exit(2)
- }
- return val
-}
-
-var secrets = loadSecrets()
-
-func loadSecrets() map[string]string {
- const env = "ENCORE_SECRETS"
- encoded := os.Getenv(env)
- os.Unsetenv(env)
- if encoded == "" {
- return nil
- }
-
- // Format is "key1=val1,key2=val2" where values are base64-encoded using RawStdEncoding.
- secrets := make(map[string]string)
- fields := strings.Split(encoded, ",")
- for _, f := range fields {
- eql := strings.IndexByte(f, '=')
- if eql == -1 {
- fmt.Fprintln(os.Stderr, "encore: internal error: invalid ENCORE_SECRETS format")
- os.Exit(2)
- }
- key := f[:eql]
- value, err := base64.RawStdEncoding.DecodeString(f[eql+1:])
- if err != nil {
- fmt.Fprintln(os.Stderr, "encore: internal error: invalid ENCORE_SECRETS format")
- os.Exit(2)
- }
- secrets[key] = string(value)
- }
- return secrets
-}
diff --git a/compiler/runtime/runtime/setup.go b/compiler/runtime/runtime/setup.go
deleted file mode 100644
index 262054fefa..0000000000
--- a/compiler/runtime/runtime/setup.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package runtime
-
-import (
- "fmt"
- "io"
- "net/http"
- "os"
- "runtime"
- "strconv"
- "strings"
-
- "encore.dev/runtime/config"
- "github.com/hashicorp/yamux"
- "github.com/rs/zerolog"
-)
-
-type Server struct {
- logger zerolog.Logger
- handlers map[string]*config.Endpoint
-}
-
-func (srv *Server) handleRPC(service string, endpoint *config.Endpoint) {
- srv.logger.Info().Str("service", service).Str("endpoint", endpoint.Name).Msg("registered endpoint")
- key := service + "." + endpoint.Name
- srv.handlers[key] = endpoint
-}
-
-func (srv *Server) ListenAndServe() error {
- rwc, err := srv.setupConn()
- if err != nil {
- return err
- }
- s, err := yamux.Server(rwc, yamux.DefaultConfig())
- if err != nil {
- return err
- }
- httpsrv := &http.Server{
- Handler: http.HandlerFunc(srv.handler),
- }
- return httpsrv.Serve(s)
-}
-
-func (srv *Server) setupConn() (io.ReadWriteCloser, error) {
- var in, out *os.File
- if runtime.GOOS == "windows" {
- extraFiles := os.Getenv("ENCORE_EXTRA_FILES")
- fds := strings.Split(extraFiles, ",")
- if len(fds) < 2 {
- return nil, fmt.Errorf("could not get request/response file descriptors: %q", extraFiles)
- }
- infd, err1 := strconv.Atoi(fds[0])
- outfd, err2 := strconv.Atoi(fds[1])
- if err1 != nil || err2 != nil {
- return nil, fmt.Errorf("could not parse request/response file descriptors: %q", extraFiles)
- }
- in = os.NewFile(uintptr(infd), "encore-stdin")
- out = os.NewFile(uintptr(outfd), "encore-stdout")
- } else {
- in = os.NewFile(uintptr(3), "encore-stdin")
- out = os.NewFile(uintptr(4), "encore-stdout")
- }
-
- rwc := struct {
- io.Reader
- io.WriteCloser
- }{
- Reader: in,
- WriteCloser: out,
- }
- return rwc, nil
-}
-
-func (srv *Server) handler(w http.ResponseWriter, req *http.Request) {
- endpoint := srv.handlers[strings.TrimPrefix(req.URL.Path, "/")]
- if endpoint == nil {
- http.Error(w, "Endpoint Not Found", http.StatusNotFound)
- return
- }
- endpoint.Handler(w, req)
-}
-
-func Setup(cfg *config.ServerConfig) *Server {
- logger := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).With().Timestamp().Logger()
- RootLogger = &logger
- Config = cfg
-
- srv := &Server{
- logger: logger,
- handlers: make(map[string]*config.Endpoint),
- }
- for _, svc := range cfg.Services {
- for _, endpoint := range svc.Endpoints {
- srv.handleRPC(svc.Name, endpoint)
- }
- }
- return srv
-}
-
-type dummyAddr struct{}
-
-func (dummyAddr) Network() string {
- return "encore"
-}
-
-func (dummyAddr) String() string {
- return "encore://localhost"
-}
diff --git a/compiler/runtime/runtime/tracebuf.go b/compiler/runtime/runtime/tracebuf.go
deleted file mode 100644
index 2fd6655095..0000000000
--- a/compiler/runtime/runtime/tracebuf.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package runtime
-
-import (
- "context"
- "crypto/rand"
- "fmt"
- "os"
- "time"
-
- _ "unsafe" // for go:linkname
-)
-
-type TraceEvent byte
-
-const (
- RequestStart TraceEvent = 0x01
- RequestEnd TraceEvent = 0x02
- GoStart TraceEvent = 0x03
- GoEnd TraceEvent = 0x04
- GoClear TraceEvent = 0x05
- TxStart TraceEvent = 0x06
- TxEnd TraceEvent = 0x07
- QueryStart TraceEvent = 0x08
- QueryEnd TraceEvent = 0x09
- CallStart TraceEvent = 0x0A
- CallEnd TraceEvent = 0x0B
- AuthStart TraceEvent = 0x0C
- AuthEnd TraceEvent = 0x0D
- HTTPCallStart TraceEvent = 0x0E
- HTTPCallEnd TraceEvent = 0x0F
- HTTPCallBodyClosed TraceEvent = 0x10
-)
-
-// genTraceID generates a new trace id and root span id.
-func genTraceID() ([16]byte, error) {
- var traceID [16]byte
- _, err := rand.Read(traceID[:])
- return traceID, err
-}
-
-// genSpanID generates a span id.
-func genSpanID() (span SpanID, err error) {
- _, err = rand.Read(span[:])
- return
-}
-
-func asyncSendTrace(data []byte) {
- if Config.Testing {
- // Don't send traces when running tests
- return
- }
-
- traceID, err := genTraceID()
- if err != nil {
- fmt.Fprintln(os.Stderr, "encore: could not generate trace id:", err)
- return
- }
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- err = RecordTrace(ctx, traceID, data)
- cancel()
- if err != nil {
- fmt.Fprintln(os.Stderr, "encore: could not record trace:", err)
- }
-}
-
-type TraceBuf struct {
- scratch [10]byte
- buf []byte
-}
-
-func NewTraceBuf(size int) TraceBuf {
- return TraceBuf{buf: make([]byte, 0, size)}
-}
-
-func (tb *TraceBuf) Buf() []byte {
- return tb.buf
-}
-
-func (tb *TraceBuf) Bytes(b []byte) {
- tb.buf = append(tb.buf, b...)
-}
-
-func (tb *TraceBuf) String(s string) {
- tb.UVarint(uint64(len(s)))
- tb.Bytes([]byte(s))
-}
-
-func (tb *TraceBuf) ByteString(b []byte) {
- tb.UVarint(uint64(len(b)))
- tb.Bytes(b)
-}
-
-func (tb *TraceBuf) Now() {
- now := time.Now().UnixNano()
- tb.Int64(now)
-}
-
-func (tb *TraceBuf) Bool(b bool) {
- if b {
- tb.Bytes([]byte{1})
- } else {
- tb.Bytes([]byte{0})
- }
-}
-
-func (tb *TraceBuf) Err(err error) {
- msg := ""
- if err != nil {
- msg = err.Error()
- if msg == "" {
- msg = "unknown error"
- }
- }
- tb.String(msg)
-}
-
-func (tb *TraceBuf) Int32(x int32) {
- var u uint32
- if x < 0 {
- u = (^uint32(x) << 1) | 1 // complement i, bit 0 is 1
- } else {
- u = (uint32(x) << 1) // do not complement i, bit 0 is 0
- }
- tb.Uint32(u)
-}
-
-func (tb *TraceBuf) Uint32(x uint32) {
- tb.buf = append(tb.buf,
- byte(x),
- byte(x>>8),
- byte(x>>16),
- byte(x>>24),
- )
-}
-
-func (tb *TraceBuf) Int64(x int64) {
- var u uint64
- if x < 0 {
- u = (^uint64(x) << 1) | 1 // complement i, bit 0 is 1
- } else {
- u = (uint64(x) << 1) // do not complement i, bit 0 is 0
- }
- tb.Uint64(u)
-}
-
-func (tb *TraceBuf) Uint64(x uint64) {
- tb.buf = append(tb.buf,
- byte(x),
- byte(x>>8),
- byte(x>>16),
- byte(x>>24),
- byte(x>>32),
- byte(x>>40),
- byte(x>>48),
- byte(x>>56),
- )
-}
-
-func (tb *TraceBuf) Varint(x int64) {
- var u uint64
- if x < 0 {
- u = (^uint64(x) << 1) | 1 // complement i, bit 0 is 1
- } else {
- u = (uint64(x) << 1) // do not complement i, bit 0 is 0
- }
- tb.UVarint(u)
-}
-
-func (tb *TraceBuf) UVarint(u uint64) {
- i := 0
- for u >= 0x80 {
- tb.scratch[i] = byte(u) | 0x80
- u >>= 7
- i++
- }
- tb.scratch[i] = byte(u)
- i++
- tb.Bytes(tb.scratch[:i])
-}
diff --git a/compiler/runtime/storage/sqldb/sqldb.go b/compiler/runtime/storage/sqldb/sqldb.go
deleted file mode 100644
index 3ed0416366..0000000000
--- a/compiler/runtime/storage/sqldb/sqldb.go
+++ /dev/null
@@ -1,380 +0,0 @@
-package sqldb
-
-import (
- "context"
- "database/sql"
- "fmt"
- "os"
- "sync/atomic"
-
- "encore.dev/runtime"
- "encore.dev/runtime/config"
- "github.com/jackc/pgx/v4"
- "github.com/jackc/pgx/v4/pgxpool"
-)
-
-var (
- txidCounter uint64
- queryCounter uint64
-)
-
-// ExecResult is the result of an Exec query.
-type ExecResult interface {
- // RowsAffected returns the number of rows affected. If the result was not
- // for a row affecting command (e.g. "CREATE TABLE") then it returns 0.
- RowsAffected() int64
-}
-
-func Exec(traceExpr int32, svc string, ctx context.Context, query string, args ...interface{}) (ExecResult, error) {
- qid := atomic.AddUint64(&queryCounter, 1)
- req, goid, _ := runtime.CurrentRequest()
- if req != nil && req.Traced {
- var tb runtime.TraceBuf
- tb.UVarint(qid)
- tb.Bytes(req.SpanID[:])
- tb.UVarint(0) // no tx
- tb.UVarint(uint64(goid))
- tb.UVarint(uint64(traceExpr))
- tb.String(query)
- runtime.TraceLog(runtime.QueryStart, tb.Buf())
- }
-
- res, err := getDB(svc).Exec(ctx, query, args...)
- err = convertErr(err)
-
- if req != nil && req.Traced {
- var tb runtime.TraceBuf
- tb.UVarint(qid)
- if err != nil {
- tb.String(err.Error())
- } else {
- tb.String("")
- }
- runtime.TraceLog(runtime.QueryEnd, tb.Buf())
- }
-
- return res, err
-}
-
-func Query(traceExpr int32, svc string, ctx context.Context, query string, args ...interface{}) (*Rows, error) {
- qid := atomic.AddUint64(&queryCounter, 1)
- req, goid, _ := runtime.CurrentRequest()
- if req != nil && req.Traced {
- var tb runtime.TraceBuf
- tb.UVarint(qid)
- tb.Bytes(req.SpanID[:])
- tb.UVarint(0) // no tx
- tb.UVarint(uint64(goid))
- tb.UVarint(uint64(traceExpr))
- tb.String(query)
- runtime.TraceLog(runtime.QueryStart, tb.Buf())
- }
-
- rows, err := getDB(svc).Query(ctx, query, args...)
- err = convertErr(err)
-
- if req != nil && req.Traced {
- var tb runtime.TraceBuf
- tb.UVarint(qid)
- if err != nil {
- tb.String(err.Error())
- } else {
- tb.String("")
- }
- runtime.TraceLog(runtime.QueryEnd, tb.Buf())
- }
-
- if err != nil {
- return nil, err
- }
- return &Rows{std: rows}, nil
-}
-
-func QueryRow(traceExpr int32, svc string, ctx context.Context, query string, args ...interface{}) *Row {
- qid := atomic.AddUint64(&queryCounter, 1)
- req, goid, _ := runtime.CurrentRequest()
- if req != nil && req.Traced {
- var tb runtime.TraceBuf
- tb.UVarint(qid)
- tb.Bytes(req.SpanID[:])
- tb.UVarint(0) // no tx
- tb.UVarint(uint64(goid))
- tb.UVarint(uint64(traceExpr))
- tb.String(query)
- runtime.TraceLog(runtime.QueryStart, tb.Buf())
- }
-
- rows, err := getDB(svc).Query(ctx, query, args...)
- err = convertErr(err)
- r := &Row{rows: rows, err: err}
-
- if req != nil && req.Traced {
- var tb runtime.TraceBuf
- tb.UVarint(qid)
- if err := r.Err(); err != nil {
- tb.String(err.Error())
- } else {
- tb.String("")
- }
- runtime.TraceLog(runtime.QueryEnd, tb.Buf())
- }
-
- return r
-}
-
-type Tx struct {
- txid uint64
- std pgx.Tx
-}
-
-func Begin(traceExpr int32, svc string, ctx context.Context) (*Tx, error) {
- tx, err := getDB(svc).Begin(ctx)
- err = convertErr(err)
- if err != nil {
- return nil, err
- }
- txid := atomic.AddUint64(&txidCounter, 1)
- req, goid, _ := runtime.CurrentRequest()
- if req != nil && req.Traced {
- var tb runtime.TraceBuf
- tb.UVarint(txid)
- tb.Bytes(req.SpanID[:])
- tb.UVarint(uint64(goid))
- tb.UVarint(uint64(traceExpr))
- runtime.TraceLog(runtime.TxStart, tb.Buf())
- }
-
- return &Tx{txid: txid, std: tx}, nil
-}
-
-func Commit(traceExpr int32, svc string, tx *Tx) error {
- err := tx.std.Commit(context.Background())
- err = convertErr(err)
- req, goid, _ := runtime.CurrentRequest()
- if req != nil && req.Traced {
- var tb runtime.TraceBuf
- tb.UVarint(tx.txid)
- tb.Bytes(req.SpanID[:])
- tb.UVarint(uint64(goid))
- tb.Bytes([]byte{1})
- tb.UVarint(uint64(traceExpr))
- if err != nil {
- tb.String(err.Error())
- } else {
- tb.String("")
- }
- runtime.TraceLog(runtime.TxEnd, tb.Buf())
- }
- return err
-}
-
-func Rollback(traceExpr int32, svc string, tx *Tx) error {
- err := tx.std.Rollback(context.Background())
- err = convertErr(err)
- req, goid, _ := runtime.CurrentRequest()
- if req != nil && req.Traced {
- var tb runtime.TraceBuf
- tb.UVarint(tx.txid)
- tb.Bytes(req.SpanID[:])
- tb.UVarint(uint64(goid))
- tb.Bytes([]byte{0})
- tb.UVarint(uint64(traceExpr))
- if err != nil {
- tb.String(err.Error())
- } else {
- tb.String("")
- }
- runtime.TraceLog(runtime.TxEnd, tb.Buf())
- }
- return err
-}
-
-func ExecTx(traceExpr int32, svc string, tx *Tx, ctx context.Context, query string, args ...interface{}) (ExecResult, error) {
- qid := atomic.AddUint64(&queryCounter, 1)
- req, goid, _ := runtime.CurrentRequest()
- if req != nil && req.Traced {
- var tb runtime.TraceBuf
- tb.UVarint(qid)
- tb.Bytes(req.SpanID[:])
- tb.UVarint(tx.txid)
- tb.UVarint(uint64(goid))
- tb.UVarint(uint64(traceExpr))
- tb.String(query)
- runtime.TraceLog(runtime.QueryStart, tb.Buf())
- }
-
- res, err := tx.std.Exec(ctx, query, args...)
- err = convertErr(err)
-
- if req != nil && req.Traced {
- var tb runtime.TraceBuf
- tb.UVarint(qid)
- if err != nil {
- tb.String(err.Error())
- } else {
- tb.String("")
- }
- runtime.TraceLog(runtime.QueryEnd, tb.Buf())
- }
-
- return res, err
-}
-
-func QueryTx(traceExpr int32, svc string, tx *Tx, ctx context.Context, query string, args ...interface{}) (*Rows, error) {
- qid := atomic.AddUint64(&queryCounter, 1)
- req, goid, _ := runtime.CurrentRequest()
- if req != nil && req.Traced {
- var tb runtime.TraceBuf
- tb.UVarint(qid)
- tb.Bytes(req.SpanID[:])
- tb.UVarint(tx.txid)
- tb.UVarint(uint64(goid))
- tb.UVarint(uint64(traceExpr))
- tb.String(query)
- runtime.TraceLog(runtime.QueryStart, tb.Buf())
- }
-
- rows, err := tx.std.Query(ctx, query, args...)
- err = convertErr(err)
-
- if req != nil && req.Traced {
- var tb runtime.TraceBuf
- tb.UVarint(qid)
- if err != nil {
- tb.String(err.Error())
- } else {
- tb.String("")
- }
- runtime.TraceLog(runtime.QueryEnd, tb.Buf())
- }
-
- if err != nil {
- return nil, err
- }
- return &Rows{std: rows}, nil
-}
-
-func QueryRowTx(traceExpr int32, svc string, tx *Tx, ctx context.Context, query string, args ...interface{}) *Row {
- qid := atomic.AddUint64(&queryCounter, 1)
- req, goid, _ := runtime.CurrentRequest()
- if req != nil && req.Traced {
- var tb runtime.TraceBuf
- tb.UVarint(qid)
- tb.Bytes(req.SpanID[:])
- tb.UVarint(tx.txid)
- tb.UVarint(uint64(goid))
- tb.UVarint(uint64(traceExpr))
- tb.String(query)
- runtime.TraceLog(runtime.QueryStart, tb.Buf())
- }
-
- // pgx currently does not support .Err() on Row.
- // Work around this by using Query.
- rows, err := tx.std.Query(ctx, query, args...)
- err = convertErr(err)
- r := &Row{rows: rows, err: err}
-
- if req != nil && req.Traced {
- var tb runtime.TraceBuf
- tb.UVarint(qid)
- if err := r.Err(); err != nil {
- tb.String(err.Error())
- } else {
- tb.String("")
- }
- runtime.TraceLog(runtime.QueryEnd, tb.Buf())
- }
-
- return r
-}
-
-type Rows struct {
- std pgx.Rows
-}
-
-func (r *Rows) Close() { r.std.Close() }
-func (r *Rows) Scan(dest ...interface{}) error { return r.std.Scan(dest...) }
-func (r *Rows) Err() error { return r.std.Err() }
-func (r *Rows) Next() bool { return r.std.Next() }
-
-type Row struct {
- rows pgx.Rows
- err error
-}
-
-func (r *Row) Scan(dest ...interface{}) error {
- if r.err != nil {
- return r.err
- }
- if !r.rows.Next() {
- if err := r.rows.Err(); err != nil {
- return convertErr(err)
- }
- return sql.ErrNoRows
- }
- r.rows.Scan(dest...)
- r.rows.Close()
- return convertErr(r.rows.Err())
-}
-
-func (r *Row) Err() error {
- if r.err != nil {
- return r.err
- }
- return convertErr(r.rows.Err())
-}
-
-var dbMap atomic.Value
-
-func setDBs(dbs map[string]*pgxpool.Pool) {
- dbMap.Store(dbs)
-}
-
-func getDB(svc string) *pgxpool.Pool {
- dbs, ok := dbMap.Load().(map[string]*pgxpool.Pool)
- if !ok {
- panic("sqldb: service not initialized: " + svc)
- }
- db, ok := dbs[svc]
- if !ok {
- panic("sqldb: could not find database for service " + svc)
- }
- return db
-}
-
-func Setup(cfg *config.ServerConfig) {
- addr := os.Getenv("ENCORE_SQLDB_ADDRESS")
- passwd := os.Getenv("ENCORE_SQLDB_PASSWORD")
- dbs := make(map[string]*pgxpool.Pool)
- for _, svc := range cfg.Services {
- if svc.SQLDB {
- if addr == "" {
- panic("sqldb: ENCORE_SQLDB_ADDRESS not set")
- }
-
- uri := fmt.Sprintf("postgresql://encore:%s@%s/%s?sslmode=disable",
- passwd, addr, svc.Name)
- cfg, err := pgxpool.ParseConfig(uri)
- if err != nil {
- panic("sqldb: invalid database uri: " + err.Error())
- }
- cfg.LazyConnect = true
- cfg.MaxConns = 30
- pool, err := pgxpool.ConnectConfig(context.Background(), cfg)
- if err != nil {
- panic("sqldb: setup db: " + err.Error())
- }
- dbs[svc.Name] = pool
- }
- }
- setDBs(dbs)
-}
-
-func convertErr(err error) error {
- switch err {
- case pgx.ErrNoRows:
- return sql.ErrNoRows
- default:
- return err
- }
-}
diff --git a/compiler/runtime/types/uuid/generator.go b/compiler/runtime/types/uuid/generator.go
deleted file mode 100644
index 00cf88e065..0000000000
--- a/compiler/runtime/types/uuid/generator.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright (C) 2013-2018 by Maxim Bublis
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-package uuid
-
-import (
- "crypto/md5"
- "crypto/rand"
- "crypto/sha1"
- "encoding/binary"
- "hash"
- "io"
- "os"
- "sync"
- "time"
-)
-
-// Difference in 100-nanosecond intervals between
-// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
-const epochStart = 122192928000000000
-
-type epochFunc func() time.Time
-
-var (
- posixUID = uint32(os.Getuid())
- posixGID = uint32(os.Getgid())
-)
-
-// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name.
-func NewV3(ns UUID, name string) UUID {
- return g.NewV3(ns, name)
-}
-
-// NewV4 returns a randomly generated UUID.
-func NewV4() (UUID, error) {
- return g.NewV4()
-}
-
-// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name.
-func NewV5(ns UUID, name string) UUID {
- return g.NewV5(ns, name)
-}
-
-// gen is a reference UUID generator based on the specifications laid out in
-// RFC-4122 and DCE 1.1: Authentication and Security Services. This type
-// satisfies the Generator interface as defined in this package.
-//
-// For consumers who are generating V1 UUIDs, but don't want to expose the MAC
-// address of the node generating the UUIDs, the NewGenWithHWAF() function has been
-// provided as a convenience. See the function's documentation for more info.
-//
-// The authors of this package do not feel that the majority of users will need
-// to obfuscate their MAC address, and so we recommend using NewGen() to create
-// a new generator.
-type gen struct {
- clockSequenceOnce sync.Once
- storageMutex sync.Mutex
-
- rand io.Reader
-
- epochFunc epochFunc
- lastTime uint64
- clockSequence uint16
-}
-
-// newGen returns a new instance of gen with some default values set.
-var g = &gen{
- epochFunc: time.Now,
- rand: rand.Reader,
-}
-
-// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name.
-func (g *gen) NewV3(ns UUID, name string) UUID {
- u := newFromHash(md5.New(), ns, name)
- u.SetVersion(V3)
- u.setVariant(variantRFC4122)
-
- return u
-}
-
-// NewV4 returns a randomly generated UUID.
-func (g *gen) NewV4() (UUID, error) {
- u := UUID{}
- if _, err := io.ReadFull(g.rand, u[:]); err != nil {
- return Nil, err
- }
- u.SetVersion(V4)
- u.setVariant(variantRFC4122)
-
- return u, nil
-}
-
-// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name.
-func (g *gen) NewV5(ns UUID, name string) UUID {
- u := newFromHash(sha1.New(), ns, name)
- u.SetVersion(V5)
- u.setVariant(variantRFC4122)
-
- return u
-}
-
-// Returns the epoch and clock sequence.
-func (g *gen) getClockSequence() (uint64, uint16, error) {
- var err error
- g.clockSequenceOnce.Do(func() {
- buf := make([]byte, 2)
- if _, err = io.ReadFull(g.rand, buf); err != nil {
- return
- }
- g.clockSequence = binary.BigEndian.Uint16(buf)
- })
- if err != nil {
- return 0, 0, err
- }
-
- g.storageMutex.Lock()
- defer g.storageMutex.Unlock()
-
- timeNow := g.getEpoch()
- // Clock didn't change since last UUID generation.
- // Should increase clock sequence.
- if timeNow <= g.lastTime {
- g.clockSequence++
- }
- g.lastTime = timeNow
-
- return timeNow, g.clockSequence, nil
-}
-
-// Returns the difference between UUID epoch (October 15, 1582)
-// and current time in 100-nanosecond intervals.
-func (g *gen) getEpoch() uint64 {
- return epochStart + uint64(g.epochFunc().UnixNano()/100)
-}
-
-// Returns the UUID based on the hashing of the namespace UUID and name.
-func newFromHash(h hash.Hash, ns UUID, name string) UUID {
- u := UUID{}
- h.Write(ns[:])
- h.Write([]byte(name))
- copy(u[:], h.Sum(nil))
-
- return u
-}
diff --git a/compiler/runtime/types/uuid/generator_test.go b/compiler/runtime/types/uuid/generator_test.go
deleted file mode 100644
index 7d7b4c71be..0000000000
--- a/compiler/runtime/types/uuid/generator_test.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright (C) 2013-2018 by Maxim Bublis
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-package uuid
-
-import (
- "bytes"
- "crypto/rand"
- "fmt"
- "testing"
- "time"
-)
-
-func TestGenerator(t *testing.T) {
- t.Run("NewV3", testNewV3)
- t.Run("NewV4", testNewV4)
- t.Run("NewV5", testNewV5)
-}
-
-func testNewV3(t *testing.T) {
- t.Run("Basic", testNewV3Basic)
- t.Run("EqualNames", testNewV3EqualNames)
- t.Run("DifferentNamespaces", testNewV3DifferentNamespaces)
-}
-
-func testNewV3Basic(t *testing.T) {
- ns := namespaceDNS
- name := "www.example.com"
- u := NewV3(ns, name)
- if got, want := u.Version(), V3; got != want {
- t.Errorf("NewV3(%v, %q): got version %d, want %d", ns, name, got, want)
- }
- if got, want := u.variant(), variantRFC4122; got != want {
- t.Errorf("NewV3(%v, %q): got variant %d, want %d", ns, name, got, want)
- }
- want := "5df41881-3aed-3515-88a7-2f4a814cf09e"
- if got := u.String(); got != want {
- t.Errorf("NewV3(%v, %q) = %q, want %q", ns, name, got, want)
- }
-}
-
-func testNewV3EqualNames(t *testing.T) {
- ns := namespaceDNS
- name := "example.com"
- u1 := NewV3(ns, name)
- u2 := NewV3(ns, name)
- if u1 != u2 {
- t.Errorf("NewV3(%v, %q) generated %v and %v across two calls", ns, name, u1, u2)
- }
-}
-
-func testNewV3DifferentNamespaces(t *testing.T) {
- name := "example.com"
- ns1 := namespaceDNS
- ns2 := namespaceURL
- u1 := NewV3(ns1, name)
- u2 := NewV3(ns2, name)
- if u1 == u2 {
- t.Errorf("NewV3(%v, %q) == NewV3(%d, %q) (%v)", ns1, name, ns2, name, u1)
- }
-}
-
-func testNewV4(t *testing.T) {
- t.Run("Basic", testNewV4Basic)
- t.Run("DifferentAcrossCalls", testNewV4DifferentAcrossCalls)
- t.Run("FaultyRand", testNewV4FaultyRand)
- t.Run("ShortRandomRead", testNewV4ShortRandomRead)
-}
-
-func testNewV4Basic(t *testing.T) {
- u, err := NewV4()
- if err != nil {
- t.Fatal(err)
- }
- if got, want := u.Version(), V4; got != want {
- t.Errorf("got version %d, want %d", got, want)
- }
- if got, want := u.variant(), variantRFC4122; got != want {
- t.Errorf("got variant %d, want %d", got, want)
- }
-}
-
-func testNewV4DifferentAcrossCalls(t *testing.T) {
- u1, err := NewV4()
- if err != nil {
- t.Fatal(err)
- }
- u2, err := NewV4()
- if err != nil {
- t.Fatal(err)
- }
- if u1 == u2 {
- t.Errorf("generated identical UUIDs across calls: %v", u1)
- }
-}
-
-func testNewV4FaultyRand(t *testing.T) {
- g := &gen{
- epochFunc: time.Now,
- rand: &faultyReader{
- readToFail: 0, // fail immediately
- },
- }
- u, err := g.NewV4()
- if err == nil {
- t.Errorf("got %v, nil error", u)
- }
-}
-
-func testNewV4ShortRandomRead(t *testing.T) {
- g := &gen{
- epochFunc: time.Now,
- rand: bytes.NewReader([]byte{42}),
- }
- u, err := g.NewV4()
- if err == nil {
- t.Errorf("got %v, nil error", u)
- }
-}
-
-func testNewV5(t *testing.T) {
- t.Run("Basic", testNewV5Basic)
- t.Run("EqualNames", testNewV5EqualNames)
- t.Run("DifferentNamespaces", testNewV5DifferentNamespaces)
-}
-
-func testNewV5Basic(t *testing.T) {
- ns := namespaceDNS
- name := "www.example.com"
- u := NewV5(ns, name)
- if got, want := u.Version(), V5; got != want {
- t.Errorf("NewV5(%v, %q): got version %d, want %d", ns, name, got, want)
- }
- if got, want := u.variant(), variantRFC4122; got != want {
- t.Errorf("NewV5(%v, %q): got variant %d, want %d", ns, name, got, want)
- }
- want := "2ed6657d-e927-568b-95e1-2665a8aea6a2"
- if got := u.String(); got != want {
- t.Errorf("NewV5(%v, %q) = %q, want %q", ns, name, got, want)
- }
-}
-
-func testNewV5EqualNames(t *testing.T) {
- ns := namespaceDNS
- name := "example.com"
- u1 := NewV5(ns, name)
- u2 := NewV5(ns, name)
- if u1 != u2 {
- t.Errorf("NewV5(%v, %q) generated %v and %v across two calls", ns, name, u1, u2)
- }
-}
-
-func testNewV5DifferentNamespaces(t *testing.T) {
- name := "example.com"
- ns1 := namespaceDNS
- ns2 := namespaceURL
- u1 := NewV5(ns1, name)
- u2 := NewV5(ns2, name)
- if u1 == u2 {
- t.Errorf("NewV5(%v, %q) == NewV5(%v, %q) (%v)", ns1, name, ns2, name, u1)
- }
-}
-
-func BenchmarkGenerator(b *testing.B) {
- b.Run("NewV3", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- NewV3(namespaceDNS, "www.example.com")
- }
- })
- b.Run("NewV4", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- NewV4()
- }
- })
- b.Run("NewV5", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- NewV5(namespaceDNS, "www.example.com")
- }
- })
-}
-
-type faultyReader struct {
- callsNum int
- readToFail int // Read call number to fail
-}
-
-func (r *faultyReader) Read(dest []byte) (int, error) {
- r.callsNum++
- if (r.callsNum - 1) == r.readToFail {
- return 0, fmt.Errorf("io: reader is faulty")
- }
- return rand.Read(dest)
-}
diff --git a/compiler/test.go b/compiler/test.go
deleted file mode 100644
index 5947041963..0000000000
--- a/compiler/test.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package compiler
-
-import (
- "context"
- "encoding/json"
- "io"
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
-)
-
-type TestConfig struct {
- // Env sets environment variables for "go test".
- Env []string
-
- // Args sets extra arguments for "go test".
- Args []string
-
- // Stdout and Stderr are where to redirect "go test" output.
- Stdout, Stderr io.Writer
-}
-
-// Test tests the application.
-//
-// On success, it is the caller's responsibility to delete the temp dir
-// returned in Result.Dir.
-func Test(ctx context.Context, appRoot string, cfg *Config) error {
- if err := cfg.Validate(); err != nil {
- return err
- }
-
- b := &builder{
- cfg: cfg,
- appRoot: appRoot,
- parseTests: true,
- }
- return b.Test(ctx)
-}
-
-func (b *builder) Test(ctx context.Context) (err error) {
- defer func() {
- if e := recover(); e != nil {
- if b, ok := e.(bailout); ok {
- err = b.err
- } else {
- panic(e)
- }
- }
- }()
-
- b.workdir, err = ioutil.TempDir("", "encore-test")
- if err != nil {
- return err
- }
- defer os.RemoveAll(b.workdir)
-
- for _, fn := range []func() error{
- b.parseApp,
- b.writeModFile,
- b.writeSumFile,
- b.writePackages,
- b.writeTestMains,
- } {
- if err := fn(); err != nil {
- return err
- }
- }
- return b.runTests(ctx)
-}
-
-func (b *builder) writeTestMains() error {
- for _, pkg := range b.res.App.Packages {
- if err := b.generateTestMain(pkg); err != nil {
- return err
- }
- }
- return nil
-}
-
-// runTests runs "go test".
-func (b *builder) runTests(ctx context.Context) error {
- overlayData, _ := json.Marshal(map[string]interface{}{"Replace": b.overlay})
- overlayPath := filepath.Join(b.workdir, "overlay.json")
- if err := ioutil.WriteFile(overlayPath, overlayData, 0644); err != nil {
- return err
- }
-
- args := []string{
- "test",
- "-tags=encore",
- "-overlay=" + overlayPath,
- "-modfile=" + filepath.Join(b.workdir, "go.mod"),
- "-mod=mod",
- "-vet=off",
- }
- args = append(args, b.cfg.Test.Args...)
- cmd := exec.CommandContext(ctx, filepath.Join(b.cfg.EncoreGoRoot, "bin", "go"+exe), args...)
- env := []string{
- "GO111MODULE=on",
- "GOROOT=" + b.cfg.EncoreGoRoot,
- }
- if !b.cfg.CgoEnabled {
- env = append(env, "CGO_ENABLED=0")
- }
- env = append(env, b.cfg.Test.Env...)
- cmd.Env = append(os.Environ(), env...)
- cmd.Dir = filepath.Join(b.appRoot, b.cfg.WorkingDir)
- cmd.Stdout = b.cfg.Test.Stdout
- cmd.Stderr = b.cfg.Test.Stderr
- return cmd.Run()
-}
diff --git a/compiler/tmpl/main.go.tmpl b/compiler/tmpl/main.go.tmpl
deleted file mode 100644
index 0a0a4665e9..0000000000
--- a/compiler/tmpl/main.go.tmpl
+++ /dev/null
@@ -1,263 +0,0 @@
-package main
-
-import (
- "context"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "runtime/debug"
- "strconv"
- "strings"
-
- {{if .AuthHandler}}"encore.dev/beta/auth"{{end}}
- "encore.dev/runtime"
- "encore.dev/runtime/config"
- "encore.dev/storage/sqldb"
- jsoniter "github.com/json-iterator/go"
- "github.com/felixge/httpsnoop"
-
- {{range .Imports -}}
- {{if .Named}}{{.Name}} {{end}}"{{.Path}}"
- {{end}}
-)
-
-// Prevent unused imports
-var (
- _ = context.Background
- _ = debug.Stack
- _ = fmt.Errorf
- _ = http.Error
- _ = io.EOF
- _ = ioutil.ReadAll
- _ = strconv.Quote
- _ = strings.HasPrefix
- _ = httpsnoop.CaptureMetrics
-)
-
-var json = jsoniter.Config{
- EscapeHTML: false,
- SortMapKeys: true,
- ValidateJsonRawMessage: true,
-}.Froze()
-
-{{range .Svcs}}
-{{- range .RPCs}}
-func encore_{{.Svc.Name}}_{{.Name}}(w http.ResponseWriter, req *http.Request) {
- runtime.BeginOperation()
- defer runtime.FinishOperation()
- var err error
-{{- if $.AuthHandler}}
-
- var (
- uid auth.UID
- token string
- authData interface{}
- )
- if auth := req.Header.Get("Authorization"); strings.HasPrefix(auth, "Bearer ") {
- token = auth[len("Bearer "):]
- uid, authData, err = validateAuth(req.Context(), token)
- if err != nil {
- http.Error(w, "Internal Server Error", http.StatusInternalServerError)
- return
- }
- }
-{{- if requiresAuth .}}
- if uid == "" {
- if token == "" {
- runtime.Logger().Info().
- Str("service", "{{.Svc.Name}}").
- Str("endpoint", "{{.Name}}").
- Msg("rejecting request due to missing auth token")
- }
- http.Error(w, "Unauthorized", http.StatusUnauthorized)
- return
- }
-{{- end}}
-{{- end}}
-
-{{- if .Raw}}
- err = runtime.BeginRequest(runtime.RequestData{
- Type: runtime.RPCCall,
- Service: "{{.Svc.Name}}",
- Endpoint: "{{.Name}}",
- CallExprIdx: 0,
- EndpointExprIdx: {{traceExpr .}},
- Inputs: nil,
-{{- if $.AuthHandler}}
- UID: uid,
- AuthData: authData,
-{{- end}}
- })
- if err != nil {
- http.Error(w, "Internal Server Error", http.StatusInternalServerError)
- return
- }
-
- // Call the endpoint
- m := httpsnoop.CaptureMetrics(http.HandlerFunc({{.Svc.Name}}.{{.Name}}), w, req)
- if m.Code >= 400 {
- err = fmt.Errorf("response status code %d", m.Code)
- }
- runtime.FinishRequest(m.Code, nil, err)
-{{- else }}
-{{- if .Request}}
-
- payload, err := ioutil.ReadAll(req.Body)
- if err != nil {
- http.Error(w, "Bad Request", http.StatusBadRequest)
- return
- }
- inputs := [][]byte{payload}
-{{- end}}
-
- err = runtime.BeginRequest(runtime.RequestData{
- Type: runtime.RPCCall,
- Service: "{{.Svc.Name}}",
- Endpoint: "{{.Name}}",
- CallExprIdx: 0,
- EndpointExprIdx: {{traceExpr .}},
- Inputs: {{if .Request}}inputs{{else}}nil{{end}},
-{{- if $.AuthHandler}}
- UID: uid,
- AuthData: authData,
-{{- end}}
- })
- if err != nil {
- http.Error(w, "Internal Server Error", http.StatusInternalServerError)
- return
- }
-{{- if .Request}}
-
- // Parse the request payload
- var params {{typeName .Request}}
-	err = json.Unmarshal(payload, &params)
- if err != nil {
- if len(payload) == 0 {
- err = fmt.Errorf("no request data given") // improve error message
- }
- runtime.FinishRequest(400, nil, fmt.Errorf("could not parse request: %v", err))
- http.Error(w, "could not parse request: " + err.Error(), http.StatusBadRequest)
- return
- }
-{{- end}}
-
- // Call the endpoint
- defer func() {
- // Catch handler panic
- if err := recover(); err != nil {
- runtime.FinishRequest(500, nil, fmt.Errorf("panic handling request: %v\n%s", err, debug.Stack()))
- http.Error(w, "Internal Server Error", http.StatusInternalServerError)
- }
- }()
- {{if .Response}}resp, {{end}}respErr := {{pkgName .Svc.Root.ImportPath}}.{{.Name}}(req.Context(){{if .Request}}, params{{end}})
- if respErr != nil {
- runtime.FinishRequest(500, nil, respErr)
- http.Error(w, "Internal Server Error", http.StatusInternalServerError)
- return
- }
-
-{{- if .Response}}
-
- // Serialize the response
- var respData []byte
- respData, marshalErr := json.MarshalIndent(resp, "", " ")
- if marshalErr != nil {
- runtime.FinishRequest(500, nil, marshalErr)
- http.Error(w, "Internal Server Error", http.StatusInternalServerError)
- return
- }
- respData = append(respData, '\n')
- output := [][]byte{respData}
- runtime.FinishRequest(200, output, nil)
- w.WriteHeader(200)
- w.Write(respData)
-{{- else}}
- runtime.FinishRequest(200, nil, nil)
- w.WriteHeader(200)
-{{- end}}
-{{- end}}
-}
-{{end}}
-{{end}}
-
-{{- with .AuthHandler}}
-func validateAuth(ctx context.Context, token string) (uid auth.UID, authData interface{}, authErr error) {
- if token == "" {
- return "", nil, nil
- }
- done := make(chan struct{})
- call, err := runtime.BeginAuth({{traceExpr .}}, token)
- if err != nil {
- return "", nil, err
- }
-
- go func() {
- defer close(done)
- authErr = call.BeginReq(runtime.RequestData{
- Type: runtime.AuthHandler,
- Service: "{{.Svc.Name}}",
- Endpoint: "{{.Name}}",
- CallExprIdx: 0,
- EndpointExprIdx: {{traceExpr .}},
- Inputs: [][]byte{[]byte(strconv.Quote(token))},
- })
- if authErr != nil {
- return
- }
- defer func() {
- if err2 := recover(); err2 != nil {
- call.FinishReq(500, nil, fmt.Errorf("auth handler panicked: %v\n%s", err2, debug.Stack()))
- authErr = fmt.Errorf("auth handler panicked: %v\n%s", err2, debug.Stack())
- }
- }()
-{{- if $.AuthHandler.AuthData}}
- uid, authData, authErr = {{pkgName .Svc.Root.ImportPath}}.{{.Name}}(ctx, token)
- serialized, _ := runtime.SerializeInputs(uid, authData)
-{{- else}}
- uid, authErr = {{pkgName .Svc.Root.ImportPath}}.{{.Name}}(ctx, token)
- serialized, _ := runtime.SerializeInputs(uid)
-{{- end}}
- if authErr != nil {
- call.FinishReq(500, nil, authErr)
- } else {
- call.FinishReq(200, serialized, nil)
- }
- }()
- <-done
- call.Finish(uid, authErr)
- return uid, authData, authErr
-}
-{{- end}}
-
-var srv *runtime.Server
-
-func main() {
- // Register the Encore services
- services := []*config.Service{
-{{- range .Svcs}}
- {
- Name: "{{.Name}}",
- RelPath: "{{.Root.RelPath}}",
- SQLDB: {{usesSQLDB .}},
- Endpoints: []*config.Endpoint{
-{{- range .RPCs}}
- {
- Name: "{{.Name}}",
- Raw: {{.Raw}},
- Handler: encore_{{.Svc.Name}}_{{.Name}},
- },
-{{- end}}
- },
- },
-{{- end}}
- }
-
- cfg := &config.ServerConfig{
- Services: services,
- Testing: false,
- }
- srv = runtime.Setup(cfg)
- sqldb.Setup(cfg)
- srv.ListenAndServe()
-}
\ No newline at end of file
diff --git a/compiler/tmpl/pkg.go.tmpl b/compiler/tmpl/pkg.go.tmpl
deleted file mode 100644
index a0d3f28c09..0000000000
--- a/compiler/tmpl/pkg.go.tmpl
+++ /dev/null
@@ -1,98 +0,0 @@
-package {{.Pkg.Name}}
-
-import (
- "context"
- "errors"
- "fmt"
- "runtime/debug"
-
- "encore.dev/runtime"
- {{range .Imports -}}
- {{if .Named}}{{.Name}} {{end}}"{{.Path}}"
- {{end}}
-)
-
-{{range $rpcIdx, $rpc := .RPCs}}
-func encore_{{$rpc.Svc}}_{{$rpc.Name}}(callExprIdx, endpointExprIdx int32, ctx context.Context
- {{- if $rpc.Req}}, req {{$rpc.Req}}{{end}}) (
- {{- if $rpc.Resp}}resp {{$rpc.Resp}}, {{end}}err error) {
-{{- if $rpc.Req}}
- inputs, err := runtime.SerializeInputs(req)
- if err != nil {
- return
- }
-{{- else}}
- var inputs [][]byte
-{{- end}}
- call, err := runtime.BeginCall(runtime.CallParams{
- Service: "{{$rpc.Svc}}",
- Endpoint: "{{$rpc.Name}}",
- CallExprIdx: callExprIdx,
- EndpointExprIdx: endpointExprIdx,
- })
- if err != nil {
- return
- }
-
- // Run the request in a different goroutine
- var response struct {
- data [][]byte
- err error
- panicked bool
- }
- done := make(chan struct{})
- go func() {
- defer close(done)
- err := call.BeginReq(runtime.RequestData{
- Type: runtime.RPCCall,
- Service: "{{$rpc.Svc}}",
- Endpoint: "{{$rpc.Name}}",
- CallExprIdx: callExprIdx,
- EndpointExprIdx: endpointExprIdx,
- Inputs: inputs,
- })
- if err != nil {
- response.err = err
- return
- }
- defer func() {
- if err2 := recover(); err2 != nil {
- call.FinishReq(500, nil, fmt.Errorf("{{$rpc.Svc}}.{{$rpc.Name}} panicked: %v\n%s", err2, debug.Stack()))
- response.err = fmt.Errorf("panic handling request: %v", err2)
- response.panicked = true
- }
- }()
-
-{{- if $rpc.Req}}
- var reqData {{$rpc.Req}}
- if response.err = runtime.CopyInputs(inputs, []interface{}{&reqData}); response.err != nil {
- call.FinishReq(500, nil, response.err)
- return
- }
-{{- end}}
-
- {{if $rpc.Resp}}rpcResp, {{end}}rpcErr := {{$rpc.Func}}(ctx{{if $rpc.Req}}, req{{end}})
-{{- if $rpc.Resp}}
- response.data, _ = runtime.SerializeInputs(rpcResp)
-{{- end}}
- if rpcErr != nil {
- call.FinishReq(500, nil, rpcErr)
- response.err = errors.New(rpcErr.Error())
- } else {
- call.FinishReq(200, response.data, nil)
- }
- }()
- <-done
-
- call.Finish(response.err)
-{{- if $rpc.Resp}}
- // If the handler panicked we won't have any response data.
- if !response.panicked {
- _ = runtime.CopyInputs(response.data, []interface{}{&resp})
- }
- return resp, response.err
-{{- else}}
- return response.err
-{{- end}}
-}
-{{end}}
\ No newline at end of file
diff --git a/compiler/tmpl/testmain.go.tmpl b/compiler/tmpl/testmain.go.tmpl
deleted file mode 100644
index 279bb24f9a..0000000000
--- a/compiler/tmpl/testmain.go.tmpl
+++ /dev/null
@@ -1,33 +0,0 @@
-package {{.Pkg.Name}}
-
-import (
- "os"
- "testing"
-
- "encore.dev/runtime"
- "encore.dev/runtime/config"
- "encore.dev/storage/sqldb"
-)
-
-func TestMain(m *testing.M) {
- // Register the Encore services
- services := []*config.Service{
-{{- range .Svcs}}
- {
- Name: "{{.Name}}",
- RelPath: "{{.Root.RelPath}}",
- SQLDB: {{usesSQLDB .}},
- Endpoints: nil,
- },
-{{- end}}
- }
-
- // Set up the Encore runtime
- cfg := &config.ServerConfig{
- Services: services,
- Testing: true,
- }
- runtime.Setup(cfg)
- sqldb.Setup(cfg)
- os.Exit(m.Run())
-}
\ No newline at end of file
diff --git a/compiler/wrappers.go b/compiler/wrappers.go
deleted file mode 100644
index 72f7482887..0000000000
--- a/compiler/wrappers.go
+++ /dev/null
@@ -1,288 +0,0 @@
-package compiler
-
-import (
- _ "embed" // for go:embed
- "fmt"
- "os"
- "path/filepath"
- "sort"
- "strconv"
- "text/template"
-
- "encr.dev/parser/est"
-)
-
-var (
- //go:embed tmpl/main.go.tmpl
- mainTmpl string
- //go:embed tmpl/pkg.go.tmpl
- pkgTmpl string
- //go:embed tmpl/testmain.go.tmpl
- testMainTmpl string
-)
-
-const mainPkgName = "__encore_main"
-
-func (b *builder) writeMainPkg() error {
- imp := new(importMap)
-
- funcs := template.FuncMap{
- "pkgName": func(path string) string {
- return imp.Name(path)
- },
- "traceExpr": func(obj interface{}) int32 {
- switch obj := obj.(type) {
- case *est.RPC:
- return b.res.Nodes[obj.Svc.Root][obj.Func].Id
- case *est.AuthHandler:
- return b.res.Nodes[obj.Svc.Root][obj.Func].Id
- default:
- panic(fmt.Sprintf("unexpected obj %T in traceExpr", obj))
- }
- },
- "typeName": func(param *est.Param) string {
- return b.typeName(param, imp)
- },
- "usesSQLDB": func(svc *est.Service) bool {
- for _, s := range b.res.Meta.Svcs {
- if s.Name == svc.Name {
- return len(s.Migrations) > 0
- }
- }
- return false
- },
- "requiresAuth": func(rpc *est.RPC) bool {
- return rpc.Access == est.Auth
- },
- }
- tmpl := template.Must(template.New("mainPkg").Funcs(funcs).Parse(mainTmpl))
- // Write the file to disk
- dir := filepath.Join(b.workdir, mainPkgName)
- if err := os.Mkdir(dir, 0755); err != nil {
- return err
- }
- mainPath := filepath.Join(dir, "main.go")
- file, err := os.Create(mainPath)
- if err != nil {
- return err
- }
- defer func() {
- if err2 := file.Close(); err == nil {
- err = err2
- }
- }()
-
- for _, svc := range b.res.App.Services {
- imp.Add(svc.Name, svc.Root.ImportPath)
- for _, rpc := range svc.RPCs {
- if r := rpc.Request; r != nil {
- imp.Add(r.Decl.Loc.PkgName, r.Decl.Loc.PkgPath)
- }
- if r := rpc.Response; r != nil {
- imp.Add(r.Decl.Loc.PkgName, r.Decl.Loc.PkgPath)
- }
- }
- }
- if h := b.res.App.AuthHandler; h != nil {
- imp.Add(h.Svc.Name, h.Svc.Root.ImportPath)
- }
-
- tmplParams := struct {
- Imports []importName
- Svcs []*est.Service
- AppVersion string
- AppRoot string
- AuthHandler *est.AuthHandler
- }{
- Svcs: b.res.App.Services,
- AppVersion: b.cfg.Version,
- AppRoot: b.appRoot,
- AuthHandler: b.res.App.AuthHandler,
- Imports: imp.Imports(),
- }
-
- b.addOverlay(filepath.Join(b.appRoot, mainPkgName, "main.go"), mainPath)
- return tmpl.Execute(file, tmplParams)
-}
-
-func (b *builder) generateWrappers(pkg *est.Package, rpcs []*est.RPC, wrapperPath string) (err error) {
- type rpcDesc struct {
- Name string
- Svc string
- Req string
- Resp string
- Func string
- }
-
- tmpl := template.Must(template.New("pkg").Parse(pkgTmpl))
-
- file, err := os.Create(wrapperPath)
- if err != nil {
- return err
- }
- defer func() {
- if err2 := file.Close(); err == nil {
- err = err2
- }
- }()
-
- var rpcDescs []*rpcDesc
- imp := &importMap{from: pkg.ImportPath}
- for _, rpc := range rpcs {
- rpcPkg := rpc.Svc.Root
- req := b.typeName(rpc.Request, imp)
- resp := b.typeName(rpc.Response, imp)
- fn := rpc.Name
- if n := imp.Add(rpcPkg.Name, rpcPkg.ImportPath); n.Name != "" {
- fn = n.Name + "." + fn
- }
- rpcDescs = append(rpcDescs, &rpcDesc{
- Name: rpc.Name,
- Svc: rpc.Svc.Name,
- Req: req,
- Resp: resp,
- Func: fn,
- })
- }
-
- tmplParams := struct {
- Pkg *est.Package
- RPCs []*rpcDesc
- Imports []importName
- }{
- Pkg: pkg,
- RPCs: rpcDescs,
- Imports: imp.Imports(),
- }
-
- return tmpl.Execute(file, tmplParams)
-}
-
-func (b *builder) generateTestMain(pkg *est.Package) (err error) {
- funcs := template.FuncMap{
- "usesSQLDB": func(svc *est.Service) bool {
- for _, s := range b.res.Meta.Svcs {
- if s.Name == svc.Name {
- return len(s.Migrations) > 0
- }
- }
- return false
- },
- }
-
- tmpl := template.Must(template.New("testmain").Funcs(funcs).Parse(testMainTmpl))
-
- // Write the file to disk
- testMainPath := filepath.Join(b.workdir, filepath.FromSlash(pkg.RelPath), "encore_testmain_test.go")
- file, err := os.Create(testMainPath)
- if err != nil {
- return err
- }
- defer func() {
- if err2 := file.Close(); err == nil {
- err = err2
- }
- }()
-
- tmplParams := struct {
- Pkg *est.Package
- Svcs []*est.Service
- }{
- Pkg: pkg,
- Svcs: b.res.App.Services,
- }
- b.addOverlay(filepath.Join(pkg.Dir, "encore_testmain_test.go"), testMainPath)
- return tmpl.Execute(file, tmplParams)
-}
-
-// importMap manages imports for a given file, and ensures each import
-// is given a unique name even in the presence of name collisions.
-// The zero value is ready to be used.
-type importMap struct {
- from string // from is the import path the code is running in
- names map[string]importName
- paths map[string]importName
-}
-
-type importName struct {
- Name string
- Path string
- Named bool
-}
-
-func (i *importMap) Add(name, path string) importName {
- if path == i.from {
- return importName{}
- }
-
- if i.names == nil {
- i.names = make(map[string]importName)
- i.paths = make(map[string]importName)
- }
-
- named := false
- if p, ok := i.paths[path]; ok {
- // Already imported
- return p
- } else if _, ok := i.names[name]; ok {
- // Name collision; generate a unique name
- for j := 2; ; j++ {
- candidate := name + strconv.Itoa(j)
- if _, ok := i.names[candidate]; !ok {
- name = candidate
- named = true
- break
- }
- }
- }
-
- n := importName{
- Name: name,
- Path: path,
- Named: named,
- }
- i.names[name] = n
- i.paths[path] = n
- return n
-}
-
-func (i *importMap) Imports() []importName {
- var names []importName
- for _, n := range i.paths {
- names = append(names, n)
- }
- sort.Slice(names, func(i, j int) bool {
- return names[i].Path < names[j].Path
- })
- return names
-}
-
-func (i *importMap) Name(path string) string {
- name, ok := i.paths[path]
- if !ok {
- panic(fmt.Sprintf("internal error: no import found for %q", path))
- }
- return name.Name
-}
-
-// typeName computes the type name for a given param
-// from the perspective of from and if necessary
-// adds the import to the import map.
-//
-// If param is nil, it returns "".
-func (b *builder) typeName(param *est.Param, imp *importMap) string {
- if param == nil {
- return ""
- }
-
- decl := param.Decl
- typName := decl.Name
- n := imp.Add(decl.Loc.PkgName, decl.Loc.PkgPath)
- if n.Name != "" {
- typName = n.Name + "." + typName
- }
- if param.IsPtr {
- return "*" + typName
- }
- return typName
-}
diff --git a/docs/go/cli/cli-reference.md b/docs/go/cli/cli-reference.md
new file mode 100644
index 0000000000..9912303411
--- /dev/null
+++ b/docs/go/cli/cli-reference.md
@@ -0,0 +1,345 @@
+---
+seotitle: Encore CLI Reference
+seodesc: The Encore CLI lets you run your local development environment, create apps, and much more. See all CLI commands in this reference guide.
+title: CLI Reference
+subtitle: The Encore CLI lets you run your local environment and much more.
+lang: go
+---
+
+## Running
+
+#### Run
+
+Runs your application.
+
+```shell
+$ encore run [--debug] [--watch=true] [--port NUMBER] [flags]
+```
+
+#### Test
+
+Tests your application.
+
+Takes all the same flags as `go test`.
+
+```shell
+$ encore test ./... [go test flags]
+```
+
+#### Check
+
+Checks your application for compile-time errors using Encore's compiler.
+
+```shell
+$ encore check
+```
+
+#### Exec
+
+Runs executable scripts against the local Encore app.
+
+It compiles and runs a Go script with the local Encore app's environment set up.
+
+```shell
+$ encore exec <command-dir> [...args]
+```
+
+The command directory should contain Go files in package `main` with a `main` function.
+
+The additional arguments are passed directly to the built binary.
+
+##### Example
+
+Run a database seed script
+
+```shell
+$ encore exec cmd/seed
+```
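+
+As a rough sketch (the package path and seeding logic below are illustrative, not part of the CLI), such a script is an ordinary `main` package inside your app:
+
+```go
+// cmd/seed/main.go — hypothetical example. Any arguments after the
+// command directory are passed through to the built binary verbatim.
+package main
+
+import (
+	"fmt"
+	"os"
+)
+
+func main() {
+	fmt.Println("seeding with args:", os.Args[1:])
+	if err := seed(); err != nil {
+		fmt.Fprintln(os.Stderr, "seed failed:", err)
+		os.Exit(1)
+	}
+}
+
+// seed would insert fixture data, e.g. via your app's service packages.
+func seed() error {
+	return nil
+}
+```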
+
+## App
+
+Commands to create and link Encore apps
+
+#### Clone
+
+Clone an Encore app to your computer
+
+```shell
+$ encore app clone [app-id] [directory]
+```
+
+#### Create
+
+Create a new Encore app
+
+```shell
+$ encore app create [name]
+```
+
+#### Init
+
+Create a new Encore app from an existing repository
+
+```shell
+$ encore app init [name]
+```
+
+#### Link
+
+Link an Encore app with the server
+
+```shell
+$ encore app link [app-id]
+```
+
+## Auth
+
+Commands to authenticate with Encore
+
+#### Login
+
+Log in to Encore
+
+```shell
+$ encore auth login
+```
+
+#### Logout
+
+Logs out the currently logged in user
+
+```shell
+$ encore auth logout
+```
+
+#### Signup
+
+Create a new Encore account
+
+```shell
+$ encore auth signup
+```
+
+#### Whoami
+
+Shows the currently logged in user
+
+```shell
+$ encore auth whoami
+```
+
+## Daemon
+
+Encore CLI daemon commands
+
+#### Restart
+
+If you experience unexpected behavior, try restarting the daemon using:
+
+```shell
+$ encore daemon
+```
+
+#### Env
+
+Outputs Encore environment information
+
+```shell
+$ encore daemon env
+```
+
+## Database Management
+
+Database management commands
+
+#### Connect to database via shell
+
+Connects to the database via psql shell
+
+Defaults to connecting to your local environment. Specify `--env` to connect to another environment.
+
+```shell
+$ encore db shell <database-name> [--env=<name>]
+```
+
+`encore db shell` defaults to read-only permissions. Use the `--write`, `--admin`, and `--superuser` flags to modify which permissions you connect with.
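+
+For example, to connect to a hypothetical `todo` database in the production environment with write permissions:
+
+```shell
+$ encore db shell todo --env=production --write
+```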
+
+#### Connection URI
+
+Outputs a database connection string for `<database-name>`. Defaults to connecting to your local environment. Specify `--env` to connect to another environment.
+
+```shell
+$ encore db conn-uri <database-name> [--env=<name>] [flags]
+```
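+
+For example, to open a `psql` session using the connection string for a hypothetical `todo` database:
+
+```shell
+$ psql "$(encore db conn-uri todo)"
+```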
+
+#### Proxy
+
+Sets up a local proxy that forwards any incoming connection to the databases in the specified environment.
+
+```shell
+$ encore db proxy [--env=<name>] [flags]
+```
+
+#### Reset
+
+Resets the databases for the given services. Use `--all` to reset all databases.
+
+```shell
+$ encore db reset [service-names...] [flags]
+```
+
+## Code Generation
+
+Code generation commands
+
+#### Generate client
+
+Generates an API client for your app. For more information about the generated clients, see [this page](/docs/go/cli/client-generation).
+
+By default, `encore gen client` generates the client based on the version of your application currently running in your local environment.
+You can change this using the `--env` flag and specifying the environment name.
+
+Use `--lang=<lang>` to specify the language. Supported language codes are:
+
+- `go`: A Go client using the net/http package
+- `typescript`: A TypeScript client using the in-browser Fetch API
+- `javascript`: A JavaScript client using the in-browser Fetch API
+- `openapi`: An OpenAPI spec
+
+```shell
+$ encore gen client <app-id> [--env=<name>] [--services=foo,bar] [--excluded-services=baz,qux] [--lang=<lang>] [flags]
+```
+
+## Logs
+
+Streams logs from your application
+
+```shell
+$ encore logs [--env=prod] [--json]
+```
+
+## Kubernetes
+
+Kubernetes management commands
+
+#### Configure
+
+Updates your kubectl config to point to the Kubernetes cluster(s) for the specified environment
+
+```shell
+$ encore k8s configure --env=ENV_NAME
+```
+
+## Secrets Management
+
+Secret management commands
+
+#### Set
+
+Sets a secret value
+
+```shell
+$ encore secret set --type <types> <key>
+```
+
+Where `<types>` defines which environment types the secret value applies to. Use a comma-separated list of `production`, `development`, `preview`, and `local`. Shorthands: `prod`, `dev`, `pr`.
+
+**Examples**
+
+Entering a secret directly in the terminal:
+
+ $ encore secret set --type dev MySecret
+ Enter secret value: ...
+ Successfully created secret value for MySecret.
+
+Piping a secret from a file:
+
+ $ encore secret set --type dev,local MySecret < my-secret.txt
+ Successfully created secret value for MySecret.
+
+Note that this strips trailing newlines from the secret value.
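+
+For reference, application code reads a secret set this way by declaring an unexported `secrets` struct. A minimal sketch (the package and secret name are illustrative):
+
+```go
+package email
+
+// Encore resolves each field from the secret of the same name,
+// as set via `encore secret set`.
+var secrets struct {
+	MySecret string
+}
+
+func apiToken() string {
+	return secrets.MySecret
+}
+```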
+
+#### List
+
+Lists secrets, optionally for a specific key
+
+```shell
+$ encore secret list [keys...]
+```
+
+#### Archive
+
+Archives a secret value
+
+```shell
+$ encore secret archive <id>
+```
+
+#### Unarchive
+
+Unarchives a secret value
+
+```shell
+$ encore secret unarchive <id>
+```
+
+## Version
+
+Reports the currently installed version of Encore
+
+```shell
+$ encore version
+```
+
+#### Update
+
+Checks for an update of Encore and, if one is available, runs the appropriate command to update it.
+
+```shell
+$ encore version update
+```
+
+## VPN
+
+VPN management commands
+
+#### Start
+
+Sets up a secure connection to private environments
+
+```shell
+$ encore vpn start
+```
+
+#### Status
+
+Determines the status of the VPN connection
+
+```shell
+$ encore vpn status
+```
+
+#### Stop
+
+Stops the VPN connection
+
+```shell
+$ encore vpn stop
+```
+
+## Build
+
+Generates an image for your app, which can be used to [self-host](/docs/go/self-host/docker-build) it.
+
+#### Docker
+
+Builds a portable Docker image of your Encore application.
+
+```shell
+$ encore build docker
+```
+
+**Flags**
+
+- `--base string` defines the base image to build from (default `"scratch"`)
+- `--push` pushes the image to a remote repository
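+
+For example, to build from a different base image (the image name is illustrative):
+
+```shell
+$ encore build docker --base=ubuntu:22.04
+```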
diff --git a/docs/go/cli/client-generation.md b/docs/go/cli/client-generation.md
new file mode 100644
index 0000000000..e74bd09fd4
--- /dev/null
+++ b/docs/go/cli/client-generation.md
@@ -0,0 +1,199 @@
+---
+seotitle: Automatic API Client Generation
+seodesc: Learn how you can use automatic API client generation to get clients for your backend. See how to integrate with your frontend using a type-safe generated client.
+title: Client Library Generation
+subtitle: Stop writing the same types everywhere
+lang: go
+---
+
+Encore makes it simple to write scalable distributed backends by allowing you to make function calls that Encore translates into RPC calls. Encore also generates API clients with interfaces that look like the original Go functions, with the same parameters and response signature as the server.
+
+The generated clients are single files that use only the standard functionality of the target language, with full type safety. This allows anyone to look at the generated client and understand exactly how it works.
+
+The structure of the generated code varies by language, to ensure it's idiomatic and easy to use, but always includes all publicly accessible endpoints, data structures, and documentation strings.
+
+Encore currently supports generating the following clients:
+- **Go** - Using `net/http` for the underlying HTTP transport.
+- **TypeScript** - Using the browser `fetch` API for the underlying HTTP client.
+- **JavaScript** - Using the browser `fetch` API for the underlying HTTP client.
+- **OpenAPI** - Using the OpenAPI Specification's language-agnostic interface to HTTP APIs. (Experimental)
+
+If there's a language you think should be added, please submit a pull request or create a feature
+request on [GitHub](https://github.com/encoredev/encore/issues/new), or [reach out on Discord](/discord).
+
+
+
+If you ship the generated client to end customers, keep in mind that old clients will continue to be used after you make changes. To prevent issues with the generated clients, avoid making breaking changes in APIs that your clients access.
+
+
+
+
+
+## Generating a Client
+
+To generate a client, use the `encore gen client` command. It generates a type-safe client based on the most recent API metadata
+for the version of your application running in a particular environment. For example:
+
+```shell
+# Generate a TypeScript client for calling the hello-a8bc application based on the primary environment
+encore gen client hello-a8bc --output=./client.ts
+
+# Generate a Go client for the hello-a8bc application based on the locally running code
+encore gen client hello-a8bc --output=./client.go --env=local
+
+# Generate an OpenAPI client for the hello-a8bc application based on the primary environment
+encore gen client hello-a8bc --lang=openapi --output=./openapi.json
+```
+
+### Environment Selection
+
+By default, `encore gen client` generates the client based on the version of your application currently running in your local environment.
+You can change this using the `--env` flag and specifying the environment name.
+
+
+
+The generated client can be used with any environment, not just the one it was generated for. However, the APIs, data structures
+and marshalling logic will be based on whatever is present and running in that environment at the point in time the client is generated.
+
+
+
+### Service filtering
+
+By default `encore gen client` outputs code for all services with at least one publicly accessible (or authenticated) API.
+You can narrow down this set of services by specifying the `--services` (or `-s`) flag. It takes a comma-separated list
+of service names.
+
+For example, to generate a TypeScript client for the `email` and `users` services, run:
+```shell
+encore gen client --services=email,users -o client.ts
+```
+
+### Output Mode
+
+By default the client's code is output to stdout, allowing you to pipe it into your clipboard or another tool. However,
+using `--output` you can specify a file location to write the client to. If an output file is specified, you do not need to specify
+the language, as Encore will detect it based on the file extension.
+
+
+### Example Script
+You could add this to the `package.json` file of your TypeScript frontend, allowing you to run `npm run gen` in that
+project to update the client to match the code running in your staging environment.
+```json
+{
+ "scripts": {
+ // ...
+ "gen": "encore gen client hello-a8bc --output=./client.ts --env=staging"
+ // ...
+ }
+}
+```
+
+## Using the Client
+
+The generated client contains all the data structures that are accepted as parameters or returned as responses by any
+of the public or authenticated APIs of your Encore application. Each service is exposed as an object on the client, with
+each public or authenticated API exposed as a function on that object.
+
+For instance, if you had a service called `email` with a function `Send`, on the generated client you would call it
+using `client.email.Send(...)`.
+
+For more tips and examples of using a generated JavaScript/Typescript client, see the [Integrate with a web frontend](/docs/how-to/integrate-frontend#generating-a-request-client) docs.
+
+### Creating an instance
+
+When constructing a client, you need to pass a `BaseURL` as the first parameter; this is the URL at which the API can
+be accessed. The client provides two helpers:
+
+- `Local` - A provided constant that always points at your locally running instance.
+- `Environment("name")` - A function that allows you to specify an environment by name.
+
+However, `BaseURL` is a string type, so if the two helpers do not provide enough flexibility you can pass any valid URL to be
+used as the `BaseURL`.
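+
+For example, here's a minimal Go sketch (assuming a generated package named `client`, as in the CLI example further below; the `client.BaseURL` string conversion is an assumption about the generated code):
+
+```go
+package main
+
+import "shorten_cli/client" // hypothetical generated client package
+
+func newClients() {
+    // Point at your locally running instance.
+    local, _ := client.New(client.Local)
+
+    // Point at a named environment.
+    staging, _ := client.New(client.Environment("staging"))
+
+    // BaseURL is a string type, so any valid URL works too.
+    custom, _ := client.New(client.BaseURL("https://api.example.com"))
+
+    _, _, _ = local, staging, custom
+}
+```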
+
+### Authentication
+
+If your application has any APIs that require [authentication](/docs/develop/auth), additional options are generated
+into the client, which can be used when constructing it. Just like with API schemas, the data type required by
+your application's `auth handler` will be part of the client library, allowing you to set it in two ways:
+
+If your credentials won't change during the lifetime of the client, simply pass the authentication data to the client
+through the `WithAuth` (Go) or `auth` (TypeScript) option.
+
+However, if the authentication credentials can change, you can instead pass a function that is called before each request
+and returns either a new instance of the authentication data structure or the existing one.
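+
+As a sketch in Go (the `WithAuthFunc` option name and the string auth-data type are assumptions about the generated code; check your generated client for the exact names):
+
+```go
+package main
+
+import (
+    "context"
+    "os"
+
+    "shorten_cli/client" // hypothetical generated client package
+)
+
+// Static credentials that won't change during the client's lifetime.
+func newStaticClient() (*client.Client, error) {
+    return client.New(
+        client.Environment("staging"),
+        client.WithAuth(os.Getenv("API_TOKEN")),
+    )
+}
+
+// Changing credentials: a function called before each request.
+func newRefreshingClient() (*client.Client, error) {
+    return client.New(
+        client.Environment("staging"),
+        client.WithAuthFunc(func(ctx context.Context) (string, error) {
+            return loadFreshToken(ctx) // hypothetical token refresher
+        }),
+    )
+}
+
+func loadFreshToken(ctx context.Context) (string, error) { return "token", nil }
+```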
+
+
+### HTTP Client Override
+
+If required, you can override the underlying HTTP implementation with your own. This is useful if you want
+to log the requests being made, or to route the traffic over a secured tunnel such as a VPN.
+
+In Go this can be configured using the `WithHTTPClient` option; you must provide an implementation of the
+`HTTPDoer` interface, which [http.Client](https://pkg.go.dev/net/http#Client) implements. For TypeScript clients,
+this can be configured using the `fetcher` option, which must conform to the same prototype as the browser's built-in
+[fetch API](https://developer.mozilla.org/en-US/docs/Web/API/fetch).
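+
+For example, here's a minimal Go sketch of a logging `HTTPDoer` (again assuming a generated package named `client`):
+
+```go
+package main
+
+import (
+    "log"
+    "net/http"
+
+    "shorten_cli/client" // hypothetical generated client package
+)
+
+// loggingDoer logs every request before delegating to a real HTTP client.
+type loggingDoer struct {
+    inner *http.Client
+}
+
+func (d *loggingDoer) Do(req *http.Request) (*http.Response, error) {
+    log.Printf("%s %s", req.Method, req.URL)
+    return d.inner.Do(req)
+}
+
+func newLoggingClient() (*client.Client, error) {
+    return client.New(
+        client.Environment("staging"),
+        client.WithHTTPClient(&loggingDoer{inner: http.DefaultClient}),
+    )
+}
+```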
+
+### Structured Errors
+
+Errors created or wrapped using Encore's [`errs` package](/docs/develop/errors) will be returned to the client and deserialized
+as an `APIError`, allowing the client to perform adaptive error handling based on the type of error returned. You can
+type-check an error caused by calling an API to see if it is an `APIError`, and once cast as an `APIError` you can access
+the `Code`, `Message` and `Details` fields. For TypeScript, Encore generates an `isAPIError` type guard which can be used.
+
+The `Code` field is an enum with all the possible values generated into the library, along with descriptions of when you
+can expect them to be returned by your API. See the [errors documentation](/docs/develop/errors#error-codes) for
+an online reference of this list.
+
+## Example CLI Tool
+
+For instance, we could build a simple CLI application that uses our [url shortener](/docs/tutorials/rest-api) and handles
+any structured errors in a way that makes sense for each error code.
+
+```go
+package main
+
+import (
+    "context"
+    "fmt"
+    "os"
+    "time"
+
+    "shorten_cli/client"
+)
+
+func main() {
+    // Create a client pointing at the production environment,
+    // authenticated with an API key from the environment.
+    c, err := client.New(
+        client.Environment("production"),
+        client.WithAuth(os.Getenv("SHORTEN_API_KEY")),
+    )
+    if err != nil {
+        panic(err)
+    }
+
+    // Time out if the request takes more than 5 seconds.
+    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+    defer cancel()
+
+    // Call the Shorten function in the URL service.
+    resp, err := c.Url.Shorten(
+        ctx,
+        client.UrlShortenParams{URL: os.Args[1]},
+    )
+    if err != nil {
+        // Check whether the error is a structured APIError.
+        if apiErr, ok := err.(*client.APIError); ok {
+            switch apiErr.Code {
+            case client.ErrUnauthenticated:
+                fmt.Println("SHORTEN_API_KEY was invalid, please check your environment")
+                os.Exit(1)
+            case client.ErrAlreadyExists:
+                fmt.Println("The URL you provided was already shortened")
+                os.Exit(0)
+            }
+        }
+        panic(err) // something has gone wrong in an unexpected way
+    }
+    fmt.Printf("https://short.encr.app/%s\n", resp.ID)
+}
+```
diff --git a/docs/go/cli/config-reference.md b/docs/go/cli/config-reference.md
new file mode 100644
index 0000000000..5f05dbdc96
--- /dev/null
+++ b/docs/go/cli/config-reference.md
@@ -0,0 +1,50 @@
+---
+seotitle: Encore CLI Configuration Options
+seodesc: Configuration options to customize the behavior of the Encore CLI.
+title: Configuration Reference
+subtitle: Configuration options to customize the behavior of the Encore CLI.
+lang: go
+---
+
+
+The Encore CLI has a number of configuration options to customize its behavior.
+
+Configuration options can be set both for individual Encore applications, as well as
+globally for the local user.
+
+Configuration options can be set using `encore config <key> <value>`,
+and options can similarly be read using `encore config <key>`.
+
+When running `encore config` within an Encore application, it automatically
+sets and gets configuration for that application.
+
+To set or get global configuration, use the `--global` flag.
+
+## Configuration files
+
+The configuration is stored in one or more TOML files on the filesystem.
+
+The configuration is read from the following files, in order:
+
+### Global configuration
+* `$XDG_CONFIG_HOME/encore/config`
+* `$HOME/.config/encore/config`
+* `$HOME/.encoreconfig`
+
+### Application-specific configuration
+* `$APP_ROOT/.encore/config`
+
+Where `$APP_ROOT` is the directory containing the `encore.app` file.
+
+The files are read and merged, in the order defined above, with latter files taking precedence over earlier files.
+
+## Configuration options
+
+#### run.browser
+- Type: `string`
+- Default: `auto`
+- Must be one of: `always`, `never`, or `auto`
+
+Whether to open the Local Development Dashboard in the browser on `encore run`.
+If set to "auto", the browser will be opened if the dashboard is not already open.
+
diff --git a/docs/go/cli/infra-namespaces.md b/docs/go/cli/infra-namespaces.md
new file mode 100644
index 0000000000..4f3b656832
--- /dev/null
+++ b/docs/go/cli/infra-namespaces.md
@@ -0,0 +1,63 @@
+---
+seotitle: Infrastructure Namespaces
+seodesc: Learn how Encore's infrastructure namespaces makes it easy to task switch. Stash your infrastructure state and switch to a different task with a single command.
+title: Infrastructure Namespaces
+subtitle: Task switching made easy
+lang: go
+---
+
+Encore's CLI allows you to create and switch between multiple, independent *infrastructure namespaces*.
+Infrastructure namespaces are isolated from each other, and each namespace contains its own independent data.
+
+This makes it trivial to switch tasks, confident your old state and data will be waiting for you when you return.
+
+If you've ever worked on a new feature that involves making changes to the database schema,
+only to have to context-switch to reviewing a Pull Request and reset your database, you know the feeling.
+
+With Encore's infrastructure namespaces, this is a problem of the past.
+Run `encore namespace switch --create pr:123` (or `encore ns switch -c pr:123` for short) to create and switch to a new namespace.
+
+The next `encore run` will run in the new namespace, with a completely fresh database.
+When you're done, run `encore namespace switch -` to switch back to your previous namespace.
+
+## Usage
+
+Below are the commands for working with namespaces.
+Note that you can use `encore ns` as a short form for `encore namespace`.
+
+```shell
+# List your namespaces (* indicates the current namespace)
+$ encore namespace list
+
+# Create a new namespace
+$ encore namespace create my-ns
+
+# Switch to a namespace
+$ encore namespace switch my-ns
+
+# Switch to a namespace, creating it if it doesn't exist
+$ encore namespace switch --create my-ns
+
+# Switch to the previous namespace
+$ encore namespace switch -
+
+# Delete a namespace (and all associated data)
+$ encore namespace delete my-ns
+```
+
+Most other Encore commands that interact with or use infrastructure take an optional
+`--namespace` flag (`-n` for short) that overrides the current namespace. If left unspecified,
+the current namespace is used.
+
+For example:
+
+```shell
+# Run the app using the "my-ns" namespace
+$ encore run --namespace my-ns
+
+# Open a database shell to the "my-ns" namespace
+$ encore db shell DATABASE_NAME --namespace my-ns
+
+# Reset all databases within the "my-ns" namespace
+$ encore db reset --all --namespace my-ns
+```
diff --git a/docs/go/cli/mcp.md b/docs/go/cli/mcp.md
new file mode 100644
index 0000000000..c5c03e156c
--- /dev/null
+++ b/docs/go/cli/mcp.md
@@ -0,0 +1,125 @@
+---
+seotitle: Encore MCP Server
+seodesc: Encore's Model Context Protocol (MCP) server provides deep introspection of your application to AI development tools.
+title: MCP Server
+subtitle: The Model Context Protocol (MCP) server exposes tools that provide application context to LLMs.
+lang: go
+---
+
+Encore provides an MCP server that implements the [Model Context Protocol](https://modelcontextprotocol.io/introduction), an open standard that enables large language models (LLMs) to access contextual information about your application. Think of MCP as a standardized interface—like a "USB-C port for AI applications"—that connects your Encore app's data and functionality to any LLM that supports the protocol.
+
+You can connect to Encore's MCP server from any MCP host (such as Claude Desktop, IDEs, or other AI tools) using either Server-Sent Events (SSE) or stdio transport. To set up this connection, simply run:
+
+```bash
+cd my-encore-app
+encore mcp start
+
+ MCP Service is running!
+
+ MCP SSE URL: http://localhost:9900/sse?app=your-app-id
+ MCP stdio Command: encore mcp run --app=your-app-id
+```
+
+Copy the appropriate URL or command to your MCP host's configuration, and you're ready to give your AI assistants rich context about your application.
+
+## Example: Integrating with Cursor
+
+[Cursor](https://cursor.com) is one of the most popular AI-powered IDEs, and it's simple to use Encore's MCP server together with Cursor.
+
+The fastest way to add the Encore MCP server to Cursor is via the button below (make sure to update `your-app-id` in the configuration to your actual Encore app ID).
+
+
+
+If you prefer to configure it manually, create the file `.cursor/mcp.json` with the following settings:
+
+```json
+{
+ "mcpServers": {
+ "encore-mcp": {
+ "command": "encore",
+ "args": ["mcp", "run", "--app=your-app-id"]
+ }
+ }
+}
+```
+
+Learn more in [Cursor's MCP docs](https://docs.cursor.com/context/model-context-protocol).
+
+Now when using Cursor's Agent mode, you can ask it to do advanced actions, such as:
+
+"Add an endpoint that publishes to a pub/sub topic, call it and verify that the publish is in the traces"
+
+## Command Reference
+
+#### Start
+
+Starts an SSE-based MCP server and displays connection information.
+
+```shell
+$ encore mcp start [--app=<app-id>]
+```
+
+#### Run
+
+Establishes an stdio-based MCP session. This command is typically used by MCP hosts to communicate with the server through standard input/output streams.
+
+```shell
+$ encore mcp run [--app=<app-id>]
+```
+
+## Exposed Tools
+
+Encore's MCP server exposes the following tools that provide AI models with detailed context about your application. These tools enable LLMs to understand your application's structure, retrieve relevant information, and take actions within your system.
+
+#### Database Tools
+
+- **get_databases**: Retrieve metadata about all SQL databases defined in the application, including their schema, tables, and relationships.
+- **query_database**: Execute SQL queries against one or more databases in the application.
+
+#### API Tools
+
+- **call_endpoint**: Make HTTP requests to any API endpoint in the application.
+- **get_services**: Retrieve comprehensive information about all services and their endpoints in the application.
+- **get_middleware**: Retrieve detailed information about all middleware components in the application.
+- **get_auth_handlers**: Retrieve information about all authentication handlers in the application.
+
+#### Trace Tools
+
+- **get_traces**: Retrieve a list of request traces from the application, including their timing, status, and associated metadata.
+- **get_trace_spans**: Retrieve detailed information about one or more traces, including all spans, timing information, and associated metadata.
+
+#### Source Code Tools
+
+- **get_metadata**: Retrieve the complete application metadata, including service definitions, database schemas, API endpoints, and other infrastructure components.
+- **get_src_files**: Retrieve the contents of one or more source files from the application.
+
+#### PubSub Tools
+
+- **get_pubsub**: Retrieve detailed information about all PubSub topics and their subscriptions in the application.
+
+#### Storage Tools
+
+- **get_storage_buckets**: Retrieve comprehensive information about all storage buckets in the application.
+- **get_objects**: List and retrieve metadata about objects stored in one or more storage buckets.
+
+#### Cache Tools
+
+- **get_cache_keyspaces**: Retrieve comprehensive information about all cache keyspaces in the application.
+
+#### Metrics Tools
+
+- **get_metrics**: Retrieve comprehensive information about all metrics defined in the application.
+
+#### Cron Tools
+
+- **get_cronjobs**: Retrieve detailed information about all scheduled cron jobs in the application.
+
+#### Secret Tools
+
+- **get_secrets**: Retrieve metadata about all secrets used in the application.
+
+#### Documentation Tools
+
+- **search_docs**: Search the Encore documentation using Algolia's search engine.
+- **get_docs**: Retrieve the full content of specific documentation pages.
+
diff --git a/docs/go/cli/telemetry.md b/docs/go/cli/telemetry.md
new file mode 100644
index 0000000000..58001898b7
--- /dev/null
+++ b/docs/go/cli/telemetry.md
@@ -0,0 +1,97 @@
+---
+seotitle: Encore Telemetry
+seodesc: Encore collects telemetry data about app usage
+title: Telemetry
+lang: go
+---
+Telemetry helps us improve Encore by collecting usage data. This data provides insights into how Encore is used, enabling us to make informed decisions to enhance performance, add new features, and fix bugs more efficiently.
+
+Encore only collects telemetry data in the local development tools and the Encore Cloud dashboard. It does **not** collect any telemetry data from your running applications or cloud services, ensuring complete privacy and security for your operations.
+
+## Why We Collect Data
+
+We collect telemetry data for several important reasons:
+
+1. **Improvement of Features**: Understanding which features are most used helps us prioritize improvements and new feature development.
+2. **Performance Monitoring**: Tracking performance metrics enables us to identify and resolve issues, ensuring a smoother user experience.
+3. **Bug Detection**: Telemetry data can help us detect and fix bugs faster by providing context on how and when issues occur.
+4. **User Experience**: Insights from telemetry data guide us in making Encore more intuitive and user-friendly.
+
+## How Data is Collected
+
+Encore collects data in a way that prioritizes user privacy and security. Here's how we do it:
+
+1. **User Identifiable Data**: The data collected includes identifiable information that helps us understand specific user interactions and contexts.
+2. **Types of Data**: We collect data on usage patterns, performance metrics, and error reports.
+3. **Secure Transmission**: All data is transmitted securely using industry-standard encryption protocols.
+4. **Minimal Impact**: Data collection is designed to have minimal impact on Encore's performance.
+
+### Example of Data Being Sent
+
+Here is an example of the type of data that is sent:
+
+```json
+{
+ "event": "app.create",
+ "anonymousId": "a-uuid-unique-for-the-installation",
+ "properties": {
+ "error": false,
+ "lang": "go",
+ "template": "graphql"
+ }
+}
+```
+
+## Data We Don't Collect
+
+At Encore, we prioritize your privacy and ensure that no sensitive data is collected through our telemetry. Specifically, we do not collect:
+
+1. **Environment Variables**: We do not collect any environment variables set in your development or production environments.
+2. **File Paths**: The specific paths of your files and directories are not collected.
+3. **Contents of Files**: We do not access or collect the contents of your code files or any other files in your projects.
+4. **Logs**: No log files from your application or development environment are collected.
+5. **Serialized Errors**: We do not collect serialized errors that may contain sensitive information.
+
+Our goal is to gather useful data that helps improve Encore while ensuring that your sensitive information remains private and secure.
+
+## Disabling Telemetry
+
+While telemetry helps us improve Encore, we understand that some users may prefer to opt out. Disabling telemetry is straightforward and can be done in two ways:
+
+1. **Using the CLI Command**: You can disable telemetry by executing a simple command in your terminal.
+
+ ```sh
+ encore telemetry disable
+ ```
+
+2. **Setting an Environment Variable**: Alternatively, you can disable telemetry by setting the `DISABLE_ENCORE_TELEMETRY` environment variable.
+
+ ```sh
+ export DISABLE_ENCORE_TELEMETRY=1
+ ```
+
+3. **Confirmation**: After disabling telemetry, either by the CLI command or environment variable, you will receive a confirmation message indicating that telemetry has been successfully disabled.
+
+4. **Re-enabling Telemetry**: If you decide to re-enable telemetry later, you can do so with the following CLI command:
+
+ ```sh
+ encore telemetry enable
+ ```
+
+## Debugging Telemetry
+
+For users who want more visibility into what telemetry data is being sent, you can enable debug mode:
+
+1. **Setting Debug Mode**: Enable debug mode by setting the `ENCORE_TELEMETRY_DEBUG` environment variable.
+
+ ```sh
+ export ENCORE_TELEMETRY_DEBUG=1
+ ```
+
+2. **Log Statements**: When debug mode is enabled, a log statement prefixed with `[telemetry]` is printed every time telemetry data is sent.
+
+## Conclusion
+
+Telemetry is a vital tool for improving Encore, but we respect your choice regarding data sharing. With easy-to-use commands and environment variables, you can manage your telemetry settings as you see fit. If you have any further questions or need assistance, please refer to our support documentation or contact our support team.
+
+Thank you for helping us make Encore better!
diff --git a/docs/go/community/contribute.md b/docs/go/community/contribute.md
new file mode 100644
index 0000000000..4bb7f7eebb
--- /dev/null
+++ b/docs/go/community/contribute.md
@@ -0,0 +1,36 @@
+---
+seotitle: How to contribute to Encore Open Source Project
+seodesc: Learn how to contribute to the Encore Open Source project by submitting pull requests, reporting bugs, or contributing documentation or example projects.
+title: Ways to contribute
+subtitle: Guidelines for contributing to Encore
+lang: go
+---
+
+We’re so excited that you are interested in contributing to Encore! All contributions are welcome, and there are several valuable ways to contribute.
+
+### Open Source Project
+
+If you want to contribute to the Encore Open Source project, you can submit a pull request on [GitHub](https://github.com/encoredev/encore/pulls).
+
+### Report issues
+
+If you have run into an issue or think you’ve found a bug, please report it via the [issue tracker](https://github.com/encoredev/encore/issues).
+
+### Add or update docs
+
+If there’s something you think would be helpful to add to the docs or if there’s something that seems out of date, we appreciate your input.
+You can view the docs and contribute fixes or improvements directly in [GitHub](https://github.com/encoredev/encore/tree/main/docs).
+
+You can also email your feedback to us at [hello@encore.dev](mailto:hello@encore.dev).
+
+### Blog posts
+
+If you’ve built something cool using Encore, we’d really like you to talk about it! We love it when developers share their projects on blogs and on Twitter.
+
+Use the hashtag **#builtwithencore** and we’ll have an easier time finding your work. We might also showcase it on the [Encore Twitter account](https://twitter.com/encoredotdev)!
+
+### Meetups & Workshops
+
+Organizing a meetup or workshop is a great way to connect with other developers using Encore. It can also be a great first step in trying out Encore for development in your company or other professional organization.
+
+If you want help with organizing or planning an event, please don’t hesitate to reach out to us via email at [hello@encore.dev](mailto:hello@encore.dev).
diff --git a/docs/go/community/get-involved.md b/docs/go/community/get-involved.md
new file mode 100644
index 0000000000..52da16e252
--- /dev/null
+++ b/docs/go/community/get-involved.md
@@ -0,0 +1,61 @@
+---
+seotitle: Encore's Open Source Developer Community
+seodesc: Learn how to engage in the Open Source Developer Community supporting Encore.
+title: Community
+subtitle: Join the most pioneering developer community!
+lang: go
+---
+
+Developers building with Encore are forward-thinkers working on exciting and innovative applications.
+
+We rely on this group's feedback, and contributions to the Open Source project, to improve Encore for developers everywhere.
+Getting involved is a fantastic way of finding support and inspiration among peers.
+
+Everyone is welcome in the Encore community, and we hope you'll get involved too!
+
+## Get involved
+
+There are many ways to get involved. Here's where you can start straight away.
+
+
+### Contribute on GitHub
+
+Use GitHub to report bugs, give feedback on proposals, or contribute your ideas.
+
+### Join Discord
+
+Connect with fellow Encore developers, ask questions, or just hang out!
+
+### Follow on Twitter
+
+Follow Encore on Twitter to keep up with the latest. Share what you've built to help spread the word about the project.
+
+### Contribute to the project
+
+Want to make a contribution to Encore? Great, start by reading about the different [ways to contribute](/docs/go/community/contribute).
+
+### Feedback on the Roadmap
+
+[The Encore Roadmap](https://encore.dev/roadmap) is public. It's open to your comments, feature requests, and you can vote on existing entries.
+
+## Community Governance
+
+We recommend everyone read the [Community Principles](/docs/go/community/principles).
+
+If you need assistance, have concerns, or have questions for the Community team, please email us at [support@encore.dev](mailto:support@encore.dev).
diff --git a/docs/go/community/open-source.md b/docs/go/community/open-source.md
new file mode 100644
index 0000000000..8bb533a57c
--- /dev/null
+++ b/docs/go/community/open-source.md
@@ -0,0 +1,21 @@
+---
+seotitle: Encore is Open Source
+seodesc: We believe Open Source is key to a sustainable and prosperous technology community. Encore builds on Open Source software, and is itself Open Source.
+title: Open Source
+subtitle: Encore is Open Source Software
+lang: go
+---
+
+We believe Open Source is key to a long-term sustainable and prosperous technology community. Encore builds on Open Source software, and is largely Open Source itself.
+
+## License
+
+Encore's Backend Framework, parser, and compiler are Open Source under the Mozilla Public License 2.0.
+
+> The MPL is a simple copyleft license. The MPL's "file-level" copyleft is designed to encourage contributors to share modifications they make to your code, while still allowing them to combine your code with code under other licenses (open or proprietary) with minimal restrictions.
+
+You can learn more about MPL 2.0 on [the official website](https://www.mozilla.org/en-US/MPL/2.0/FAQ/).
+
+## Contribute
+
+Contributions to improve Encore are very welcome. Contribute to Encore on [GitHub](https://github.com/encoredev/encore).
diff --git a/docs/go/community/principles.md b/docs/go/community/principles.md
new file mode 100644
index 0000000000..4039af88e4
--- /dev/null
+++ b/docs/go/community/principles.md
@@ -0,0 +1,15 @@
+---
+seotitle: Encore Community Principles
+seodesc: Everyone is welcome in the Encore community, and we want everyone to feel at home and free to contribute.
+title: Community principles
+subtitle: Everyone belongs in the Encore community
+lang: go
+---
+
+Everyone is welcome in the Encore community, and it is of utmost importance to us that everyone is able to feel at home and contribute.
+
+Therefore we as maintainers, and you as a contributor, must pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity, level of experience, nationality, personal appearance, race, religion, or sexual identity.
+
+### Code of Conduct
+
+To this end, the Encore community is guided by the [Contributor Covenant 2.0 Code of Conduct](https://www.contributor-covenant.org/version/2/0/code_of_conduct/) to ensure everyone is welcome and able to participate.
\ No newline at end of file
diff --git a/docs/go/community/submit-template.md b/docs/go/community/submit-template.md
new file mode 100644
index 0000000000..583372d7e0
--- /dev/null
+++ b/docs/go/community/submit-template.md
@@ -0,0 +1,49 @@
+---
+seotitle: Submit a Template to Encore's Templates repo
+seodesc: Learn how to contribute to Encore's Templates repository and get features in the Encore Templates marketplace.
+title: Submit a Template
+subtitle: Your contributions help other developers build
+lang: go
+---
+
+[Templates](/templates) help and inspire developers to build applications using Encore.
+
+You are welcome to contribute your own templates!
+
+Two types of templates that are especially useful:
+- **Starters:** Runnable Encore applications for others to use as is, or take inspiration from.
+- **Bits:** Re-usable code samples to solve common development patterns or integrate Encore applications with third-party APIs and services.
+
+## Submit your contribution
+
+Contribute a template by submitting a Pull Request to the [Open Source Examples Repo](https://github.com/encoredev/examples).
+
+### Submitting Starters
+
+Follow these steps to submit a **Starter**:
+
+1. Fork the repo.
+2. Create a new folder in the root directory of the repo; this is where you will place your template. Use a short folder name, as your template will be installable via the CLI like so: `encore app create APP-NAME --example=<folder-name>`
+3. Include a `README.md` with instructions for how to use the template. We recommend following [this format](https://github.com/encoredev/examples/blob/8c7e33243f6bfb1b2654839e996e9a924dcd309e/uptime/README.md).
+
+Once your Pull Request has been approved, it may be featured on the [Templates page](/templates) on the Encore website.
+
+### Submitting Bits
+
+Follow these steps to submit your **Bits**:
+
+1. Fork the repo.
+2. Create a new folder inside the `bits` folder in the repo and place your template inside it. Use a short folder name as your template will soon be installable via the CLI.
+3. Include a `README.md` with instructions for how to use the template.
+
+Once your Pull Request has been approved, it may be featured on the [Templates page](/templates) on the Encore website.
+
+## Contribute from your own repo
+
+If you don't want to contribute code to the examples repo, but still want to be featured on the [Templates page](/templates), please contact us at [hello@encore.dev](mailto:hello@encore.dev).
+
+## Dynamic Encore AppID
+
+In most cases, you should avoid hardcoding an `AppID` in your template's source code. Instead, use the notation `{{ENCORE_APP_ID}}`.
+
+When a developer creates an app using the template, `{{ENCORE_APP_ID}}` will be dynamically replaced with their new and unique `AppID`, meaning they will not need to make any manual code adjustments.
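+
+For example (a hypothetical Go snippet; the URL shape is illustrative only), a template might embed the app ID in a constant:
+
+```go
+package web
+
+// {{ENCORE_APP_ID}} is replaced with the developer's own app ID
+// when they create an app from this template.
+const StagingAPIBaseURL = "https://staging-{{ENCORE_APP_ID}}.encr.app"
+```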
diff --git a/docs/go/concepts/application-model.md b/docs/go/concepts/application-model.md
new file mode 100644
index 0000000000..1071d60c78
--- /dev/null
+++ b/docs/go/concepts/application-model.md
@@ -0,0 +1,25 @@
+---
+seotitle: Encore Application Model
+seodesc: How Encore understands your application using static analysis
+title: Encore Application Model
+subtitle: How Encore understands your application
+lang: go
+---
+
+Encore works by using static analysis to understand your application. This is a fancy term for parsing and analyzing the code you write and creating a graph of how your application works. This graph closely represents your own mental model of the system: boxes and arrows that represent systems and services that communicate with other systems, pass data and connect to infrastructure. We call it the Encore Application Model.
+
+Because the Open Source framework, parser, and compiler are all designed together, Encore can ensure 100% accuracy when creating the application model. Any deviation is caught as a compilation error.
+
+Using this model, Encore can provide tools to solve problems that would normally be up to the developer to handle manually, from creating architecture diagrams and API documentation to provisioning cloud infrastructure.
+
+We're continuously expanding on Encore's capabilities and are building a new generation of developer tools that are enabled by Encore's understanding of your application.
+
+The framework, parser, and compiler that enable this are all [Open Source](https://github.com/encoredev/encore).
+
+
+
+## Standardization brings clarity
+
+Developers make dozens of decisions when creating a backend application. Deciding how to structure the codebase, defining API schemas, picking underlying infrastructure, etc. The decisions often come down to personal preferences, not technical rationale. This creates a huge problem in the form of fragmentation! When every stack looks different, all tools have to be general purpose.
+
+When you adopt Encore, many of these stylistic decisions are already made for you. The Encore framework ensures your application follows modern best practices. And when you run your application, Encore's Open Source parser and compiler check that you're sticking to the standard. This means you're free to focus your energy on what matters: writing your application's business logic.
\ No newline at end of file
diff --git a/docs/go/concepts/benefits.md b/docs/go/concepts/benefits.md
new file mode 100644
index 0000000000..6a7910affe
--- /dev/null
+++ b/docs/go/concepts/benefits.md
@@ -0,0 +1,29 @@
+---
+seotitle: Benefits of using Encore.go
+seodesc: See how Encore.go helps you build backends faster using Go.
+title: Encore.go Benefits
+subtitle: How Encore.go helps you build robust distributed systems, faster.
+lang: go
+---
+
+Using Encore.go to declare infrastructure in application code helps unlock several benefits:
+
+- **Local development with instant infrastructure**: Encore.go automatically sets up necessary infrastructure as you develop.
+- **Rapid feedback**: Catch issues early with type-safe infrastructure, avoiding slow deployment cycles.
+- **No manual configuration required**: No need for Infrastructure-as-Code. Your code is the single source of truth.
+- **Unified codebase**: One codebase for all environments: local, preview, and cloud.
+- **Cloud-agnostic by default**: Encore.go provides an abstraction layer on top of the cloud provider's APIs, so you avoid becoming locked in to a single cloud.
+- **Evolve infrastructure without code changes**: As requirements evolve, you can change the provisioned infrastructure without making code changes; you only need to change the infrastructure configuration, which is kept separate from the application code.
+
+## No DevOps experience required
+
+Encore provides open source tools to help you integrate with your cloud infrastructure, enabling you to self-host your application anywhere that supports Docker containers.
+Learn more in the [self-host documentation](/docs/go/self-host/docker-build).
+
+You can also use [Encore Cloud](https://encore.dev/use-cases/devops-automation), which fully automates provisioning and managing infrastructure in your own cloud on AWS and GCP.
+
+This approach dramatically reduces the level of DevOps expertise required to use scalable, production-ready cloud services like Kubernetes and Pub/Sub. And because your application code is the source of truth for infrastructure requirements, it ensures the infrastructure in all your environments is always in sync with the application's requirements.
+
+## Simplicity without giving up flexibility
+
+Encore.go provides integrations for common infrastructure primitives, but also allows for flexibility. You can always use any cloud infrastructure, even if it's not built into Encore.go. If you use Encore's [Cloud Platform](https://encore.dev/use-cases/devops-automation), it [automates infrastructure](/docs/platform/infrastructure/infra) using your own cloud account, so you always have full access to your services from the cloud provider's console.
diff --git a/docs/go/develop/api-docs.md b/docs/go/develop/api-docs.md
new file mode 100644
index 0000000000..9b2849106b
--- /dev/null
+++ b/docs/go/develop/api-docs.md
@@ -0,0 +1,16 @@
+---
+seotitle: Service Catalog & Generated API Docs
+seodesc: See how Encore automatically generates API documentation that always stays up to date and in sync.
+title: Service Catalog
+subtitle: Automatically get a Service Catalog and complete API docs
+---
+
+All developers agree API documentation is great to have, but the effort of maintaining it inevitably leads to docs becoming stale and out of date.
+
+To solve this, Encore uses the [Encore Application Model](/docs/go/concepts/application-model) to automatically generate a Service Catalog along with complete documentation for all APIs. This ensures docs are always up-to-date as your APIs evolve.
+
+The API docs are available both in your [Local Development Dashboard](/docs/go/observability/dev-dash) and for your whole team in the [Encore Cloud dashboard](https://app.encore.cloud).
+
+
+
+
diff --git a/docs/go/develop/auth.md b/docs/go/develop/auth.md
new file mode 100644
index 0000000000..5b2c93ff80
--- /dev/null
+++ b/docs/go/develop/auth.md
@@ -0,0 +1,221 @@
+---
+seotitle: Adding authentication to APIs to auth users
+seodesc: Learn how to add authentication to your APIs and make sure you know who's calling your backend APIs.
+title: Authenticating users
+subtitle: Knowing what's what and who's who
+infobox: {
+ title: "Authentication",
+ import: "encore.dev/beta/auth",
+}
+lang: go
+---
+Almost every application needs to know who's calling it, whether the user
+represents a person in a consumer-facing app or an organization in a B2B app.
+Encore supports both use cases in a simple yet powerful way.
+
+As described in the docs for [defining APIs](/docs/go/primitives/defining-apis), Encore offers three access levels
+for APIs:
+
+* `//encore:api public` – defines a public API that anybody on the internet can call.
+* `//encore:api private` – defines a private API that is never accessible to the outside world. It can only be called from other services in your app and via cron jobs.
+* `//encore:api auth` – defines a public API that anybody can call, but that requires valid authentication.
+
+When an API is defined with access level `auth`, outside calls to that API must specify
+an authorization header, in the form `Authorization: Bearer <token>`. The token is passed to
+a designated auth handler function and the API call is allowed to go through only if the
+auth handler determines the token is valid.
+
+For more advanced use cases you can also customize the authentication information you want.
+See the section on [accepting structured auth information](#accepting-structured-auth-information) below.
+
+
+
+
+You can optionally send in auth data to `public` and `private` APIs, in which case the auth handler will be used. When used for `private` APIs, they are still not accessible from the outside world.
+
+
+
+## The auth handler
+
+Encore applications can designate a special function to handle authentication,
+by defining a function and annotating it with `//encore:authhandler`. This annotation
+tells Encore to run the function whenever an incoming API call contains authentication data.
+
+The auth handler is responsible for validating the incoming authentication data
+and returning an `auth.UID` (a string type representing a **user id**). The `auth.UID`
+can be whatever you wish, but in practice it usually maps directly to the primary key
+stored in a user table (either defined in the Encore service or in an external service like [Firebase](/docs/go/how-to/firebase-auth) or [Auth0](/docs/go/how-to/auth0-auth)).
+
+### With custom user data
+
+Oftentimes it's convenient for the rest of your application to easily be able to look up
+information about the authenticated user making the request. If that's the case,
+define the auth handler like so:
+
+```go
+import "encore.dev/beta/auth"
+
+// Data can be named whatever you prefer (but must be exported).
+type Data struct {
+ Username string
+ // ...
+}
+
+// AuthHandler can be named whatever you prefer (but must be exported).
+//encore:authhandler
+func AuthHandler(ctx context.Context, token string) (auth.UID, *Data, error) {
+ // Validate the token and look up the user id and user data,
+ // for example by calling Firebase Auth.
+}
+```
+
+
+
+
+
+### Without custom user data
+
+When you don't require custom user data and it's sufficient to use `auth.UID`,
+simply skip it in the return type:
+
+```go
+import "encore.dev/beta/auth"
+
+// AuthHandler can be named whatever you prefer (but must be exported).
+//encore:authhandler
+func AuthHandler(ctx context.Context, token string) (auth.UID, error) {
+ // Validate the token and look up the user id,
+ // for example by calling Firebase Auth.
+}
+```
+
+## Accepting structured auth information
+
+In the examples above the function accepts a `Bearer` token as a string argument.
+In that case Encore parses the `Authorization` HTTP header and passes the token to the auth handler.
+
+In cases where you have different or more complex authorization requirements, you can instead specify
+a data structure that specifies one or more fields to be parsed from the HTTP request. For example:
+
+```go
+type MyAuthParams struct {
+ // SessionCookie is set to the value of the "session" cookie.
+ // If the cookie is not set it's nil.
+ SessionCookie *http.Cookie `cookie:"session"`
+
+ // ClientID is the unique id of the client, sourced from the URL query string.
+ ClientID string `query:"client_id"`
+
+ // Authorization is the raw value of the "Authorization" header
+ // without any parsing.
+ Authorization string `header:"Authorization"`
+}
+
+//encore:authhandler
+func AuthHandler(ctx context.Context, p *MyAuthParams) (auth.UID, error) {
+ // ...
+}
+```
+
+This example tells Encore that the application accepts authentication information via
+the `session` cookie, the `client_id` query string parameter, and the `Authorization` header.
+These fields are automatically filled in when the auth handler is called (if present in the request).
+
+You can of course combine auth params like this with custom user data (see the section above).
+
+
+
+Cookies are generally only used by browsers and are automatically added to requests made by browsers.
+As a result Encore does not include cookie fields in generated clients' authentication payloads
+or in the [Local Development Dashboard](/docs/go/observability/dev-dash).
+
+
+
+## Handling auth errors
+
+When a token doesn't match your auth rules (for example if it's expired, has been revoked, or is invalid), you should return a non-nil error from the auth handler.
+
+Encore passes the error message on to the user when you use [Encore's built-in error package](/docs/go/primitives/api-errors), so we recommend using that with the error code `Unauthenticated` to communicate what happened. For example:
+
+```go
+import "encore.dev/beta/errs"
+
+//encore:authhandler
+func AuthHandler(ctx context.Context, token string) (auth.UID, error) {
+ return "", &errs.Error{
+ Code: errs.Unauthenticated,
+ Message: "invalid token",
+ }
+}
+```
+
+
+
+Note that for security reasons you may not want to reveal too much information about why a request did not pass your auth checks. There are many subtle security considerations when dealing with authentication and we don't have time to go into all of them here.
+
+Whenever possible we recommend using a third-party auth provider instead of rolling your own authentication.
+
+
+
+## Using auth data
+
+Once the user has been identified by the auth handler, the API handler is called
+as usual. If it wishes to inspect the authenticated user, it can use the
+`encore.dev/beta/auth` package:
+
+- `auth.Data()` returns the custom user data returned by the auth handler (if any)
+- `auth.UserID()` returns `(auth.UID, bool)` to get the authenticated user id (if any)
+
+For an incoming request from the outside to an API that uses the `auth` access level,
+these are guaranteed to be set since the API won't be called if the auth handler doesn't succeed.
+
+Encore automatically propagates the auth data when you make API calls to other Encore API endpoints.
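+
+For example, here's a minimal sketch of an `auth` endpoint that reads the auth data (reusing the `Data` type from the auth handler example above):
+
+```go
+package user
+
+import (
+    "context"
+
+    "encore.dev/beta/auth"
+)
+
+type WhoamiResponse struct {
+    UserID   string
+    Username string
+}
+
+//encore:api auth
+func Whoami(ctx context.Context) (*WhoamiResponse, error) {
+    uid, _ := auth.UserID() // always present for access level auth
+    resp := &WhoamiResponse{UserID: string(uid)}
+    // auth.Data() returns whatever the auth handler returned (if anything).
+    if data, ok := auth.Data().(*Data); ok {
+        resp.Username = data.Username
+    }
+    return resp, nil
+}
+```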
+
+
+
+If an endpoint calls an `auth` endpoint during its processing, and the original
+request does not have an authenticated user, the call will fail. This behavior
+preserves the guarantee that `auth` endpoints always have an authenticated user.
+
+
+
+
+## Optional authentication
+
+While Encore always calls the auth handler for API endpoints marked as `auth`, you can also call `public` API endpoints with authentication data.
+
+This can be useful for APIs that support both a "logged in" and "logged out" experience.
+For example, a site like Reddit might have a `post.List` endpoint that returns the list of posts,
+but if you're logged in it also includes whether or not you have upvoted or downvoted each post.
+
+To support such use cases, Encore runs the auth handler for `public` API endpoints if (and only if) the request
+includes any authentication information (such as the `Authorization` header).
+
+In that case, the request processing behavior varies depending on the value of the `error` returned from the auth handler:
+
+* If the error is nil, the request is considered to be an authenticated request, and `auth.UserID()` and `auth.Data()` will include
+  the information the auth handler returned.
+* If the error is non-nil and the error code is `errs.Unauthenticated` (as shown above), the request continues as an unauthenticated request,
+ behaving exactly as if there was no authentication data provided at all.
+* If the error is non-nil and the error code is anything else, the request is aborted and Encore returns that error to the caller.
+
+To be able to determine if the request has an authenticated user, check the second return value from `auth.UserID()`.
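+
+As a minimal sketch mirroring the Reddit-style example above:
+
+```go
+package post
+
+import (
+    "context"
+
+    "encore.dev/beta/auth"
+)
+
+type ListResponse struct {
+    Posts        []string
+    Personalized bool // true when the response is tailored to a logged-in user
+}
+
+//encore:api public
+func List(ctx context.Context) (*ListResponse, error) {
+    resp := &ListResponse{Posts: loadPosts()}
+    // For public endpoints the auth handler only runs when auth data
+    // was provided, so check whether a user is actually present.
+    if _, ok := auth.UserID(); ok {
+        resp.Personalized = true // e.g. annotate posts with the user's votes
+    }
+    return resp, nil
+}
+
+func loadPosts() []string { return []string{"first post"} }
+```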
+
+## Overriding auth information
+
+Encore supports overriding the auth information for an outgoing request using the
+[`auth.WithContext`](https://pkg.go.dev/encore.dev/beta/auth#WithContext) function.
+This function returns a new context with the auth information set to the specified values.
+
+Note that this only affects the auth information passed along with the request, and not the
+current request being processed (if any).
+
+This function is often useful when testing APIs that use authentication. For example:
+
+```go
+ctx := auth.WithContext(context.Background(), auth.UID("my-user-id"), &MyAuthData{Email: "hello@example.com"})
+// ... Make an API call using `ctx` to override the auth information for that API call.
+```
diff --git a/docs/go/develop/config.md b/docs/go/develop/config.md
new file mode 100644
index 0000000000..a6976ac57a
--- /dev/null
+++ b/docs/go/develop/config.md
@@ -0,0 +1,440 @@
+---
+seotitle: Configuration for environment specific changes
+seodesc: See how you can use configuration to define different behavior in each environment. Making it simpler to develop and test your backend application.
+title: Configuration
+subtitle: Define behavior in specific environments
+infobox: {
+ title: "Configuration",
+ import: "encore.dev/config",
+}
+lang: go
+---
+
+Configuration files let you define default behavior for your application, and override it for specific environments. This allows you to make changes without affecting deployments in other environments.
+
+Encore supports configuration files written in [CUE](https://cuelang.org/), which is a superset of JSON. It adds the following:
+- C-style comments
+- Quotes may be omitted from field names without special characters
+- Commas at the end of fields are optional
+- A comma after the last element of a list is allowed
+- The outer curly braces on the file are optional
+- [Expressions](https://cuelang.org/docs/tutorials/tour/expressions/) such as interpolation, comprehensions and conditionals
+ are supported.
+
+
+
+For sensitive data use Encore's [secrets management](/docs/go/primitives/secrets) functionality instead of configuration.
+
+
+
+## Using Config
+
+Inside your service, you can call `config.Load[*SomeConfigType]()`
+to load the config. This must be done at the package level, and not inside a function. See more in the [package documentation](https://pkg.go.dev/encore.dev/config#Load).
+
+Here's an example implementation:
+
+```go
+package mysvc
+
+import (
+ "encore.dev/config"
+)
+
+type SomeConfigType struct {
+ ReadOnly config.Bool // Put the system into read-only mode
+ Example config.String
+}
+
+var cfg *SomeConfigType = config.Load[*SomeConfigType]()
+```
+
+The type you pass as a type parameter to this function will be used to generate an `encore.gen.cue` file in your service's
+directory. This file contains both the CUE definition for your configuration type and some [metadata](#provided-meta-values) that Encore
+provides to your service at runtime. This allows you to change the final value of your configuration based on the environment the
+application is running in.
+
+Any files ending with `.cue` in your service directory or sub-directories will be loaded by Encore and given to CUE to
+unify and compute a final configuration.
+
+
+
+```
+-- mysvc/encore.gen.cue --
+// Code generated by encore. DO NOT EDIT.
+package mysvc
+
+#Meta: {
+ APIBaseURL: string
+ Environment: {
+ Name: string
+ Type: "production" | "development" | "ephemeral" | "test"
+ Cloud: "aws" | "gcp" | "encore" | "local"
+ }
+}
+
+#Config: {
+ ReadOnly: bool // Put the system into read-only mode
+ Example: string
+}
+#Config
+-- mysvc/myconfig.cue --
+// Set example to "hello world"
+Example: "hello world"
+
+// By default we're not in read only mode
+ReadOnly: bool | *false
+
+// But on the old production environment, we're in read only mode
+if #Meta.Environment.Name == "old-prod" {
+ ReadOnly: true
+}
+```
+
+
+
+
+
+Loading configuration
+is only supported in services, and the loaded data cannot be referenced from packages outside that service.
+
+
+
+### CUE tags in Go Structs
+
+You can use the `cue` tag in your Go structs to specify additional constraints on your configuration. For example:
+
+```go
+type FooBar struct {
+ A int `cue:">100"`
+ B int `cue:"A-50"` // If A is set, B can be inferred by CUE
+ C int `cue:"A+B"` // Which then allows CUE to infer this too
+}
+
+var _ = config.Load[*FooBar]()
+```
+
+This will result in the following CUE type definition being generated:
+
+```cue
+#Config: {
+ A: int & >100
+ B: int & A-50 // If A is set, B can be inferred by CUE
+ C: int & A+B // Which then allows CUE to infer this too
+}
+```
+
+## Config Wrappers
+
+Encore provides type wrappers for config in the form of `config.Value[T]` and `config.Values[T]`, which expand into
+functions returning `T` and `[]T` respectively. These wrappers allow you to override, inside tests, the default values
+defined in your CUE files, where only code run from that test will see the override.
+
+In the future we plan to support real-time updating of configuration values in running applications; using
+these wrappers in your configuration today will future-proof your code and allow you to automatically take advantage of this
+feature when it becomes available.
+
+Any type supported in API requests and responses can be used as the type for a config wrapper. However for convenience, Encore ships with the following inbuilt aliases for the config wrappers:
+
+ - `config.String`, `config.Bool`, `config.Int`, `config.Uint`,
+ `config.Int8`, `config.Int16`, `config.Int32`, `config.Int64`,
+ `config.Uint8`, `config.Uint16`, `config.Uint32`, `config.Uint64`,
+ `config.Float32`, `config.Float64`, `config.Bytes`, `config.Time`, `config.UUID`
+
+
+
+```go
+-- svc/svc.go --
+package mysvc
+
+import (
+    "encore.dev/config"
+)
+
+type Server struct {
+    // The config wrappers do not have to be in the top-level struct.
+    Enabled config.Bool
+    Port    config.Int
+}
+
+type SvcConfig struct {
+    GameServerPorts config.Values[Server]
+}
+
+var cfg = config.Load[*SvcConfig]()
+
+func startServers() {
+    for _, server := range cfg.GameServerPorts() {
+        if server.Enabled() {
+            go startServer(server.Port())
+        }
+    }
+}
+
+func startServer(port int) {
+    // ...
+}
+-- svc/servers.cue --
+GameServerPorts: [
+    {
+        Enabled: false
+        Port:    12345
+    },
+    {
+        Enabled: true
+        Port:    1337
+    },
+]
+```
+
+
+
+
+## Provided Meta Values
+
+When your application is running, Encore will provide information about that environment to your CUE files, which you
+can use to filter on. These fields can be found in the `encore.gen.cue` file which Encore will generate when you add a
+call to load config. Encore provides the following meta values:
+
+- **APIBaseURL**: The base URL of the Encore API, which can be used to make API calls to the application.
+- **Environment**: A struct containing information about the environment the application is running in.
+  - **Name**: The name of the environment.
+  - **Type**: One of `production`, `development`, `ephemeral` or `test`.
+  - **Cloud**: The cloud the app is running on, which is one of `aws`, `gcp`, `encore` or `local`.
+
+The following are useful conditionals you can use in your CUE files:
+
+```cue
+// An application running due to `encore run`
+if #Meta.Environment.Type == "development" && #Meta.Environment.Cloud == "local" {}
+
+// An application running in a development environment in the Cloud
+if #Meta.Environment.Type == "development" && #Meta.Environment.Cloud != "local" {}
+
+// An application running in a production environment
+if #Meta.Environment.Type == "production" {}
+
+// An application running in an environment that Encore has created
+// for an open Pull Request on Github
+if #Meta.Environment.Type == "ephemeral" {}
+```
+
+## Testing with Config
+
+Through the provided meta values, your application's configuration can have different values in tests compared to
+when the application is running. This can be useful to prevent external side effects from your tests, such as emailing
+customers from every test run.
+
+Sometimes, however, you may want to test specific behaviors based on different configurations (such as disabling user signups);
+in this scenario the meta values do not give you fine-grained enough control. To let you set a configuration value
+at a per-test level, Encore provides the helper function [`et.SetCfg`](https://pkg.go.dev/encore.dev/et#SetCfg). You can
+use this function to set a new value only in the current test and any sub-tests, while all other tests
+continue to use the value defined in the CUE files.
+
+```go
+-- config.cue --
+// By default we want to send emails
+SendEmails: bool | *true
+
+// But in all tests we want to disable emails
+if #Meta.Environment.Type == "test" {
+ SendEmails: false
+}
+-- signup.go --
+import (
+ "context"
+
+ "encore.dev/config"
+)
+
+type Config struct {
+ SendEmails config.Bool
+}
+
+var cfg = config.Load[Config]()
+
+//encore:api public
+func Signup(ctx context.Context, p *SignupParams) error {
+ user := createUser(p)
+
+ if cfg.SendEmails() {
+ SendWelcomeEmail(user)
+ }
+
+ return nil
+}
+-- signup_test.go --
+import (
+ "context"
+ "testing"
+
+ "encore.dev/et"
+)
+
+func TestSignup(t *testing.T) {
+ err := Signup(context.Background(), &SignupParams { ... })
+ if err != nil {
+ // We don't expect an error here
+ t.Fatal(err)
+ }
+
+ if emailWasSent() {
+ // We don't expect an email to be sent
+ // as it's disabled for all tests
+ t.Fatal("email was sent")
+ }
+}
+
+func TestSignup_TestEmails(t *testing.T) {
+ // For this test, we want to enable the welcome
+ // emails so we can test that they are sent
+ et.SetCfg(cfg.SendEmails, true)
+
+ err := Signup(context.Background(), &SignupParams { ... })
+ if err != nil {
+ // We don't expect an error here
+ t.Fatal(err)
+ }
+
+ // Check the email was sent
+ if !emailWasSent() {
+ t.Fatal("email was not sent")
+ }
+}
+```
+
+## Useful CUE Patterns
+
+If you're new to CUE, we recommend checking out the [CUE documentation](https://cuelang.org/docs/) and
+[cuetorials](https://cuetorials.com/). To get you started, here are some useful patterns you can use in your
+CUE files.
+
+
+
+
+### Defaults
+
+CUE supports the concept of a default value, which it uses if no other concrete value is provided. This is useful
+when you normally want one value, but occasionally need to provide an override in a certain scenario. A default
+value is specified by prefixing it with a `*`.
+
+```cue
+// ReadOnlyMode is a boolean and if we don't provide a value, it
+// will default to false.
+ReadOnlyMode: bool | *false
+
+if #Meta.Environment.Name == "old-prod" {
+ // On this environment, we want to set ReadOnlyMode to true
+ ReadOnlyMode: true
+}
+```
+
+
+
+
+
+### Validation within CUE
+
+Any field prefixed with an `_` will not be exported to the concrete configuration once evaluated by CUE, and can be used
+to hold intermediate values. Because CUE allows you to define the same field as many times as you want, as long as the
+values unify, we can use this to build complex validation logic.
+
+```cue
+import (
+ "list" // import CUE's list package
+)
+
+// Set some port numbers defaulting just to 8080
+// but in development including 8443
+portNumbers: [...int] | *[8080]
+if #Meta.Environment.Type == "development" {
+ portNumbers: [8080, 8443]
+}
+
+// Port numbers must be an array and all values
+// are integers 1024 or above.
+portNumbers: [...int & >= 1024]
+
+// The ports are considered valid if they contain the port number 8080.
+_portsAreValid: list.Contains(portNumbers, 8080)
+
+// Ensure that the ports are valid by constraining the value to be true.
+// CUE will report an error if the value is false (that is if the portNumbers list
+// does not contain the value 8080).
+_portsAreValid: true
+```
+
+
+
+
+
+### Switch Statements
+
+If statements in CUE do not have else branches, which can make it difficult to write complex conditionals. However, we
+can use an array to emulate a switch statement, where the first value whose condition matches is returned. The following
+example sets `SendEmailsFrom` to a single string.
+
+```cue
+SendEmailsFrom: [
+ // These act as individual case statements
+ if #Meta.Environment.Type == "production" { "noreply@example.com" },
+ if #Meta.Environment.Name == "staging" { "staging@example.com" },
+
+ // This last value without a condition acts as the default case
+ "dev-system@example.dev",
+][0] // Return the first value which matches the condition
+```
+
+
+
+
+
+### Using Map Keys as Values
+
+CUE allows us to extract map keys and use them as values to simplify the config we need to write and minimize duplication.
+
+```cue
+// Define the type we want to use
+#Server: {
+ server: string
+ port: int & > 1024
+ enabled: bool | *true
+}
+
+// Specify that servers is a map of strings to #Server,
+// where the key is bound to the variable Name
+servers: [Name=string]: #Server & {
+	// Then we unify the key with the value of server
+	server: Name
+}
+
+servers: {
+ "Foo": {
+ port: 8080
+ },
+ "Bar": {
+ port: 8081
+ enabled: false
+ },
+}
+```
+
+This will result in the concrete configuration of:
+```json
+{
+ "servers": {
+ "Foo": {
+ "server": "Foo",
+ "port": 8080,
+ "enabled": true
+ },
+ "Bar": {
+ "server": "Bar",
+ "port": 8081,
+ "enabled": false
+ }
+ }
+}
+```
+
+
diff --git a/docs/go/develop/cors.md b/docs/go/develop/cors.md
new file mode 100644
index 0000000000..c93501bb9d
--- /dev/null
+++ b/docs/go/develop/cors.md
@@ -0,0 +1,81 @@
+---
+seotitle: Handling CORS (Cross-Origin Resource Sharing)
+seodesc: See how you can configure CORS for your Encore application.
+title: CORS
+subtitle: Configure CORS (Cross-Origin Resource Sharing) for your Encore application
+lang: go
+---
+
+CORS is a web security concept that defines which website origins are allowed to access your API.
+
+A deep-dive into CORS is out of scope for this documentation, but [MDN](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS)
+provides a good overview. In short, CORS affects requests made by browsers to resources hosted on
+other origins (a combination of the scheme, domain, and port).
+
+## Configuring CORS
+
+Encore provides a default CORS configuration that is suitable for many APIs. You can override these settings
+by specifying the `global_cors` key in the `encore.app` file, which has the following
+structure:
+
+```cue
+{
+ // debug enables CORS debug logging.
+ "debug": true | false,
+
+ // allow_headers allows an app to specify additional headers that should be
+ // accepted by the app.
+ //
+ // If the list contains "*", then all headers are allowed.
+ "allow_headers": [...string],
+
+ // expose_headers allows an app to specify additional headers that should be
+ // exposed from the app, beyond the default set always recognized by Encore.
+ //
+ // If the list contains "*", then all headers are exposed.
+ "expose_headers": [...string],
+
+ // allow_origins_without_credentials specifies the allowed origins for requests
+ // that don't include credentials. If nil it defaults to allowing all domains
+ // (equivalent to ["*"]).
+ "allow_origins_without_credentials": [...string],
+
+ // allow_origins_with_credentials specifies the allowed origins for requests
+ // that include credentials. If a request is made from an Origin in this list
+// Encore responds with Access-Control-Allow-Origin: <origin>.
+ //
+ // The URLs in this list may include wildcards (e.g. "https://*.example.com"
+ // or "https://*-myapp.example.com").
+ "allow_origins_with_credentials": [...string],
+}
+```
+
+## Allowed origins
+
+The main CORS configuration is the list of allowed origins, meaning which websites are allowed
+to access your API (via browsers).
+
+For this purpose, CORS makes a distinction between requests that contain authentication information
+(cookies, HTTP authentication, or client certificates) and those that do not. CORS applies stricter
+rules to authenticated requests.
+
+By default, Encore allows unauthenticated requests from all origins but disallows requests that do
+include authorization information from other origins. This is a good default for many APIs.
+This can be changed by setting the `allow_origins_without_credentials` key (see above).
+For convenience Encore also allows all origins when developing locally.
+
+For security reasons it's necessary to explicitly specify which origins are allowed to make
+authenticated requests. This is done by setting the `allow_origins_with_credentials` key (see above).
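+
+For example, here's a minimal sketch of an `encore.app` file that allows a web app and its preview deployments
+to make authenticated requests (the app id and domains are hypothetical):
+
+```json
+{
+  "id": "my-app-id",
+  "global_cors": {
+    "allow_origins_with_credentials": [
+      "https://app.example.com",
+      "https://*-preview.example.com"
+    ]
+  }
+}
+```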
+
+## Allowed headers and exposed headers
+
+CORS also lets you specify which headers are allowed to be sent by the client ("allowed headers"),
+and which headers are exposed to scripts running in the browser ("exposed headers").
+
+Encore automatically configures these header lists by parsing your program using static analysis.
+If your API defines a request or response type that contains a header field, Encore automatically adds that header to
+the list of allowed headers (for request types) or exposed headers (for response types).
+
+To add additional headers to these lists, you can set the `allow_headers` and `expose_headers` keys (see above).
+This can be useful when your application relies on custom headers, e.g. in raw endpoints that aren't seen by Encore's
+static analysis.
diff --git a/docs/go/develop/metadata.md b/docs/go/develop/metadata.md
new file mode 100644
index 0000000000..7dc1dae42a
--- /dev/null
+++ b/docs/go/develop/metadata.md
@@ -0,0 +1,110 @@
+---
+seotitle: Metadata API – Get data about apps, envs, and requests
+seodesc: See how to use Encore's Metadata API to get information about specific apps, environments, and requests.
+title: Metadata
+subtitle: Use the metadata API to get specifics about apps, environments, and requests
+infobox: {
+ title: "Metadata API",
+ import: "encore.dev",
+}
+lang: go
+---
+
+While Encore tries to provide a cloud-agnostic environment, sometimes it's helpful to know more about the environment
+your application is running in. For this reason Encore provides an API for accessing metadata about the
+[application](#application-metadata) and the environment it's running in, as well as information about the
+[current request](#current-request) as part of the `encore.dev` package.
+
+## Application Metadata
+
+Calling `encore.Meta()` will return an [encore.AppMetadata](https://pkg.go.dev/encore.dev/#AppMetadata) instance which
+contains information about the application, including:
+
+ - `AppID` - the application name.
+ - `APIBaseURL` - the URL the application API can be publicly accessed on.
+ - `Environment` - the [environment](/docs/platform/deploy/environments) the application is currently running in.
+ - `Build` - the revision information of the build from the version control system.
+ - `Deploy` - the deployment ID and when this version of the app was deployed.
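+
+For example, here's a small sketch that logs a few of these fields (the service package name is hypothetical):
+
+```go
+package mysvc
+
+import (
+	"encore.dev"
+	"encore.dev/rlog"
+)
+
+// logEnvironment logs which environment and cloud the app is running in.
+func logEnvironment() {
+	meta := encore.Meta()
+	rlog.Info("app metadata",
+		"env", meta.Environment.Name,
+		"cloud", meta.Environment.Cloud,
+	)
+}
+```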
+
+## Current Request
+
+`encore.CurrentRequest()` can be called from anywhere within your application and returns an
+[encore.Request](https://pkg.go.dev/encore.dev/#Request) instance that provides information about why the current
+code is running.
+
+The [encore.Request](https://pkg.go.dev/encore.dev/#Request) type contains information about the running request, such as:
+ - The service and endpoint being called
+ - Path and path parameter information
+ - When the request started
+
+This works automatically as a result of Encore's request tracking, and works even in other goroutines that were spawned
+during request handling. If the caller isn't processing a request, which can happen if you call it during service
+initialization, the `Type` field returns `None`. If `CurrentRequest()` is called from a goroutine spawned during request
+processing, it will continue to report the same request even if the request handler has already returned.
+
+This can be useful in [raw endpoints](/docs/go/primitives/raw-endpoints) with [path parameters](/docs/go/primitives/defining-apis#rest-apis):
+the standard `http.Request` object passed into a raw endpoint does not provide access to the parsed path parameters,
+but `encore.CurrentRequest().PathParams` does.
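+
+As a minimal sketch (the endpoint and parameter names are hypothetical; see the package docs for the exact
+`PathParams` accessor):
+
+```go
+package blobs
+
+import (
+	"fmt"
+	"net/http"
+
+	"encore.dev"
+)
+
+//encore:api public raw path=/blobs/:id
+func GetBlob(w http.ResponseWriter, req *http.Request) {
+	// The raw *http.Request doesn't expose the parsed ":id" parameter,
+	// but Encore's request metadata does.
+	id := encore.CurrentRequest().PathParams.Get("id")
+	fmt.Fprintf(w, "requested blob: %s", id)
+}
+```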
+
+
+## Example Use Cases
+
+### Using Cloud Specific Services
+
+All the [clouds](/docs/platform/deploy/own-cloud) contain a large number of services, not all of which Encore natively supports.
+By using information about the [environment](/docs/platform/deploy/environments), you can define the implementation of these and
+use different services for each environment's provider. For instance, if you are pushing audit logs into a data
+warehouse, you could use BigQuery when running on GCP, Redshift when running on AWS, and simply write them to a file
+when running locally.
+
+```go
+package audit
+
+import (
+	"context"
+	"fmt"
+
+	"encore.dev"
+	"encore.dev/beta/auth"
+)
+
+func Audit(ctx context.Context, action string, user auth.UID) error {
+ switch encore.Meta().Environment.Cloud {
+ case encore.CloudAWS:
+ return writeIntoRedshift(ctx, action, user)
+ case encore.CloudGCP:
+ return writeIntoBigQuery(ctx, action, user)
+ case encore.CloudLocal:
+ return writeIntoFile(ctx, action, user)
+ default:
+ return fmt.Errorf("unknown cloud: %s", encore.Meta().Environment.Cloud)
+ }
+}
+```
+
+### Checking Environment type
+
+When implementing a signup system, you may want to skip email verification on user signups when developing the application.
+Using the `encore.Meta()` API, we can check the environment and decide whether to send an email or simply mark the user as
+verified upon signup.
+
+```go
+package user
+
+import "encore.dev"
+
+//encore:api public
+func Signup(ctx context.Context, params *SignupParams) (*SignupResponse, error) {
+ // ...
+
+	// If this is a test or development environment, skip sending the verification email
+ switch encore.Meta().Environment.Type {
+ case encore.EnvTest, encore.EnvDevelopment:
+ if err := MarkEmailVerified(ctx, userID); err != nil {
+ return nil, err
+ }
+ default:
+ if err := SendVerificationEmail(ctx, userID); err != nil {
+ return nil, err
+ }
+ }
+
+ // ...
+}
+```
diff --git a/docs/go/develop/middleware.md b/docs/go/develop/middleware.md
new file mode 100644
index 0000000000..efd60ee4c6
--- /dev/null
+++ b/docs/go/develop/middleware.md
@@ -0,0 +1,141 @@
+---
+seotitle: Using Middleware in your backend application
+seodesc: See how you can use middleware in your backend application to handle cross-cutting generic functionality, like request logging, auth, or tracing.
+title: Middleware
+subtitle: Handling cross-cutting, generic functionality
+infobox: {
+  title: "Middleware",
+ import: "encore.dev/middleware",
+}
+lang: go
+---
+
+Middleware is a way to write reusable code that runs before or after (or both)
+the handling of API requests, often across several (or all) API endpoints.
+
+It's commonly used to implement cross-cutting concerns like
+[request logging](/docs/go/observability/logging),
+[authentication](/docs/go/develop/auth),
+[tracing](/docs/go/observability/tracing),
+and so on. One of the benefits of Encore is that
+all of these use cases are already handled out-of-the-box, so there's no
+need to use middleware for those things.
+
+Nonetheless, there are several use cases where it can be useful to write
+reusable functionality that applies to multiple API endpoints, and middleware
+is a good solution in those cases.
+
+Encore provides built-in support for middleware by defining a function with the
+`//encore:middleware` directive. The middleware directive takes a `target`
+parameter that specifies which API endpoints it applies to.
+
+## Middleware functions
+
+A typical middleware implementation looks like this:
+
+```go
+import (
+ "encore.dev/beta/errs"
+ "encore.dev/middleware"
+)
+
+//encore:middleware global target=all
+func ValidationMiddleware(req middleware.Request, next middleware.Next) middleware.Response {
+ // If the payload has a Validate method, use it to validate the request.
+ payload := req.Data().Payload
+ if validator, ok := payload.(interface { Validate() error }); ok {
+ if err := validator.Validate(); err != nil {
+ // If the validation fails, return an InvalidArgument error.
+ err = errs.WrapCode(err, errs.InvalidArgument, "validation failed")
+ return middleware.Response{Err: err}
+ }
+ }
+ return next(req)
+}
+```
+
+Middleware forms a chain, allowing each middleware to introspect and process
+the incoming request before handing it off to the next middleware by calling the
+`next` function that's passed in as an argument. For the last middleware in the
+chain, calling `next` results in the actual API handler being called.
+
+The `req` parameter provides information about the incoming request
+(see [package docs](https://pkg.go.dev/encore.dev/middleware#Request)).
+
+The `next` function returns a [`middleware.Response`](https://pkg.go.dev/encore.dev/middleware#Response)
+object which contains the response from the API, describing whether there was an error, and on success
+the actual response payload.
+
+This enables middleware to also introspect and even
+modify the outgoing response, like this:
+
+```go
+//encore:middleware target=tag:cache
+func CachingMiddleware(req middleware.Request, next middleware.Next) middleware.Response {
+ data := req.Data()
+ // Check if we have the response cached. Use the request path as the cache key.
+ cacheKey := data.Path
+ if cached, err := loadFromCache(cacheKey, data.API.ResponseType); err == nil && cached != nil {
+ return middleware.Response{Payload: cached}
+ }
+ // Otherwise forward the request to the handler
+ return next(req)
+}
+```
+
+This uses `target=tag:cache` to have the middleware only apply to APIs that have
+that tag. More on this below in [Targeting APIs](#targeting-apis).
+
+
+
+Middleware functions can also be defined as methods on a Dependency Injection
+struct declared with `//encore:service`. For example:
+
+```go
+//encore:service
+type Service struct{}
+
+//encore:middleware target=all
+func (s *Service) MyMiddleware(req middleware.Request, next middleware.Next) middleware.Response {
+ // ...
+}
+```
+
+See the [Dependency Injection](/docs/go/how-to/dependency-injection) docs for more information.
+
+
+
+## Middleware ordering
+
+Middleware can either be defined inside a service, in which case it only runs
+for APIs within that service, or it can be defined as a `global` middleware,
+in which case it applies to all services. For global middleware the `target`
+directive still applies and enables you to easily match a subset of APIs.
+
+
+
+Global middleware always runs before service-specific middleware. Within each group, middleware runs
+in the order it is defined in the source code, based on lexicographic ordering of file names.
+
+
+
+To avoid surprises it's best to define all middleware in a file called
+`middleware.go` in each service, and to create a single top-level package
+to contain all global middleware.
+
+## Targeting APIs
+
+The `target` directive can either be provided as `target=all` (meaning it applies
+to all APIs) or a list of tags, in the form `target=tag:foo,tag:bar`. Note that
+these tags are evaluated with `OR`, meaning the middleware applies to an API if
+the API has at least one of those tags.
+
+APIs can be defined with tags by adding `tag:foo` at the end of the `//encore:api` directive:
+
+```go
+//encore:api public method=GET path=/user/:id tag:cache
+func GetUser(ctx context.Context, id string) (*User, error) {
+ // ...
+}
+```
diff --git a/docs/go/develop/mocking.md b/docs/go/develop/mocking.md
new file mode 100644
index 0000000000..73687dc1c8
--- /dev/null
+++ b/docs/go/develop/mocking.md
@@ -0,0 +1,147 @@
+---
+seotitle: Mocking out your APIs and services for testing
+seodesc: Learn how to mock out your APIs and services for testing, and how to use the built-in mocking support in Encore.
+title: Mocking
+subtitle: Testing your application in isolation
+infobox: {
+ title: "Testing",
+ import: "encore.dev/et",
+}
+lang: go
+---
+
+Encore comes with built-in support for mocking out APIs and services, which makes it easier to test your application in
+isolation.
+
+## Mocking Endpoints
+
+Let's say you have an endpoint in your `products` service that calls an external API:
+
+```go
+//encore:api private
+func GetPrice(ctx context.Context, p *PriceParams) (*PriceResponse, error) {
+ // Call external API to get the price
+}
+```
+
+When testing this function, you don't want to call the real external API since that would be slow and cause your tests
+to fail if the API is down. Instead, you want to mock out the API call and return a fake response.
+
+In Encore, you can do this by adding a mock implementation of the endpoint using the `et.MockEndpoint` function inside your test:
+
+```go
+package shoppingcart
+
+import (
+ "context"
+ "testing"
+
+ "encore.dev/et" // Encore's test support package
+
+ "your_app/products"
+)
+
+
+func Test_Something(t *testing.T) {
+ t.Parallel() // Run this test in parallel with other tests without the mock implementation interfering
+
+ // Create a mock implementation of pricing API which will only impact this test and any sub-tests
+ et.MockEndpoint(products.GetPrice, func(ctx context.Context, p *products.PriceParams) (*products.PriceResponse, error) {
+ return &products.PriceResponse{Price: 100}, nil
+ })
+
+ // ... the rest of your test code here ...
+}
+```
+
+When any code within the test, or any sub-test, calls the `GetPrice` API, the mock implementation is called instead.
+The mock does not impact any other tests running in parallel. The function you pass to `et.MockEndpoint` must have the same
+signature as the real endpoint.
+
+If you want to mock out the API for all tests in the package, you can add the mock implementation to the `TestMain` function:
+
+```go
+package shoppingcart
+
+import (
+ "context"
+ "os"
+ "testing"
+
+ "encore.dev/et"
+
+ "your_app/products"
+)
+
+func TestMain(m *testing.M) {
+ // Create a mock implementation of pricing API which will impact all tests within this package
+ et.MockEndpoint(products.GetPrice, func(ctx context.Context, p *products.PriceParams) (*products.PriceResponse, error) {
+ return &products.PriceResponse{Price: 100}, nil
+ })
+
+ // Now run the tests
+ os.Exit(m.Run())
+}
+```
+
+Mocks can be changed at any time, including removing them by setting the mock implementation to `nil`.
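+
+For example, to remove the mock installed earlier within the same test:
+
+```go
+// Remove the mock again, restoring calls to the real endpoint
+// for the remainder of this test.
+et.MockEndpoint(products.GetPrice, nil)
+```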
+
+## Mocking services
+
+As well as mocking individual APIs, you can also mock entire services. This can be useful if you want to inject a different
+set of dependencies into your service for testing, or to mock out a service that your code depends on. This can be done using the
+`et.MockService` function:
+
+```go
+package shoppingcart
+
+import (
+ "context"
+ "testing"
+
+ "encore.dev/et" // Encore's test support package
+
+ "your_app/products"
+)
+
+func Test_Something(t *testing.T) {
+ t.Parallel() // Run this test in parallel with other tests without the mock implementation interfering
+
+	// Create an instance of the products service which will only impact this test and any sub-tests
+ et.MockService("products", &products.Service{
+ SomeField: "a testing value",
+ })
+
+ // ... the rest of your test code here ...
+}
+```
+
+When any code within the test, or any sub-test, calls the `products` service, the mock implementation is called instead.
+Unlike `et.MockEndpoint`, the mock implementation does not need to have the same signature, and can be any object. The only requirement
+is that any of the service's APIs that are called during the test must be implemented as receiver methods on the mock object.
+(This also includes APIs that are defined as package-level functions in the service, and are not necessarily defined as receiver methods
+on that service's struct.)
+
+To help with compile-time safety when mocking services, Encore automatically generates an `Interface` interface for every service,
+containing all the APIs defined in the service. This interface can be passed as a generic argument to `et.MockService` to ensure
+that the mock object implements all the APIs defined in the service:
+
+```go
+type myMockObject struct{}
+
+func (m *myMockObject) GetPrice(ctx context.Context, p *products.PriceParams) (*products.PriceResponse, error) {
+ return &products.PriceResponse{Price: 100}, nil
+}
+
+func Test_Something(t *testing.T) {
+ t.Parallel() // Run this test in parallel with other tests without the mock implementation interfering
+
+ // This will cause a compile time error if myMockObject does not implement all the APIs defined in the products service
+ et.MockService[products.Interface]("products", &myMockObject{})
+}
+```
+
+### Automatic generation of mock objects
+
+Thanks to the generated `Interface` interface, it's possible to automatically generate mock objects for your services using
+either [Mockery](https://vektra.github.io/mockery/latest/) or [GoMock](https://github.com/uber-go/mock).
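+
+As a sketch of what this can look like with a Mockery-generated mock (the `mocks` package path and the
+`NewInterface` constructor name depend on your Mockery configuration and are assumptions here):
+
+```go
+import (
+	"testing"
+
+	"github.com/stretchr/testify/mock"
+
+	"encore.dev/et"
+
+	"your_app/products"
+	"your_app/products/mocks" // hypothetical: generated by Mockery
+)
+
+func Test_WithGeneratedMock(t *testing.T) {
+	// mocks.NewInterface is the constructor Mockery generates for
+	// the products.Interface interface.
+	m := mocks.NewInterface(t)
+	m.On("GetPrice", mock.Anything, mock.Anything).
+		Return(&products.PriceResponse{Price: 100}, nil)
+
+	et.MockService[products.Interface]("products", m)
+}
+```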
diff --git a/docs/go/develop/testing.md b/docs/go/develop/testing.md
new file mode 100644
index 0000000000..d65a14a653
--- /dev/null
+++ b/docs/go/develop/testing.md
@@ -0,0 +1,100 @@
+---
+seotitle: Automated testing for your backend application
+seodesc: Learn how to create automated tests for your microservices backend application, and run them automatically on deploy using Go and Encore.
+title: Automated testing
+subtitle: Confidence at speed
+infobox: {
+ title: "Testing",
+ import: "encore.dev/et",
+}
+lang: go
+---
+
+Go comes with excellent built-in support for automated tests.
+Encore builds on top of this foundation and lets you write tests in exactly the same way.
+We won't cover the basics of how to write tests here; see [the official Go docs](https://golang.org/pkg/testing/) for that.
+Let's instead focus on how testing in Encore differs from a standard Go application.
+
+The main difference is that since Encore requires an extra compilation step,
+you must run your tests using `encore test` instead of `go test`. This is
+a wrapper that compiles the Encore app and then runs `go test`. It supports
+all the same flags that the `go test` command does.
+
+For example, use `encore test ./...` to run tests in all sub-directories,
+or just `encore test` for the current directory.
+
+## Test tracing
+
+Encore comes with built-in test tracing for local development.
+
+You only need to open Encore's local development dashboard at [localhost:9400](http://localhost:9400) to see traces for all your tests.
+This makes it very simple to understand the root cause for why a test is failing.
+
+
+
+
+## Integration testing
+
+Since Encore removes almost all boilerplate, most of the code you write
+is business logic that involves databases and calling APIs between services.
+Such behavior is most easily tested with integration tests.
+
+When running tests, Encore automatically sets up the databases you need
+in a separate database cluster. They are additionally configured to skip `fsync`
+and to use an in-memory filesystem since durability is not a concern for automated tests.
+
+This drastically reduces the speed overhead of writing integration tests.
+
+In general, Encore applications tend to focus more on integration tests
+compared to traditional applications that are heavier on unit tests.
+This is nothing to worry about and is the recommended best practice.
+
+### Temporary databases
+
+When Encore runs tests, by default it reuses the same database for all tests,
+to improve performance. However, this means that you need to take care when writing tests
+to ensure tests don't interfere with each other.
+
+If you instead want to have a separate database for a given test, you can use
+[`et.NewTestDatabase`](https://pkg.go.dev/encore.dev/et#NewTestDatabase) to create a temporary database
+that only exists for the duration of the test.
+
+The temporary test database is a fully-migrated database. It does not include any data written by other tests.
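+
+As a sketch, assuming a service with a database named `blog` (check the linked package docs for the exact signature):
+
+```go
+func TestWithTemporaryDB(t *testing.T) {
+	// Create a fully-migrated, empty database that only exists
+	// for the duration of this test.
+	db, err := et.NewTestDatabase(context.Background(), "blog")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Use db like any *sqldb.Database, e.g. db.Exec or db.QueryRow.
+	_ = db
+}
+```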
+
+
+
+Under the hood, when you start running tests, Encore sets up a fresh "template database" and runs the database migrations
+against that database. When you later call `et.NewTestDatabase`, Encore creates a new database by cloning the template database.
+
+
+
+### Service Structs
+
+In tests, [service structs](/docs/go/primitives/service-structs) are initialized on demand when the first
+API call is made to that service, and that instance of the service struct is then reused for all future tests. This means your tests
+can run faster, as they don't each have to initialize all the service structs every time a new test starts.
+
+However, in some situations you might be storing state in the service struct that would interfere with other tests. When
+a test needs its own instance of the service struct, call `et.EnableServiceInstanceIsolation()` within that test to enable this for just that test, while the rest of your tests continue to use the shared instance.
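+
+For example:
+
+```go
+func TestWithIsolatedService(t *testing.T) {
+	// Give this test (and its sub-tests) fresh service struct
+	// instances instead of the shared ones.
+	et.EnableServiceInstanceIsolation()
+
+	// ... test code that mutates service state ...
+}
+```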
+
+## Test-only infrastructure
+
+Encore allows tests to define infrastructure resources specifically for testing.
+This can be useful for testing library code that interacts with infrastructure.
+
+For example, the [x.encore.dev/pubsub/outbox](https://pkg.go.dev/x.encore.dev/infra/pubsub/outbox) package
+defines a test-only database that is used to do integration testing of the outbox functionality.
+
+## Testing from your IDE
+
+### GoLand / IntelliJ
+
+Encore has an officially supported plugin [available in the JetBrains marketplace](https://plugins.jetbrains.com/plugin/20010-encore).
+
+It lets you run unit tests directly from within your IDE with support for debug mode and breakpoints.
+
+### Visual Studio Code (VS Code)
+
+There's no official VS Code plugin available yet, but we are happy to include your contribution if you build one. Reach out on [Discord](/discord) if you need help to get started.
+
+For advice on debugging when using VS Code, see the [Debugging docs](/docs/go/how-to/debug).
diff --git a/docs/go/develop/validation.md b/docs/go/develop/validation.md
new file mode 100644
index 0000000000..36885359a3
--- /dev/null
+++ b/docs/go/develop/validation.md
@@ -0,0 +1,26 @@
+---
+seotitle: Request validation in your backend application
+seodesc: Learn how request validation works, and see how you can use Encore's built-in middleware to validate incoming requests in your backend application.
+title: Validation
+subtitle: Making sure everything's right in the world
+lang: go
+---
+
+When receiving incoming requests it's best practice to validate the
+payload to make sure it meets your expectations, contains all the necessary
+fields, and so on.
+
+Encore provides an out-of-the-box middleware that automatically validates
+incoming requests if the request type implements the method `Validate() error`.
+
+If it does, Encore will call this method after deserializing the request payload,
+and only call your API handler (and other middleware) if the validation function
+returns `nil`.
+
+If the validation function returns an [`*errs.Error`](/docs/go/primitives/api-errors) that error
+is reported unmodified to the caller. Other errors are converted to an `*errs.Error`
+with code `InvalidArgument`, which results in an HTTP response with status code `400 Bad Request`.
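+
+For example, here's a minimal sketch of a request type with a `Validate` method (the field and rule are illustrative):
+
+```go
+import (
+	"errors"
+	"strings"
+)
+
+type CreateUserParams struct {
+	Email string `json:"email"`
+}
+
+// Validate is called by Encore after deserializing the request payload.
+func (p *CreateUserParams) Validate() error {
+	if !strings.Contains(p.Email, "@") {
+		return errors.New("email must contain an @ sign")
+	}
+	return nil
+}
+```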
+
+This design means that it's easy to use your validation library of choice.
+In the future we're looking to provide an out-of-the-box validation library
+for an even better developer experience.
diff --git a/docs/go/faq.md b/docs/go/faq.md
new file mode 100644
index 0000000000..9f346a52bb
--- /dev/null
+++ b/docs/go/faq.md
@@ -0,0 +1,61 @@
+---
+seotitle: Frequently Asked Questions
+seodesc: See quick answers to common questions about Encore
+title: FAQ
+subtitle: Quick answers to common questions
+lang: go
+---
+
+## About the project
+
+**Is Encore Open Source?**
+
+Yes, check out the project on [GitHub](https://github.com/encoredev/encore).
+
+**Is there a community?**
+
+Yes, you're welcome to join the developer community on [Discord](https://encore.dev/discord).
+
+## Can I use X with Encore?
+
+**Can I use Python with Encore?**
+
+Encore currently supports Go and TypeScript. Python support is on the [roadmap](https://encore.dev/roadmap) and will be available around Q1 2025.
+
+**Can I mix TypeScript and Go in one application?**
+
+Support for mixing languages is coming. Currently, if you want to use both TypeScript and Go, you need to create a separate application per language and integrate them using APIs.
+
+**Can I use Azure / Digital Ocean?**
+
+Encore Cloud currently supports automating deployments to AWS and GCP. Azure support is on the [roadmap](https://encore.dev/roadmap) and will be available in 2025.
+
+If you want to use other cloud providers like Azure or Digital Ocean, you can follow the [self-hosting instructions](/docs/go/self-host/docker-build).
+
+**Can I use MongoDB / MySQL with Encore?**
+
+Encore currently has built-in support for PostgreSQL. To use another type of database, like MongoDB or MySQL, you will need to set it up and integrate with it as you normally would without Encore.
+
+**Can I use AWS lambda with Encore?**
+
+Not right now. Encore currently supports AWS Fargate and EKS (along with Cloud Run and GKE on Google Cloud Platform).
+
+## IDE Integrations
+
+**Is there an Encore plugin for GoLand / IntelliJ?**
+
+Yes, Encore's official GoLand plugin is available in the [JetBrains marketplace](https://plugins.jetbrains.com/plugin/20010-encore).
+
+**Is there an Encore plugin for VS Code?**
+
+Not yet, it's coming soon.
+
+## Troubleshooting
+
+**symlink creation error on Windows**
+
+Encore currently relies on symbolic links, which may be disabled by default. A common fix for this issue is to enable "developer mode" in the Windows settings (Settings > System > For developers > Developer mode).
+
+**`node` errors**
+
+You might need to restart the Encore daemon, e.g. if your PATH has changed since installing nvm. Restart the daemon by running `encore daemon`.
diff --git a/docs/go/how-to/assets/logto-api-resource.png b/docs/go/how-to/assets/logto-api-resource.png
new file mode 100644
index 0000000000..60aa6b98d1
Binary files /dev/null and b/docs/go/how-to/assets/logto-api-resource.png differ
diff --git a/docs/go/how-to/assets/logto-application-endpoints.png b/docs/go/how-to/assets/logto-application-endpoints.png
new file mode 100644
index 0000000000..85185a1b57
Binary files /dev/null and b/docs/go/how-to/assets/logto-application-endpoints.png differ
diff --git a/docs/go/how-to/atlas-gorm.md b/docs/go/how-to/atlas-gorm.md
new file mode 100644
index 0000000000..4eb400fbb4
--- /dev/null
+++ b/docs/go/how-to/atlas-gorm.md
@@ -0,0 +1,161 @@
+---
+seotitle: How to use Atlas + GORM for database migrations with Encore
+seodesc: See how you can use Atlas to manage your database migrations in your Encore application.
+title: Use Atlas + GORM for database migrations
+lang: go
+---
+
+[Atlas](https://atlasgo.io) is a popular tool for managing database migrations.
+[GORM](https://gorm.io/) is a popular ORM for Go.
+
+Encore provides excellent support for using them together to easily manage database schemas and migrations.
+Encore executes database migrations using [golang-migrate](https://github.com/golang-migrate/migrate),
+which Atlas supports out-of-the-box. This means that you can use Atlas to manage your Encore database migrations.
+
+The easiest way to use Atlas + GORM together is with Atlas's support for [external schemas](https://atlasgo.io/blog/2023/06/28/external-schemas-and-gorm-support).
+
+## Setting up GORM
+
+To set up your Encore application with GORM, start by installing the GORM package and associated Postgres driver:
+
+```shell
+go get -u gorm.io/gorm gorm.io/driver/postgres
+```
+
+Then, in the service that you want to use GORM for, add the `*gorm.DB` as a dependency
+in your service struct (create a service struct if you don't already have one).
+
+For example, if you had a service called `blog`:
+
+```go
+-- blog/blog.go --
+package blog
+
+import (
+ "encore.dev/storage/sqldb"
+ "gorm.io/driver/postgres"
+ "gorm.io/gorm"
+)
+
+
+//encore:service
+type Service struct {
+ db *gorm.DB
+}
+
+var blogDB = sqldb.NewDatabase("blog", sqldb.DatabaseConfig{
+ Migrations: "./migrations",
+})
+
+// initService initializes the blog service.
+// It is automatically called by Encore on service startup.
+func initService() (*Service, error) {
+ db, err := gorm.Open(postgres.New(postgres.Config{
+ Conn: blogDB.Stdlib(),
+ }))
+ if err != nil {
+ return nil, err
+ }
+ return &Service{db: db}, nil
+}
+```
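+
+The Atlas loader script we'll create shortly references `Post` and `Comment` GORM models in the `blog` package.
+If you don't have any models yet, here's a minimal sketch (the fields are placeholders for illustration):
+
+```go
+-- blog/models.go --
+package blog
+
+import "time"
+
+// Post is a GORM model; the fields are illustrative.
+type Post struct {
+	ID        uint `gorm:"primarykey"`
+	Title     string
+	Body      string
+	CreatedAt time.Time
+}
+
+// Comment belongs to a Post via PostID.
+type Comment struct {
+	ID     uint `gorm:"primarykey"`
+	PostID uint
+	Body   string
+}
+```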
+
+Finally, create the `migrations` directory inside the `blog` directory if it doesn't already exist.
+This is where Atlas will put your database migrations.
+
+## Setting up Atlas
+
+First [install Atlas](https://atlasgo.io/getting-started).
+
+Then, add an `atlas.hcl` file inside the `blog` directory:
+
+```
+-- blog/atlas.hcl --
+data "external_schema" "gorm" {
+ program = ["env", "ENCORERUNTIME_NOPANIC=1", "go", "run", "./scripts/atlas-gorm-loader.go"]
+}
+
+env "local" {
+ src = data.external_schema.gorm.url
+
+ migration {
+ dir = "file://migrations"
+ format = golang-migrate
+ }
+
+ format {
+ migrate {
+ diff = "{{ sql . \" \" }}"
+ }
+ }
+}
+```
+
+Next, we need to create the `atlas-gorm-loader` script referenced above.
+It will use the [atlas-provider-gorm](https://github.com/ariga/atlas-provider-gorm) library provided by Atlas.
+
+Create the file as follows:
+
+```
+-- blog/scripts/atlas-gorm-loader.go --
+package main
+
+import (
+ "fmt"
+ "io"
+ "os"
+
+ _ "ariga.io/atlas-go-sdk/recordriver"
+ "ariga.io/atlas-provider-gorm/gormschema"
+ "encore.app/blog"
+)
+
+// Define the models to generate migrations for.
+var models = []any{
+ &blog.Post{},
+ &blog.Comment{},
+}
+
+func main() {
+ stmts, err := gormschema.New("postgres").Load(models...)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to load gorm schema: %v\n", err)
+ os.Exit(1)
+ }
+ io.WriteString(os.Stdout, stmts)
+}
+```
+
+## Creating migrations
+
+To wrap things up, let's create a script to automate the process of generating migrations:
+
+```
+-- blog/scripts/generate-migration --
+#!/bin/bash
+set -eu
+DB_NAME=blog
+MIGRATION_NAME=${1:-}
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+# Reset the shadow database
+encore db reset --shadow $DB_NAME
+
+# GORM executes Go code without initializing Encore when generating migrations,
+# so configure the Encore runtime to be aware that this is expected.
+export ENCORERUNTIME_NOPANIC=1
+
+# Generate the migration
+atlas migrate diff $MIGRATION_NAME --env local --dev-url "$(encore db conn-uri --shadow $DB_NAME)&search_path=public"
+```
+
+Finally, let's make the script executable and generate our first migration:
+
+```shell
+$ chmod +x blog/scripts/generate-migration
+$ cd blog && ./scripts/generate-migration init
+```
+
+This will generate a new migration file in the `blog/migrations` directory, which
+will be automatically applied when running `encore run`.
diff --git a/docs/go/how-to/auth0-auth.md b/docs/go/how-to/auth0-auth.md
new file mode 100644
index 0000000000..654a8e82b3
--- /dev/null
+++ b/docs/go/how-to/auth0-auth.md
@@ -0,0 +1,620 @@
+---
+seotitle: How to use Auth0 for your backend application
+seodesc: Learn how to use Auth0 for user authentication in your backend application. In this guide we show you how to integrate your Go backend with Auth0.
+title: Use Auth0 with your app
+lang: go
+---
+
+In this guide you will learn how to set up an Encore [auth handler](/docs/go/develop/auth#the-auth-handler) that makes use of
+[Auth0](https://auth0.com/) in order to add a seamless signup and login experience to your web app.
+
+For the full code and instructions on how to clone and run this example locally, see the [Auth0 Example](https://github.com/encoredev/examples/tree/main/auth0) in our examples repo.
+
+## Communicate with Auth0
+
+In your Encore app, install two modules:
+
+```shell
+$ go get github.com/coreos/go-oidc/v3/oidc golang.org/x/oauth2
+```
+
+Create a folder named `auth`; this is where our authentication-related backend code will live.
+
+Next, let's set up the Auth0 `Authenticator` that will be used by our auth handler. The `Authenticator` has a method to configure and return [OAuth2](https://pkg.go.dev/golang.org/x/oauth2?utm_source=godoc) and [oidc](https://pkg.go.dev/github.com/coreos/go-oidc?utm_source=godoc) clients, and another one to verify an ID Token.
+
+Create `auth/authenticator.go` and paste the following:
+
+```go
+package auth
+
+import (
+ "context"
+ "crypto/rand"
+ "encoding/base64"
+ "encore.dev/config"
+ "errors"
+ "github.com/coreos/go-oidc/v3/oidc"
+ "golang.org/x/oauth2"
+)
+
+type Auth0Config struct {
+ ClientID config.String
+ Domain config.String
+ CallbackURL config.String
+ LogoutURL config.String
+}
+
+var cfg = config.Load[*Auth0Config]()
+
+var secrets struct {
+ Auth0ClientSecret string
+}
+
+// Authenticator is used to authenticate our users.
+type Authenticator struct {
+ *oidc.Provider
+ oauth2.Config
+}
+
+// New instantiates the *Authenticator.
+func New() (*Authenticator, error) {
+ provider, err := oidc.NewProvider(
+ context.Background(),
+ "https://"+cfg.Domain()+"/",
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ conf := oauth2.Config{
+ ClientID: cfg.ClientID(),
+ ClientSecret: secrets.Auth0ClientSecret,
+ RedirectURL: cfg.CallbackURL(),
+ Endpoint: provider.Endpoint(),
+ Scopes: []string{oidc.ScopeOpenID, "profile", "email"},
+ }
+
+ return &Authenticator{
+ Provider: provider,
+ Config: conf,
+ }, nil
+}
+
+// VerifyIDToken verifies that an *oauth2.Token is a valid *oidc.IDToken.
+func (a *Authenticator) VerifyIDToken(ctx context.Context, token *oauth2.Token) (*oidc.IDToken, error) {
+ rawIDToken, ok := token.Extra("id_token").(string)
+ if !ok {
+ return nil, errors.New("no id_token field in oauth2 token")
+ }
+
+ oidcConfig := &oidc.Config{
+ ClientID: a.ClientID,
+ }
+
+ return a.Verifier(oidcConfig).Verify(ctx, rawIDToken)
+}
+
+func generateRandomState() (string, error) {
+ b := make([]byte, 32)
+ _, err := rand.Read(b)
+ if err != nil {
+ return "", err
+ }
+
+ state := base64.StdEncoding.EncodeToString(b)
+
+ return state, nil
+}
+```
+
+## Set up the auth handler
+
+It's time to define your [auth handler](/docs/go/develop/auth) and the endpoints needed for the login and logout flow.
+
+Create the `auth/auth.go` file and paste the following:
+
+```go
+package auth
+
+import (
+ "context"
+ "net/url"
+
+ "encore.dev/beta/auth"
+ "encore.dev/beta/errs"
+ "github.com/coreos/go-oidc/v3/oidc"
+)
+
+// Service struct definition.
+// Learn more: encore.dev/docs/primitives/services-and-apis/service-structs
+//
+//encore:service
+type Service struct {
+ auth *Authenticator
+}
+
+// initService is automatically called by Encore when the service starts up.
+func initService() (*Service, error) {
+ authenticator, err := New()
+ if err != nil {
+ return nil, err
+ }
+ return &Service{auth: authenticator}, nil
+}
+
+type LoginResponse struct {
+ State string `json:"state"`
+ AuthCodeURL string `json:"auth_code_url"`
+}
+
+//encore:api public method=POST path=/auth/login
+func (s *Service) Login(ctx context.Context) (*LoginResponse, error) {
+ state, err := generateRandomState()
+ if err != nil {
+ return nil, &errs.Error{
+ Code: errs.Internal,
+ Message: err.Error(),
+ }
+ }
+
+ return &LoginResponse{
+ State: state,
+ // add the audience to the auth code url
+ AuthCodeURL: s.auth.AuthCodeURL(state),
+ }, nil
+}
+
+type CallbackRequest struct {
+ Code string `json:"code"`
+}
+
+type CallbackResponse struct {
+ Token string `json:"token"`
+}
+
+//encore:api public method=POST path=/auth/callback
+func (s *Service) Callback(
+ ctx context.Context,
+ req *CallbackRequest,
+) (*CallbackResponse, error) {
+
+ // Exchange an authorization code for a token.
+ token, err := s.auth.Exchange(ctx, req.Code)
+ if err != nil {
+ return nil, &errs.Error{
+ Code: errs.PermissionDenied,
+ Message: "Failed to convert an authorization code into a token.",
+ }
+ }
+
+ idToken, err := s.auth.VerifyIDToken(ctx, token)
+ if err != nil {
+ return nil, &errs.Error{
+ Code: errs.Internal,
+ Message: "Failed to verify ID Token.",
+ }
+ }
+
+ var profile map[string]interface{}
+ if err := idToken.Claims(&profile); err != nil {
+ return nil, &errs.Error{
+ Code: errs.Internal,
+ Message: err.Error(),
+ }
+ }
+
+ return &CallbackResponse{
+ Token: token.Extra("id_token").(string),
+ }, nil
+}
+
+type LogoutResponse struct {
+ RedirectURL string `json:"redirect_url"`
+}
+
+//encore:api public method=GET path=/auth/logout
+func (s *Service) Logout(ctx context.Context) (*LogoutResponse, error) {
+ logoutUrl, err := url.Parse("https://" + cfg.Domain() + "/v2/logout")
+ if err != nil {
+ return nil, &errs.Error{
+ Code: errs.Internal,
+ Message: err.Error(),
+ }
+ }
+
+ returnTo, err := url.Parse(cfg.LogoutURL())
+ if err != nil {
+ return nil, &errs.Error{
+ Code: errs.Internal,
+ Message: err.Error(),
+ }
+ }
+
+ parameters := url.Values{}
+ parameters.Add("returnTo", returnTo.String())
+ parameters.Add("client_id", cfg.ClientID())
+ logoutUrl.RawQuery = parameters.Encode()
+
+ return &LogoutResponse{
+ RedirectURL: logoutUrl.String(),
+ }, nil
+}
+
+type ProfileData struct {
+ Email string `json:"email"`
+ Picture string `json:"picture"`
+}
+
+// The `encore:authhandler` annotation tells Encore to run this function for all
+// incoming API calls that require authentication.
+// Learn more: encore.dev/docs/develop/auth#the-auth-handler
+//
+//encore:authhandler
+func (s *Service) AuthHandler(
+ ctx context.Context,
+ token string,
+) (auth.UID, *ProfileData, error) {
+ oidcConfig := &oidc.Config{
+ ClientID: s.auth.ClientID,
+ }
+
+ t, err := s.auth.Verifier(oidcConfig).Verify(ctx, token)
+ if err != nil {
+ return "", nil, &errs.Error{
+ Code: errs.Unauthenticated,
+ Message: "invalid token",
+ }
+ }
+
+ var profile map[string]interface{}
+ if err := t.Claims(&profile); err != nil {
+ return "", nil, &errs.Error{
+ Code: errs.Internal,
+ Message: err.Error(),
+ }
+ }
+
+ // Extract profile data returned from the identity provider.
+ // auth0.com/docs/manage-users/user-accounts/user-profiles/user-profile-structure
+ profileData := &ProfileData{
+ Email: profile["email"].(string),
+ Picture: profile["picture"].(string),
+ }
+
+ return auth.UID(profile["sub"].(string)), profileData, nil
+}
+
+// Endpoints annotated with `auth` are public and require authentication.
+// Learn more: encore.dev/docs/primitives/apis#access-controls
+//
+//encore:api auth method=GET path=/profile
+func GetProfile(ctx context.Context) (*ProfileData, error) {
+ return auth.Data().(*ProfileData), nil
+}
+```
+
+## Auth0 settings
+
+The `Authenticator` requires some values that are specific to your Auth0 application, namely the `ClientID`, `ClientSecret`, `Domain`, `CallbackURL` and `LogoutURL`.
+
+Create an Auth0 account if you haven't already. Then, in the Auth0 dashboard, create a new *Single Page Web Application*.
+
+
+
+Next, go to the *Application Settings* section. There you will find the `Domain`, `Client ID`, and `Client Secret` that you need to communicate with Auth0.
+Copy these values; we will need them shortly.
+
+
+
+A callback URL is where Auth0 redirects the user after they have been authenticated.
+Add `http://localhost:3000/callback` to the *Allowed Callback URLs*.
+You will need to add more URLs to this list when you add production or staging environments.
+
+The same goes for the logout URL (where the user will be redirected after logout). Add `http://localhost:3000/` to the *Allowed Logout URLs*.
+
+
+
+
+## Config and secrets
+
+Create a [configuration file](/docs/go/develop/config) in the `auth` service and name it `auth-config.cue`. Add the following:
+
+```cue
+ClientID: ""
+Domain: ""
+
+// An application running locally
+if #Meta.Environment.Type == "development" && #Meta.Environment.Cloud == "local" {
+ CallbackURL: "http://localhost:3000/callback"
+ LogoutURL: "http://localhost:3000/"
+}
+```
+
+Replace the `ClientID` and `Domain` values with those you got from the Auth0 dashboard.
+
+The `ClientSecret` is especially sensitive and should not be hardcoded in your code/config. Instead, you should store that as an [Encore secret](/docs/go/primitives/secrets).
+
+From your terminal (inside your Encore app directory), run:
+
+```shell
+$ encore secret set --prod Auth0ClientSecret
+```
+
+Now you should do the same for the development secret. The most secure way is to set up a different Auth0 application and use that for development.
+Depending on your security requirements you could also use the same secret for development and production.
+
+Once you have a client secret for development, set it similarly to before:
+
+```shell
+$ encore secret set --dev Auth0ClientSecret
+```
+
+That's it! Encore will run your auth handler and validate the token against Auth0.
+
+## Frontend
+
+Now that the backend is set up, we can create a frontend application that uses the login flow.
+
+Here's an example using [React](https://react.dev/) together with [React Router](https://reactrouter.com/). This example
+also makes use of Encore's ability to [generate request clients](/docs/go/cli/client-generation) to make the communication
+with our backend simple and type-safe.
+
+```tsx
+-- App.tsx --
+import { PropsWithChildren } from "react";
+import {
+ createBrowserRouter,
+ Link,
+ Outlet,
+ redirect,
+ RouterProvider,
+ useRouteError,
+} from "react-router-dom";
+import { Auth0Provider } from "./lib/auth";
+import AdminDashboard from "./components/AdminDashboard.tsx";
+
+import IndexPage from "./components/IndexPage.tsx";
+import "./App.css";
+import LoginStatus from "./components/LoginStatus.tsx";
+
+// Application routes
+const router = createBrowserRouter([
+ {
+ id: "root",
+ path: "/",
+ Component: Layout,
+ errorElement: (
+      <Layout>
+        <ErrorBoundary />
+      </Layout>
+ ),
+ children: [
+ {
+ Component: Outlet,
+ children: [
+ {
+ index: true,
+ Component: IndexPage,
+ },
+ {
+ // Login route
+ path: "login",
+ loader: async ({ request }) => {
+ const url = new URL(request.url);
+ const searchParams = new URLSearchParams(url.search);
+ const returnToURL = searchParams.get("returnTo") ?? "/";
+
+ if (Auth0Provider.isAuthenticated()) return redirect(returnToURL);
+
+ try {
+ const returnURL = await Auth0Provider.login(returnToURL);
+ return redirect(returnURL);
+ } catch (error) {
+ throw new Error("Login failed");
+ }
+ },
+ },
+ {
+ // Callback route, redirected to from Auth0 after login
+ path: "callback",
+ loader: async ({ request }) => {
+ const url = new URL(request.url);
+ const searchParams = new URLSearchParams(url.search);
+ const state = searchParams.get("state");
+ const code = searchParams.get("code");
+
+ if (!state || !code) throw new Error("Login failed");
+
+ try {
+ const redirectURL = await Auth0Provider.validate(state, code);
+ return redirect(redirectURL);
+ } catch (error) {
+ throw new Error("Login failed");
+ }
+ },
+ },
+ {
+ // Logout route
+ path: "logout",
+ loader: async () => {
+ try {
+ const redirectURL = await Auth0Provider.logout();
+ return redirect(redirectURL);
+ } catch (error) {
+ throw new Error("Logout failed");
+ }
+ },
+ },
+ {
+          element: <Outlet />,
+ // Redirect to /login if not authenticated
+ loader: async ({ request }) => {
+ if (!Auth0Provider.isAuthenticated()) {
+ const params = new URLSearchParams();
+ params.set("returnTo", new URL(request.url).pathname);
+ return redirect("/login?" + params.toString());
+ }
+ return null;
+ },
+ // Protected routes
+ children: [
+ {
+ path: "admin-dashboard",
+ Component: AdminDashboard,
+ },
+ ],
+ },
+ ],
+ },
+ ],
+ },
+]);
+
+export default function App() {
+  return <RouterProvider router={router} fallbackElement={<p>Loading...</p>} />;
+}
+
+function Layout({ children }: PropsWithChildren) {
+  return (
+    <>
+      <LoginStatus />
+      {children ?? <Outlet />}
+    </>
+  );
+}
+
+function ErrorBoundary() {
+  const error = useRouteError() as Error;
+  return (
+    <div>
+      <h2>Something went wrong</h2>
+      <p>{error.message || JSON.stringify(error)}</p>
+    </div>
+  );
+}
+-- lib/auth.ts --
+import Cookies from "js-cookie";
+import getRequestClient from "./getRequestClient.ts";
+
+type RedirectURL = string;
+
+/**
+ * Handles the backend communication for the authentication flow.
+ */
+export const Auth0Provider = {
+ client: getRequestClient(),
+ isAuthenticated: () => !!Cookies.get("auth-token"),
+
+  async login(returnTo: RedirectURL): Promise<RedirectURL> {
+ const response = await this.client.auth.Login();
+ Cookies.set("state", response.state);
+ sessionStorage.setItem(response.state, returnTo);
+ return response.auth_code_url;
+ },
+
+  async logout(): Promise<RedirectURL> {
+ const response = await this.client.auth.Logout();
+
+ Cookies.remove("auth-token");
+ Cookies.remove("state");
+
+ return response.redirect_url;
+ },
+
+  async validate(state: string, authCode: string): Promise<RedirectURL> {
+ if (state != Cookies.get("state")) throw new Error("Invalid state");
+
+ const response = await this.client.auth.Callback({ code: authCode });
+ Cookies.set("auth-token", response.token);
+ const returnURL = sessionStorage.getItem(state) ?? "/";
+ sessionStorage.removeItem(state);
+ return returnURL;
+ },
+};
+-- components/LoginStatus.tsx --
+import getRequestClient from "../lib/getRequestClient.ts";
+import { useFetcher } from "react-router-dom";
+import { useEffect, useState } from "react";
+import { auth } from "../lib/client.ts";
+import { Auth0Provider } from "../lib/auth.ts";
+
+/**
+ * Component displaying login/logout button and basic user information if logged in.
+ */
+function LoginStatus() {
+ const client = getRequestClient();
+ const fetcher = useFetcher();
+  const [profile, setProfile] = useState<auth.ProfileData>();
+ const [loading, setLoading] = useState(true);
+
+ // Fetch profile data if user is authenticated
+ useEffect(() => {
+ const getProfile = async () => {
+ setProfile(await client.auth.GetProfile());
+ setLoading(false);
+ };
+ if (Auth0Provider.isAuthenticated()) getProfile();
+ else setLoading(false);
+ }, []);
+
+ if (loading) return null;
+
+ if (profile) {
+    return (
+      <div>
+        <button onClick={() => fetcher.load("/logout")}>
+          Sign out {profile.email}
+        </button>
+      </div>
+    );
+ }
+
+ const params = new URLSearchParams();
+ params.set("returnTo", window.location.pathname);
+  return (
+    <div>
+      <button onClick={() => fetcher.load("/login?" + params.toString())}>
+        {fetcher.state !== "idle" ? "Signing in..." : "Sign in"}
+      </button>
+    </div>
+  );
+}
+
+export default LoginStatus;
+-- lib/getRequestClient.ts --
+import Client, { Environment, Local } from "./client.ts";
+import Cookies from "js-cookie";
+
+/**
+ * Returns the generated Encore request client for either the local or staging environment.
+ * If we are running the frontend locally (development) we assume that our Encore
+ * backend is also running locally.
+ */
+const getRequestClient = () => {
+ const token = Cookies.get("auth-token");
+ const env = import.meta.env.DEV ? Local : Environment("staging");
+
+ return new Client(env, {
+ auth: token,
+ });
+};
+
+export default getRequestClient;
+```
+
+## Auth0 Social Identity Providers
+
+Auth0 supports multiple [social identity providers](https://auth0.com/docs/authenticate/identity-providers/social-identity-providers) (like Google and GitHub) for web applications out of the box.
diff --git a/docs/go/how-to/break-up-monolith.md b/docs/go/how-to/break-up-monolith.md
new file mode 100644
index 0000000000..01e05b1a32
--- /dev/null
+++ b/docs/go/how-to/break-up-monolith.md
@@ -0,0 +1,85 @@
+---
+seotitle: Break a monolith into microservices
+seodesc: Learn how to quickly break up your backend monolith into microservices using Encore, while avoiding the common pitfalls.
+title: Break a monolith into microservices
+subtitle: Evolving your architecture as needed
+lang: go
+---
+
+It's common to want to break out specific functionality into separate services. Perhaps you want to independently scale a specific service, or simply want to structure your codebase in smaller pieces.
+
+Encore makes it simple to evolve your system architecture over time, and enables you to deploy your application in multiple different ways without making code changes.
+
+## How to break out a service from a monolith
+
+As a (slightly silly) example, let's imagine we have a monolith `hello` with two API endpoints `H1` and `H2`. It looks like this:
+
+```go
+package hello
+
+import (
+ "context"
+)
+
+//encore:api public path=/hello/:name
+func H1(ctx context.Context, name string) (*Response, error) {
+ msg := "Hello, " + name + "!"
+ return &Response{Message: msg}, nil
+}
+
+//encore:api public path=/yo/:name
+func H2(ctx context.Context, name string) (*Response, error) {
+ msg := "Yo, " + name + "!"
+ return &Response{Message: msg}, nil
+}
+
+type Response struct {
+ Message string
+}
+```
+
+Now we're going to break out `H2` into its own separate service. Happily, all we need to do is create a new package, let's call it `yo`, and move the `H2` endpoint into it.
+
+Like so:
+```go
+package yo
+
+import (
+ "context"
+)
+
+//encore:api public path=/yo/:name
+func H2(ctx context.Context, name string) (*Response, error) {
+ msg := "Yo, " + name + "!"
+ return &Response{Message: msg}, nil
+}
+
+type Response struct {
+ Message string
+}
+```
+
+On disk we now have:
+```
+/my-app
+├── encore.app // ... and other top-level project files
+│
+├── hello // hello service (a Go package)
+│ └── hello.go // hello service code
+│
+└── yo // yo service (a Go package)
+ └── yo.go // yo service code
+```
+
+Encore now understands these are separate services, and when you run your app you'll see that the [Service Catalog](/docs/go/observability/service-catalog) has been automatically updated accordingly.
+
+
+
+As well as the [Flow architecture diagram](/docs/go/observability/encore-flow).
+
+
+
+## Sharing databases between services (or not)
+
+Deciding whether to share a database between multiple services depends on your specific situation. Encore supports both options. Learn more in the [database documentation](/docs/go/primitives/share-db-between-services).
+
diff --git a/docs/go/how-to/cgo.md b/docs/go/how-to/cgo.md
new file mode 100644
index 0000000000..870f160384
--- /dev/null
+++ b/docs/go/how-to/cgo.md
@@ -0,0 +1,36 @@
+---
+seotitle: Build Go applications with cgo using Encore
+seodesc: Learn how to build Go applications with cgo using Encore
+title: Build with cgo
+lang: go
+---
+
+Cgo is a feature of the Go compiler that enables Go programs to interface
+with libraries written in other languages using C bindings.
+
+By default, for improved portability Encore builds applications with cgo support disabled.
+
+To enable cgo for your application, add `"build": {"cgo_enabled": true}` to your `encore.app` file.
+
+For example:
+
+```json
+-- encore.app --
+{
+ "id": "my-app-id",
+ "build": {
+ "cgo_enabled": true
+ }
+}
+```
+
+With this setting Encore's build system will compile the application using an Ubuntu builder image
+with gcc pre-installed.
+
+## Static linking
+
+To keep the resulting Docker images as minimal as possible, Encore compiles applications with static linking.
+This happens even with cgo enabled. As a result, the cgo libraries you use must support static linking.
+
+In some cases, you may need to add additional linker flags to properly work with static linking of cgo libraries.
+See the [official cgo docs](https://pkg.go.dev/cmd/cgo) for more information on how to do this.
diff --git a/docs/go/how-to/clerk-auth.md b/docs/go/how-to/clerk-auth.md
new file mode 100644
index 0000000000..0c51024af2
--- /dev/null
+++ b/docs/go/how-to/clerk-auth.md
@@ -0,0 +1,157 @@
+---
+seotitle: How to use Clerk to authenticate users in your backend application
+seodesc: Learn how to use Clerk for user authentication in your backend application. In this guide we show you how to integrate your Go backend with Clerk.
+title: Use Clerk with your app
+lang: go
+---
+
+In this guide you will learn how to set up an Encore [auth handler](/docs/go/develop/auth#the-auth-handler) that makes use of
+[Clerk](https://clerk.com/) in order to add an integrated signup and login experience to your web app.
+
+For all the code and instructions of how to clone and run this example locally, see the [Clerk Example](https://github.com/encoredev/examples/tree/main/clerk) in our examples repo.
+
+## Set up the auth handler
+
+In your Encore app, install the following module:
+
+```shell
+$ go get github.com/clerkinc/clerk-sdk-go/clerk
+```
+
+Create a folder and name it `auth`; this is where our authentication-related backend code will live.
+
+It's time to define your [auth handler](/docs/go/develop/auth). Create `auth/auth.go` and paste the following:
+
+```go
+package auth
+
+import (
+ "context"
+ "encore.dev/beta/auth"
+ "encore.dev/beta/errs"
+ "github.com/clerkinc/clerk-sdk-go/clerk"
+)
+
+var secrets struct {
+ ClientSecretKey string
+}
+
+// Service struct definition.
+// Learn more: encore.dev/docs/primitives/services-and-apis/service-structs
+//
+//encore:service
+type Service struct {
+ client clerk.Client
+}
+
+// initService is automatically called by Encore when the service starts up.
+func initService() (*Service, error) {
+ client, err := clerk.NewClient(secrets.ClientSecretKey)
+ if err != nil {
+ return nil, err
+ }
+ return &Service{client: client}, nil
+}
+
+type UserData struct {
+ ID string `json:"id"`
+ Username *string `json:"username"`
+ FirstName *string `json:"first_name"`
+ LastName *string `json:"last_name"`
+ ProfileImageURL string `json:"profile_image_url"`
+ PrimaryEmailAddressID *string `json:"primary_email_address_id"`
+ EmailAddresses []clerk.EmailAddress `json:"email_addresses"`
+}
+
+// The `encore:authhandler` annotation tells Encore to run this function for all
+// incoming API calls that require authentication.
+// Learn more: encore.dev/docs/develop/auth#the-auth-handler
+//
+//encore:authhandler
+func (s *Service) AuthHandler(ctx context.Context, token string) (auth.UID, *UserData, error) {
+ // verify the session
+ sessClaims, err := s.client.VerifyToken(token)
+ if err != nil {
+ return "", nil, &errs.Error{
+ Code: errs.Unauthenticated,
+ Message: "invalid token",
+ }
+ }
+
+ user, err := s.client.Users().Read(sessClaims.Claims.Subject)
+ if err != nil {
+ return "", nil, &errs.Error{
+ Code: errs.Internal,
+ Message: err.Error(),
+ }
+ }
+
+ userData := &UserData{
+ ID: user.ID,
+ Username: user.Username,
+ FirstName: user.FirstName,
+ LastName: user.LastName,
+ ProfileImageURL: user.ProfileImageURL,
+ PrimaryEmailAddressID: user.PrimaryEmailAddressID,
+ EmailAddresses: user.EmailAddresses,
+ }
+
+ return auth.UID(user.ID), userData, nil
+}
+```
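+
+With the auth handler in place, any endpoint annotated with the `auth` access level requires a valid Clerk token. A minimal sketch of such an endpoint (the `profile` service is an illustrative assumption, as is the `encore.app` module name):
+
+```go
+package profile
+
+import (
+    "context"
+
+    "encore.dev/beta/auth"
+
+    userauth "encore.app/auth" // the auth service defined above
+)
+
+type ProfileResponse struct {
+    Name string `json:"name"`
+}
+
+//encore:api auth method=GET path=/profile
+func Get(ctx context.Context) (*ProfileResponse, error) {
+    // auth.Data() returns the *UserData produced by the auth handler.
+    data := auth.Data().(*userauth.UserData)
+    name := ""
+    if data.FirstName != nil {
+        name = *data.FirstName
+    }
+    return &ProfileResponse{Name: name}, nil
+}
+```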
+
+## Clerk credentials
+
+Create a Clerk account if you haven't already. Then, in the Clerk dashboard, create a new application.
+
+Next, go to the *API Keys* page for your app. Copy one of the "Secret keys" (the "Publishable Key" will be used by your frontend).
+
+The secret key is sensitive and should not be hardcoded in your code or config. Instead, you should store it as an [Encore secret](/docs/go/primitives/secrets).
+
+From your terminal (inside your Encore app directory), run:
+
+```shell
+$ encore secret set --prod ClientSecretKey
+```
+
+Now you should do the same for the development secret. The most secure way is to create another secret key (Clerk allows you to have multiple).
+Once you have a client secret for development, set it similarly to before:
+
+```shell
+$ encore secret set --dev ClientSecretKey
+```
+
+## Frontend
+
+Clerk offers a [React SDK](https://clerk.com/docs/references/react/overview) for the frontend which makes it really simple to integrate
+a login/signup flow inside your web app as well as getting the token required to communicate with your Encore backend.
+
+You can use the `useAuth` hook from `@clerk/clerk-react` to get the token and send it to your backend.
+
+```tsx
+import { useAuth } from '@clerk/clerk-react';
+
+export default function ExternalDataPage() {
+ const { getToken, isLoaded, isSignedIn } = useAuth();
+
+  if (!isLoaded) {
+    // Handle loading state however you like
+    return <div>Loading...</div>;
+  }
+
+  if (!isSignedIn) {
+    // Handle signed out state however you like
+    return <div>Sign in to view this page</div>;
+  }
+
+  const fetchDataFromExternalResource = async () => {
+    const token = await getToken();
+    // Use token to send to Encore backend when fetching data
+    return data;
+  };
+
+  return <div>...</div>;
+}
+```
+
+For a fully working backend + frontend example see the [Clerk Example](https://github.com/encoredev/examples/tree/main/clerk) in our examples repo.
diff --git a/docs/go/how-to/debug.md b/docs/go/how-to/debug.md
new file mode 100644
index 0000000000..70778d7ce5
--- /dev/null
+++ b/docs/go/how-to/debug.md
@@ -0,0 +1,118 @@
+---
+seotitle: How to debug your application with Delve
+seodesc: Learn how to debug your Go backend application using Delve and Encore.
+title: Debug with Delve
+lang: go
+---
+
+Encore makes it easy to debug your application using [Delve](https://github.com/go-delve/delve "Delve").
+
+First, make sure you have `dlv` installed by running (Go 1.16 and later):
+
+```shell
+$ go install github.com/go-delve/delve/cmd/dlv@latest
+```
+
+You have two debugger options: you can either debug by attaching to a running process or by starting the process in debug mode.
+
+## Debug by starting the process in debug mode
+Run your Encore application with `encore run --debug=break`. This will launch your application with a headless Delve server, which will pause your application until a debugger is attached.
+
+```shell
+$ encore run --debug=break
+API Base URL: http://localhost:4000
+Dev Dashboard URL: http://localhost:9400/hello-world-cgu2
+
+API server listening at: 127.0.0.1:2345
+```
+
+Now it's time to attach the debugger. The instructions differ depending on how you would like to debug (in your terminal or in your editor). If instructions for your editor aren’t listed below, consult your editor for information on how to attach to a Delve server.
+
+### Terminal debugging
+To debug in your terminal, run `dlv connect :2345`. You should see:
+
+```shell
+$ dlv connect :2345
+Type 'help' for list of commands.
+(dlv)
+```
+How to use Delve’s terminal interface for debugging is out of scope for this guide, but there are great resources available. For a good introduction, see [Debugging with Delve](https://golang.cafe/blog/golang-debugging-with-delve.html).
+
+### Visual Studio Code
+To debug with VS Code you must first add a debug configuration. Press `Run -> Add Configuration`, choose `Go -> Connect to server`. Input `127.0.0.1` as host and `2345` as port. The resulting configuration should look something like this:
+
+```json
+{
+ "version": "0.2.0",
+ "configurations": [
+ {
+ "name": "Connect to server",
+ "type": "go",
+ "request": "attach",
+ "mode": "remote",
+ "remotePath": "${workspaceFolder}",
+ "port": 2345,
+ "host": "127.0.0.1"
+ }
+ ]
+}
+```
+
+Next, open the **Run and Debug** menu in the toolbar on the left, select `Connect to server` (the configuration you just created), and then press the green arrow.
+
+That’s it! You should be able to set breakpoints and have the Encore application pause when they’re hit like you would expect.
+
+### Goland
+To debug with Goland, you must create a new Go Remote configuration. Press `Run | Edit Configurations`, click the `+` button, and choose `Go Remote`. Give it a name and hit `OK`.
+
+Now select the configuration you just created and press the green bug icon.
+
+That's it. You should be able to set breakpoints and have the Encore application pause when they’re hit like you would expect.
+
+## Debug by attaching to a running process
+
+Run your Encore application with `encore run --debug`. This will cause Encore to print the Process ID to the terminal, which you will use to attach your debugger:
+
+```shell
+$ encore run --debug
+API Base URL: http://localhost:4000
+Dev Dashboard URL: http://localhost:9400/hello-world-cgu2
+Process ID: 51894
+1:48PM INF registered endpoint path=/hello/:name service=hello endpoint=Hello
+```
+
+(Your process id will differ).
+
+When your Encore application is running, it’s time to attach the debugger. The instructions differ depending on how you would like to debug (in your terminal or in your editor). If instructions for your editor aren’t listed below, consult your editor for information on how to attach a debugger to a running process.
+
+### Terminal debugging
+To debug in your terminal, run `dlv attach $PID` (replace `$PID` with your Process ID from the previous step). You should see:
+
+```shell
+$ dlv attach 51894
+Type 'help' for list of commands.
+(dlv)
+```
+
+How to use Delve’s terminal interface for debugging is out of scope for this guide, but there are great resources available. For a good introduction, see [Debugging with Delve](https://golang.cafe/blog/golang-debugging-with-delve.html).
+
+### Visual Studio Code
+To debug with VS Code you must first add a debug configuration. Press `Run -> Add Configuration`, choose `Go -> Attach to local process`. In the generated configuration, you should see `"processId": 0` as a field. Replace `0` with the process id from above.
+
+Next, open the **Run and Debug** menu in the toolbar on the left, select Attach to Process (the configuration you just created), and then press the green arrow.
+
+That’s it! You should be able to set breakpoints and have the Encore application pause when they’re hit like you would expect.
+
+### Goland
+To debug with Goland, you must first install the `gops` package. Open a terminal and run the following command:
+
+```shell
+$ go install github.com/google/gops@latest
+```
+
+Then click `Run | Attach to Process`. If a notification window appears, click the `Invoke 'go get gops'` link. Once
+it has completed, click `Run | Attach to Process` again. In the dialog that appears, select the process with the
+process ID from above.
+
+That's it. You should be able to set breakpoints and have the Encore application pause when they’re hit like you would expect.
+
diff --git a/docs/go/how-to/dependency-injection.md b/docs/go/how-to/dependency-injection.md
new file mode 100644
index 0000000000..a0d2866c3a
--- /dev/null
+++ b/docs/go/how-to/dependency-injection.md
@@ -0,0 +1,68 @@
+---
+seotitle: How to use dependency injection to test your microservices app
+seodesc: Learn how to use dependency injection in your Go based microservices backend application using Encore.
+title: Dependency Injection
+subtitle: Simplifying testing
+lang: go
+---
+
+Dependency Injection is a fancy name for a simple concept: when you depend on some
+functionality, add that dependency as a field on your struct and refer to it that way
+instead of directly calling it. By doing so it becomes easier to test your services
+by swapping out certain dependencies for other implementations (often with the use of
+interfaces).
+
+Encore provides built-in support for dependency injection in services through the use
+of the `//encore:service` directive and a **service struct**. See the [service structs docs](/docs/go/primitives/service-structs) for more information on how to define service structs.
+
+As an example, consider an email service that has a SendGrid API client that is
+dependency injected. It might look like this:
+
+```go
+package email
+
+//encore:service
+type Service struct {
+ sendgridClient *sendgrid.Client
+}
+
+func initService() (*Service, error) {
+ client, err := sendgrid.NewClient()
+ if err != nil {
+ return nil, err
+ }
+ return &Service{sendgridClient: client}, nil
+}
+```
+
+You can then define APIs as methods on this struct:
+```go
+//encore:api private
+func (s *Service) Send(ctx context.Context, p *SendParams) error {
+ // ... use s.sendgridClient to send emails ...
+}
+```
+
+### Mocking dependencies
+
+If you wish to mock out the SendGrid client for testing purposes you can change the
+field to an interface:
+
+```go
+type sendgridClient interface {
+ SendEmail(...) // a hypothetical signature, for illustration purposes
+}
+
+//encore:service
+type Service struct {
+ sendgridClient sendgridClient
+}
+```
+
+Then during your tests you can instantiate the service object by hand:
+```go
+func TestFoo(t *testing.T) {
+ svc := &Service{sendgridClient: &myMockClient{}}
+ // ...
+}
+```
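+
+A minimal sketch of what such a mock might look like, assuming a hypothetical
+`SendEmail(to, subject, body string) error` signature for the interface above:
+
+```go
+// myMockClient records sent emails instead of calling SendGrid.
+type myMockClient struct {
+    sent []string
+}
+
+func (m *myMockClient) SendEmail(to, subject, body string) error {
+    m.sent = append(m.sent, to)
+    return nil
+}
+```
+
+Your test can then assert against `mock.sent` to verify the service attempted
+to send the expected emails.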
diff --git a/docs/go/how-to/entgo-orm.md b/docs/go/how-to/entgo-orm.md
new file mode 100644
index 0000000000..bc932c8771
--- /dev/null
+++ b/docs/go/how-to/entgo-orm.md
@@ -0,0 +1,185 @@
+---
+seotitle: Use ent + Atlas for database schema management with Encore.
+seodesc: See how you can use an ORM like ent with Atlas to handle your database schemas.
+title: Use ent ORM + Atlas for database schemas
+lang: go
+---
+
+Encore has all the tools needed to support ORMs and migration frameworks out-of-the-box through
+[named databases](/docs/go/primitives/share-db-between-services) and
+[migration files](/docs/go/primitives/databases#defining-a-database-schema). Writing plain SQL might
+not work for your use case, or you may not want to use SQL in the first place.
+
+ORMs like [ent](https://entgo.io/) and migration frameworks like [Atlas](https://atlasgo.io/) can
+be used with Encore by integrating their logic with a system's database. Encore is not restrictive;
+it uses plain SQL migration files for its migrations.
+
+- If your ORM of choice can connect to any database using a [standard SQL driver](https://github.com/lib/pq), then it can be used with Encore.
+- If your migration framework can generate SQL migration files without any modifications, then it can be used with Encore.
+
+Let's take a look at how you can integrate ent with Encore, using Atlas for generating the migration files.
+
+## Add ent schemas to a service
+[Install ent](https://entgo.io/docs/tutorial-setup#installation), then initialize your first
+schema in the service where you want to use it. For example, if you had the following app structure:
+
+```
+/my-app
+├── encore.app
+└── user // user service
+```
+
+You can then use this command to generate a user schema along with the ent directory that will contain
+that schema and all future generated files:
+
+```shell
+$ go run entgo.io/ent/cmd/ent@latest new --target user/ent/schema User
+```
+
+The `--target` option sets the schema directory within your Encore system. Each system
+should contain its own models and schemas, and its own migration files, just as it would when using
+plain SQL.
+
+Add the fields and edges for your new model in the generated file under `user/ent/schema/user.go`.
+
+Now, run the following command:
+
+```shell
+$ go run entgo.io/ent/cmd/ent@latest generate ./user/ent/schema
+```
+
+This generates the ent client files. Run this command again whenever you change the schemas.
+
+## Integrating ent with an Encore database
+
+Encore automates database provisioning, and automatically runs migrations in all environments.
+
+To integrate ent with Encore, we need to do three things:
+
+1. Create the Encore database
+2. Set up the ent client to use that database.
+3. Generate migration files for the ent schema, using Atlas.
+
+### Create the Encore database
+
+Create the database using [`sqldb.NewDatabase`](/docs/go/primitives/databases) in `user/user.go`:
+
+```
+-- user/user.go --
+package user
+
+import "encore.dev/storage/sqldb"
+
+var userDB = sqldb.NewDatabase("user", sqldb.DatabaseConfig{
+ Migrations: "./migrations",
+})
+```
+
+Now, create the `migrations` directory, and leave it empty for now:
+
+```shell
+$ mkdir user/migrations
+```
+
+### Connect ent to the database
+
+Next, extend the user service with a [Service Struct](/docs/go/primitives/service-structs) that
+creates an ent client connected to the database.
+
+Replace the contents of the `user/user.go` file with:
+
+```
+-- user/user.go --
+package user
+
+import (
+ "encore.dev/storage/sqldb"
+ "entgo.io/ent/dialect"
+ entsql "entgo.io/ent/dialect/sql"
+
+ "encore.app/user/ent"
+)
+
+var userDB = sqldb.NewDatabase("user", sqldb.DatabaseConfig{
+ Migrations: "./migrations",
+})
+
+//encore:service
+type Service struct{
+ ent *ent.Client
+}
+
+func initService() (*Service, error) {
+ driver := entsql.OpenDB(dialect.Postgres, userDB.Stdlib())
+ entClient := ent.NewClient(ent.Driver(driver))
+ return &Service{ent: entClient}, nil
+}
+```
+
+Now ent is fully wired up to the Encore database, and can be used from the service struct in any API endpoint.
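+
+For example, an endpoint sketch that uses the client (the `Name` field on the `User` schema is an assumption for illustration; add `"context"` to the imports):
+
+```go
+type CreateUserParams struct {
+    Name string
+}
+
+type CreateUserResponse struct {
+    ID int
+}
+
+//encore:api public method=POST path=/users
+func (s *Service) CreateUser(ctx context.Context, p *CreateUserParams) (*CreateUserResponse, error) {
+    // Use the generated ent client to insert a row in the user database.
+    u, err := s.ent.User.Create().SetName(p.Name).Save(ctx)
+    if err != nil {
+        return nil, err
+    }
+    return &CreateUserResponse{ID: u.ID}, nil
+}
+```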
+
+## Using Atlas for database migrations
+
+Finally, we'll set up Atlas to generate database migrations for the ent schema.
+
+First, make sure you [have Atlas installed](https://atlasgo.io/getting-started).
+
+Then, create the file `user/atlas.hcl` containing the following:
+
+```
+-- user/atlas.hcl --
+env "local" {
+ src = "ent://ent/schema"
+
+ migration {
+ dir = "file://migrations"
+ format = golang-migrate
+ }
+
+ format {
+ migrate {
+ diff = "{{ sql . \" \" }}"
+ }
+ }
+}
+```
+
+This tells Atlas to generate migrations for the ent schema, and to output them to the `migrations` directory.
+
+Atlas works by comparing the desired ent schema with the current database schema, and generating a migration
+to bring the database schema in line with the ent schema. This relies on a so-called "shadow database",
+which is an empty database that Atlas uses to compare the ent schema against.
+
+Fortunately for us, Encore has built-in support for shadow databases.
+
+Create the file `user/scripts/generate-migration` containing the following:
+
+```
+-- user/scripts/generate-migration --
+#!/bin/bash
+set -eu
+DB_NAME=user
+MIGRATION_NAME=${1:-}
+
+# Reset the shadow database
+encore db reset --shadow $DB_NAME
+
+# ent executes Go code without initializing Encore when generating migrations,
+# so configure the Encore runtime to be aware that this is expected.
+export ENCORERUNTIME_NOPANIC=1
+
+# Generate the migration
+atlas migrate diff $MIGRATION_NAME --env local --dev-url "$(encore db conn-uri --shadow $DB_NAME)&search_path=public"
+```
+
+Finally, make the script executable, and generate our first migration:
+
+```shell
+$ chmod +x user/scripts/generate-migration
+$ cd user && ./scripts/generate-migration init
+```
+
+You should see a new migration file being added to the `user/migrations` directory,
+containing the schema changes to create the ent models.
+
+You can now run the service with `encore run`, and everything should be ready to go!
diff --git a/docs/go/how-to/firebase-auth.md b/docs/go/how-to/firebase-auth.md
new file mode 100644
index 0000000000..69c977e459
--- /dev/null
+++ b/docs/go/how-to/firebase-auth.md
@@ -0,0 +1,163 @@
+---
+seotitle: How to use Firebase Auth for your backend application
+seodesc: Learn how to use Firebase Auth for user authentication in your backend application. In this guide we show you how to integrate your Go backend with Firebase Auth.
+title: Use Firebase Auth with your app
+lang: go
+---
+
+Encore's [authentication support](/docs/go/develop/auth) provides a simple yet powerful
+way of dealing with various authentication scenarios.
+
+[Firebase Authentication](https://firebase.google.com/docs/auth) is a common solution for quickly setting up a user store and simplifying social logins.
+
+Encore makes it really easy to integrate with Firebase Authentication on the backend.
+
+For all the code and instructions of how to clone and run this example locally, see the [Firebase Auth Example](https://github.com/encoredev/examples/tree/main/firebase-auth) in our examples repo.
+
+## Set up auth handler
+
+First, install two modules:
+
+```shell
+$ go get firebase.google.com/go/v4 go4.org/syncutil
+```
+
+Next it's time to define your [authentication handler](/docs/go/develop/auth).
+It can live in whatever service you'd like, but it's usually easiest
+to create a designated `user` service.
+
+Create the `user/user.go` file and add the following skeleton code:
+
+```go
+package user
+
+import (
+ "context"
+ "strings"
+
+ "encore.dev/beta/auth"
+ firebase "firebase.google.com/go/v4"
+ fbauth "firebase.google.com/go/v4/auth"
+ "go4.org/syncutil"
+ "google.golang.org/api/option"
+)
+
+// Data represents the user's data stored in Firebase Auth.
+type Data struct {
+ // Email is the user's email.
+ Email string
+ // Name is the user's name.
+ Name string
+ // Picture is the user's picture URL.
+ Picture string
+}
+
+// ValidateToken validates an auth token against Firebase Auth.
+//encore:authhandler
+func ValidateToken(ctx context.Context, token string) (auth.UID, *Data, error) {
+ panic("Not Yet Implemented")
+}
+```
+
+## Initialize Firebase SDK
+
+Next, let's set up the Firebase Auth client. We'll use `syncutil.Once`
+to do it lazily the first time we need it.
+
+Add the following to the bottom of the file:
+
+```go
+var (
+ fbAuth *fbauth.Client
+ setupOnce syncutil.Once
+)
+
+// setupFB ensures Firebase Auth is setup.
+func setupFB() error {
+ return setupOnce.Do(func() error {
+ opt := option.WithCredentialsJSON([]byte(secrets.FirebasePrivateKey))
+ app, err := firebase.NewApp(context.Background(), nil, opt)
+ if err == nil {
+ fbAuth, err = app.Auth(context.Background())
+ }
+ return err
+ })
+}
+
+var secrets struct {
+ // FirebasePrivateKey is the JSON credentials for calling Firebase.
+ FirebasePrivateKey string
+}
+```
+
+## Validate token against Firebase
+
+Now that we have the code to initialize Firebase Auth, we can use it from our `ValidateToken` auth handler.
+Update the function to look like the following:
+
+```go
+func ValidateToken(ctx context.Context, token string) (auth.UID, *Data, error) {
+ if err := setupFB(); err != nil {
+ return "", nil, err
+ }
+ tok, err := fbAuth.VerifyIDToken(ctx, token)
+ if err != nil {
+ return "", nil, err
+ }
+
+ email, _ := tok.Claims["email"].(string)
+ name, _ := tok.Claims["name"].(string)
+ picture, _ := tok.Claims["picture"].(string)
+ uid := auth.UID(tok.UID)
+
+ usr := &Data{
+ Email: email,
+ Name: name,
+ Picture: picture,
+ }
+ return uid, usr, nil
+}
+```
+
+Great! We're done with the code. Now we just need to set up the secret.
+
+## Set Firebase secret credentials
+
+If you haven't already, set up a Firebase project.
+
+Then, go to **Project settings** and navigate to **Service accounts**.
+Select `Go` as the language of choice and click `Generate new private key`.
+Download the generated key and take note where it is stored.
+
+Next, store the private key as your Firebase secret.
+From your terminal (inside your Encore app directory), run:
+
+```shell
+$ encore secret set --type prod FirebasePrivateKey < /path/to/firebase-private-key.json
+Successfully updated production secret FirebasePrivateKey
+```
+
+Now you should do the same for the development secret. The most secure way is to
+set up a different Firebase project and use that for development.
+
+Depending on your security requirements you could also use the same Firebase project,
+but we recommend generating a new private key for development in that case.
+
+Once you have a private key for development, set it similarly to before:
+
+```shell
+$ encore secret set --type dev,local,pr FirebasePrivateKey < /path/to/firebase-private-key.json
+Successfully updated development secret FirebasePrivateKey
+```
+
+That's it! You can now call your Encore application and pass in Firebase tokens.
+Encore will run your auth handler and validate the token against Firebase Auth.
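+
+As a quick check, here's a sketch of an endpoint gated by this auth handler (the endpoint name and path are illustrative; it can live in the `user` service or any other):
+
+```go
+package user
+
+import (
+    "context"
+
+    "encore.dev/beta/auth"
+)
+
+type WhoAmIResponse struct {
+    UserID string
+}
+
+//encore:api auth method=GET path=/whoami
+func WhoAmI(ctx context.Context) (*WhoAmIResponse, error) {
+    // For the "auth" access level, the UID is always set when this runs.
+    uid, _ := auth.UserID()
+    return &WhoAmIResponse{UserID: string(uid)}, nil
+}
+```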
+
+## Frontend
+
+Firebase offers a [npm package](https://www.npmjs.com/package/firebase) for your web frontend which makes it really simple to create
+a login/signup flow inside your web app as well as getting the token required to communicate with your Encore backend.
+
+For a fully working backend + frontend example see the [Firebase Auth Example](https://github.com/encoredev/examples/tree/main/firebase-auth) in our examples repo.
diff --git a/docs/go/how-to/grpc-connect.md b/docs/go/how-to/grpc-connect.md
new file mode 100644
index 0000000000..89446eefd2
--- /dev/null
+++ b/docs/go/how-to/grpc-connect.md
@@ -0,0 +1,199 @@
+---
+seotitle: Use Connect for gRPC/protobuf-based APIs with Encore
+seodesc: See how you can use the Connect protocol for gRPC communication with Encore services
+title: Use Connect for incoming gRPC requests
+lang: go
+---
+
+The [Connect protocol](https://connectrpc.com/) is an HTTP/2-based protocol for RPC communication.
+It's conceptually similar to gRPC, but with better support for use from browsers and JavaScript clients.
+
+This guide shows how to use Encore for setting up a Connect service for external clients to use:
+
+1. First, we'll define a simple gRPC service using Protobuf and Connect.
+2. Then, we'll implement the service in Go, using [connect-go](https://connectrpc.com/docs/go/getting-started).
+3. Then, we'll mount the Connect service into Encore with a raw endpoint.
+4. Finally, we'll call the Connect service from cURL using its JSON mapping.
+
+## Define a Connect service
+
+We'll largely follow the connect-go [getting started guide](https://connectrpc.com/docs/go/getting-started)
+with some small tweaks.
+
+Start by installing the necessary tools:
+
+```shell
+$ go install github.com/bufbuild/buf/cmd/buf@latest
+$ go install github.com/fullstorydev/grpcurl/cmd/grpcurl@latest
+$ go install google.golang.org/protobuf/cmd/protoc-gen-go@latest
+$ go install connectrpc.com/connect/cmd/protoc-gen-connect-go@latest
+```
+
+Next, inside your Encore application ([create one if you haven't already](/docs/go/quick-start))
+create a new file at `greet/v1/greet.proto` with the following contents:
+
+```
+-- greet/v1/greet.proto --
+syntax = "proto3";
+
+package greet.v1;
+
+option go_package = "encore.app/gen/greet/v1;greetv1";
+
+message GreetRequest {
+ string name = 1;
+}
+
+message GreetResponse {
+ string greeting = 1;
+}
+
+service GreetService {
+ rpc Greet(GreetRequest) returns (GreetResponse) {}
+}
+```
+
+Next, add a `buf.gen.yaml` in the repository root, containing:
+
+```
+-- buf.gen.yaml --
+version: v2
+plugins:
+ - local: protoc-gen-go
+ out: gen
+ opt: paths=source_relative
+ - local: protoc-gen-connect-go
+ out: gen
+ opt: paths=source_relative
+```
+
+Now it's time to generate the connect-go service code. Run:
+
+```shell
+$ buf lint
+$ buf generate
+```
+
+If all went well, you should see a new `gen` directory in the repository root containing some generated Go code:
+
+```
+gen
+└── greet
+ └── v1
+ ├── greet.pb.go
+ └── greetv1connect
+ └── greet.connect.go
+```
+
+## Implement the service
+
+Now that we have the service definition, we can implement the Connect service in Go.
+
+Add the file `greet/greet.go` with the following contents:
+
+```
+-- greet/greet.go --
+package greet
+
+import (
+ "context"
+ "fmt"
+ "log"
+
+ "connectrpc.com/connect"
+
+ greetv1 "encore.app/gen/greet/v1" // generated by protoc-gen-go
+)
+
+type GreetServer struct{}
+
+func (s *GreetServer) Greet(
+ ctx context.Context,
+ req *connect.Request[greetv1.GreetRequest],
+) (*connect.Response[greetv1.GreetResponse], error) {
+ log.Println("Request headers: ", req.Header())
+ res := connect.NewResponse(&greetv1.GreetResponse{
+ Greeting: fmt.Sprintf("Hello, %s!", req.Msg.Name),
+ })
+ res.Header().Set("Greet-Version", "v1")
+ return res, nil
+}
+```
+
+The sample code is straight from the [getting started guide](https://connectrpc.com/docs/go/getting-started);
+there are no Encore-specific changes required here.
+
+## Mount the service in Encore
+
+Now we'll create an Encore [service struct](/docs/go/primitives/service-structs)
+that initializes the Connect service, and a [raw endpoint](/docs/go/primitives/raw-endpoints)
+that forwards incoming requests to the Connect service.
+
+Add the file `greet/service.go` with the following contents:
+
+```
+-- greet/service.go --
+package greet
+
+import (
+ "net/http"
+
+ "encore.app/gen/greet/v1/greetv1connect"
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/h2c"
+)
+
+//encore:service
+type Service struct {
+ routes http.Handler
+}
+
+//encore:api public raw path=/greet.v1.GreetService/*endpoint
+func (s *Service) GreetService(w http.ResponseWriter, req *http.Request) {
+ s.routes.ServeHTTP(w, req)
+}
+
+func initService() (*Service, error) {
+ greeter := &GreetServer{}
+ mux := http.NewServeMux()
+ path, handler := greetv1connect.NewGreetServiceHandler(greeter)
+ mux.Handle(path, handler)
+ return &Service{routes: mux}, nil
+}
+```
+
+That's it! We're ready to run the service and check that everything works.
+
+## Run the service
+
+Run the service with `encore run`:
+
+```shell
+$ encore run
+```
+
+Once it starts up, open a separate terminal and use `grpcurl` to call the service:
+
+```shell
+# Install grpcurl if you haven't already
+$ go install github.com/fullstorydev/grpcurl/cmd/grpcurl@latest
+
+# Call the service with grpcurl
+$ grpcurl \
+ -protoset <(buf build -o -) -plaintext \
+ -d '{"name": "Jane"}' \
+ localhost:4000 greet.v1.GreetService/Greet
+{"greeting": "Hello, Jane!"}
+
+# Or call the service with curl
+$ curl -H "Content-Type: application/json" -d '{"name": "Jane"}' http://localhost:4000/greet.v1.GreetService/Greet
+{"greeting":"Hello, Jane!"} # Expected response
+```
+
+If you see `{"greeting":"Hello, Jane!"}`, everything is working!
+
+What's more, Encore automatically traces incoming requests, adds request logging, and captures request metrics.
diff --git a/docs/go/how-to/http-requests.md b/docs/go/how-to/http-requests.md
new file mode 100644
index 0000000000..341f881e7e
--- /dev/null
+++ b/docs/go/how-to/http-requests.md
@@ -0,0 +1,59 @@
+---
+seotitle: How to receive regular HTTP requests in your backend application
+seodesc: Learn how to receive regular HTTP requests in your Go based backend application using Encore.
+title: Receive regular HTTP requests
+subtitle: Dropping down in abstraction level
+lang: go
+---
+
+Encore makes it easy to define APIs and expose them, but it works best when you are in charge of the API schema.
+
+Sometimes you need more control over the underlying HTTP request, such as to accept incoming webhooks from other
+services, or to use WebSockets to stream data to/from the client.
+
+For these use cases Encore lets you define **raw endpoints**. Raw endpoints operate at a lower abstraction level,
+giving you access to the underlying HTTP request.
+
+## Defining raw endpoints
+
+To define a raw endpoint, change the `//encore:api` annotation and function signature like so:
+
+```go
+package service
+
+import "net/http"
+
+// Webhook receives incoming webhooks from Some Service That Sends Webhooks.
+//encore:api public raw method=POST path=/webhook
+func Webhook(w http.ResponseWriter, req *http.Request) {
+ // ... operate on the raw HTTP request ...
+}
+```
+
+If you're an experienced Go developer, this is just a regular Go HTTP handler.
+
+See the [net/http documentation](https://pkg.go.dev/net/http)
+for more information on how Go HTTP handlers work.
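+
+For instance, webhook providers commonly sign their payloads so you can verify authenticity. A minimal sketch of HMAC-SHA256 verification (the `X-Signature` header name and hex encoding are assumptions; check your provider's documentation):
+
+```go
+package service
+
+import (
+    "crypto/hmac"
+    "crypto/sha256"
+    "encoding/hex"
+    "io"
+    "net/http"
+)
+
+var secrets struct {
+    WebhookSecret string
+}
+
+//encore:api public raw method=POST path=/webhook
+func Webhook(w http.ResponseWriter, req *http.Request) {
+    body, err := io.ReadAll(req.Body)
+    if err != nil {
+        http.Error(w, "bad request", http.StatusBadRequest)
+        return
+    }
+    // Compute the expected signature and compare in constant time.
+    mac := hmac.New(sha256.New, []byte(secrets.WebhookSecret))
+    mac.Write(body)
+    want := hex.EncodeToString(mac.Sum(nil))
+    if !hmac.Equal([]byte(want), []byte(req.Header.Get("X-Signature"))) {
+        http.Error(w, "invalid signature", http.StatusUnauthorized)
+        return
+    }
+    // ... process the verified payload ...
+    w.WriteHeader(http.StatusOK)
+}
+```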
+
+## Reading path parameters
+
+Sometimes webhooks have information in the path that you may be interested in retrieving or validating.
+
+To do so, define the path with a path parameter, and then use [`encore.CurrentRequest`](https://pkg.go.dev/encore.dev#CurrentRequest)
+to access the path parameters. For example:
+
+```go
+package service
+
+import (
+ "net/http"
+
+ "encore.dev"
+)
+
+//encore:api public raw method=POST path=/webhook/:id
+func Webhook(w http.ResponseWriter, req *http.Request) {
+ id := encore.CurrentRequest().PathParams.Get("id")
+ // ... Do something with id
+}
+```
diff --git a/docs/go/how-to/integrate-frontend.mdx b/docs/go/how-to/integrate-frontend.mdx
new file mode 100644
index 0000000000..4a9e2f4345
--- /dev/null
+++ b/docs/go/how-to/integrate-frontend.mdx
@@ -0,0 +1,271 @@
+---
+seotitle: Integrate your backend application with a frontend
+seodesc: Learn how to integrate your Go backend application with a frontend, using Encore's built-in frontend client generation feature.
+title: Integrate with a web frontend
+subtitle: Keep using your favorite frontend hosting provider
+lang: go
+---
+Encore is not opinionated about where you host your frontend; pick the platform that suits your situation best.
+
+If your frontend and backend use different domains, often the case when using PR preview environments for your frontend, you may need to [configure CORS](#handling-cors).
+
+Take a look at our [React starter template](https://encore.dev/templates/react) for an example of deploying a frontend to [Vercel](https://vercel.com/) or the [Meeting Notes tutorial](https://encore.dev/docs/go/tutorials/meeting-notes) deployed to [GitHub Pages](https://pages.github.com/).
+
+## Generating a request client
+Encore is able to generate frontend request clients (TypeScript or JavaScript). This lets you keep the request/response types in sync without manual work and assists you in calling the APIs. Generate a client by running:
+
+```bash
+$ encore gen client --output=./src/client.ts --env=<env-name>
+```
+
+It's often a good idea to add this as a script in your `package.json` so you can run it whenever a change is made to your Encore API:
+
+```json
+{
+...
+"scripts": {
+ ...
+ "generate-client:staging": "encore gen client --output=./src/client.ts --env=staging",
+ "generate-client:local": "encore gen client --output=./src/client.ts --env=local"
+ }
+}
+```
+
+After that you are ready to use the request client in your code. Here is an example from the [Meeting Notes tutorial](https://encore.dev/docs/tutorials/meeting-notes) for calling the `GetNote` endpoint on the `note` service in order to retrieve a specific meeting note (which has the properties `id`, `cover_url` & `text`):
+
+```ts
+import Client, { Environment, Local } from "src/client.ts";
+
+// Making request to locally running backend...
+const client = new Client(Local);
+// or to a specific deployed environment
+const client = new Client(Environment("staging"));
+
+// Calling APIs as typesafe functions 🌟
+const response = await client.note.GetNote("note-uuid");
+console.log(response.id);
+console.log(response.cover_url);
+console.log(response.text);
+```
+
+See more in the [client generation docs](/docs/develop/client-generation).
+
+### Asynchronous state management
+
+When building something a bit more complex, you will likely need to deal with caching, refetching, and data going stale.
+[TanStack Query](https://tanstack.com/query/latest) is a popular library that was built to solve exactly these problems and works well with the Encore request client.
+
+Here is a simple example of using an Encore request client together with TanStack Query:
+
+```ts
+import {
+ useQuery,
+ useMutation,
+ useQueryClient,
+ QueryClient,
+ QueryClientProvider,
+} from '@tanstack/react-query'
+import Client, { todo } from '../encore-client'
+
+// Create a Encore client
+const encoreClient = new Client(window.location.origin);
+
+// Create a react-query client
+const queryClient = new QueryClient()
+
+function App() {
+  return (
+    // Provide the client to your App
+    <QueryClientProvider client={queryClient}>
+      <Todos />
+    </QueryClientProvider>
+  )
+}
+
+function Todos() {
+ // Access the client
+ const queryClient = useQueryClient()
+
+ // Queries
+ const query = useQuery({
+ queryKey: ['todos'],
+ queryFn: () => encoreClient.todo.List()
+ })
+
+ // Mutations
+ const mutation = useMutation({
+ mutationFn: (params: todo.AddParams) => encoreClient.todo.Add(params),
+ onSuccess: () => {
+ // Invalidate and refetch
+ queryClient.invalidateQueries({ queryKey: ['todos'] })
+ },
+ })
+
+  return (
+    <div>
+      <ul>
+        {query.data?.map((todo) => (
+          <li key={todo.id}>{todo.title}</li>
+        ))}
+      </ul>
+
+      <button
+        onClick={() => {
+          mutation.mutate({
+            id: Date.now(),
+            title: 'Do Laundry',
+          })
+        }}
+      >
+        Add Todo
+      </button>
+    </div>
+  )
+}
+
+render(<App />, document.getElementById('root'))
+```
+
+This example assumes that we have a `todo` service with a `List` and `Add` endpoint. When adding the new todo,
+TanStack Query will automatically invalidate the `todos` query and refetch it.
+
+For a real-world example, take a look at the [Uptime Monitoring](https://github.com/encoredev/examples/tree/main/uptime) app which also makes use of
+TanStack Query's `refetchInterval` option for polling the backend.
+
+### Testing
+When unit testing a component that interacts with your Encore API you can mock methods on the request client to
+return a value suitable for the test. This makes your test URL-agnostic because you are not intercepting
+specific requests on the fetch layer. You also get type errors in your tests if the request client gets updated.
+
+Here is an example from the [Uptime Monitoring Starter](https://github.com/encoredev/examples/tree/main/uptime) where we are mocking a GET request method and spying on a POST request method:
+
+```ts
+import { render, screen, waitForElementToBeRemoved } from "@testing-library/react";
+import App from "./App";
+import { site } from "./client";
+import userEvent from "@testing-library/user-event";
+
+describe("App", () => {
+ beforeEach(() => {
+ // Return mocked data from the List (GET) endpoint
+ jest
+ .spyOn(site.ServiceClient.prototype, "List")
+ .mockReturnValue(Promise.resolve({
+ sites: [{
+ id: 1,
+ url: "test.dev"
+ }]
+ }));
+
+ // Spy on the Add (POST) endpoint
+ jest.spyOn(site.ServiceClient.prototype, "Add");
+ });
+
+ it("render sites", async () => {
+    render(<App />);
+ await waitForElementToBeRemoved(() => screen.queryByText("Loading..."));
+
+ // Verify that the List endpoint has been called
+ expect(site.ServiceClient.prototype.List).toBeCalledTimes(1);
+
+ // Verify that the sites are rendered with our mocked data
+ screen.getAllByText("test.dev");
+ });
+
+ it("add site", async () => {
+    render(<App />);
+ await waitForElementToBeRemoved(() => screen.queryByText("Loading..."));
+
+ // Interact with the page and add 'another.com'
+ await userEvent.click(screen.getByText("Add website"));
+ await userEvent.type(
+ screen.getByPlaceholderText("google.com"),
+ "another.com",
+ );
+ await userEvent.click(screen.getByText("Save"));
+
+ // Verify that the Add endpoint has been called with the correct parameters
+ expect(site.ServiceClient.prototype.Add).toHaveBeenCalledWith({
+ url: "another.com",
+ });
+ });
+})
+```
+
+In the example above we need to mock the `List` method on `site.ServiceClient.prototype` because the request client has not
+yet been initialized when we're creating the mock. If you have access to the instance of the request client in your test
+(which could be the case if you are passing the client around in your components) you can instead do `jest.spyOn(client.site, "List")`
+and `expect(client.site.List).toHaveBeenCalled()` which would give you the same result.
+
+More examples of tests can be found in the [Uptime Monitoring Starter repo](https://github.com/encoredev/examples/tree/main/uptime).
+
+## Monorepo or Multi repo
+Encore is not opinionated about where your frontend lives, pick the approach that fits your application best.
+
+If you use a monorepo then it is often a good idea to place your backend and frontend in separate folders. There are two approaches to moving your Encore backend to a subfolder:
+
+1. Place your microservices together with the `encore.app` file in a subfolder. When moving `encore.app` to a subfolder you will need to configure the "Root Directory" in app settings in the [Encore Cloud dashboard](https://app.encore.cloud).
+2. Place your microservices in a subfolder and keep the `encore.app` in the repo root directory. No configuration change is needed, but you will need to update the import paths if your services are calling each other.
+
+## REST vs. GraphQL
+Encore allows for building backends using both REST and GraphQL; pick the approach that suits your use case best.
+
+Take a look at the [GraphQL tutorial](/docs/go/tutorials/graphql) for an example of building a GraphQL backend with Encore.
+
+## Hosting a frontend on Encore for development
+Encore is primarily designed for backend development and does not (at the moment) support building or testing frontends in the deploy pipeline. For production use, we recommend that you deploy your frontend using Vercel, Netlify, or a similar service.
+
+For development purposes, you can create a `raw` endpoint that serves static frontend assets. It would look something like the example below (taken from the [Uptime Monitoring tutorial](https://encore.dev/docs/go/tutorials/uptime)), but keep in mind that you need to have the compiled frontend assets under version control (`dist` folder in the example below).
+
+```go
+package frontend
+
+import (
+ "embed"
+ "io/fs"
+ "net/http"
+)
+
+var (
+ //go:embed dist
+ dist embed.FS
+
+ assets, _ = fs.Sub(dist, "dist")
+ handler = http.StripPrefix("/frontend/", http.FileServer(http.FS(assets)))
+)
+
+//encore:api public raw path=/frontend/*path
+func Serve(w http.ResponseWriter, req *http.Request) {
+	handler.ServeHTTP(w, req)
+}
+```
+
+## Handling CORS
+If you are running into CORS issues when calling your Encore API from your frontend you may need to specify which origins are allowed to access your API (via browsers). Do this by specifying the `global_cors` key in the `encore.app` file, which has the following structure:
+
+```json
+global_cors: {
+ // allow_origins_without_credentials specifies the allowed origins for requests
+ // that don't include credentials. If nil it defaults to allowing all domains
+ // (equivalent to ["*"]).
+ "allow_origins_without_credentials": [
+    "<origin>"
+ ],
+
+ // allow_origins_with_credentials specifies the allowed origins for requests
+ // that include credentials. If a request is made from an Origin in this list
+  // Encore responds with `Access-Control-Allow-Origin: <origin>`.
+ //
+ // The URLs in this list may include wildcards (e.g. "https://*.example.com"
+ // or "https://*-myapp.example.com").
+ "allow_origins_with_credentials": [
+    "<origin>"
+ ]
+}
+```
+
+See more in the [CORS docs](/docs/go/develop/cors).
diff --git a/docs/go/how-to/logto-auth.md b/docs/go/how-to/logto-auth.md
new file mode 100644
index 0000000000..8d155e25ac
--- /dev/null
+++ b/docs/go/how-to/logto-auth.md
@@ -0,0 +1,324 @@
+---
+seotitle: How to use Logto for your backend application
+seodesc: Learn how to use Logto for user authentication in your backend application. In this guide we show you how to integrate your Go backend with Logto.
+title: Use Logto with your app
+lang: go
+---
+
+[Logto](https://logto.io) is a modern Auth0 alternative that helps you build the sign-in experience and user identity within minutes. It's particularly well-suited for protecting API services built with Encore.
+
+This guide will show you how to integrate Logto with your Encore application to add authentication and authorization capabilities. You can find the complete [Logto example](https://github.com/encoredev/examples/tree/main/logto-react-sdk) in our examples repo.
+
+## Logto settings
+
+Before we begin integrating with Encore, you'll need to set up a few things in Logto:
+
+1. Create an account at [Logto Cloud](https://cloud.logto.io) if you don't have one yet.
+
+2. Create an API Resource in the Logto Console; this represents your Encore API service:
+ - Go to "API Resources" in Logto Console and create a new API
+ - Set a name and API identifier (e.g., `https://api.encoreapp.com`)
+ - Note down the API identifier on the API resource details page as we'll need it later
+
+3. Create an application for your frontend
+ - Go to "Applications" in Logto Console
+ - Create a new application according to your frontend framework (We use React as an example, but you can create any Single-Page Application (SPA) or native app)
+ - (Optional, we'll cover this later) Integrate Logto with your frontend application according to the guide in the Logto Console.
+ - Note down the application ID and issuer URL on the Application details page as we'll need them later
+
+## Set up the auth handler
+
+Now let's implement the authentication in your Encore application. We'll use Encore's built-in [auth handler](/docs/go/develop/auth) to validate Logto's JWT tokens.
+
+Add these two modules in your Encore application:
+
+```shell
+$ go get github.com/golang-jwt/jwt/v5
+$ go get github.com/MicahParks/keyfunc/v3
+```
+
+Create `auth/auth.go` and add the following code:
+
+```go
+package auth
+
+import (
+ "context"
+ "time"
+
+ "encore.dev/beta/auth"
+ "encore.dev/beta/errs"
+ "encore.dev/config"
+ "github.com/MicahParks/keyfunc/v3"
+ "github.com/golang-jwt/jwt/v5"
+)
+
+// Configuration variables for authentication
+type LogtoAuthConfig struct {
+ // The issuer URL
+ Issuer config.String
+ // URL to fetch JSON Web Key Set (JWKS)
+ JwksUri config.String
+ // Expected audience for the JWT
+ ApiResourceIndicator config.String
+ // Expected client ID in the token claims
+ ClientId config.String
+}
+
+var authConfig *LogtoAuthConfig = config.Load[*LogtoAuthConfig]()
+
+// RequiredClaims defines the expected structure of JWT claims
+// Extends the standard JWT claims with a custom ClientID field
+type RequiredClaims struct {
+ ClientID string `json:"client_id"`
+ jwt.RegisteredClaims
+}
+
+// AuthHandler validates JWT tokens and extracts the user ID
+// Implements Encore's authentication handler interface
+//
+//encore:authhandler
+func AuthHandler(ctx context.Context, token string) (auth.UID, error) {
+ // Fetch and parse the JWKS (JSON Web Key Set) from the identity provider
+ jwks, err := keyfunc.NewDefaultCtx(ctx, []string{authConfig.JwksUri()})
+ if err != nil {
+ return "", &errs.Error{
+ Code: errs.Internal,
+ Message: "failed to fetch JWKS",
+ }
+ }
+
+ // Parse and validate the JWT token with required claims and validation options
+ parsedToken, err := jwt.ParseWithClaims(
+ token,
+ &RequiredClaims{},
+ jwks.Keyfunc,
+ // Expect the token to be intended for this API resource
+ jwt.WithAudience(authConfig.ApiResourceIndicator()),
+ // Expect the token to be issued by this issuer
+ jwt.WithIssuer(authConfig.Issuer()),
+ // Allow some leeway for clock skew
+ jwt.WithLeeway(time.Minute*10),
+ )
+
+ // Check if there were any errors during token parsing
+ if err != nil {
+ return "", &errs.Error{
+ Code: errs.Unauthenticated,
+ Message: "invalid token",
+ }
+ }
+
+ // Verify that the client ID in the token matches the expected client ID
+ if parsedToken.Claims.(*RequiredClaims).ClientID != authConfig.ClientId() {
+ return "", &errs.Error{
+ Code: errs.Unauthenticated,
+ Message: "invalid token",
+ }
+ }
+
+ // Extract the user ID (subject) from the token claims
+ userId, err := parsedToken.Claims.GetSubject()
+ if err != nil {
+ return "", &errs.Error{
+ Code: errs.Unauthenticated,
+ Message: "invalid token",
+ }
+ }
+
+ // Return the user ID as an Encore auth.UID
+ return auth.UID(userId), nil
+}
+```
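+
+Note that this sketch fetches the JWKS on every request for simplicity. In practice you may want to create the keyfunc once and reuse it across requests, for example with a `sync.Once` (a hedged variation; add `"sync"` to the imports, and note that `keyfunc.NewDefault` refreshes keys in the background):
+
+```go
+var (
+    jwksOnce sync.Once
+    jwks     keyfunc.Keyfunc
+    jwksErr  error
+)
+
+// getJwks lazily creates the JWKS keyfunc and reuses it afterwards.
+func getJwks() (keyfunc.Keyfunc, error) {
+    jwksOnce.Do(func() {
+        jwks, jwksErr = keyfunc.NewDefault([]string{authConfig.JwksUri()})
+    })
+    return jwks, jwksErr
+}
+```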
+
+Create a [configuration file](https://encore.dev/docs/go/develop/config) in the auth service and name it `auth-config.cue`. Add the following:
+
+```cue
+Issuer: "<your-logto-issuer>"
+JwksUri: "<your-logto-issuer>/jwks"
+ApiResourceIndicator: "<your-api-resource-indicator>"
+ClientId: "<your-client-id>"
+```
+
+Replace the values with the ones you noted down from your Logto settings:
+- `<your-logto-issuer>`: The issuer URL from your Logto application endpoints (e.g., `https://your-tenant.logto.app`)
+- `<your-api-resource-indicator>`: The API identifier you set when creating the API resource (e.g., `https://api.encoreapp.com`)
+- `<your-client-id>`: The application ID from your Logto application details page
+
+For example, your `auth-config.cue` might look like:
+
+```cue
+Issuer: "https://your-tenant.logto.app"
+JwksUri: "https://your-tenant.logto.app/jwks"
+ApiResourceIndicator: "https://api.encoreapp.com"
+ClientId: "2gadf3mp0zotlq8j1k5x"
+```
+
+You can then use this auth handler to protect your API endpoints:
+
+```go
+package api
+
+import (
+ "context"
+ "fmt"
+
+ "encore.dev/beta/auth"
+ "encore.dev/beta/errs"
+)
+
+//encore:api auth path=/api/hello
+func Api(ctx context.Context) (*Response, error) {
+ userId, hasUserId := auth.UserID()
+
+ if !hasUserId {
+ return nil, &errs.Error{
+ Code: errs.Internal,
+ Message: "User ID not found",
+ }
+ }
+
+ msg := fmt.Sprintf("Hello, %s!", userId)
+
+ return &Response{Message: msg}, nil
+}
+
+type Response struct {
+ Message string
+}
+```
+
+## Frontend
+
+We've completed our work in the Encore API service. Now we need to integrate Logto with our frontend application.
+
+You can choose the framework you are using in the [Logto Quick start](https://docs.logto.io/quick-starts) page to integrate Logto with your frontend application. In this guide we use React as an example.
+
+Check out the [Add authentication to your React application](https://docs.logto.io/quick-starts/react) guide to learn how to integrate Logto with your React application. In this example, you only need to complete up to the Integration section. After that, we'll demonstrate how the frontend application can obtain an access token from Logto to access the Encore API.
+
+First, update your `LogtoConfig` by adding the API resource used in your Encore app to the `resources` field. This tells Logto that we will be requesting access tokens for this API resource (Encore API).
+
+```ts
+import { LogtoConfig } from '@logto/react';
+
+const config: LogtoConfig = {
+ // ...other configs
+  resources: ['<your-api-resource-indicator>'],
+};
+```
+
+After updating the `LogtoConfig`, if a user is already signed in, they need to sign out and sign in again for the new `LogtoConfig` settings to take effect.
+
+Once the user is logged in, you can use the `getAccessToken` method provided by the Logto React SDK to obtain an access token for accessing specific API resources. For example, to access the Encore API, we use `https://api.encoreapp.com` as the API resource identifier.
+
+Then, add this access token to the request headers as the `Authorization` field in subsequent requests.
+
+```ts
+const { getAccessToken } = useLogto();
+const accessToken = await getAccessToken('<your-api-resource-indicator>');
+
+// Add this access token to the request headers as the 'Authorization' field in subsequent requests
+fetch('/api/hello', {
+ headers: {
+ Authorization: `Bearer ${accessToken}`,
+ },
+});
+```
+
+Here's the key frontend code:
+
+```tsx
+-- config/logto.tsx --
+import { LogtoConfig } from '@logto/react'
+
+export const config: LogtoConfig = {
+  endpoint: '<your-logto-endpoint>',
+  appId: '<your-app-id>',
+  resources: ['<your-api-resource-indicator>'],
+}
+
+export const appConfig = {
+  apiResourceIndicator: '<your-api-resource-indicator>',
+  signInRedirectUri: '<your-sign-in-redirect-uri>',
+  signOutRedirectUri: '<your-sign-out-redirect-uri>',
+}
+
+export const encoreApiEndpoint = '<your-encore-api-endpoint>'
+-- pages/ProtectedResource.tsx --
+import { useLogto } from "@logto/react";
+import { useState } from "react";
+import { Navigate } from "react-router-dom";
+import { appConfig, encoreApiEndpoint } from "../config/logto";
+
+export function ProtectedResource() {
+ const { isAuthenticated, getAccessToken } = useLogto();
+ const [message, setMessage] = useState("");
+ const [isLoading, setIsLoading] = useState(false);
+ const [error, setError] = useState("");
+
+ const fetchProtectedResource = async () => {
+ setIsLoading(true);
+ setError("");
+ try {
+ const accessToken = await getAccessToken(appConfig.apiResourceIndicator);
+ const response = await fetch(`${encoreApiEndpoint}/api/hello`, {
+ headers: {
+ Authorization: `Bearer ${accessToken}`,
+ },
+ });
+ if (!response.ok) {
+ throw new Error(`HTTP error! status: ${response.status}`);
+ }
+ const data = await response.json();
+ setMessage(JSON.stringify(data));
+ } catch (error) {
+ console.error("Error fetching protected resource:", error);
+ setError("Failed to fetch protected resource. Please try again.");
+ } finally {
+ setIsLoading(false);
+ }
+ };
+
+ if (!isAuthenticated) {
+    return <Navigate to="/" />;
+ }
+
+  return (
+    <div>
+      <h1>Protected Resource</h1>
+      {message && !error && (
+        <div>
+          <h2>Response from Protected API</h2>
+          <pre>{message}</pre>
+        </div>
+      )}
+      <button onClick={fetchProtectedResource} disabled={isLoading}>
+        {isLoading ? "Loading..." : "Fetch protected resource"}
+      </button>
+      {error && <p>{error}</p>}
+    </div>
+  );
+}
+```
+
+That's it! You've successfully integrated Logto with your Encore application.
+
+You can find the complete example code [here](https://github.com/encoredev/examples/tree/main/logto-react-sdk).
+
+## Explore more
+
+If you want to use more Logto features, you can refer to the following links for more information:
+
+- Combine Logto's [Custom token claims](https://docs.logto.io/developers/custom-token-claims) to set [custom user data](/docs/go/develop/auth#with-custom-user-data) in the auth handler
+- Use [Logto RBAC features](https://docs.logto.io/authorization/role-based-access-control) to add authorization support to your application. The React integration tutorial also demonstrates how to add `scope` information to your Access token (note that you need to sign in again after updating Logto config)
diff --git a/docs/go/how-to/pubsub-outbox.md b/docs/go/how-to/pubsub-outbox.md
new file mode 100644
index 0000000000..e2a72f38c9
--- /dev/null
+++ b/docs/go/how-to/pubsub-outbox.md
@@ -0,0 +1,124 @@
+---
+seotitle: Using a transactional Pub/Sub outbox
+seodesc: Learn how you can use a transactional outbox with Pub/Sub to guarantee consistency between your database and Pub/Sub subscribers
+title: Transactional Pub/Sub outbox
+subtitle: Guarantee consistency between your database and Pub/Sub subscribers
+lang: go
+---
+
+One of the hardest parts of building an event-driven application is ensuring consistency between services.
+A common pattern is for each service to have its own database and use Pub/Sub to notify other systems of business events.
+Inevitably this leads to inconsistencies since the Pub/Sub publishing is not transactional with the database writes.
+
+While there are several approaches to solving this, it's important the solution doesn't add too much complexity
+to what is often an already complex architecture. Perhaps the best solution in this regard is the [transactional outbox pattern](https://softwaremill.com/microservices-101/).
+
+Encore provides support for the transactional outbox pattern in the [x.encore.dev/infra/pubsub/outbox](https://pkg.go.dev/x.encore.dev/infra/pubsub/outbox) package.
+
+The transactional outbox works by binding a Pub/Sub topic to a database transaction, translating all calls to `topic.Publish`
+into inserting a database row in an `outbox` table. If/when the transaction later commits, the messages are picked up by
+a [Relay](https://pkg.go.dev/x.encore.dev/infra/pubsub/outbox#Relay) that polls the `outbox` table and publishes the
+messages to the actual Pub/Sub topic.
+
+## Publishing messages to the outbox
+
+To publish messages to the outbox, a topic must first be bound to the outbox. This is done using
+[Pub/Sub topic references](/docs/go/primitives/pubsub#using-topic-references) which allows you to retain complete
+type safety and the same interface as regular Pub/Sub topics, allowing existing code to continue to work without changes.
+
+In regular (non-outbox) usage the message id returned by `topic.Publish` is the same as the message id the subscriber
+receives when processing the message. With the outbox, this message id is not available until the transaction commits,
+so `topic.Publish` returns an id referencing the outbox row instead.
+
+The topic binding supports pluggable storage backends, enabling use of the outbox pattern with any
+transactional storage backend. Implementations are provided out of the box for use with Encore's
+`encore.dev/storage/sqldb` package, as well as the standard library `database/sql` and `github.com/jackc/pgx/v5` drivers,
+but it's easy to write your own for other use cases.
+See the [Go package reference](https://pkg.go.dev/x.encore.dev/infra/pubsub/outbox#PersistFunc) for more information.
+
+For example, to use a transactional outbox to notify subscribers when a user is created:
+
+```go
+-- outbox.go --
+import "x.encore.dev/infra/pubsub/outbox"
+
+// Create a SignupsTopic somehow.
+var SignupsTopic = pubsub.NewTopic[*SignupEvent](/* ... */)
+
+// Create a topic ref with publisher permissions.
+ref := pubsub.TopicRef[pubsub.Publisher[*SignupEvent]](SignupsTopic)
+
+// Bind the ref to the transactional outbox.
+var tx *sqldb.Tx // somehow get a transaction
+ref = outbox.Bind(ref, outbox.TxPersister(tx))
+
+// Calls to ref.Publish() will now insert a row in the outbox table.
+
+-- db_migration.sql --
+-- The database used must contain the below database table:
+-- See https://pkg.go.dev/x.encore.dev/infra/pubsub/outbox#SQLDBStore
+CREATE TABLE outbox (
+ id BIGSERIAL PRIMARY KEY,
+ topic TEXT NOT NULL,
+ data JSONB NOT NULL,
+ inserted_at TIMESTAMPTZ NOT NULL
+);
+CREATE INDEX outbox_topic_idx ON outbox (topic, id);
+```
+
+Once the transaction commits, any messages published via `ref` above will be stored in the `outbox` table.
+
+## Consuming messages from the outbox
+
+Once committed, the messages are ready to be picked up and published to the actual Pub/Sub topic.
+
+That is done via the [Relay](https://pkg.go.dev/x.encore.dev/infra/pubsub/outbox#Relay).
+The relay continuously polls the `outbox` table and publishes any new messages to the actual Pub/Sub topic.
+
+The relay supports pluggable storage backends, enabling use of the outbox pattern with any
+transactional storage backend. An implementation is provided out-of-the-box that uses Encore's built-in
+[SQL database support](https://pkg.go.dev/x.encore.dev/infra/pubsub/outbox#SQLDBStore),
+but it's easy to write your own for other databases.
+
+The topics to poll must be registered with the relay, typically during service initialization. For example:
+
+```go
+-- user/service.go --
+package user
+
+import (
+ "context"
+
+ "encore.dev/pubsub"
+ "encore.dev/storage/sqldb"
+ "x.encore.dev/infra/pubsub/outbox"
+)
+
+type Service struct {
+ signupsRef pubsub.Publisher[*SignupEvent]
+}
+
+// db is the database the outbox table is stored in
+var db = sqldb.NewDatabase(...)
+
+// Create the SignupsTopic somehow.
+var SignupsTopic = pubsub.NewTopic[*SignupEvent](/* ... */)
+
+func initService() (*Service, error) {
+ // Initialize the relay to poll from our database.
+ relay := outbox.NewRelay(outbox.SQLDBStore(db))
+
+ // Register the SignupsTopic to be polled.
+ signupsRef := pubsub.TopicRef[pubsub.Publisher[*SignupEvent]](SignupsTopic)
+ outbox.RegisterTopic(relay, signupsRef)
+
+ // Start polling.
+ go relay.PollForMessages(context.Background(), -1)
+
+ return &Service{signupsRef: signupsRef}, nil
+}
+```
diff --git a/docs/go/how-to/temporal.md b/docs/go/how-to/temporal.md
new file mode 100644
index 0000000000..bc35667889
--- /dev/null
+++ b/docs/go/how-to/temporal.md
@@ -0,0 +1,221 @@
+---
+seotitle: How to use Temporal and Encore
+seodesc: Learn how to use Temporal for reliable workflow execution with Encore.
+title: Use Temporal with Encore
+lang: go
+---
+
+[Temporal](https://temporal.io) is a workflow orchestration system for building highly reliable systems.
+Encore works great with Temporal, and this guide shows you how to integrate Temporal into your Encore application.
+
+## Set up Temporal clusters
+You'll need at least two Temporal clusters: one for local development and one for cloud environments.
+
+We recommend using [Temporalite](https://github.com/temporalio/temporalite) for local development,
+and [Temporal Cloud](https://temporal.io/cloud) for cloud environments.
+
+## Set up Temporal Workflow
+
+Next it's time to create a Temporal Workflow. We'll base this on the Temporal [Hello World](https://learn.temporal.io/getting_started/go/hello_world_in_go/)
+example.
+
+Create a new Encore service named `greeting`:
+
+```go
+-- greeting/greeting.go --
+package greeting
+
+import (
+ "context"
+ "fmt"
+
+ "go.temporal.io/sdk/client"
+ "go.temporal.io/sdk/worker"
+ "encore.dev"
+)
+
+// Use an environment-specific task queue so we can use the same
+// Temporal Cluster for all cloud environments.
+var (
+ envName = encore.Meta().Environment.Name
+ greetingTaskQueue = envName + "-greeting"
+)
+
+//encore:service
+type Service struct {
+ client client.Client
+ worker worker.Worker
+}
+
+func initService() (*Service, error) {
+ c, err := client.Dial(client.Options{})
+ if err != nil {
+ return nil, fmt.Errorf("create temporal client: %v", err)
+ }
+
+ w := worker.New(c, greetingTaskQueue, worker.Options{})
+
+ err = w.Start()
+ if err != nil {
+ c.Close()
+ return nil, fmt.Errorf("start temporal worker: %v", err)
+ }
+ return &Service{client: c, worker: w}, nil
+}
+
+func (s *Service) Shutdown(force context.Context) {
+ // Stop the worker before closing the client it relies on.
+ s.worker.Stop()
+ s.client.Close()
+}
+```
+
+Next it's time to define some workflows. These need to be in the same service,
+so add a new `workflow` package inside the `greeting` service, containing
+a workflow and activity definition in separate files:
+
+```go
+-- greeting/workflow/workflow.go --
+package workflow
+
+import (
+ "time"
+
+ "go.temporal.io/sdk/workflow"
+)
+
+func Greeting(ctx workflow.Context, name string) (string, error) {
+ options := workflow.ActivityOptions{
+ StartToCloseTimeout: time.Second * 5,
+ }
+
+ ctx = workflow.WithActivityOptions(ctx, options)
+
+ var result string
+ err := workflow.ExecuteActivity(ctx, ComposeGreeting, name).Get(ctx, &result)
+
+ return result, err
+}
+-- greeting/workflow/activity.go --
+package workflow
+
+import (
+ "context"
+ "fmt"
+)
+
+func ComposeGreeting(ctx context.Context, name string) (string, error) {
+ greeting := fmt.Sprintf("Hello %s!", name)
+ return greeting, nil
+}
+```
+
+Then, go back to the `greeting` service and register the workflow and activity:
+
+```go
+-- greeting/greeting.go --
+// Import the package at the top:
+import "encore.app/greeting/workflow"
+
+// Add these lines to `initService`, below the call to `worker.New`:
+w.RegisterWorkflow(workflow.Greeting)
+w.RegisterActivity(workflow.ComposeGreeting)
+```
+
+Now let's create an Encore API that triggers this workflow.
+
+Add a new file `greeting/greet.go`:
+
+```go
+-- greeting/greet.go --
+package greeting
+
+import (
+ "context"
+
+ "encore.app/greeting/workflow"
+ "encore.dev/rlog"
+ "go.temporal.io/sdk/client"
+)
+
+type GreetResponse struct {
+ Greeting string
+}
+
+//encore:api public path=/greet/:name
+func (s *Service) Greet(ctx context.Context, name string) (*GreetResponse, error) {
+ options := client.StartWorkflowOptions{
+ ID: "greeting-workflow",
+ TaskQueue: greetingTaskQueue,
+ }
+ we, err := s.client.ExecuteWorkflow(ctx, options, workflow.Greeting, name)
+ if err != nil {
+ return nil, err
+ }
+ rlog.Info("started workflow", "id", we.GetID(), "run_id", we.GetRunID())
+
+ // Get the results
+ var greeting string
+ err = we.Get(ctx, &greeting)
+ if err != nil {
+ return nil, err
+ }
+ return &GreetResponse{Greeting: greeting}, nil
+}
+```
+
+## Run it locally
+
+Now we're ready to test it out. Start up `temporalite` and your Encore application (in separate terminals):
+
+```bash
+$ temporalite start --namespace default
+$ encore run
+```
+
+Now try calling it, either from the [Local Development Dashboard](/docs/go/observability/dev-dash) or using cURL:
+
+```bash
+$ curl 'http://localhost:4000/greet/Temporal'
+{"Greeting": "Hello Temporal!"}
+```
+
+If you see this, it works!
+
+## Run in the cloud
+
+To run it in the cloud, you will need to use Temporal Cloud or your own, self-hosted Temporal cluster.
+The easiest way to automatically pick up the correct cluster address is to use Encore's [config functionality](/docs/go/develop/config).
+
+Add two new files:
+```
+-- greeting/config.go --
+package greeting
+
+import "encore.dev/config"
+
+type Config struct {
+ TemporalServer string
+}
+
+var cfg = config.Load[*Config]()
+-- greeting/config.cue --
+package greeting
+
+TemporalServer: [
+ // These act as individual case statements
+ if #Meta.Environment.Cloud == "local" { "localhost:7233" },
+
+ // TODO: configure this to match your own cluster address
+ "my.cluster.address:7233",
+][0] // Return the first value which matches the condition
+```
+
+Finally, go back to `greeting/greeting.go` and update the `client.Dial` call to look like:
+
+```go
+-- greeting/greeting.go --
+client.Dial(client.Options{HostPort: cfg.TemporalServer})
+```
+
+With that, Encore will automatically connect to the correct Temporal cluster, using a local cluster
+for local development and your cloud-hosted cluster for everything else.
diff --git a/docs/go/install.md b/docs/go/install.md
new file mode 100644
index 0000000000..b0ae9d652c
--- /dev/null
+++ b/docs/go/install.md
@@ -0,0 +1,47 @@
+---
+seotitle: Install Encore to start building
+seodesc: See how you can install Encore on all platforms, and get started building your next backend application in minutes.
+title: Installation
+subtitle: Install the Encore CLI to get started with local development
+lang: go
+---
+
+If you are new to Encore, we recommend following the [quick start guide](/docs/go/quick-start).
+
+## Install the Encore CLI
+To develop locally with Encore, you first need to install the Encore CLI.
+This is what provisions your local development environment, and runs your Local Development Dashboard complete with logs, tracing, and API documentation.
+
+
+
+
+
+
+To locally run Encore apps with databases, you also need to have [Docker](https://www.docker.com) installed and running.
+
+
+
+### Optional: Add Encore LLM instructions
+
+To help LLM-powered tools like Cursor and GitHub Copilot understand how to use Encore, you can add pre-made instructions to your app.
+
+Download the [go_llm_instructions.txt](https://github.com/encoredev/encore/blob/main/go_llm_instructions.txt) file.
+
+**How to use:**
+- Cursor: Rename the file to `.cursorrules`.
+- GitHub Copilot: Paste the content into `.github/copilot-instructions.md`.
+- For other tools, place the file in your app root.
+
+### Build from source
+If you prefer to build from source, [follow these instructions](https://github.com/encoredev/encore/blob/main/CONTRIBUTING.md).
+
+
+## Update to the latest version
+Check which version of Encore you have installed by running `encore version` in your terminal.
+It should print something like:
+```shell
+encore version v1.28.0
+```
+
+If you think you're on an older version of Encore, you can easily update to the latest version by running
+`encore version update` from your terminal.
diff --git a/docs/go/migration/migrate-away.md b/docs/go/migration/migrate-away.md
new file mode 100644
index 0000000000..654a70f3ab
--- /dev/null
+++ b/docs/go/migration/migrate-away.md
@@ -0,0 +1,57 @@
+---
+title: Migrate away from Encore
+subtitle: If you love someone, set them free.
+lang: go
+---
+
+_We realize most people read this page before even trying Encore, so we start with a perspective on how you might reason about adopting Encore. Read on to see what tools are available for migrating away._
+
+Picking technologies for your project is an important decision. It's tricky because you don't know what the requirements are going to look like in the future. This uncertainty makes many teams opt for maximum flexibility, often without acknowledging this has a significant negative effect on productivity.
+
+When designing Encore, we've leaned on standardization to provide a well-integrated and highly productive development workflow. The design is based on the core team's experience building scalable distributed systems at Spotify and Google, complemented with loads of invaluable input from the developer community.
+
+In practice, Encore is opinionated only in certain areas which are critical for enabling the static analysis used to create Encore's application model. This is fundamental to how Encore can provide its powerful features, like automatically instrumenting distributed tracing, and provisioning and managing cloud infrastructure.
+
+## Accommodating your unique requirements
+
+Many software projects end up having a few novel requirements, which are highly specific to the problem domain. To accommodate this, Encore is designed to let you go outside of the standardized Backend Framework when you need to, for example:
+- You can drop down in abstraction level in the API framework using [raw endpoints](/docs/go/primitives/defining-apis#raw-endpoints)
+- You can use tools like the [Terraform provider](/docs/platform/integrations/terraform) to integrate infrastructure that is not managed by Encore
+
+## Mitigating risk through Open Source and efficiency
+
+We believe that adopting Encore is a low-risk decision for several reasons:
+
+- There's no upfront investment needed to get the benefits
+- Encore apps are normal programs where less than 1% of the code is Encore-specific
+- All infrastructure and data is in your own cloud
+- It's simple to integrate with cloud services and systems not natively supported by Encore
+- Everything you need to develop your application is Open Source, including the [parser](https://github.com/encoredev/encore/tree/main/v2/parser), [compiler](https://github.com/encoredev/encore/tree/main/v2/compiler), and [runtime](https://github.com/encoredev/encore/tree/main/runtimes)
+- Everything you need to self-host your application is [Open Source and documented](/docs/go/self-host/docker-build)
+
+## What to expect when migrating away
+
+If you want to migrate away, we want to ensure this is as smooth as possible! Here are some of the ways Encore is designed to keep your app portable, with minimized lock-in, and the tools provided to aid in migrating away.
+
+### Code changes
+
+Building with Encore doesn't require writing your entire application in an Encore-specific way. Encore applications are normal programs where only 1% of the code is specific to Encore's Open Source Backend Framework.
+
+This means that the changes required to stop using the Backend Framework are almost exactly the same work you would have needed to do if you hadn't used Encore in the first place, e.g. writing infrastructure boilerplate. There is no added migration cost.
+
+### Deployment
+
+If you are self-hosting your application, then you're already done.
+
+If you are using Encore Cloud Platform to manage deployments and want to migrate to your own solution, you can use the `encore build docker` command to produce a Docker image, containing the compiled application, using exactly the same code path as Encore's CI system to ensure compatibility.
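+
+For example (the image name and tag are illustrative):
+
+```bash
+$ encore build docker myapp:v1.0.0
+```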
+
+Learn more in the [self-hosting docs](/docs/go/self-host/docker-build).
+
+### Tell us what you need
+
+We're engineers ourselves and we understand the importance of not being constrained by a single technology.
+
+We're working every single day on making it even easier to start, and stop, using Encore.
+If you have specific concerns, questions, or requirements, we'd love to hear from you!
+
+Please reach out on [Discord](https://encore.dev/discord) or [send an email](mailto:hello@encore.dev) with your thoughts.
diff --git a/docs/go/observability/dev-dash.md b/docs/go/observability/dev-dash.md
new file mode 100644
index 0000000000..3081ce58d2
--- /dev/null
+++ b/docs/go/observability/dev-dash.md
@@ -0,0 +1,29 @@
+---
+seotitle: Development dashboard for local development
+seodesc: Encore's Local Development Dashboard comes with built-in distributed tracing, API docs, and real-time architecture diagrams.
+title: Local Development Dashboard
+subtitle: Built-in tools for simplicity and productivity
+lang: go
+---
+
+Encore provides an efficient local development workflow that automatically provisions [local infrastructure](/docs/platform/infrastructure/infra#local-development) and supports automated testing with dedicated test infrastructure.
+
+The local environment also comes with a built-in Local Development Dashboard to simplify development and improve productivity. It has several features to help you design, develop, and debug your application:
+
+* [Service Catalog](/docs/go/observability/service-catalog) with Automatic API Documentation
+* API Explorer to call your APIs
+* [Distributed Tracing](/docs/go/observability/tracing) for simple and powerful debugging
+* [Encore Flow](/docs/go/observability/encore-flow) for visualizing your microservices architecture
+
+All these features update in real-time as you make changes to your application.
+
+To access the dashboard, start your Encore application with `encore run` and it will open automatically. You can also follow the link in your terminal:
+
+```bash
+$ encore run
+API Base URL: http://localhost:4000
+Dev Dashboard URL: http://localhost:9400/hello-world-cgu2
+```
+
+
+
diff --git a/docs/go/observability/encore-flow.md b/docs/go/observability/encore-flow.md
new file mode 100644
index 0000000000..844d79be0e
--- /dev/null
+++ b/docs/go/observability/encore-flow.md
@@ -0,0 +1,50 @@
+---
+seotitle: Encore Flow automatic microservices architecture diagrams
+seodesc: Visualize your microservices architecture automatically using Encore Flow. Get real-time interactive architecture diagrams for your entire application.
+title: Flow Architecture Diagram
+subtitle: Visualize your cloud microservices architecture
+lang: go
+---
+
+Flow is a visual tool that gives you an always up-to-date view of your entire system, helping you reason about your
+microservices architecture and identify which services depend on each other and how they work together.
+
+## Bird's-eye view
+
+Having access to a zoomed out representation of your system can be invaluable in pretty much all parts of the
+development cycle. Flow helps you:
+
+* Track down bottlenecks before they grow into big problems.
+* Get new team members onboarded much faster.
+* Pinpoint hot paths in your system, services that might need extra attention.
+
+Services and PubSub topics are represented as boxes, and arrows indicate a dependency. In the example below,
+the `login` service has dependencies on the `user` and `authentication` services. Dashed arrows show publications or
+subscriptions to a topic. Here, `payment` publishes to the `payment-made` topic and `email` subscribes to it:
+
+
+
+## Highlight dependencies
+
+Hover over a service, or PubSub topic, to instantly reveal the nature and scale of its dependencies.
+
+Here the `login` service and its dependencies are highlighted. We can see that `login` makes queries to the
+database and requests to two of the endpoints from the `user` service as well as requests to one endpoint from
+the `authentication` service:
+
+
+
+## Real-time updates
+
+Flow is accessible in the [Local Development Dashboard](/docs/go/observability/dev-dash) and, when using Encore Cloud, in the [Encore Cloud dashboard](https://app.encore.cloud) for cloud environments.
+
+When developing locally, Flow will auto update in real-time to reflect your architecture as you
+make code changes. This helps you be mindful of important dependencies and makes it clear if you introduce new ones.
+
+For cloud environments, Flow auto-updates with each deploy.
+
+In the example below, a new subscription to the `payment-made` topic is introduced and then removed in the `user` service:
+
+
+
+
diff --git a/docs/go/observability/logging.md b/docs/go/observability/logging.md
new file mode 100644
index 0000000000..5e32b8ec17
--- /dev/null
+++ b/docs/go/observability/logging.md
@@ -0,0 +1,45 @@
+---
+seotitle: Use structured logging to understand your application
+seodesc: Learn how to use structured logging, a combination of free-form log messages and type-safe key-value pairs, to understand your backend application's behavior.
+title: Logging
+subtitle: Structured logging helps you understand your application
+lang: go
+infobox: {
+ title: "Structured Logging",
+ import: "encore.dev/rlog",
+}
+---
+
+Encore offers built-in support for Structured Logging, which combines a free-form log message with structured and type-safe key-value pairs. This enables straightforward analysis of what your application is doing, in a way that is easy for a computer to parse, analyze, and index. This makes it simple to quickly filter and search through logs.
+
+Encore’s logging is integrated with the built-in [Distributed Tracing](/docs/go/observability/tracing) functionality, and all logs are automatically included in the active trace. This dramatically simplifies debugging of your application.
+
+## Usage
+First, import `encore.dev/rlog` in your package. Then simply call one of the package methods `Info`, `Error`, or `Debug`. For example:
+
+```go
+rlog.Info("log message",
+ "user_id", 12345,
+ "is_subscriber", true)
+rlog.Error("something went terribly wrong!",
+ "err", err)
+```
+
+The first parameter is the log message. After that follows zero or more key-value pairs for structured logging for context.
+
+If you’re logging many log messages with the same key-value pairs each time, it can be a bit cumbersome. To help with that, use `rlog.With()` to group them into a context object, which then copies the key-value pairs into each log event:
+
+```go
+ctx := rlog.With("is_subscriber", true)
+ctx.Info("user logged in", "login_method", "oauth") // includes is_subscriber=true
+```
+
+For more information, see the [API Documentation](https://pkg.go.dev/encore.dev/rlog).
+
+## Live-streaming logs
+
+Encore also makes it simple to live-stream logs directly to your terminal, from any environment, by running:
+
+```
+$ encore logs --env=prod
+```
\ No newline at end of file
diff --git a/docs/go/observability/service-catalog.md b/docs/go/observability/service-catalog.md
new file mode 100644
index 0000000000..83347eafd3
--- /dev/null
+++ b/docs/go/observability/service-catalog.md
@@ -0,0 +1,17 @@
+---
+seotitle: Service Catalog & Generated API Docs
+seodesc: See how Encore automatically generates API documentation that always stays up to date and in sync.
+title: Service Catalog
+subtitle: Automatically get a Service Catalog and complete API docs
+lang: go
+---
+
+All developers agree API documentation is great to have, but the effort of maintaining it inevitably leads to docs becoming stale and out of date.
+
+To solve this, Encore uses the [Encore Application Model](/docs/go/concepts/application-model) to automatically generate a Service Catalog along with complete documentation for all APIs. This ensures docs are always up-to-date as your APIs evolve.
+
+The API docs are available both in your [Local Development Dashboard](/docs/go/observability/dev-dash) and for your whole team in the [Encore Cloud dashboard](https://app.encore.cloud).
+
+
+
+
diff --git a/docs/go/observability/tracing.md b/docs/go/observability/tracing.md
new file mode 100644
index 0000000000..d38b77a53f
--- /dev/null
+++ b/docs/go/observability/tracing.md
@@ -0,0 +1,39 @@
+---
+seotitle: Distributed Tracing helps you understand your app
+seodesc: See how to use distributed tracing in your backend application, across multiple services, using Encore.
+title: Distributed Tracing
+subtitle: Track requests across your application and infrastructure
+lang: go
+---
+
+Distributed systems often have many moving parts, making it difficult to understand what your code is doing and to find the root cause of bugs. That’s where Tracing comes in. If you haven’t seen it before, it may just about change your life.
+
+Tracing is a revolutionary way to gain insight into what your applications are doing. It works by capturing the series of events that occur during the execution of your code (a “trace”), propagating a trace id between all individual systems, then correlating and joining the information together to present a unified picture of what happened end-to-end.
+
+As opposed to the labor-intensive instrumentation you'd normally need to go through to use tracing, Encore automatically captures traces for your entire application – in all environments. Uniquely, this means you can use tracing even for local development to help with debugging and speed up iteration.
+
+You view traces in the [Local Development Dashboard](/docs/go/observability/dev-dash) and, when using Encore Cloud, you can also see traces in the [Encore Cloud dashboard](https://app.encore.cloud) for Production and other environments.
+
+
+
+
+
+## Encore's tracing is more comprehensive and more performant than traditional tools
+
+Unlike other tracing solutions, Encore understands what each trace event is and captures unique insights about each one. This means you get access to more information than ever before:
+
+* Stack traces
+* Structured logging
+* HTTP requests
+* Network connection information
+* API calls
+* Database queries
+* etc.
+
+## Redacting sensitive data
+
+Encore's tracing automatically captures request and response payloads to simplify debugging.
+
+For cases where this is undesirable, such as for passwords or personally identifiable information (PII), Encore supports redacting fields marked as containing sensitive data.
+
+See the documentation on [API Schemas](/docs/go/primitives/defining-apis#sensitive-data) for more information.
diff --git a/docs/go/overview.md b/docs/go/overview.md
new file mode 100644
index 0000000000..637c353b05
--- /dev/null
+++ b/docs/go/overview.md
@@ -0,0 +1,97 @@
+---
+seotitle: Encore.go Introduction
+seodesc: Learn how Encore's Go Backend Framework works, and get to know the powerful features that help you build cloud backend applications faster.
+title: Encore.go
+subtitle: Use Encore.go to build robust backend applications and distributed systems
+toc: false
+lang: go
+---
+
+
+**[Quick Start Guide](/docs/go/quick-start)**: Dive right in and build your first Encore application.
+
+
+Encore.go is a backend framework for distributed systems that provides a declarative approach to working with essential backend primitives like APIs, microservices, databases, queues, caches, cron jobs, and storage buckets.
+
+Encore helps you build robust distributed systems and provides a smooth developer experience with a lot of built-in tooling:
+
+1. **Local Environment Management**: Automatically sets up and runs your local development environment and all local infrastructure.
+2. **Enhanced Observability**: Comes with tools like a [Local Development Dashboard](/docs/go/observability/dev-dash) and [tracing](/docs/go/observability/tracing) for monitoring application behavior.
+3. **Automatic Documentation**: Generates and maintains [up-to-date documentation](/docs/go/observability/service-catalog) for APIs and services, and creates [architecture diagrams](/docs/go/observability/encore-flow) for your system.
+
+Optional: **DevOps Automation**: Encore also provides a [Cloud Platform](/use-cases/devops-automation) for automating infrastructure provisioning and DevOps processes on AWS and GCP.
+
+
diff --git a/docs/go/primitives/api-calls.md b/docs/go/primitives/api-calls.md
new file mode 100644
index 0000000000..52b636389b
--- /dev/null
+++ b/docs/go/primitives/api-calls.md
@@ -0,0 +1,42 @@
+---
+seotitle: API Calls with Encore.go
+seodesc: Learn how to make type-safe API calls in Go with Encore.go
+title: API Calls
+subtitle: Making API calls is as simple as making function calls
+lang: go
+---
+
+Calling an API endpoint looks like a regular function call with Encore.go. To call an endpoint you first import the other service as a Go package using `import "encore.app/package-name"` and then call the API endpoint like a regular function. Encore will automatically generate the necessary boilerplate at compile-time.
+
+In the example below, we import the service package `hello` and call the `Ping` endpoint using a function call to `hello.Ping`.
+
+```go
+import "encore.app/hello" // import service
+
+//encore:api public
+func MyOtherAPI(ctx context.Context) error {
+ resp, err := hello.Ping(ctx, &hello.PingParams{Name: "World"})
+ if err == nil {
+ log.Println(resp.Message) // "Hello, World!"
+ }
+ return err
+}
+```
+
+
+
+This means your development workflow is as simple as building a monolith, even if you use multiple services.
+You also get all the benefits of function calls, like compile-time checking of all the parameters and auto-completion in your editor, while still allowing the division of code into logical components, services, and systems.
+
+Then when building your application, Encore uses [static analysis](/docs/go/concepts/application-model) to parse all API calls and compiles them to proper API calls.
+
+## Current Request
+
+By using Encore's [current request API](https://pkg.go.dev/encore.dev/#Request) you can get meta-information about the
+current request, including the type of request, the time the request started, the service and endpoint called, and the
+path that was called on the service.
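+
+For example, a minimal sketch of logging request metadata (the endpoint is illustrative; the field names follow the linked `encore.Request` reference):
+
+```go
+import (
+    "context"
+
+    "encore.dev"
+    "encore.dev/rlog"
+)
+
+//encore:api public
+func MyAPI(ctx context.Context) error {
+    // CurrentRequest returns metadata about the request being processed.
+    req := encore.CurrentRequest()
+    rlog.Info("handling request",
+        "service", req.Service,
+        "endpoint", req.Endpoint,
+        "path", req.Path)
+    return nil
+}
+```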
+
+For more information, see the [metadata documentation](/docs/go/develop/metadata).
diff --git a/docs/go/primitives/api-errors.md b/docs/go/primitives/api-errors.md
new file mode 100644
index 0000000000..27b300a3d2
--- /dev/null
+++ b/docs/go/primitives/api-errors.md
@@ -0,0 +1,172 @@
+---
+seotitle: API Errors – Types, Wrappers, and Codes
+seodesc: See how to return structured error information from your APIs using Encore's errs package, and how to build precise error messages for complex business logic.
+title: API Errors
+subtitle: Returning structured error information from your APIs
+infobox: {
+ title: "API Errors",
+ import: "encore.dev/beta/errs",
+}
+lang: go
+---
+
+Encore supports returning structured error information from your APIs using the [encore.dev/beta/errs](https://pkg.go.dev/encore.dev/beta/errs) package.
+
+Errors are propagated across the network to the [generated clients](/docs/go/cli/client-generation) and can be used within your front-ends without having to build any custom marshalling code.
+
+## The errs.Error type
+
+Structured errors are represented by the `errs.Error` type:
+
+```go
+type Error struct {
+ // Code is the error code to return.
+ Code ErrCode `json:"code"`
+ // Message is a descriptive message of the error.
+ Message string `json:"message"`
+ // Details are user-defined additional details.
+ Details ErrDetails `json:"details"`
+ // Meta are arbitrary key-value pairs for use within
+ // the Encore application. They are not exposed to external clients.
+ Meta Metadata `json:"-"`
+}
+```
+
+Returning an `*errs.Error` from an Encore API endpoint will result in Encore
+serializing this struct to JSON and returning it in the response. Additionally
+Encore will set the HTTP status code to match the error code (see the mapping table below).
+
+For example:
+```go
+return &errs.Error{
+ Code: errs.NotFound,
+ Message: "sprocket not found",
+}
+```
+
+Causes Encore to respond with a `HTTP 404` error with body:
+```json
+{
+ "code": "not_found",
+ "message": "sprocket not found",
+ "details": null
+}
+```
+
+## Error Wrapping
+
+Encore applications are encouraged to always use the `errs` package to
+manipulate errors. It supports wrapping errors to gradually add more error
+information, and lets you easily define both structured error details to return
+to external clients, as well as internal key-value metadata for debugging
+and error handling.
+
+```go
+func Wrap(err error, msg string, metaPairs ...interface{}) error
+```
+Use `errs.Wrap` to conveniently wrap an error, adding additional context and converting it to an `*errs.Error`.
+If `err` is nil it returns `nil`. If `err` is already an `*errs.Error` it copies the Code, Details, and Meta fields over.
+
+The variadic `metaPairs` parameter must be key-value pairs, where the key is always a `string` and the value can be
+any built-in type. Existing key-value pairs from the `err` are merged into the new `*Error`.
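+
+For example, a usage sketch (the `getUser` helper and `user_id` key are illustrative):
+
+```go
+user, err := getUser(ctx, id)
+if err != nil {
+    // Converts err to an *errs.Error, attaching internal metadata for debugging.
+    return nil, errs.Wrap(err, "could not get user", "user_id", id)
+}
+```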
+
+```go
+func WrapCode(err error, code ErrCode, msg string, metaPairs ...interface{}) error
+```
+`errs.WrapCode` is like `errs.Wrap` but also sets the error code.
+
+```go
+func Convert(err error) error
+```
+`errs.Convert` converts an error to an `*errs.Error`. If the error is already an `*errs.Error` it returns it unmodified.
+If `err` is nil it returns nil.
+
+## Error Codes
+
+The `errs` package defines error codes for common error scenarios.
+They are identical to the codes defined by `gRPC` for interoperability.
+
+The table below summarizes the error codes.
+You can find additional documentation about when to use them in the
+[package documentation](https://pkg.go.dev/encore.dev/beta/errs#ErrCode).
+
+| Code | String | HTTP Status |
+| -------------------- | ----------------------- | ------------------------- |
+| `OK` | `"ok"` | 200 OK |
+| `Canceled` | `"canceled"` | 499 Client Closed Request |
+| `Unknown` | `"unknown"` | 500 Internal Server Error |
+| `InvalidArgument` | `"invalid_argument"` | 400 Bad Request |
+| `DeadlineExceeded` | `"deadline_exceeded"` | 504 Gateway Timeout |
+| `NotFound` | `"not_found"` | 404 Not Found |
+| `AlreadyExists` | `"already_exists"` | 409 Conflict |
+| `PermissionDenied` | `"permission_denied"` | 403 Forbidden |
+| `ResourceExhausted` | `"resource_exhausted"` | 429 Too Many Requests |
+| `FailedPrecondition` | `"failed_precondition"` | 400 Bad Request |
+| `Aborted` | `"aborted"` | 409 Conflict |
+| `OutOfRange` | `"out_of_range"` | 400 Bad Request |
+| `Unimplemented` | `"unimplemented"` | 501 Not Implemented |
+| `Internal` | `"internal"` | 500 Internal Server Error |
+| `Unavailable` | `"unavailable"` | 503 Unavailable |
+| `DataLoss` | `"data_loss"` | 500 Internal Server Error |
+| `Unauthenticated` | `"unauthenticated"` | 401 Unauthorized |
+
+## Error Building
+
+In cases where you have complex business logic, or multiple error returns,
+it's convenient to gradually add metadata to your error.
+
+For this purpose Encore provides `errs.Builder`. The builder lets you
+gradually set aspects of the error, using a chaining API design.
+Use `errs.B()` to get a new builder that you can start chaining with directly.
+
+When you want to return the constructed error, call the `.Err()` method.
+
+For example:
+
+```go
+func getBoard(ctx context.Context, boardID int64) (*Board, error) {
+ // Construct a new error builder with errs.B()
+ eb := errs.B().Meta("board_id", boardID)
+
+ b := &Board{ID: boardID}
+ err := sqldb.QueryRow(ctx, `
+ SELECT name, created
+ FROM board
+ WHERE id = $1
+ `, boardID).Scan(&b.Name, &b.Created)
+ if errors.Is(err, sqldb.ErrNoRows) {
+ // Return a "board not found" error with code == NotFound
+ return nil, eb.Code(errs.NotFound).Msg("board not found").Err()
+ } else if err != nil {
+ // Return a general error
+ return nil, eb.Cause(err).Msg("could not get board").Err()
+ }
+ // ...
+}
+```
+
+## Inspecting API Errors
+
+When you call another API within Encore, the returned errors are always wrapped in `*errs.Error`.
+
+You can inspect the error information either by casting to `*errs.Error`, or using the below
+helper methods.
+
+```go
+func Code(err error) ErrCode
+```
+`errs.Code` returns the error code. If the error was not an `*errs.Error` it returns `errs.Unknown`.
+
+```go
+func Meta(err error) Metadata
+type Metadata map[string]interface{}
+```
+`errs.Meta` returns any structured metadata present in the error. If the error was not an `*errs.Error` it returns nil.
+Unlike when you return error information to external clients,
+all the metadata is sent to the calling service, making debugging even easier.
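+
+For example, a sketch of inspecting an error returned by another service (the `hello.Ping` call is illustrative):
+
+```go
+resp, err := hello.Ping(ctx, &hello.PingParams{Name: "World"})
+if err != nil && errs.Code(err) == errs.NotFound {
+    // Inspect the internal metadata attached by the hello service.
+    rlog.Info("ping target not found", "meta", errs.Meta(err))
+}
+```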
+
+```go
+func Details(err error) ErrDetails
+```
+`errs.Details` returns the structured error details. If the error was not an `*errs.Error` or the error lacked details,
+it returns nil.
diff --git a/docs/go/primitives/api-schemas.md b/docs/go/primitives/api-schemas.md
new file mode 100644
index 0000000000..5a146d26b6
--- /dev/null
+++ b/docs/go/primitives/api-schemas.md
@@ -0,0 +1,255 @@
+---
+seotitle: API Schemas – Path, Query, and Body parameters
+seodesc: See how to design API schemas for your Go based backend application using Encore.
+title: API Schemas
+subtitle: How to design schemas for your APIs
+lang: go
+---
+APIs in Encore are regular functions with request and response data types.
+These types are structs (or pointers to structs) with optional field tags, which Encore uses to encode API requests to HTTP messages. The same struct can be used for requests and responses, but the `query` tag is ignored when generating responses.
+
+All tags except `json` are ignored for nested fields, which means you can only define
+`header` and `query` parameters for root-level fields.
+
+For example, this struct:
+```go
+type NestedRequestResponse struct {
+ Header string `header:"X-Header"` // this field will be read from the HTTP header
+ Query string `query:"query"` // this field will be read from the query string
+ Body1 string `json:"body1"`
+ Nested struct {
+ Header2 string `header:"X-Header2"` // this field will be read from the body
+ Query2 string `query:"query2"` // this field will be read from the body
+ Body2 string `json:"body2"`
+ } `json:"nested"`
+}
+```
+
+Would be unmarshalled from this request:
+
+```output
+POST /example?query=a%20query HTTP/1.1
+Content-Type: application/json
+X-Header: A header
+
+{
+ "body1": "a body",
+ "nested": {
+ "Header2": "not a header",
+ "Query2": "not a query",
+ "body2": "a nested body"
+ }
+}
+
+```
+
+And marshalled to this response:
+
+```output
+HTTP/1.1 200 OK
+Content-Type: application/json
+X-Header: A header
+
+{
+ "Query": "not a query",
+ "body1": "a body",
+ "nested": {
+ "Header2": "not a header",
+ "Query2": "not a query",
+ "body2": "a nested body"
+ }
+}
+
+```
+
+## Path parameters
+
+Path parameters are specified by the `path` field in the `//encore:api` annotation.
+To specify a placeholder variable, use `:name` and add a function parameter with the same name to the function signature.
+Encore parses the incoming request URL and makes sure it matches the type of the parameter. The last segment of the path
+can be parsed as a wildcard parameter by using `*name` with a matching function parameter.
+
+```go
+// GetBlogPost retrieves a blog post by id.
+//encore:api public method=GET path=/blog/:id/*path
+func GetBlogPost(ctx context.Context, id int, path string) (*BlogPost, error) {
+ // Use id to query database...
+}
+```
+
+### Fallback routes
+
+Encore supports defining fallback routes that will be called if no other endpoint matches the request,
+using the syntax `path=/!fallback`.
+
+This is often useful when migrating an existing backend service over to Encore, as it allows you to gradually
+migrate endpoints over to Encore while routing the remaining endpoints to the existing HTTP router using
+a raw endpoint with a fallback route.
+
+For example:
+
+```go
+//encore:service
+type Service struct {
+ oldRouter *gin.Engine // existing HTTP router
+}
+
+// Route all requests to the existing HTTP router if no other endpoint matches.
+//encore:api public raw path=/!fallback
+func (s *Service) Fallback(w http.ResponseWriter, req *http.Request) {
+ s.oldRouter.ServeHTTP(w, req)
+}
+```
+
+## Headers
+
+Headers are defined by the `header` field tag, which can be used in both request and response data types. The tag name is used to translate between the struct field and http headers.
+In the example below, the `Language` field of `ListBlogPost` will be fetched from the
+`Accept-Language` HTTP header.
+
+```go
+type ListBlogPost struct {
+ Language string `header:"Accept-Language"`
+ Author string // Not a header
+}
+```
+
+### Cookies
+
+Cookies can be set in the response by using the `header` tag with the `Set-Cookie` header name.
+
+```go
+type LoginResponse struct {
+ SessionID string `header:"Set-Cookie"`
+}
+
+//encore:api public method=POST path=/login
+func Login(ctx context.Context) (*LoginResponse, error) {
+ return &LoginResponse{SessionID: "session=123"}, nil
+}
+```
+
+The cookies can then be read using e.g. [structured auth data](/docs/go/develop/auth#accepting-structured-auth-information).
+
+## Query parameters
+
+For `GET`, `HEAD` and `DELETE` requests, parameters are read from the query string by default.
+The query parameter name defaults to the [snake-case](https://en.wikipedia.org/wiki/Snake_case)
+encoded name of the corresponding struct field (e.g. BlogPost becomes blog_post).
+
+The `query` field tag can be used
+to parse a field from the query string for other HTTP methods (e.g. POST) and to override the default parameter name.
+
+Query strings are not supported in HTTP responses and therefore `query` tags in response types are ignored.
+
+In the example below, the `PageLimit` field will be read from the `limit` query
+parameter, whereas the `Author` field will be parsed from the query string (as `author`) only if the method of
+the request is `GET`, `HEAD` or `DELETE`.
+
+```go
+type ListBlogPost struct {
+ PageLimit int `query:"limit"` // always a query parameter
+ Author string // query if GET, HEAD or DELETE, otherwise body parameter
+}
+```
+
+## Body parameters
+
+Encore will default to reading request parameters from the body (as JSON) for all HTTP methods except `GET`, `HEAD` or
+`DELETE`. The name of the body parameter defaults to the field name, but can be overridden by the
+`json` tag. Response fields will be serialized as JSON in the HTTP body unless the `header` tag is set.
+
+There is no tag to force a field to be read from the body, as some infrastructure entities
+do not support body content in `GET`, `HEAD` or `DELETE` requests.
+
+```go
+type CreateBlogPost struct {
+ Subject string `json:"subject"` // query if GET, HEAD or DELETE, otherwise body parameter
+ Author string // query if GET, HEAD or DELETE, otherwise body parameter
+}
+```
+
+## Supported types
+The table below lists the data types supported by each HTTP message location.
+
+| Type | Header | Path | Query | Body |
+| --------------- | ------ | ---- | ----- | ---- |
+| bool | X | X | X | X |
+| numeric | X | X | X | X |
+| string | X | X | X | X |
+| time.Time | X | X | X | X |
+| uuid.UUID | X | X | X | X |
+| json.RawMessage | X | X | X | X |
+| list | | | X | X |
+| struct | | | | X |
+| map | | | | X |
+| pointer | | | | X |
+
+## Raw endpoints
+
+In some cases you may need to fulfill an API schema that is defined by someone else, for instance when you want to accept webhooks.
+This often requires you to parse custom HTTP headers and do other low-level things that Encore usually lets you skip.
+
+For these circumstances Encore lets you define raw endpoints. Raw endpoints operate at a lower abstraction level, giving you access to the underlying HTTP request.
+
+Learn more in the [raw endpoints documentation](/docs/go/primitives/raw-endpoints).
+
+## Sensitive data
+
+Encore's built-in tracing functionality automatically captures request and response payloads
+to simplify debugging. That's not desirable if a request or response payload contains sensitive data, such
+as API keys or personally identifiable information (PII).
+
+For those use cases Encore supports marking a field as sensitive using the struct tag `encore:"sensitive"`.
+Encore's tracing system will automatically redact fields tagged as sensitive. This works for both individual
+values as well as nested fields.
+
+Note that inputs to [auth handlers](/docs/go/develop/auth) are automatically marked as sensitive and are always redacted.
+
+Raw endpoints lack a schema, which means there's no way to add a struct tag to mark certain data as sensitive.
+For this reason Encore supports tagging the whole API endpoint as sensitive by adding `sensitive` to the `//encore:api` annotation.
+This will cause the whole request and response payload to be redacted, including all request and response headers.
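+
+For example, a sketch of a raw webhook endpoint marked as sensitive (the path and handler are illustrative):
+
+```go
+// The sensitive keyword redacts the entire request and response,
+// including all headers, from traces.
+//encore:api public raw method=POST path=/webhook sensitive
+func Webhook(w http.ResponseWriter, req *http.Request) {
+    // Process the webhook payload here.
+}
+```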
+
+
+
+The `encore:"sensitive"` tag is ignored for local development environments to make development and debugging with the Local Development Dashboard easier.
+
+
+
+
+## Example
+
+```go
+package blog // service name
+
+import (
+ "context"
+ "time"
+
+ "encore.dev/types/uuid"
+)
+
+type Updates struct {
+ Author string `json:"author,omitempty"`
+ PublishTime time.Time `json:"publish_time,omitempty"`
+}
+
+// BatchUpdateParams is the request data for the BatchUpdate endpoint.
+type BatchUpdateParams struct {
+ Requester string `header:"X-Requester"`
+ RequestTime time.Time `header:"X-Request-Time"`
+ CurrentAuthor string `query:"author"`
+ Updates *Updates `json:"updates"`
+ MySecretKey string `encore:"sensitive"`
+}
+
+// BatchUpdateResponse is the response data for the BatchUpdate endpoint.
+type BatchUpdateResponse struct {
+ ServedBy string `header:"X-Served-By"`
+ UpdatedIDs []uuid.UUID `json:"updated_ids"`
+}
+
+//encore:api public method=POST path=/section/:sectionID/posts
+func BatchUpdate(ctx context.Context, sectionID string, params *BatchUpdateParams) (*BatchUpdateResponse, error) {
+ // Update blog posts for section
+ return &BatchUpdateResponse{ServedBy: hostname, UpdatedIDs: ids}, nil
+}
+
+```
diff --git a/docs/go/primitives/app-structure.md b/docs/go/primitives/app-structure.md
new file mode 100644
index 0000000000..ad1e9ec482
--- /dev/null
+++ b/docs/go/primitives/app-structure.md
@@ -0,0 +1,156 @@
+---
+seotitle: Structuring your microservices backend application
+seodesc: Learn how to structure your microservices backend application. See recommended app structures for monoliths, small microservices backends, and large scale microservices applications.
+title: App Structure
+subtitle: Structuring your Encore application
+lang: go
+---
+
+Encore uses a monorepo design and it's best to use one Encore app for your entire backend application. This lets Encore build an application model that spans your entire app, which is necessary to get the most value out of many
+features like [distributed tracing](/docs/go/observability/tracing) and [Encore Flow](/docs/go/observability/encore-flow).
+
+If you have a large application, see advice on how to [structure an app with several systems](/docs/go/primitives/app-structure#large-applications-with-several-systems).
+
+It's simple to integrate Encore applications with pre-existing systems you might have, using APIs and built-in tools like [client generation](/docs/go/cli/client-generation).
+
+## Monolith or Microservices
+
+Encore is not opinionated about monoliths vs. microservices. It does however let you build microservices applications with a monolith-style developer experience. For example, you automatically get IDE auto-complete when making [API calls between services](/docs/go/primitives/api-calls), along with cross-service type-safety.
+
+When using Encore Cloud to create an environment on AWS/GCP, Encore enables you to configure if you want to combine multiple services into one process or keep them separate. This can be useful for improved efficiency at smaller scales, and for co-locating services for increased performance. Learn more in the [environments documentation](/docs/platform/deploy/environments#process-allocation).
+
+## Creating services
+
+To create an Encore service, you create a Go package and
+[define an API](/docs/go/primitives/defining-apis) within it. When using databases, you add database migrations in a subfolder `migrations` to define the structure of the database(s). Learn more in the [SQL databases docs](/docs/go/primitives/databases).
+
+On disk it might look like this:
+
+```
+/my-app
+├── encore.app // ... and other top-level project files
+│
+├── hello // hello service (a Go package)
+│ ├── migrations // hello service db migration (directory)
+│ │ └── 1_create_table.up.sql // hello service db migration
+│ ├── hello.go // hello service code
+│ └── hello_test.go // tests for hello service
+│
+└── world // world service (a Go package)
+ └── world.go // world service code
+```
+
+
+
+## Structure services using sub-packages
+
+Within a service, it's possible to have multiple sub-packages. This is a good way to define components, helper
+functions, or other supporting code for your service. You can create as many sub-packages, in any kind of nested structure within your service, as you want.
+
+To create sub-packages, you create sub-directories within a service package. Sub-packages are internal to services,
+they are not themselves service packages. This means sub-packages within services cannot
+themselves define APIs.
+You can however define an API in a service package that calls a function within a sub-package.
+
+For example, rather than define the entire logic for an endpoint in that endpoint's function, you can call functions
+from sub-packages and divide the logic in any way you want.
+
+**`hello/hello.go`**
+
+```go
+package hello
+
+import (
+ "context"
+
+ "encore.app/hello/foo"
+)
+
+//encore:api public path=/hello/:name
+func World(ctx context.Context, name string) (*Response, error) {
+ msg := foo.GenerateMessage(name)
+ return &Response{Message: msg}, nil
+}
+
+type Response struct {
+ Message string
+}
+```
+
+**`hello/foo/foo.go`**
+
+```go
+package foo
+
+import (
+ "fmt"
+)
+
+func GenerateMessage(name string) string {
+ return fmt.Sprintf("Hello %s!", name)
+}
+
+```
+
+On disk it might look like this:
+
+```
+/my-app
+├── encore.app // ... and other top-level project files
+│
+├── hello // hello service (a Go package)
+│ ├── migrations // hello service db migrations (directory)
+│ │ └── 1_create_table.up.sql // hello service db migration
+│ ├── foo // sub-package foo (directory)
+│ │ └── foo.go // foo code (cannot define APIs)
+│ ├── hello.go // hello service code
+│ └── hello_test.go // tests for hello service
+│
+└── world // world service (a Go package)
+ └── world.go // world service code
+```
+
+## Large applications with several systems
+
+If you have a large application with several logical domains, each consisting of multiple services, it can be practical
+to separate these into distinct systems.
+
+Systems are not a special construct in Encore; they only help you divide your application logically around common concerns and purposes. Encore only handles services; the compiler reads your
+systems and extracts the services of your application. As applications grow, systems help you decompose your application
+without requiring any complex refactoring.
+
+To create systems, create a sub-directory for each system and put the relevant service packages within it.
+This is all you need to do, since with Encore each service consists of a Go package.
+
+As an example, a company building a Trello app might divide their application into three systems: the **Trello** system
+(for the end-user facing app with boards and cards), the **User** system (for user and organization management), and
+the **Premium** system (for handling payments and subscriptions).
+
+On disk it might look like this:
+
+```
+/my-trello-clone
+├── encore.app // ... and other top-level project files
+│
+├── trello // trello system (a directory)
+│ ├── board // board service (a Go package)
+│ │ └── board.go // board service code
+│ └── card // card service (a Go package)
+│ └── card.go // card service code
+│
+├── premium // premium system (a directory)
+│ ├── payment // payment service (a Go package)
+│ │ └── payment.go // payment service code
+│ └── subscription // subscription service (a Go package)
+│ └── subscription.go // subscription service code
+│
+└── usr // usr system (a directory)
+ ├── org // org service (a Go package)
+ │ └── org.go // org service code
+ └── user // user service (a Go package)
+ └── user.go // user service code
+```
+
+The only refactoring needed to divide an existing Encore application into systems is to move services into their respective
+subfolders. This is a simple way to separate the specific concerns of each system. What matters for Encore are the packages containing services, and the division in systems or subsystems will not change the endpoints or
+architecture of your application.
diff --git a/docs/go/primitives/caching.md b/docs/go/primitives/caching.md
new file mode 100644
index 0000000000..a0f8e35344
--- /dev/null
+++ b/docs/go/primitives/caching.md
@@ -0,0 +1,176 @@
+---
+seotitle: Using caches in your microservices backend application
+seodesc: Learn how to implement caches to optimize response times and reduce cost in your microservices cloud backend.
+title: Caching
+subtitle: Optimize response times and reduce costs by avoiding re-work
+infobox: {
+ title: "Caching",
+ import: "encore.dev/storage/cache",
+}
+lang: go
+---
+
+A cache is a high-speed storage layer, commonly used in distributed systems to improve user experiences
+by reducing latency, improving system performance, and avoiding expensive computation.
+
+For scalable systems you typically want to deploy the cache as a separate
+infrastructure resource, allowing you to run multiple instances of your application concurrently.
+
+Encore's built-in Caching API lets you use high-performance caches (using [Redis](https://redis.io/)) in a cloud-agnostic declarative fashion. At deployment, Encore will automatically [provision the required infrastructure](/docs/platform/infrastructure/infra).
+
+## Cache clusters
+
+To use caching in Encore, you must first define a *cache cluster*.
+Each cache cluster defined in your application will be provisioned as a separate Redis instance
+by Encore.
+
+This gives you fine-grained control over which service(s) should use the same cache cluster
+and which should have a separate one.
+
+It looks like this:
+
+```go
+import "encore.dev/storage/cache"
+
+var MyCacheCluster = cache.NewCluster("my-cache-cluster", cache.ClusterConfig{
+ // EvictionPolicy tells Redis how to evict keys when the cache reaches
+ // its memory limit. For typical cache use cases, cache.AllKeysLRU is a good default.
+ EvictionPolicy: cache.AllKeysLRU,
+})
+```
+
+
+
+When starting out it's recommended to use a single cache cluster
+that's shared between your different services.
+
+
+
+## Keyspaces
+
+When using a cache, each cached item is stored at a particular key, which is typically an arbitrary string.
+If you use a cache cluster to cache different sets of data, it's important that distinct data sets have non-overlapping keys.
+
+Each value stored in the cache also has a specific type, and certain cache operations can only be performed on certain types. For example, a common cache operation is to increment an integer value that is stored in the cache. If you try to apply this operation on a value that is not an integer, an error is returned.
+
+Encore provides a simple, type-safe solution to these problems through Keyspaces.
+
+In order to begin storing data in your cache, you must first define a Keyspace.
+
+Each keyspace has a Key type and a Value type. The Key type is much like a map key, in that it tells Encore where in the cache
+the item is stored. The Key type is combined with the Key Pattern to produce a string that is the Redis cache key.
+
+The Value type is the type of the values stored in that keyspace. For many keyspaces this is specified in the name of the constructor.
+For example, `NewIntKeyspace` stores `int64` values.
+
+For example, if you want to rate limit the number of requests per user ID it looks like this:
+
+```go
+import (
+ "time"
+
+ "encore.dev/beta/auth"
+ "encore.dev/beta/errs"
+ "encore.dev/middleware"
+ "encore.dev/storage/cache"
+)
+
+// RequestsPerUser tracks the number of requests per user.
+// The cache items expire after 10 seconds without activity.
+var RequestsPerUser = cache.NewIntKeyspace[auth.UID](MyCacheCluster, cache.KeyspaceConfig{
+ KeyPattern: "requests/:key",
+ DefaultExpiry: cache.ExpireIn(10 * time.Second),
+})
+
+// RateLimitMiddleware is a global middleware that limits the number of authenticated requests
+// to 10 requests per 10 seconds.
+//encore:middleware target=all
+func RateLimitMiddleware(req middleware.Request, next middleware.Next) middleware.Response {
+ if userID, ok := auth.UserID(); ok {
+ val, err := RequestsPerUser.Increment(req.Context(), userID, 1)
+
+ // NOTE: this "fails open", meaning if we can't communicate with the cache
+ // we default to allowing the requests.
+ //
+ // Consider whether that's the correct behavior for your application,
+ // or if you want to return an error to the user in that case.
+ if err == nil && val > 10 {
+ return middleware.Response{
+ Err: &errs.Error{Code: errs.ResourceExhausted, Message: "rate limit exceeded"},
+ }
+ }
+ }
+ return next(req)
+}
+```
+
+As you can see, the `RequestsPerUser` keyspace defines a `KeyPattern` which is set to `"requests/:key"`.
+Here `:key` refers to the value of the Key type, which is the `auth.UID` value passed in.
+
+If you want the cache key to contain multiple values, you can define a struct type
+and pass that as the key. Then change the `KeyPattern` to specify the struct fields.
+
+For example:
+
+```go
+type MyKey struct {
+ UserID auth.UID
+ ResourcePath string // the resource being accessed
+}
+
+// ResourceRequestsPerUser tracks the number of requests per user and resource.
+// The cache items expire after 10 seconds without activity.
+var ResourceRequestsPerUser = cache.NewIntKeyspace[MyKey](MyCacheCluster, cache.KeyspaceConfig{
+ KeyPattern: "requests/:UserID/:ResourcePath",
+ DefaultExpiry: cache.ExpireIn(10 * time.Second),
+})
+
+// ... then:
+key := MyKey{UserID: "some-user-id", ResourcePath: "/foo"}
+ResourceRequestsPerUser.Increment(ctx, key, 1)
+```
+
+
+
+Encore ensures that all the struct fields are present in the `KeyPattern`,
+and that the placeholder values are all valid field names.
+
+That way the connection between the struct fields and the `KeyPattern`
+becomes compile-time type-safe as well.
+
+
+
+Also note that Encore ensures there are no conflicting `KeyPattern` definitions across each cache cluster.
+Each keyspace must define its own, non-conflicting `KeyPattern`.
+This way, you can feel safe that there won't be any accidental overwrites of cache values, even with multiple services sharing the same cache cluster.
+
+## Keyspace operations
+
+Encore comes with a full suite of keyspace types, each with a wide variety of cache operations.
+
+Basic keyspace types include
+[strings](https://pkg.go.dev/encore.dev/storage/cache#NewStringKeyspace),
+[integers](https://pkg.go.dev/encore.dev/storage/cache#NewIntKeyspace),
+[floats](https://pkg.go.dev/encore.dev/storage/cache#NewFloatKeyspace),
+and [struct types](https://pkg.go.dev/encore.dev/storage/cache#NewStructKeyspace).
+These keyspaces all share the same set of methods (along with a few keyspace-specific ones).
+
+There are also more advanced keyspaces for storing [sets of basic types](https://pkg.go.dev/encore.dev/storage/cache#NewSetKeyspace)
+and [ordered lists of basic types](https://pkg.go.dev/encore.dev/storage/cache#NewListKeyspace).
+These keyspaces offer a different, specialized set of methods specific to set and list operations.
+
+For a list of the supported operations, see the [package documentation](https://pkg.go.dev/encore.dev/storage/cache).
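+
+As a quick illustration, here's a minimal sketch of a basic string keyspace and its map-like operations (the keyspace itself is illustrative; `cluster` is the cache cluster defined earlier):
+
+```go
+// UserNames caches each user's display name.
+var UserNames = cache.NewStringKeyspace[auth.UID](cluster, cache.KeyspaceConfig{
+ KeyPattern:    "usernames/:key",
+ DefaultExpiry: cache.ExpireIn(1 * time.Hour),
+})
+
+func cacheUserName(ctx context.Context, uid auth.UID, name string) (string, error) {
+ // Set stores the value; Get retrieves it again.
+ if err := UserNames.Set(ctx, uid, name); err != nil {
+  return "", err
+ }
+ return UserNames.Get(ctx, uid)
+}
+```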
+
+## Testing
+
+When running tests, Encore spins up an in-memory cache separately for each test.
+
+This way you don't have to think about clearing the cache between tests,
+or worry about whether one test affects another.
+Each test is automatically fully isolated.
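+
+For example, a minimal sketch of a test against the `RequestsPerUser` keyspace defined above; it runs against that test's own in-memory cache, so no cleanup is needed:
+
+```go
+func TestRequestsPerUser(t *testing.T) {
+ ctx := context.Background()
+ val, err := RequestsPerUser.Increment(ctx, "some-user", 1)
+ if err != nil || val != 1 {
+  t.Fatalf("got val=%d, err=%v; want 1, nil", val, err)
+ }
+}
+```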
+
+## Local development
+
+For local development, Encore maintains a local, in-memory implementation of Redis.
+This implementation is designed to store a small number of keys (currently 100).
+
+When the number of keys exceeds this value, keys are randomly purged to get below the limit.
+This simulates the ephemeral, transient nature of caches while also
+limiting memory use. The precise behavior for local development may change over time and should not be relied on.
diff --git a/docs/go/primitives/change-db-schema.md b/docs/go/primitives/change-db-schema.md
new file mode 100644
index 0000000000..c8665d6d75
--- /dev/null
+++ b/docs/go/primitives/change-db-schema.md
@@ -0,0 +1,52 @@
+---
+seotitle: How to change your SQL database schema
+seodesc: Learn how to change your SQL database schema for your Go backend application, using migration files and Encore's built-in schema migration functionality.
+title: Change SQL database schema
+lang: go
+---
+
+Encore database schemas are changed over time using *migration files*.
+
+Each migration file has a sequence number, and migration files are run
+in sequence when deploying. Encore tracks which migrations have already run
+and only runs new ones.
+
+To change your database schema, add a new migration file using the next
+available migration number.
+
+For example, if you have two migration files already,
+the next migration file should be named `3_something.up.sql` where
+`something` is a short description of what the migration does.
+
+
+
+Database migrations are applied before the application is restarted
+with the new code. Always make sure the old application code works with
+the new database schema, so that things don't break while your new code
+is being rolled out.
+
+
+
+## Example
+
+Let's say you have a single migration file that creates a `todo_item` table:
+
+**`todo/migrations/1_create_table.up.sql`**
+```sql
+CREATE TABLE todo_item (
+ id BIGSERIAL PRIMARY KEY,
+ title TEXT NOT NULL,
+ done BOOLEAN NOT NULL
+);
+```
+
+And now you want to add a `created` column to track when each todo was created.
+Add a new file:
+
+**`todo/migrations/2_add_created_col.up.sql`**
+```sql
+ALTER TABLE todo_item ADD created TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW();
+```
+
+On the next deploy, Encore will notice the new migration file and run it, adding
+the new column.
diff --git a/docs/go/primitives/code-snippets.md b/docs/go/primitives/code-snippets.md
new file mode 100644
index 0000000000..a186411dcd
--- /dev/null
+++ b/docs/go/primitives/code-snippets.md
@@ -0,0 +1,245 @@
+---
+seotitle: Code snippets for using the Backend Framework's building blocks in your backend application
+seodesc: Learn how to build cloud-agnostic backend applications using Encore's Backend Framework.
+title: Code snippets
+subtitle: Shortcuts for building with Encore
+lang: go
+---
+
+When you're familiar with how Encore works, you can simplify your development workflow by copy-pasting these examples. If you're looking for details on how Encore works, please refer to the relevant docs section.
+
+## APIs
+
+### Defining APIs
+
+```go
+package hello // service name
+
+//encore:api public
+func Ping(ctx context.Context, params *PingParams) (*PingResponse, error) {
+ msg := fmt.Sprintf("Hello, %s!", params.Name)
+ return &PingResponse{Message: msg}, nil
+}
+```
+
+### Defining Request and Response schemas
+
+```go
+// PingParams is the request data for the Ping endpoint.
+type PingParams struct {
+ Name string
+}
+
+// PingResponse is the response data for the Ping endpoint.
+type PingResponse struct {
+ Message string
+}
+```
+
+### Calling APIs
+
+```go
+import "encore.app/hello" // import service
+
+//encore:api public
+func MyOtherAPI(ctx context.Context) error {
+ resp, err := hello.Ping(ctx, &hello.PingParams{Name: "World"})
+ if err == nil {
+ log.Println(resp.Message) // "Hello, World!"
+ }
+ return err
+}
+```
+
+**Hint:** Import the service package and call the API endpoint using a regular function call.
+
+### Receive Webhooks
+
+```go
+import "net/http"
+
+// Webhook receives incoming webhooks from Some Service That Sends Webhooks.
+//encore:api public raw
+func Webhook(w http.ResponseWriter, req *http.Request) {
+ // ... operate on the raw HTTP request ...
+}
+```
+
+**Hint:** Like any other API endpoint, this will be exposed at:
+`https://<env>-<app-id>.encr.app/service.Webhook`
+
+## Databases
+
+### Creating a SQL database
+
+To create a database, import `encore.dev/storage/sqldb` and call `sqldb.NewDatabase`, assigning the result to a package-level variable.
+`sqldb.DatabaseConfig` specifies the directory containing the database migration files, which is how you define the database schema.
+
+```
+-- todo/db.go --
+package todo
+
+// Create the todo database and assign it to the "tododb" variable
+var tododb = sqldb.NewDatabase("todo", sqldb.DatabaseConfig{
+ Migrations: "./migrations",
+})
+
+// Then, query the database using db.QueryRow, db.Exec, etc.
+-- todo/migrations/1_create_table.up.sql --
+CREATE TABLE todo_item (
+ id BIGSERIAL PRIMARY KEY,
+ title TEXT NOT NULL,
+ done BOOLEAN NOT NULL DEFAULT false
+ -- etc...
+);
+```
+
+### Inserting data into a database
+
+One way of inserting data is with a helper function that uses the package function `sqldb.Exec`:
+
+```go
+import "encore.dev/storage/sqldb"
+
+// insert inserts a todo item into the database.
+func insert(ctx context.Context, id, title string, done bool) error {
+ _, err := tododb.Exec(ctx, `
+ INSERT INTO todo_item (id, title, done)
+ VALUES ($1, $2, $3)
+ `, id, title, done)
+ return err
+}
+```
+
+### Querying a database
+
+To read a single todo item in the example schema above, we can use `sqldb.QueryRow`:
+
+```go
+import "encore.dev/storage/sqldb"
+
+var item struct {
+ ID int64
+ Title string
+ Done bool
+}
+err := tododb.QueryRow(ctx, `
+ SELECT id, title, done
+ FROM todo_item
+ LIMIT 1
+`).Scan(&item.ID, &item.Title, &item.Done)
+```
+
+**Hint:** If `sqldb.QueryRow` does not find a matching row, it reports an error that can be checked
+by importing the standard library `errors` package and calling `errors.Is(err, sqldb.ErrNoRows)`.
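+
+For example, a minimal sketch of that check:
+
+```go
+if errors.Is(err, sqldb.ErrNoRows) {
+ // no matching todo item exists
+}
+```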
+
+## Defining a Cron Job
+
+```go
+import "encore.dev/cron"
+
+var _ = cron.NewJob("welcome-email", cron.JobConfig{
+ Title: "Send welcome emails",
+ Every: 2 * cron.Hour,
+ Endpoint: SendWelcomeEmail,
+})
+
+//encore:api private
+func SendWelcomeEmail(ctx context.Context) error {
+ // ...
+ return nil
+}
+```
+**Hint:** Cron Jobs do not run in your local development environment.
+
+## PubSub
+
+### Creating a PubSub topic
+
+```go
+import "encore.dev/pubsub"
+
+type SignupEvent struct { UserID int }
+var Signups = pubsub.NewTopic[*SignupEvent]("signups", pubsub.TopicConfig {
+ DeliveryGuarantee: pubsub.AtLeastOnce,
+})
+```
+
+**Hint:** Topics are declared as package level variables and cannot be created inside functions. Regardless of where you create a topic, it can be published and subscribed to from any service.
+
+### Publishing an Event (Pub)
+
+```go
+if _, err := Signups.Publish(ctx, &SignupEvent{UserID: id}); err != nil {
+ return err
+}
+
+if err := tx.Commit(); err != nil {
+ return err
+}
+```
+
+**Hint:** If you want to publish to the topic from another service, import the topic package variable (`Signups` in this example) and call publish on it from there.
+
+### Subscribing to Events (Sub)
+
+Create a Subscription as a package level variable by calling `pubsub.NewSubscription`.
+
+```go
+var _ = pubsub.NewSubscription(
+ user.Signups, "send-welcome-email",
+ pubsub.SubscriptionConfig[*SignupEvent] {
+ Handler: SendWelcomeEmail,
+ },
+)
+func SendWelcomeEmail(ctx context.Context, event *SignupEvent) error {
+ // ... send email ...
+ return nil
+}
+```
+
+## Defining a Cache cluster
+
+```go
+import "encore.dev/storage/cache"
+
+var MyCacheCluster = cache.NewCluster("my-cache-cluster", cache.ClusterConfig{
+ // EvictionPolicy tells Redis how to evict keys when the cache reaches
+ // its memory limit. For typical cache use cases, cache.AllKeysLRU is a good default.
+ EvictionPolicy: cache.AllKeysLRU,
+})
+```
+
+## Secrets
+
+### Defining Secrets
+
+```go
+var secrets struct {
+ GitHubAPIToken string // personal access token for deployments
+ SomeOtherSecret string // some other secret
+}
+```
+
+**Hint:** The variable must be an unexported struct named `secrets`, and all the fields must be of type `string`.
+
+### Setting secret values
+
+```shell
+$ encore secret set --type <types> <secret-name>
+```
+
+**Hint:** `<types>` defines which environment types the secret value applies to. Use a comma-separated list of `production`, `development`, `preview`, and `local`. For each Secret, there can only be one secret value for each environment type.
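+
+For example, a hypothetical invocation (the secret name is illustrative):
+
+```shell
+$ encore secret set --type production,preview GitHubAPIToken
+```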
+
+### Using secrets
+
+```go
+func callGitHub(ctx context.Context) {
+ req, _ := http.NewRequestWithContext(ctx, "GET", "https://api.github.com/user", nil)
+ req.Header.Add("Authorization", "token " + secrets.GitHubAPIToken)
+ resp, err := http.DefaultClient.Do(req)
+ // ... handle err and resp
+}
+```
+
+**Hint:** Secret keys are globally unique for your whole application; if multiple services use the same secret name they both receive the same secret value at runtime.
diff --git a/docs/go/primitives/connect-existing-db.md b/docs/go/primitives/connect-existing-db.md
new file mode 100644
index 0000000000..4c29223381
--- /dev/null
+++ b/docs/go/primitives/connect-existing-db.md
@@ -0,0 +1,83 @@
+---
+seotitle: How to integrate your Encore app with an existing database
+seodesc: Learn how to integrate your Encore Go backend application with an existing database, in any cloud you choose.
+title: Integrate with existing databases
+lang: go
+---
+
+Encore automatically provisions the necessary infrastructure when you create a service and add a database. However, you may want to connect to an existing database for migration or prototyping purposes. It's simple to integrate your Encore app with an existing database in these cases.
+
+## Example
+
+Let's say you have an external database hosted by DigitalOcean that you would like to connect to.
+The simplest approach is to create a dedicated package that lazily instantiates a database connection pool.
+We can store the password using Encore's [secrets manager](/docs/go/primitives/secrets) to make it even easier.
+
+The connection string looks something like this:
+
+```
+postgresql://user:password@externaldb-do-user-1234567-0.db.ondigitalocean.com:25010/externaldb?sslmode=require
+```
+
+So we write something like:
+
+**`pkg/externaldb/externaldb.go`**
+
+```go
+package externaldb
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/jackc/pgx/v4/pgxpool"
+ "go4.org/syncutil"
+)
+
+// Get returns a database connection pool to the external database.
+// It is lazily created on first use.
+func Get(ctx context.Context) (*pgxpool.Pool, error) {
+ // Attempt to set up the database connection pool if it hasn't
+ // already been successfully set up.
+ err := once.Do(func() error {
+ var err error
+ pool, err = setup(ctx)
+ return err
+ })
+ return pool, err
+}
+
+var (
+ // once is like sync.Once except it re-arms itself on failure
+ once syncutil.Once
+ // pool is the successfully created database connection pool,
+ // or nil when no such pool has been set up yet.
+ pool *pgxpool.Pool
+)
+
+var secrets struct {
+ // ExternalDBPassword is the database password for authenticating
+ // with the external database hosted on DigitalOcean.
+ ExternalDBPassword string
+}
+
+// setup attempts to set up a database connection pool.
+func setup(ctx context.Context) (*pgxpool.Pool, error) {
+ connString := fmt.Sprintf("postgresql://%s:%s@externaldb-do-user-1234567-0.db.ondigitalocean.com:25010/externaldb?sslmode=require",
+ "user", secrets.ExternalDBPassword)
+ return pgxpool.Connect(ctx, connString)
+}
+```
+
+Before running, remember to use `encore secret set` to store the `ExternalDBPassword` to use. (But don't worry, Encore will remind you if you forget.)
+
+## Other infrastructure
+
+The same pattern can easily be adapted to other infrastructure components that Encore doesn't yet provide built-in support for:
+
+- Horizontally scalable databases like Cassandra, DynamoDB, BigTable, and so on
+- Document or graph databases like MongoDB or Neo4j
+- Other cloud primitives like queues, object storage buckets, and more
+- Or really any cloud services or APIs you can think of
+
+In this way you can easily integrate Encore with anything you want.
diff --git a/docs/go/primitives/cron-jobs.md b/docs/go/primitives/cron-jobs.md
new file mode 100644
index 0000000000..02a64f12f1
--- /dev/null
+++ b/docs/go/primitives/cron-jobs.md
@@ -0,0 +1,104 @@
+---
+seotitle: Create recurring tasks with Encore's Cron Jobs API
+seodesc: Learn how to create periodic and recurring tasks in your backend application using Encore's Cron Jobs API.
+title: Cron Jobs
+subtitle: Run recurring and scheduled tasks
+infobox: {
+ title: "Cron Jobs",
+ import: "encore.dev/cron",
+ example_link: "/docs/tutorials/uptime"
+}
+lang: go
+---
+
+When you need to run periodic and recurring tasks, Encore.go provides a declarative way of using Cron Jobs.
+
+When a Cron Job is defined in your application, Encore automatically calls your specified API according to the defined schedule. This eliminates the need for infrastructure maintenance, as Encore manages scheduling, monitoring, and execution of Cron Jobs.
+
+
+
+Cron Jobs do not run when developing locally or in [Preview Environments](/docs/platform/deploy/preview-environments), but you can always call the API manually to test the behavior.
+
+
+
+
+
+## Defining a Cron Job
+
+To define a Cron Job, import the `encore.dev/cron` [package](https://pkg.go.dev/encore.dev/cron),
+and call the `cron.NewJob()` function and store it as a package-level variable.
+
+### Example
+
+```go
+import "encore.dev/cron"
+
+// Send a welcome email to everyone who signed up in the last two hours.
+var _ = cron.NewJob("welcome-email", cron.JobConfig{
+ Title: "Send welcome emails",
+ Every: 2 * cron.Hour,
+ Endpoint: SendWelcomeEmail,
+})
+
+// SendWelcomeEmail emails everyone who signed up recently.
+// It's idempotent: it only sends a welcome email to each person once.
+//encore:api private
+func SendWelcomeEmail(ctx context.Context) error {
+ // ...
+ return nil
+}
+```
+
+The `"welcome-email"` argument to `cron.NewJob` is a unique ID you give to each Cron Job.
+If you later refactor the code and move the Cron Job definition to another package,
+we use this ID to keep track that it's the same Cron Job and not a different one.
+
+When this code gets deployed Encore will automatically register the Cron Job in Encore Cloud
+and begin calling the `SendWelcomeEmail` API every two hours.
+
+The Encore Cloud dashboard provides a convenient user interface for monitoring and debugging
+Cron Job executions across all your environments via the `Cron Jobs` menu item:
+
+
+
+## Keep in mind when using Cron Jobs
+
+- Cron Jobs do not execute during local development or in [Preview Environments](/docs/platform/deploy/preview-environments). However, you can manually invoke the API to test its behavior.
+- In Encore Cloud, Cron Job executions are limited to **once every hour**, with the exact minute randomized within that hour for users on the Free Tier. To enable more frequent executions or to specify the exact minute within the hour, consider [deploying to your own cloud](/docs/platform/deploy/own-cloud) or upgrading to the [Pro plan](/pricing).
+- Both public and private APIs are supported for Cron Jobs.
+- Ensure that the API endpoints used in Cron Jobs are idempotent, as they may be called multiple times under certain network conditions.
+- The API endpoints used in Cron Jobs must not take any request parameters. That is, their signatures must be `func(context.Context) error` or `func(context.Context) (*T, error)`.
+
+## Cron schedules
+
+Above we used the `Every` field, which executes the Cron Job on a periodic basis.
+It runs around the clock each day, starting at midnight (UTC).
+
+In order to ensure a consistent delay between each run, the interval used **must divide 24 hours evenly**.
+For example, `10 * cron.Minute` and `6 * cron.Hour` are both allowed (since 24 hours is evenly divisible by both),
+whereas `7 * cron.Hour` is not (since 24 is not evenly divisible by 7).
+The Encore compiler will catch this and give you a helpful error at compile-time if you try to use an invalid interval.
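+
+For example, a sketch of a job on a valid three-hour interval (the job name and endpoint are illustrative):
+
+```go
+// Runs at 00:00, 03:00, 06:00, and so on (UTC) each day,
+// since 24 hours is evenly divisible by 3.
+var _ = cron.NewJob("periodic-sync", cron.JobConfig{
+ Title:    "Periodic sync",
+ Every:    3 * cron.Hour,
+ Endpoint: Sync,
+})
+```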
+
+### Cron expressions
+
+For more advanced use cases, such as running a Cron Job on a specific day of the month, or a specific week day, or similar,
+the `Every` field is not expressive enough.
+
+For these use cases, Encore provides full support for [Cron expressions](https://en.wikipedia.org/wiki/Cron) by using the `Schedule` field
+instead of the `Every` field.
+
+Cron expressions allow you to define precise schedules for your tasks, including specific days of the week, specific hours of the day, and more. Note that all times are expressed in UTC.
+
+For example:
+
+```go
+// Run the monthly accounting sync job at 4am (UTC) on the 15th day of each month.
+var _ = cron.NewJob("accounting-sync", cron.JobConfig{
+ Title: "Cron Job Example",
+ Schedule: "0 4 15 * *",
+ Endpoint: AccountingSync,
+})
+```
diff --git a/docs/go/primitives/database-extensions.md b/docs/go/primitives/database-extensions.md
new file mode 100644
index 0000000000..384fd2d96e
--- /dev/null
+++ b/docs/go/primitives/database-extensions.md
@@ -0,0 +1,80 @@
+---
+seotitle: Pre-installed PostgreSQL extensions
+seodesc: See the list of pre-installed PostgreSQL extensions available when using Encore
+title: PostgreSQL Extensions
+subtitle: Pre-installed extensions
+infobox: {
+ title: "SQL Databases",
+ import: "encore.dev/storage/sqldb"
+}
+lang: go
+---
+
+Encore uses the [encoredotdev/postgres](https://github.com/encoredev/postgres-image) docker image for local development, CI/CD, and for databases hosted on Encore Cloud.
+
+The docker image ships with the following PostgreSQL extensions pre-installed and available for use (via `CREATE EXTENSION`):
+
+| Extension | Version | Description |
+| ------------------------------ | ------- | ------------------------------------------------------------------------------------------------------------------- |
+| refint | 1.0 | functions for implementing referential integrity (obsolete) |
+| pg_buffercache | 1.3 | examine the shared buffer cache |
+| pg_freespacemap | 1.2 | examine the free space map (FSM) |
+| plpgsql | 1.0 | PL/pgSQL procedural language |
+| citext | 1.6 | data type for case-insensitive character strings |
+| adminpack | 2.1 | administrative functions for PostgreSQL |
+| moddatetime | 1.0 | functions for tracking last modification time |
+| amcheck | 1.3 | functions for verifying relation integrity |
+| seg | 1.4 | data type for representing line segments or floating-point intervals |
+| pg_stat_statements | 1.10 | track planning and execution statistics of all SQL statements executed |
+| pg_trgm | 1.6 | text similarity measurement and index searching based on trigrams |
+| isn | 1.2 | data types for international product numbering standards |
+| btree_gist | 1.7 | support for indexing common datatypes in GiST |
+| intarray | 1.5 | functions, operators, and index support for 1-D arrays of integers |
+| pg_surgery | 1.0 | extension to perform surgery on a damaged relation |
+| uuid-ossp | 1.1 | generate universally unique identifiers (UUIDs) |
+| insert_username | 1.0 | functions for tracking who changed a table |
+| bloom | 1.0 | bloom access method - signature file based index |
+| pgcrypto | 1.3 | cryptographic functions |
+| dblink | 1.2 | connect to other PostgreSQL databases from within a database |
+| tsm_system_rows | 1.0 | TABLESAMPLE method which accepts number of rows as a limit |
+| pg_prewarm | 1.2 | prewarm relation data |
+| old_snapshot | 1.0 | utilities in support of old_snapshot_threshold |
+| pageinspect | 1.11 | inspect the contents of database pages at a low level |
+| intagg | 1.1 | integer aggregator and enumerator (obsolete) |
+| pg_visibility | 1.2 | examine the visibility map (VM) and page-level visibility info |
+| cube | 1.5 | data type for multidimensional cubes |
+| tablefunc | 1.0 | functions that manipulate whole tables, including crosstab |
+| xml2 | 1.1 | XPath querying and XSLT |
+| fuzzystrmatch | 1.1 | determine similarities and distance between strings |
+| pg_walinspect | 1.0 | functions to inspect contents of PostgreSQL Write-Ahead Log |
+| btree_gin | 1.3 | support for indexing common datatypes in GIN |
+| sslinfo | 1.2 | information about SSL certificates |
+| tcn | 1.0 | Triggered change notifications |
+| hstore | 1.8 | data type for storing sets of (key, value) pairs |
+| dict_int | 1.0 | text search dictionary template for integers |
+| earthdistance | 1.1 | calculate great-circle distances on the surface of the Earth |
+| file_fdw | 1.0 | foreign-data wrapper for flat file access |
+| autoinc | 1.0 | functions for autoincrementing fields |
+| ltree | 1.2 | data type for hierarchical tree-like structures |
+| unaccent | 1.1 | text search dictionary that removes accents |
+| pgrowlocks | 1.2 | show row-level locking information |
+| tsm_system_time | 1.0 | TABLESAMPLE method which accepts time in milliseconds as a limit |
+| dict_xsyn | 1.0 | text search dictionary template for extended synonym processing |
+| pgstattuple | 1.5 | show tuple-level statistics |
+| postgres_fdw | 1.1 | foreign-data wrapper for remote PostgreSQL servers |
+| lo | 1.1 | Large Object maintenance |
+| postgis_sfcgal-3 | 3.4.2 | PostGIS SFCGAL functions |
+| address_standardizer_data_us-3 | 3.4.2 | Address Standardizer US dataset example |
+| address_standardizer-3 | 3.4.2 | Used to parse an address into constituent elements. Generally used to support geocoding address normalization step. |
+| postgis_topology-3 | 3.4.2 | PostGIS topology spatial types and functions |
+| postgis-3 | 3.4.2 | PostGIS geometry and geography spatial types and functions |
+| postgis_raster-3 | 3.4.2 | PostGIS raster types and functions |
+| postgis_tiger_geocoder-3 | 3.4.2 | PostGIS tiger geocoder and reverse geocoder |
+| vector | 0.7.0 | vector data type and ivfflat and hnsw access methods |
+| postgis | 3.4.2 | PostGIS geometry and geography spatial types and functions |
+| address_standardizer | 3.4.2 | Used to parse an address into constituent elements. Generally used to support geocoding address normalization step. |
+| postgis_topology | 3.4.2 | PostGIS topology spatial types and functions |
+| postgis_tiger_geocoder | 3.4.2 | PostGIS tiger geocoder and reverse geocoder |
+| address_standardizer_data_us | 3.4.2 | Address Standardizer US dataset example |
+| postgis_sfcgal | 3.4.2 | PostGIS SFCGAL functions |
+| postgis_raster | 3.4.2 | PostGIS raster types and functions |
diff --git a/docs/go/primitives/database-troubleshooting.md b/docs/go/primitives/database-troubleshooting.md
new file mode 100644
index 0000000000..2d2ea38696
--- /dev/null
+++ b/docs/go/primitives/database-troubleshooting.md
@@ -0,0 +1,52 @@
+---
+seotitle: Troubleshooting SQL databases
+seodesc: Advice on troubleshooting SQL databases in Encore.go
+title: Troubleshooting Databases
+subtitle: Advice on troubleshooting SQL databases in Encore.go
+infobox: {
+ title: "SQL Databases",
+ import: "encore.dev/storage/sqldb"
+}
+lang: go
+---
+
+When you run your application locally with `encore run`, Encore provisions local databases using [Docker](https://docker.com). If this fails with a database error, it can often be resolved by making sure you have Docker installed and running, or by restarting the Encore daemon using `encore daemon`.
+
+If this does not resolve the issue, here are steps to resolve common errors:
+
+**Error: sqldb: unknown database**
+
+This error is often caused by a problem with the initial migration file, such as incorrect naming or location.
+
+- Verify that you've [created the migration file](/docs/go/primitives/databases#defining-a-database-schema) correctly, then try `encore run` again.
+
+**Error: could not connect to the database**
+
+When you can't connect to the database in your local environment, there's likely an issue with Docker:
+
+- Make sure that you have [Docker](https://docker.com) installed and running, then try `encore run` again.
+- If this fails, restart the Encore daemon by running `encore daemon`, then try `encore run` again.
+
+**Error: Creating PostgreSQL database cluster Failed**
+
+This means Encore was not able to create the database. Often this is due to a problem with Docker.
+
+- Check if you have permission to access Docker by running `docker images`.
+- Set the correct permissions with `sudo usermod -aG docker $USER` (Learn more in the [Docker documentation](https://docs.docker.com/engine/install/linux-postinstall/))
+- Then log out and log back in so that your group membership is refreshed.
+
+**Error: unable to save docker image**
+
+This error is often caused by a problem with Docker.
+
+- Make sure that you have [Docker](https://docker.com) installed and running.
+- In Docker, open **Settings > Advanced** and make sure that the setting `Allow the default Docker socket to be used` is checked.
+- If it still fails, restart the Encore daemon by running `encore daemon`, then try `encore run` again.
+
+**Error: unable to add CA to cert pool**
+
+This error is commonly caused by the presence of the file `$HOME/.postgresql/root.crt` on the filesystem.
+When this file is present the PostgreSQL client library will assume the database server has that root certificate,
+which will cause the above error.
+
+- Remove or rename the file, then try `encore run` again.
diff --git a/docs/go/primitives/databases.md b/docs/go/primitives/databases.md
new file mode 100644
index 0000000000..3658a0db28
--- /dev/null
+++ b/docs/go/primitives/databases.md
@@ -0,0 +1,238 @@
+---
+seotitle: Using SQL databases for your backend application
+seodesc: Learn how to use SQL databases for your backend application. See how to provision, migrate, and query PostgreSQL databases using Go and Encore.
+title: Using SQL databases
+subtitle: Provisioning, migrating, querying
+infobox: {
+ title: "SQL Databases",
+ import: "encore.dev/storage/sqldb",
+ example_link: "/docs/tutorials/uptime"
+}
+lang: go
+---
+
+Encore treats SQL databases as logical resources and natively supports **PostgreSQL** databases.
+
+## Creating a database
+
+To create a database, import `encore.dev/storage/sqldb` and call `sqldb.NewDatabase`, assigning the result to a package-level variable.
+Databases must be created from within an [Encore service](/docs/go/primitives/services).
+
+For example:
+
+```
+-- todo/db.go --
+package todo
+
+// Create the todo database and assign it to the "tododb" variable
+var tododb = sqldb.NewDatabase("todo", sqldb.DatabaseConfig{
+ Migrations: "./migrations",
+})
+
+// Then, query the database using db.QueryRow, db.Exec, etc.
+-- todo/migrations/1_create_table.up.sql --
+CREATE TABLE todo_item (
+ id BIGSERIAL PRIMARY KEY,
+ title TEXT NOT NULL,
+ done BOOLEAN NOT NULL DEFAULT false
+ -- etc...
+);
+```
+
+As seen above, the `sqldb.DatabaseConfig` specifies the directory containing the database migration files, which is how you define the database schema.
+See the [Defining the database schema](#defining-the-database-schema) section below for more details.
+
+With this code in place, Encore will automatically create the database using [Docker](https://docker.com) when you run the command `encore run` in your local environment. Make sure Docker is installed and running on your machine before running `encore run`.
+
+
+
+If your application is already running when you define a new database, you will need to stop and restart `encore run`. This is necessary for Encore to create the new database using Docker.
+
+
+
+
+
+## Database Migrations
+
+Encore automatically handles `up` migrations, while `down` migrations must be run manually. Each `up` migration runs sequentially, expressing changes in the database schema from the previous migration.
+
+### Naming Conventions
+
+**File Name Format:** Migration files must start with a number followed by an underscore (`_`), and must increase sequentially. Each file name must end with `.up.sql`.
+
+**Examples:**
+- `1_first_migration.up.sql`
+- `2_second_migration.up.sql`
+- `3_migration_name.up.sql`
+
+You can also prefix migration files with leading zeroes for better ordering in the editor (e.g., `0001_migration.up.sql`).
+
+### Defining the Database Schema
+
+The first migration typically defines the initial table structure. For instance, a `todo` service might create `todo/migrations/1_create_table.up.sql` with the following content:
+
+```sql
+CREATE TABLE todo_item (
+ id BIGSERIAL PRIMARY KEY,
+ title TEXT NOT NULL,
+ done BOOLEAN NOT NULL DEFAULT false
+);
+```
+
+### Migration File Structure
+
+Migration files are created in a `migrations` directory within an Encore service package. Each file is named `<number>_<name>.up.sql`, where `<number>` is a sequence number for ordering and `<name>` describes the migration.
+
+**Example Directory Structure:**
+
+```
+/my-app
+├── encore.app // ... and other top-level project files
+│
+└── todo // todo service (a Go package)
+ ├── migrations // todo service db migrations (directory)
+ │ ├── 1_create_table.up.sql // todo service db migration
+ │ └── 2_add_field.up.sql // todo service db migration
+ ├── todo.go // todo service code
+ └── todo_test.go // tests for todo service
+```
+
+## Inserting data into databases
+
+Once you have created the database using `var mydb = sqldb.NewDatabase(...)` you can start inserting data into the database
+by calling methods on the `mydb` variable.
+
+The interface is similar to that of the Go standard library's `database/sql` package.
+Learn more in the [package docs](https://pkg.go.dev/encore.dev/storage/sqldb).
+
+One way of inserting data is with a helper function that uses the package function `sqldb.Exec`.
+For example, to insert a single todo item using the example schema above, we can use the following helper function `insert`:
+
+```
+-- todo/insert.go --
+// insert inserts a todo item into the database.
+func insert(ctx context.Context, id, title string, done bool) error {
+ _, err := tododb.Exec(ctx, `
+ INSERT INTO todo_item (id, title, done)
+ VALUES ($1, $2, $3)
+ `, id, title, done)
+ return err
+}
+-- todo/db.go --
+package todo
+
+// Create the todo database and assign it to the "tododb" variable
+var tododb = sqldb.NewDatabase("todo", sqldb.DatabaseConfig{
+ Migrations: "./migrations",
+})
+
+// Then, query the database using db.QueryRow, db.Exec, etc.
+-- todo/migrations/1_create_table.up.sql --
+CREATE TABLE todo_item (
+ id BIGSERIAL PRIMARY KEY,
+ title TEXT NOT NULL,
+ done BOOLEAN NOT NULL DEFAULT false
+ -- etc...
+);
+```
+
+## Querying databases
+
+To query a database in your application, you similarly need to import `encore.dev/storage/sqldb` in your service package or sub-package.
+
+For example, to read a single todo item in the example schema above, we can use `sqldb.QueryRow`:
+
+```go
+var item struct {
+ ID int64
+ Title string
+ Done bool
+}
+err := tododb.QueryRow(ctx, `
+ SELECT id, title, done
+ FROM todo_item
+ LIMIT 1
+`).Scan(&item.ID, &item.Title, &item.Done)
+```
+
+If `QueryRow` does not find a matching row, it reports an error that can be checked
+by importing the standard library `errors` package and calling `errors.Is(err, sqldb.ErrNoRows)`.
+
+Learn more in the [package docs](https://pkg.go.dev/encore.dev/storage/sqldb).
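+
+To read multiple rows, you can use `Query`, which mirrors the standard library's `database/sql`. A minimal sketch, assuming the todo schema above:
+
+```go
+rows, err := tododb.Query(ctx, `SELECT id, title, done FROM todo_item`)
+if err != nil {
+ return err
+}
+defer rows.Close()
+
+for rows.Next() {
+ var id int64
+ var title string
+ var done bool
+ if err := rows.Scan(&id, &title, &done); err != nil {
+  return err
+ }
+ // ... use the row data ...
+}
+return rows.Err()
+```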
+
+## Provisioning databases
+
+Encore automatically provisions databases to match what your application requires.
+When you [define a database](#creating-a-database), Encore will provision the database at your next deployment.
+
+Encore provisions databases in an appropriate way depending on the environment.
+When running locally, Encore creates a database cluster using [Docker](https://www.docker.com/).
+In the cloud, it depends on the [environment type](/docs/platform/deploy/environments#environment-types):
+
+- In `production` environments, the database is provisioned through the Managed SQL Database
+ service offered by the chosen cloud provider.
+- In `development` environments, the database is provisioned as a Kubernetes deployment
+ with a persistent disk attached.
+
+See exactly what is provisioned for each cloud provider, and each environment type, in the [infrastructure documentation](/docs/platform/infrastructure/infra).
+
+## Connecting to databases
+
+It's often useful to be able to connect to the database from outside the backend application, for example for scripts, ad-hoc querying, or dumping data for analysis.
+
+Currently Encore does not expose user credentials for databases in the local environment or for environments on Encore Cloud. You can use a connection string to connect instead; see below.
+
+### Using the Encore CLI
+
+Encore's CLI comes with built-in support for connecting to databases:
+
+* `encore db shell <database-name> [--env=<name>]` opens a [psql](https://www.postgresql.org/docs/current/app-psql.html)
+ shell to the database named `<database-name>` in the given environment. Leaving out `--env` defaults to the local development environment. `encore db shell` defaults to read-only permissions. Use the `--write`, `--admin` and `--superuser` flags to modify which permissions you connect with.
+
+* `encore db conn-uri <database-name> [--env=<name>]` outputs a connection string for the database named `<database-name>`.
+ When specifying a cloud environment, the connection string is temporary. Leaving out `--env` defaults to the local development environment.
+
+* `encore db proxy [--env=<name>]` sets up a local proxy that forwards any incoming connection
+ to the databases in the specified environment.
+ Leaving out `--env` defaults to the local development environment.
+
+See `encore help db` for more information on database management commands.
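+
+For example, a couple of hypothetical invocations (the database and environment names are illustrative):
+
+```shell
+# Open a read-only psql shell to the "todo" database in the staging environment
+$ encore db shell todo --env=staging
+
+# Print a connection string for the local "todo" database
+$ encore db conn-uri todo
+```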
+
+### Using database user credentials
+
+For cloud environments on AWS/GCP you can view database user credentials (created by Encore when provisioning databases) via the Encore Cloud dashboard:
+
+* Open your app in the [Encore Cloud dashboard](https://app.encore.cloud), navigate to the **Infrastructure** page for the appropriate environment, and locate the `USERS` section within the relevant **Database Cluster**.
+
+## Handling migration errors
+
+When Encore applies database migrations, there's always a possibility the migrations don't apply cleanly.
+
+This can happen for many reasons:
+- There's a problem with the SQL syntax in the migration
+- You tried to add a `UNIQUE` constraint but the values in the table aren't actually unique
+- The existing database schema didn't look like you thought it did, so the database object you tried to change doesn't actually exist
+- ... and so on
+
+If that happens, Encore rolls back the migration. If it happens during a cloud deployment, the deployment is aborted.
+Once you fix the problem, re-run `encore run` (locally) or push the updated code (in the cloud) to try again.
+
+Encore tracks which migrations have been applied in the `schema_migrations` table:
+
+```sql
+database=# \d schema_migrations
+ Table "public.schema_migrations"
+ Column | Type | Collation | Nullable | Default
+---------+---------+-----------+----------+---------
+ version | bigint | | not null |
+ dirty | boolean | | not null |
+Indexes:
+ "schema_migrations_pkey" PRIMARY KEY, btree (version)
+```
+
+The `version` column tracks which migration was last applied. If you wish to skip a migration or re-run a migration,
+change the value in this column. For example, to re-run the last migration, run `UPDATE schema_migrations SET version = version - 1;`.
+*Note that Encore does not use the `dirty` flag by default.*
diff --git a/docs/go/primitives/defining-apis.md b/docs/go/primitives/defining-apis.md
new file mode 100644
index 0000000000..dfdfbf5e40
--- /dev/null
+++ b/docs/go/primitives/defining-apis.md
@@ -0,0 +1,429 @@
+---
+seotitle: Defining type-safe APIs with Encore.go
+seodesc: Learn how to create APIs for your cloud backend application using Go and Encore.go
+title: Defining Type-Safe APIs
+subtitle: Simplifying type-safe API development
+lang: go
+---
+
+Encore.go enables you to create type-safe APIs from regular Go functions.
+
+To define an API, add the `//encore:api` annotation to a function in your code.
+This tells Encore that the function is an API endpoint and Encore will automatically generate the necessary boilerplate at compile-time.
+
+In the example below, we define the API endpoint `Ping`, in the `hello` service, which gets exposed as `hello.Ping`.
+
+```go
+package hello // service name
+
+//encore:api public
+func Ping(ctx context.Context, params *PingParams) (*PingResponse, error) {
+ msg := fmt.Sprintf("Hello, %s!", params.Name)
+ return &PingResponse{Message: msg}, nil
+}
+```
+
+
+
+## Access controls
+
+When you define an API, you have three options for how it can be accessed:
+
+* `//encore:api public` – defines a public API that anybody on the internet can call.
+* `//encore:api private` – defines a private API that is never accessible to the outside world. It can only be called from other services in your app and via cron jobs.
+* `//encore:api auth` – defines a public API that anybody can call, but requires valid authentication.
+
+You can optionally send in auth data to `public` and `private` APIs, in which case the auth handler will be used. When used for `private` APIs, they are still not accessible from the outside world.
+
+For more on defining APIs that require authentication, see the [authentication guide](/docs/go/develop/auth).
+
+## API Schemas
+
+### Request and response schemas
+
+In the example above we defined an API that uses request and response schemas. The request data is of type `PingParams` and the response data of type `PingResponse`. That means we need to define them like so:
+
+```go
+package hello // service name
+
+// PingParams is the request data for the Ping endpoint.
+type PingParams struct {
+ Name string
+}
+
+// PingResponse is the response data for the Ping endpoint.
+type PingResponse struct {
+ Message string
+}
+
+// Ping is an API endpoint that responds with a simple response.
+// This is exposed as "hello.Ping".
+//encore:api public
+func Ping(ctx context.Context, params *PingParams) (*PingResponse, error) {
+ msg := fmt.Sprintf("Hello, %s!", params.Name)
+ return &PingResponse{Message: msg}, nil
+}
+```
+Request and response schemas are both optional. There are four different ways of defining an API:
+
+**Using both request and response data:**
+`func Foo(ctx context.Context, p *Params) (*Response, error)`
+
+**Only returning a response:**
+`func Foo(ctx context.Context) (*Response, error)`
+
+**With only request data:**
+`func Foo(ctx context.Context, p *Params) error`
+
+**Without any request or response data:**
+`func Foo(ctx context.Context) error`
+
+As you can see, two parts are always present: the `ctx context.Context` parameter and the `error` return value.
+
+The `ctx` parameter is used for *cancellation*. It lets you detect when the caller is no longer interested in the result,
+and lets you abort the request processing and save resources that nobody needs.
+[Learn more about contexts on the Go blog](https://blog.golang.org/context).
+
+The `error` return type is always required because APIs can always fail from the caller's perspective.
+Therefore, even though our simple `Ping` API endpoint above never fails in its implementation, the caller may still see an error if, say, the service crashes or the network goes down and the service cannot be reached.
+
+This approach is simple but very powerful. It lets Encore use [static analysis](/docs/go/concepts/application-model)
+to understand the request and response schemas of all your APIs, which enables Encore to automatically generate API documentation, type-safe API clients, and much more.
+
+### Request and response data types
+
+Request and response data types are structs (or pointers to structs) with optional field tags, which Encore uses to encode API requests to HTTP messages. The same struct can be used for requests and responses, but the `query` tag is ignored when generating responses.
+
+All tags except `json` are ignored for nested fields, which means you can only define `header` and `query` parameters for root-level fields.
+
+For example, this struct:
+```go
+type NestedRequestResponse struct {
+ Header string `header:"X-Header"`// this field will be read from the http header
+ Query string `query:"query"`// this field will be read from the query string
+ Body1 string `json:"body1"`
+ Nested struct {
+ Header2 string `header:"X-Header2"`// this field will be read from the body
+ Query2 string `query:"query2"`// this field will be read from the body
+ Body2 string `json:"body2"`
+ } `json:"nested"`
+}
+```
+
+Would be unmarshalled from this request:
+
+```output
+POST /example?query=a%20query HTTP/1.1
+Content-Type: application/json
+X-Header: A header
+
+{
+ "body1": "a body",
+ "nested": {
+ "Header2": "not a header",
+ "Query2": "not a query",
+ "body2": "a nested body"
+ }
+}
+
+```
+
+And marshalled to this response:
+
+```output
+HTTP/1.1 200 OK
+Content-Type: application/json
+X-Header: A header
+
+{
+ "Query": "not a query",
+ "body1": "a body",
+ "nested": {
+ "Header2": "not a header",
+ "Query2": "not a query",
+ "body2": "a nested body"
+ }
+}
+
+```
+
+### Path parameters
+
+Path parameters are specified by the `path` field in the `//encore:api` annotation.
+To specify a placeholder variable, use `:name` and add a function parameter with the same name to the function signature.
+Encore parses the incoming request URL and makes sure it matches the type of the parameter. The last segment of the path
+can be parsed as a wildcard parameter by using `*name` with a matching function parameter.
+
+```go
+// GetBlogPost retrieves a blog post by id.
+//encore:api public method=GET path=/blog/:id/*path
+func GetBlogPost(ctx context.Context, id int, path string) (*BlogPost, error) {
+ // Use id to query database...
+}
+```
+
+### Fallback routes
+
+Encore supports defining fallback routes that will be called if no other endpoint matches the request,
+using the syntax `path=/!fallback`.
+
+This is often useful when migrating an existing backend service over to Encore, as it allows you to gradually
+migrate endpoints over to Encore while routing the remaining endpoints to the existing HTTP router using
+a raw endpoint with a fallback route.
+
+For example:
+
+```go
+//encore:service
+type Service struct {
+ oldRouter *gin.Engine // existing HTTP router
+}
+
+// Route all requests to the existing HTTP router if no other endpoint matches.
+//encore:api public raw path=/!fallback
+func (s *Service) Fallback(w http.ResponseWriter, req *http.Request) {
+ s.oldRouter.ServeHTTP(w, req)
+}
+```
+
+### Headers
+
+Headers are defined by the `header` field tag, which can be used in both request and response data types. The tag name is used to translate between the struct field and http headers.
+In the example below, the `Language` field of `ListBlogPost` will be fetched from the
+`Accept-Language` HTTP header.
+
+```go
+type ListBlogPost struct {
+ Language string `header:"Accept-Language"`
+ Author string // Not a header
+}
+```
+
+### Cookies
+
+Cookies can be set in the response by using the `header` tag with the `Set-Cookie` header name.
+
+```go
+type LoginResponse struct {
+ SessionID string `header:"Set-Cookie"`
+}
+
+//encore:api public method=POST path=/login
+func Login(ctx context.Context) (*LoginResponse, error) {
+ return &LoginResponse{SessionID: "session=123"}, nil
+}
+```
+
+The cookies can then be read using e.g. [structured auth data](/docs/go/develop/auth#accepting-structured-auth-information).
+
+### Query parameters
+
+For `GET`, `HEAD` and `DELETE` requests, parameters are read from the query string by default.
+The query parameter name defaults to the [snake-case](https://en.wikipedia.org/wiki/Snake_case)
+encoded name of the corresponding struct field (e.g. BlogPost becomes blog_post).
+
+The `query` field tag can be used to parse a field from the query string for other HTTP methods (e.g. POST) and to override the default parameter name.
+
+Query strings are not supported in HTTP responses and therefore `query` tags in response types are ignored.
+
+In the example below, the `PageLimit` field will be read from the `limit` query
+parameter, whereas the `Author` field will be parsed from the query string (as `author`) only if the method of
+the request is `GET`, `HEAD` or `DELETE`.
+
+```go
+type ListBlogPost struct {
+ PageLimit int `query:"limit"` // always a query parameter
+ Author string // query if GET, HEAD or DELETE, otherwise body parameter
+}
+```
+
+When fetching data with `GET` endpoints, it's common to receive additional parameters for optional behavior, like filtering a list or changing the sort order.
+
+When you use a struct type as the last argument in the function signature,
+Encore automatically parses these fields from the HTTP query string (for the `GET`, `HEAD`, and `DELETE` methods).
+
+For example, if you want to have a `ListBlogPosts` endpoint:
+
+```go
+type ListParams struct {
+ Limit uint // number of blog posts to return
+ Offset uint // number of blog posts to skip, for pagination
+}
+
+type ListResponse struct {
+ Posts []*BlogPost
+}
+
+//encore:api public method=GET path=/blog
+func ListBlogPosts(ctx context.Context, opts *ListParams) (*ListResponse, error) {
+ // Use limit and offset to query database...
+}
+```
+
+This could then be queried as `/blog?limit=10&offset=20`.
+
+Query parameters are more limited than structured JSON data, and can only consist of basic types (`string`, `bool`, integer and floating point numbers), [Encore's UUID types](https://pkg.go.dev/encore.dev/types/uuid#UUID), and slices of those types.
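+
+For example, a slice field is parsed from a repeated query parameter. A minimal sketch (the field names are illustrative):
+
+```go
+type SearchParams struct {
+ Tags []string `query:"tag"` // parsed from e.g. /search?tag=go&tag=encore
+}
+```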
+
+
+### Body parameters
+
+Encore will default to reading request parameters from the body (as JSON) for all HTTP methods except `GET`, `HEAD` or
+`DELETE`. The name of the body parameter defaults to the field name, but can be overridden by the
+`json` tag. Response fields will be serialized as JSON in the HTTP body unless the `header` tag is set.
+
+There is no tag to force a field to be read from the body, as some infrastructure entities
+do not support body content in `GET`, `HEAD` or `DELETE` requests.
+
+```go
+type CreateBlogPost struct {
+ Subject string `json:"limit"` // query if GET, HEAD or DELETE, otherwise body parameter
+ Author string // query if GET, HEAD or DELETE, otherwise body parameter
+}
+```
+
+### Supported types
+The table below lists the data types supported by each HTTP message location.
+
+| Type | Header | Path | Query | Body |
+| --------------- | ------ | ---- | ----- | ---- |
+| bool | X | X | X | X |
+| numeric | X | X | X | X |
+| string | X | X | X | X |
+| time.Time | X | X | X | X |
+| uuid.UUID | X | X | X | X |
+| json.RawMessage | X | X | X | X |
+| list | | | X | X |
+| struct | | | | X |
+| map | | | | X |
+| pointer | | | | X |
+
+
+## Sensitive data
+
+Encore.go comes with built-in tracing functionality that automatically captures request and response payloads
+to simplify debugging. While helpful, that's not always desirable. For instance when a request or response payload contains sensitive data, such
+as API keys or personally identifiable information (PII).
+
+For those use cases Encore supports marking a field as sensitive using the struct tag `encore:"sensitive"`.
+Encore's tracing system will automatically redact fields tagged as sensitive. This works for both individual
+values as well as nested fields.
+
+Note that inputs to [auth handlers](/docs/go/develop/auth) are automatically marked as sensitive and are always redacted.
+
+Raw endpoints lack a schema, which means there's no way to add a struct tag to mark certain data as sensitive.
+For this reason Encore supports tagging the whole API endpoint as sensitive by adding `sensitive` to the `//encore:api` annotation.
+This will cause the whole request and response payload to be redacted, including all request and response headers.
+
+
+
+The `encore:"sensitive"` tag is ignored for local development environments to make development and debugging with the Local Development Dashboard easier.
+
+
+
+
+### Example
+
+```go
+package blog // service name
+import (
+ "time"
+ "encore.dev/types/uuid"
+)
+
+type Updates struct {
+ Author string `json:"author,omitempty"`
+ PublishTime time.Time `json:"publish_time,omitempty"`
+}
+
+// BatchUpdateParams is the request data for the BatchUpdate endpoint.
+type BatchUpdateParams struct {
+ Requester string `header:"X-Requester"`
+ RequestTime time.Time `header:"X-Request-Time"`
+ CurrentAuthor string `query:"author"`
+ Updates *Updates `json:"updates"`
+ MySecretKey string `encore:"sensitive"`
+}
+
+// BatchUpdateResponse is the response data for the BatchUpdate endpoint.
+type BatchUpdateResponse struct {
+ ServedBy string `header:"X-Served-By"`
+ UpdatedIDs []uuid.UUID `json:"updated_ids"`
+}
+
+//encore:api public method=POST path=/section/:sectionID/posts
+func BatchUpdate(ctx context.Context, sectionID string, params *BatchUpdateParams) (*BatchUpdateResponse, error) {
+ // Update blog posts for section
+ return &BatchUpdateResponse{ServedBy: hostname, UpdatedIDs: ids}, nil
+}
+
+```
+
+## REST APIs
+Encore has support for RESTful APIs and lets you easily define resource-oriented API URLs, parse parameters out of them, and more.
+
+To create a REST API, start by defining an endpoint and specify the `method` and `path` fields in the `//encore:api` comment.
+
+To specify a placeholder variable, use `:name` and add a function parameter with the same name to the function signature. Encore parses the incoming request URL and makes sure it matches the type of the parameter.
+
+For example, if you want to have a `GetBlogPost` endpoint that takes a numeric id as a parameter:
+
+```go
+// GetBlogPost retrieves a blog post by id.
+//encore:api public method=GET path=/blog/:id
+func GetBlogPost(ctx context.Context, id int) (*BlogPost, error) {
+ // Use id to query database...
+}
+```
+
+You can also combine path parameters with body payloads. For example, if you want to have an `UpdateBlogPost` endpoint:
+
+```go
+// UpdateBlogPost updates an existing blog post by id.
+//encore:api public method=PUT path=/blog/:id
+func UpdateBlogPost(ctx context.Context, id int, post *BlogPost) error {
+ // Use `post` to update the blog post with the given id.
+}
+```
+
+
+
+You cannot define paths that conflict with each other, including paths
+where the static part can be mistaken for a parameter, e.g. both `/blog` and `/blog/:id` would conflict with `/:username`.
+
+
+
+As a rule of thumb, try to place path parameters at the end of the path and
+prefix them with the service name, e.g:
+
+```
+GET /blog/posts
+GET /blog/posts/:id
+GET /user/profile/:username
+GET /user/me
+```
+
+## Custom HTTP status codes
+
+By default, Encore automatically sets appropriate HTTP status codes for your API responses. We recommend using these default status codes, but there are situations where you might need to set a custom HTTP status code, such as when porting an existing API that clients depend on for specific status codes.
+
+To set a custom HTTP status code, use the `encore:"httpstatus"` struct tag on a field in your response type:
+
+```go
+type Response struct {
+ Message string `json:"message"`
+ Status int `encore:"httpstatus"`
+}
+
+//encore:api public method=GET path=/example
+func Example(ctx context.Context) (*Response, error) {
+ return &Response{
+ Message: "Hello",
+ Status: 201, // HTTP 201 Created
+ }, nil
+}
+```
+
+The field with the `encore:"httpstatus"` tag can be an integer type and should contain a valid HTTP status code value.
diff --git a/docs/go/primitives/insert-test-data-db.md b/docs/go/primitives/insert-test-data-db.md
new file mode 100644
index 0000000000..da4f14faf0
--- /dev/null
+++ b/docs/go/primitives/insert-test-data-db.md
@@ -0,0 +1,46 @@
+---
+seotitle: How to insert test data in a database
+seodesc: Learn how to populate your database with test data using Go and Encore, making testing your backend application much simpler.
+title: Insert test data in a database
+lang: go
+---
+
+When you're developing or testing, it's often useful to seed databases with test data.
+This can be done in several ways depending on your use case.
+
+## Using go:embed
+
+A straightforward way to insert test data is to conditionally insert it on startup, using `go:embed` in combination with Encore's [metadata API](/docs/go/develop/metadata) to control in which environments the data gets inserted, e.g. only in your local environment.
+
+### Example
+
+Create a file with your test data named `fixtures.sql`.
+Then, for the service where you want to insert test data, add the following to one of its `.go` files so that it runs on startup.
+
+```go
+import (
+ "context"
+ _ "embed"
+ "log"
+
+ "encore.dev"
+ "encore.dev/storage/sqldb"
+)
+
+//go:embed fixtures.sql
+var fixtures string
+
+func init() {
+ if encore.Meta().Environment.Cloud == encore.CloudLocal {
+ if _, err := sqldb.Exec(context.Background(), fixtures); err != nil {
+ log.Fatalln("unable to add fixtures:", err)
+ }
+ }
+}
+```
+
+The example above does not prevent inserting duplicate data. Guarding against that is straightforward: make the fixtures idempotent, or track what has been inserted in a database table.
+
+## Populating databases in Encore Cloud's Preview Environments
+
+If you are using Encore Cloud's Preview Environment, it can sometimes be useful to populate new Preview Environments with test data to simplify testing.
+
+The best way to do this depends a bit on your use case, but a common way to do this is by using Encore's [webhooks](/docs/platform/integrations/webhooks) functionality, which provides notifications for when a deployment is completed and includes information about the environment in question.
diff --git a/docs/go/primitives/object-storage.md b/docs/go/primitives/object-storage.md
new file mode 100644
index 0000000000..d18eb464a1
--- /dev/null
+++ b/docs/go/primitives/object-storage.md
@@ -0,0 +1,331 @@
+---
+seotitle: Using Object Storage in your backend application
+seodesc: Learn how you can use Object Storage to store files and unstructured data in your backend application.
+title: Object Storage
+subtitle: Simple and scalable storage APIs for files and unstructured data
+infobox: {
+ title: "Object Storage",
+ import: "encore.dev/storage/objects",
+}
+lang: go
+---
+
+Object Storage is a simple and scalable solution to store files and unstructured data in your backend application.
+
+The most common implementation is Amazon S3 ("Simple Storage Service"), whose semantics are supported by every major cloud provider.
+
+Encore.go provides a cloud-agnostic API for working with Object Storage, allowing you to store and retrieve files with ease. It has support for Amazon S3, Google Cloud Storage, as well as any other S3-compatible implementation (such as DigitalOcean Spaces, MinIO, etc.).
+
+Additionally, when you use Encore's Object Storage API you also automatically get:
+
+* Automatic tracing and instrumentation of all Object Storage operations
+* Built-in local development support, storing objects on the local filesystem
+* Support for integration testing, using a local, in-memory storage backend
+
+## Creating a Bucket
+
+The core of Object Storage is the **Bucket**, which represents a collection of files.
+In Encore, buckets must be declared as package level variables, and cannot be created inside functions.
+Regardless of where you create a bucket, it can be accessed from any service by referencing the variable it's assigned to.
+
+When creating a bucket you can configure additional properties, like whether the objects in the bucket should be versioned.
+
+See the complete specification in the [package documentation](https://pkg.go.dev/encore.dev/storage/objects#NewBucket).
+
+For example, to create a bucket for storing profile pictures:
+
+```go
+package user
+
+import "encore.dev/storage/objects"
+
+var ProfilePictures = objects.NewBucket("profile-pictures", objects.BucketConfig{
+ Versioned: false,
+})
+```
+
+## Uploading files
+
+To upload a file to a bucket, use the `Upload` method on the bucket variable.
+It returns a writer that you can use to write the contents of the file.
+
+To complete the upload, call the `Close` method on the writer.
+To abort the upload, either cancel the context or call the `Abort` method on the writer.
+
+The `Upload` method additionally takes a set of options to configure the upload,
+like setting attributes (`objects.WithUploadAttrs`) or to reject the upload if the
+object already exists (`objects.WithPreconditions`).
+See the [package documentation](https://pkg.go.dev/encore.dev/storage/objects#Bucket.Upload) for more details.
+
+```go
+package user
+
+import (
+ "io"
+ "net/http"
+
+ "encore.dev/beta/auth"
+ "encore.dev/beta/errs"
+ "encore.dev/storage/objects"
+)
+
+var ProfilePictures = objects.NewBucket("profile-pictures", objects.BucketConfig{})
+
+//encore:api auth raw method=POST path=/upload-profile-picture
+func UploadProfilePicture(w http.ResponseWriter, req *http.Request) {
+ // Store the user's profile picture with their user id as the key.
+ userID, _ := auth.UserID()
+ key := string(userID) // Use the user ID as the object key.
+
+ writer := ProfilePictures.Upload(req.Context(), key)
+ _, err := io.Copy(writer, req.Body)
+ if err != nil {
+ // If something went wrong with copying data, abort the upload and return an error.
+ writer.Abort()
+ errs.HTTPError(w, err)
+ return
+ }
+
+ if err := writer.Close(); err != nil {
+ errs.HTTPError(w, err)
+ return
+ }
+
+ // All good! Return a 200 OK.
+ w.WriteHeader(http.StatusOK)
+}
+```
+
+## Downloading files
+
+To download a file from a bucket, use the `Download` method on the bucket variable.
+It returns a reader that you can use to read the contents of the file.
+
+The `Download` method additionally takes a set of options to configure the download,
+like downloading a specific version if the bucket is versioned (`objects.WithVersion`).
+See the [package documentation](https://pkg.go.dev/encore.dev/storage/objects#Bucket.Download) for more details.
+
+For example, to download the user's profile picture and serve it:
+
+```go
+package user
+
+import (
+ "io"
+ "net/http"
+
+ "encore.dev"
+ "encore.dev/beta/errs"
+ "encore.dev/storage/objects"
+)
+
+var ProfilePictures = objects.NewBucket("profile-pictures", objects.BucketConfig{})
+
+//encore:api public raw method=GET path=/profile-picture/:userID
+func ServeProfilePicture(w http.ResponseWriter, req *http.Request) {
+ userID := encore.CurrentRequest().PathParams.Get("userID")
+ reader := ProfilePictures.Download(req.Context(), userID)
+
+ // Did we encounter an error?
+ if err := reader.Err(); err != nil {
+ errs.HTTPError(w, err)
+ return
+ }
+
+ // Assuming all images are JPEGs.
+ w.Header().Set("Content-Type", "image/jpeg")
+ io.Copy(w, reader)
+}
+```
+
+## Listing objects
+
+To list objects in a bucket, use the `List` method on the bucket variable.
+
+It returns an iterator of `(error, *objects.ListEntry)` pairs that you can use
+to easily iterate over the objects in the bucket using a `range` loop.
+
+For example, to list all profile pictures:
+
+```go
+for err, entry := range ProfilePictures.List(ctx, &objects.Query{}) {
+ if err != nil {
+ // Handle error
+ }
+ // Do something with entry
+}
+```
+
+The `*objects.Query` type can be used to limit the number of objects returned,
+or to filter them to a specific key prefix.
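+
+For example, a sketch that lists at most ten objects under a given key prefix (assuming `Query` exposes `Prefix` and `Limit` fields; the prefix value is illustrative):
+
+```go
+// List up to 10 objects whose keys start with "user-".
+for err, entry := range ProfilePictures.List(ctx, &objects.Query{Prefix: "user-", Limit: 10}) {
+ if err != nil {
+ // Handle error
+ break
+ }
+ // Do something with entry
+}
+```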
+
+See the [package documentation](https://pkg.go.dev/encore.dev/storage/objects#Bucket.List) for more details.
+
+## Deleting objects
+
+To delete an object from a bucket, use the `Remove` method on the bucket variable.
+
+For example, to delete a profile picture:
+
+```go
+err := ProfilePictures.Remove(ctx, "my-user-id")
+if err != nil && !errors.Is(err, objects.ErrObjectNotFound) {
+ // Handle error
+}
+```
+
+## Retrieving object attributes
+
+You can retrieve information about an object using the `Attrs` method on the bucket variable.
+It returns the attributes of the object, like its size, content type, and ETag.
+
+For example, to get the attributes of a profile picture:
+
+```go
+attrs, err := ProfilePictures.Attrs(ctx, "my-user-id")
+if errors.Is(err, objects.ErrObjectNotFound) {
+ // Object not found
+} else if err != nil {
+ // Some other error
+}
+// Do something with attrs
+```
+
+For convenience there is also `Exists`, which returns a boolean indicating whether the object exists.
+
+```go
+exists, err := ProfilePictures.Exists(ctx, "my-user-id")
+if err != nil {
+ // Handle error
+} else if !exists {
+ // Object does not exist
+}
+```
+
+## Using Public Buckets
+
+Encore supports creating public buckets where objects can be accessed directly via HTTP/HTTPS without authentication. This is useful for serving static assets like images, videos, or other public files.
+
+To create a public bucket, set `Public: true` in the `BucketConfig`:
+
+```go
+var PublicAssets = objects.NewBucket("public-assets", objects.BucketConfig{
+ Public: true,
+})
+```
+
+Once configured as public, you can get the public URL for any object using the `PublicURL` method:
+
+```go
+// Get the public URL for an object
+url := PublicAssets.PublicURL("path/to/image.jpg")
+
+// The URL can be used directly or shared publicly
+fmt.Println(url) // e.g. https://assets.example.com/path/to/image.jpg
+```
+
+When self-hosting, see how to configure public buckets in the [infrastructure configuration docs](/docs/go/self-host/configure-infra).
+
+When deploying with Encore Cloud, it automatically configures the bucket to be publicly accessible and [configures CDN](/docs/platform/infrastructure/infra#production-infrastructure) for optimal content delivery.
+
+### Using bucket references
+
+Encore uses static analysis to determine which services are accessing each bucket,
+and what operations each service is performing.
+
+That information is used to provision infrastructure correctly,
+render architecture diagrams, and configure IAM permissions.
+
+This means that `*objects.Bucket` variables can't be passed around however you'd like,
+as it makes static analysis impossible in many cases. To work around these restrictions
+Encore allows you to get a "reference" to a bucket that can be passed around any way you want
+by calling `objects.BucketRef`.
+
+To ensure Encore is still aware of which permissions each service needs, the call to `objects.BucketRef`
+must be made from within a service. Additionally, it must pre-declare the permissions it needs;
+those permissions are then assumed to be used by the service.
+
+It looks like this (using the `ProfilePictures` bucket above):
+
+```go
+ref := objects.BucketRef[objects.Downloader](ProfilePictures)
+
+// ref is of type objects.Downloader, which allows downloading.
+```
+
+Encore provides permission interfaces for each operation that can be performed on a bucket:
+
+* `objects.Downloader` for downloading objects
+* `objects.Uploader` for uploading objects
+* `objects.Lister` for listing objects
+* `objects.Attrser` for getting object attributes
+* `objects.Remover` for removing objects
+* `objects.SignedDownloader` for generating signed download URLs for objects
+* `objects.SignedUploader` for generating signed upload URLs for objects
+
+If you need multiple permissions they can be combined by creating an interface
+that embeds the permissions you need.
+
+```go
+type myPerms interface {
+ objects.Downloader
+ objects.Uploader
+}
+ref := objects.BucketRef[myPerms](ProfilePictures)
+```
+
+For convenience Encore provides an `objects.ReadWriter` interface that gives complete read-write access
+with all the permissions above.
+
+See the [package documentation](https://pkg.go.dev/encore.dev/storage/objects#BucketRef) for more details.
+
+## Signed Upload URLs
+
+You can use `SignedUploadURL` to create signed URLs to allow clients to upload content directly
+into the bucket over the internet. The URL is always restricted to one filename, and has a set
+expiration date. Anyone in possession of the URL can upload data under this filename without any
+additional authentication.
+
+```go
+url, err := ProfilePictures.SignedUploadURL(ctx, "my-user-id", objects.WithTTL(2*time.Hour))
+// Pass url to client
+```
+
+The client can now `PUT` to this URL with the content as a binary payload.
+
+```bash
+curl -X PUT --data-binary @/home/me/dog-wizard.jpeg "https://storage.googleapis.com/profile-pictures/my-user-id/?x-goog-signature=b7a1<...>"
+```
+
+### Why signed upload URLs?
+
+Signed URLs are an alternative to accepting the content payload directly in your API. Content
+upload requests are sometimes inconvenient to handle well: they can be long running and very large.
+With signed URLs, the content flows directly into the storage bucket, and only object IDs and
+metadata go through your API service.
+
+The trade-off is that the upload flow becomes more complex from a client point of view.
+
+## Signed Download URLs
+
+You can use `SignedDownloadURL` to create signed URLs to allow clients to download content directly
+from the bucket, even if it's private. The URL is always restricted to one filename, and has a set
+expiration date. Anyone in possession of the URL can download the file without any additional
+authentication.
+
+```go
+url, err := Documents.SignedDownloadURL(ctx, "letter-1234", objects.WithTTL(2*time.Hour))
+// Pass url to client
+```
+
+### Why signed download URLs?
+
+Similar to the upload case, signed download URLs are a way to avoid handling large files or bulk
+traffic through your API. With signed URLs, the content flows directly from the storage bucket,
+and only object IDs and metadata go through your API service.
+
+Note: unless the content is private, prefer serving URLs with `PublicURL()` over signed URLs.
+Public URLs go over CDN, which is typically significantly more performant and cost effective.
+
diff --git a/docs/go/primitives/pubsub.md b/docs/go/primitives/pubsub.md
new file mode 100644
index 0000000000..708b9a3559
--- /dev/null
+++ b/docs/go/primitives/pubsub.md
@@ -0,0 +1,378 @@
+---
+seotitle: Using PubSub in your backend application
+seodesc: Learn how you can use PubSub as an asynchronous message queue in your backend application, a great approach for decoupling services for better reliability.
+title: Pub/Sub
+subtitle: Decoupling services and building asynchronous systems
+infobox: {
+ title: "Pub/Sub Messaging",
+ import: "encore.dev/pubsub",
+ example_link: "/docs/tutorials/uptime"
+}
+lang: go
+---
+
+Publishers & Subscribers (Pub/Sub) let you build systems that communicate by broadcasting events asynchronously. This is a great way to decouple services for better reliability and responsiveness.
+
+Encore's Backend Framework lets you use Pub/Sub in a cloud-agnostic declarative fashion. At deployment, Encore automatically [provisions the required infrastructure](/docs/platform/infrastructure/infra).
+
+## Creating a Topic
+
+The core of Pub/Sub is the **Topic**, a named channel on which you publish events.
+Topics must be declared as package level variables, and cannot be created inside functions.
+Regardless of where you create a topic, it can be published to from any service, and subscribed to from any service.
+
+When creating a topic, it must be given an event type, a unique name, and a configuration to define its behaviour. See the complete specification in the [package documentation](https://pkg.go.dev/encore.dev/pubsub#NewTopic).
+
+For example, to create a topic with events about user signups:
+
+```go
+package user
+
+import "encore.dev/pubsub"
+
+type SignupEvent struct{ UserID int }
+
+var Signups = pubsub.NewTopic[*SignupEvent]("signups", pubsub.TopicConfig{
+ DeliveryGuarantee: pubsub.AtLeastOnce,
+})
+```
+
+
+
+### At-least-once delivery
+
+The above example configures the topic to ensure that, for each subscription, events will be delivered _at least once_.
+
+This means that if the topic believes the event was not processed, it will attempt to deliver the message again.
+**Therefore, all subscription handlers should be [idempotent](https://en.wikipedia.org/wiki/Idempotence#Computer_science_meaning).** This helps ensure that if the handler is called two or more times, from the outside there's no difference compared to calling it once.
+
+This can be achieved using a database to track if you have already performed the action that the event is meant to trigger,
+or ensuring that the action being performed is also idempotent in nature.
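+
+As a sketch, an idempotent handler might record each processed event in a marker table and skip duplicates. Here the `db` database handle, the `processed_signups` table, and the `sendEmail` helper are assumptions for illustration, not part of the Pub/Sub API:
+
+```go
+// A sketch of an idempotent handler: it records each signup in a
+// marker table, so redelivered events become no-ops.
+func SendWelcomeEmail(ctx context.Context, event *SignupEvent) error {
+ res, err := db.Exec(ctx, `
+ INSERT INTO processed_signups (user_id) VALUES ($1)
+ ON CONFLICT (user_id) DO NOTHING
+ `, event.UserID)
+ if err != nil {
+ return err
+ }
+ if res.RowsAffected() == 0 {
+ // Already handled this signup; acknowledge without re-sending.
+ return nil
+ }
+ return sendEmail(ctx, event.UserID) // hypothetical helper
+}
+```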
+
+### Exactly-once delivery
+
+Topics can also be configured to deliver events _exactly once_ by setting the `DeliveryGuarantee` field to
+`pubsub.ExactlyOnce`. This enables stronger guarantees on the infrastructure level to minimize the likelihood of
+message re-delivery.
+
+
+However, there are still some rare circumstances when a message might be redelivered. For example, if a networking issue
+causes the acknowledgement of successful processing to be lost before the cloud provider receives it
+(the [Two Generals' Problem](https://en.wikipedia.org/wiki/Two_Generals%27_Problem)). As such, if correctness is critical
+under all circumstances, it's still advisable to design your subscription handlers to be idempotent.
+
+When exactly-once delivery is enabled on a topic, the cloud provider enforces certain throughput limitations:
+- AWS: 300 messages per second for the topic (see [AWS SQS Quotas](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/quotas-messages.html)).
+- GCP: At least 3,000 messages per second across all topics in the region (can be higher depending on the region; see [GCP PubSub Quotas](https://cloud.google.com/pubsub/quotas#quotas)).
+
+
+
+Exactly-once delivery does not perform message deduplication on the publishing side. If `Publish` is called twice with
+the same message, the message will be delivered twice.
+
+
+
+### Ordered Topics
+
+Topics are unordered by default, meaning that messages can be delivered in any order. This allows for better throughput on the topic as messages can be processed in parallel. However, in some cases, messages must be delivered in the order they were published for a given entity.
+
+To create an ordered topic, configure the topic's `OrderingAttribute` to match the `pubsub-attr` tag on one of the top-level fields of the event type. This field ensures that messages delivered to the same subscriber are delivered in the order of publishing for that specific field value. Messages with a different value on the ordering attribute are delivered in an unspecified order.
+
+To maintain topic order, messages with the same ordering key aren't delivered until the earliest message is processed or dead-lettered, potentially causing delays due to [head-of-line blocking](https://en.wikipedia.org/wiki/Head-of-line_blocking). Mitigate processing issues by ensuring robust logging and alerts, and appropriate subscription retry policies.
+
+
+
+The `OrderingAttribute` currently has no effect in local environments.
+
+
+
+#### Throughput limitations
+
+Each cloud provider enforces certain throughput limitations for ordered topics:
+- **AWS:** 300 messages per second for the topic (see [AWS SQS Quotas](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/quotas-messages.html))
+- **GCP:** 1 MBps for each ordering key (See [GCP Pub/Sub Resource Limits](https://cloud.google.com/pubsub/quotas#resource_limits))
+
+#### Ordered topic example
+
+```go
+package example
+
+import (
+ "context"
+ "encore.dev/pubsub"
+)
+
+type CartEvent struct {
+ ShoppingCartID int `pubsub-attr:"cart_id"`
+ Event string
+}
+
+var CartEvents = pubsub.NewTopic[*CartEvent]("cart-events", pubsub.TopicConfig{
+ DeliveryGuarantee: pubsub.AtLeastOnce,
+ OrderingAttribute: "cart_id",
+})
+
+func Example(ctx context.Context) error {
+ // These are delivered in order as they all have the same shopping cart ID
+ CartEvents.Publish(ctx, &CartEvent{ShoppingCartID: 1, Event: "item_added"})
+ CartEvents.Publish(ctx, &CartEvent{ShoppingCartID: 1, Event: "checkout_started"})
+ CartEvents.Publish(ctx, &CartEvent{ShoppingCartID: 1, Event: "checkout_completed"})
+
+ // This event may be delivered at any point as it has a different shopping cart ID
+ CartEvents.Publish(ctx, &CartEvent{ShoppingCartID: 2, Event: "item_added"})
+
+ return nil
+}
+```
+
+## Publishing events
+
+To publish an **Event**, call `Publish` on the topic passing in the event object (which is the type specified in the `pubsub.NewTopic[Type]` constructor).
+
+For example:
+
+```go
+messageID, err := Signups.Publish(ctx, &SignupEvent{UserID: id})
+if err != nil {
+ return err
+}
+
+// If we get here the event has been successfully published,
+// and all registered subscribers will receive the event.
+
+// The messageID variable contains the unique id of the message,
+// which is also provided to the subscribers when processing the event.
+```
+
+By defining the `Signups` topic variable as an exported variable
+you can also publish to the topic from other services in the same way.
+
+### Using topic references
+
+Encore uses static analysis to determine which services are publishing messages
+to what topics. That information is used to provision infrastructure correctly,
+render architecture diagrams, and configure IAM permissions.
+
+This means that `*pubsub.Topic` variables can't be passed around however you'd like,
+as it makes static analysis impossible in many cases. To work around these restrictions
+Encore allows you to get a reference to a topic that can be passed around any way you want.
+
+It looks like this (using the `Signups` topic above):
+
+```go
+signupRef := pubsub.TopicRef[pubsub.Publisher[*SignupEvent]](Signups)
+
+// signupRef is of type pubsub.Publisher[*SignupEvent], which allows publishing.
+```
+
+The difference between a **TopicRef** and a **Topic** is that topic references need to pre-declare
+what permissions are needed. Encore then assumes that all the permissions you declare are used.
+
+For example, if you declare a **TopicRef** with the `pubsub.Publisher` permission (as seen above)
+Encore assumes that the service will publish messages to the topic and provisions the infrastructure
+to support that.
+
+Note that a **TopicRef** must be declared _within a service_, but the reference itself
+can be freely passed around to library code, be dependency injected into [service structs](/docs/go/how-to/dependency-injection),
+and so on.
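+
+For example, a sketch of a service struct that has a publisher reference injected at initialization:
+
+```go
+//encore:service
+type Service struct {
+ signups pubsub.Publisher[*SignupEvent]
+}
+
+func initService() (*Service, error) {
+ // TopicRef is called within the service, so Encore knows this
+ // service publishes to the Signups topic.
+ return &Service{
+ signups: pubsub.TopicRef[pubsub.Publisher[*SignupEvent]](Signups),
+ }, nil
+}
+```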
+
+## Subscribing to Events
+
+To **Subscribe** to events, you create a Subscription as a package level variable by calling the
+[`pubsub.NewSubscription`](https://pkg.go.dev/encore.dev/pubsub#NewSubscription) function.
+
+Each subscription needs:
+- the topic to subscribe to
+- a name which is unique for the topic
+- a configuration object, with at least a `Handler` function to process the events
+
+Here's an example of how you create a subscription to a topic:
+
+```go
+package email
+
+import (
+ "context"
+
+ "encore.dev/pubsub"
+ "user"
+)
+
+var _ = pubsub.NewSubscription(
+ user.Signups, "send-welcome-email",
+ pubsub.SubscriptionConfig[*user.SignupEvent]{
+ Handler: SendWelcomeEmail,
+ },
+)
+
+func SendWelcomeEmail(ctx context.Context, event *user.SignupEvent) error {
+ // send email...
+ return nil
+}
+```
+
+Subscriptions can be in the same service as the topic is declared, or in any other service of your application. Each
+subscription to a single topic receives the events independently of any other subscriptions to the same topic. This means
+that if one subscription is running very slowly, it will grow a backlog of unprocessed events.
+However, any other subscriptions will still be processing events in real-time as they are published.
+
+The `ctx` passed to the handler function is cancelled when the `AckDeadline` for the subscription is reached.
+This is the time when the message is considered to have timed out and can be redelivered to another subscriber.
+The timeout defaults to 30 seconds if you don't explicitly configure `AckDeadline`.
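+
+For example, a sketch extending the subscription above with a one-minute deadline (assuming the `AckDeadline` field of `SubscriptionConfig` and a `time` import):
+
+```go
+var _ = pubsub.NewSubscription(
+ user.Signups, "send-welcome-email",
+ pubsub.SubscriptionConfig[*user.SignupEvent]{
+ Handler: SendWelcomeEmail,
+ AckDeadline: 60 * time.Second,
+ },
+)
+```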
+
+### Method-based handlers
+
+When using [service structs](/docs/go/primitives/service-structs) for dependency injection
+it's common to want to define the subscription handler as a method on the service struct, to be able to access the
+injected dependencies. The pubsub package provides the `pubsub.MethodHandler` function for this purpose:
+
+```go
+//encore:service
+type Service struct { /* ... */ }
+
+func (s *Service) SendWelcomeEmail(ctx context.Context, event *SignupEvent) error {
+ // ...
+}
+
+var _ = pubsub.NewSubscription(
+ user.Signups, "send-welcome-email",
+ pubsub.SubscriptionConfig[*SignupEvent]{
+ Handler: pubsub.MethodHandler((*Service).SendWelcomeEmail),
+ },
+)
+```
+
+Note that `pubsub.MethodHandler` only allows referencing methods on the service struct type, not any other type.
+
+### Subscription configuration
+
+When creating a subscription you can configure behavior such as message retention and retry policy, using the `SubscriptionConfig` type. See the [package documentation](https://pkg.go.dev/encore.dev/pubsub#SubscriptionConfig) for the complete configuration options.
+
+
+
+The `SubscriptionConfig` struct fields must be defined as compile-time constants, and cannot be defined in
+terms of function calls. This is necessary for Encore to understand the exact requirements of the subscription, in order to provision the correct infrastructure upon deployment.
+
+
+
+### Error Handling
+
+If a subscription function returns an error, the event being processed will be retried, based on the retry policy
+[configured on that subscription](https://pkg.go.dev/encore.dev/pubsub#SubscriptionConfig). After the `MaxRetries` is hit,
+the event will be placed into a dead-letter queue (DLQ) for that subscriber. This allows the subscription to continue
+processing events until the bug which caused the event to fail can be fixed. Once fixed, the messages on the dead-letter queue can be manually released to be processed again by the subscriber.
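+
+As a hedged sketch, a subscription with an explicit retry policy might look like this (assuming the `pubsub.RetryPolicy` type with `MinBackoff`, `MaxBackoff`, and `MaxRetries` fields; the values are illustrative):
+
+```go
+var _ = pubsub.NewSubscription(
+ user.Signups, "send-welcome-email",
+ pubsub.SubscriptionConfig[*user.SignupEvent]{
+ Handler: SendWelcomeEmail,
+ RetryPolicy: &pubsub.RetryPolicy{
+ MinBackoff: time.Second,
+ MaxBackoff: 10 * time.Minute,
+ MaxRetries: 10,
+ },
+ },
+)
+```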
+
+## Testing Pub/Sub
+
+Encore uses a special testing implementation of Pub/Sub topics. When running tests, topics are aware of which test
+is running. This gives you the following guarantees:
+- Your subscriptions will not be triggered by published events. This allows you to test the behaviour of publishers independently of side effects caused by subscribers.
+- Message IDs generated on publish are deterministic (based on the order of publishing), so your assertions can make use of that fact.
+- Each test is isolated from other tests, meaning that events published in one test will not impact other tests (even if you use parallel testing).
+
+Encore provides a helper function, [`et.Topic`](https://pkg.go.dev/encore.dev/et#Topic), to access the testing topic. You
+can use this object to extract the events that have been published to it during a test.
+
+Here's an example implementation:
+
+```go
+package user
+
+import (
+ "testing"
+
+ "encore.dev/et"
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_Register(t *testing.T) {
+ t.Parallel()
+
+ // ... call Register() and assert changes to the database ...
+
+ // Get all published messages on the Signups topic from this test.
+ msgs := et.Topic(Signups).PublishedMessages()
+ assert.Len(t, msgs, 1)
+}
+```
+
+## Ensuring consistency between services
+
+Ensuring consistency between services in event-driven applications can be challenging, especially when database writes and Pub/Sub publishing are not transactional. This can lead to inconsistencies between services.
+
+To address this issue without adding excessive complexity, consider using a transactional outbox pattern. For more information on implementing this pattern with Encore, see the [Pub/Sub Outbox guide](/docs/primitives/pubsub-outbox).
+
+## The benefits of Pub/Sub
+
+Pub/Sub is a powerful building block in a backend application. It can be used to improve app reliability by reducing the blast radius of faulty components and bottlenecks. It can also be used to increase the speed of response to the user, and even helps reduce cognitive overhead for developers by inverting the dependencies between services.
+
+For those not familiar with Pub/Sub, let's take a look at an example API in a user registration service.
+The behavior we want to implement is that upon registration, we send a welcome email to the user and create a record of the signup in our analytics system. Now let's see how we could implement this only using APIs, compared to how a Pub/Sub implementation might look.
+
+### An API only approach
+
+Using API calls between services, we might design a system which looks like this when the user registers:
+
+
+
+
+
+
+
+
+1. The `user` service starts a database transaction and records the user in its database.
+2. The `user` service makes a call to the `email` service to send a welcome email.
+3. The `email` service then calls an email provider to actually send the email.
+4. Upon success, the `email` service replies to the `user` service that the request was processed.
+5. The `user` service then calls the `analytics` service to record the signup.
+6. The `analytics` service then writes to the data warehouse to record the information.
+7. The `analytics` service then replies to the `user` service that the request was processed.
+8. The `user` service commits the database transaction.
+9. The `user` service can then reply to the user to say the registration was successful.
+
+
+
+
+
+Notice how we have to wait for everything to complete before we can reply to the user to tell them we've registered them.
+This means that if our email provider takes 3 seconds to send the email, we've now taken 3 seconds to respond to the user,
+when in reality once the user was written to the database, we could have responded to the user instantly at that point to
+confirm the registration.
+
+Another downside to this approach is that if our data warehouse is broken and reporting errors, our system will also
+report errors whenever anybody tries to sign up! Given that analytics is purely internal and doesn't impact users, why should
+the analytics system being down impact user signup?
+
+### A Pub/Sub approach
+
+A better solution would be to decouple the behaviour of emailing the user and recording our analytics, such that
+the user service only has to record the user in its own database and let the user know they are registered - without worrying
+about the downstream impacts. Thankfully, this is exactly what [Pub/Sub topics](https://pkg.go.dev/encore.dev/pubsub#Topic) allow us to do.
+
+
+
+
+
+In this example, when a user registers:
+
+1. The `user` service starts a database transaction and records the user in its database.
+2. It publishes a signup event to the `signups` topic.
+3. It commits the transaction and replies to the user to say the registration was successful.
+
+At this point the user is free to continue interacting with the application and we've isolated the registration behaviour
+from the rest of the application.
+
+In parallel, the `email` and `analytics` services will receive the signup event from the `signups` topic and will then
+perform their respective tasks. If either service returns an error, the event will automatically be backed off and retried
+until the service is able to process the event successfully, or reaches the maximum number of attempts and is placed
+into the deadletter queue (DLQ).
+
+
+
+
+
+
+
+Notice how in this version, the processing time of the two other services did not impact the end user and in fact the `user`
+service is not even aware of the `email` and `analytics` services. This means that new systems which need to know about
+new users signing up can be added to the application, without the need to change the `user` service or impact its
+performance.
diff --git a/docs/go/primitives/raw-endpoints.md b/docs/go/primitives/raw-endpoints.md
new file mode 100644
index 0000000000..4101bc43bb
--- /dev/null
+++ b/docs/go/primitives/raw-endpoints.md
@@ -0,0 +1,39 @@
+---
+seotitle: Raw Endpoints
+seodesc: Learn how to create raw API endpoints for your cloud backend application using Go and Encore.go
+title: Raw Endpoints
+subtitle: Drop down in abstraction to access the raw HTTP request
+lang: go
+---
+
+Sometimes you need to operate at a lower level of abstraction than Encore.go normally provides.
+For example, you might want to access the underlying HTTP request, often useful for things like accepting webhooks.
+
+Encore.go has you covered using "raw endpoints".
+
+To define a raw endpoint, change the `//encore:api` annotation and function signature like so:
+
+```go
+package service
+
+import "net/http"
+
+// Webhook receives incoming webhooks from Some Service That Sends Webhooks.
+//encore:api public raw
+func Webhook(w http.ResponseWriter, req *http.Request) {
+ // ... operate on the raw HTTP request ...
+}
+```
+
+Like any other Encore API endpoint, once deployed this will be exposed at the URL:
+`https://-.encr.app/service.Webhook`. Just like regular endpoints, raw endpoints support the use of `:id` and `*wildcard` segments.
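+
+For example, a sketch of a raw endpoint with a wildcard segment, reading the parameter via `encore.CurrentRequest` (the path and behavior are illustrative):
+
+```go
+package service
+
+import (
+ "fmt"
+ "net/http"
+
+ "encore.dev"
+)
+
+//encore:api public raw method=GET path=/files/*path
+func ServeFile(w http.ResponseWriter, req *http.Request) {
+ // Read the wildcard segment from the current request.
+ path := encore.CurrentRequest().PathParams.Get("path")
+ fmt.Fprintf(w, "requested file: %s", path)
+}
+```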
+
+Experienced Go developers will have already noted this is just a regular Go HTTP handler.
+(See the net/http documentation for how Go HTTP handlers work.)
+
+Learn more about receiving webhooks and using WebSockets in the [receiving regular HTTP requests guide](/docs/go/how-to/http-requests).
+
+
diff --git a/docs/go/primitives/secrets.md b/docs/go/primitives/secrets.md
new file mode 100644
index 0000000000..90a64d8a43
--- /dev/null
+++ b/docs/go/primitives/secrets.md
@@ -0,0 +1,101 @@
+---
+seotitle: Securely storing API keys and secrets
+seodesc: Learn how to store API keys, and secrets, securely for your backend application. Encore's built in vault makes it simple to keep your app secure.
+title: Storing Secrets and API keys
+subtitle: Simply storing secrets securely
+lang: go
+---
+
+Wouldn't it be nice to store secret values like API keys, database passwords, and private keys directly in the source code?
+Of course, we can’t do that – it's horrifyingly insecure!
+(Unfortunately, it's also [very common](https://www.ndss-symposium.org/ndss-paper/how-bad-can-it-git-characterizing-secret-leakage-in-public-github-repositories/).)
+
+Encore's built-in secrets manager makes it simple to store secrets in a secure way and lets you use them in your program like regular variables.
+
+
+
+## Using secrets in your application
+
+To use a secret in your application, first define it directly in your code by creating an unexported struct named `secrets`, where all fields are of type `string`. For example:
+
+```go
+var secrets struct {
+ SSHPrivateKey string // ed25519 private key for SSH server
+ GitHubAPIToken string // personal access token for deployments
+ // ...
+}
+```
+
+When you've defined secrets in your program, the Encore compiler will check that they are set before running or deploying your application. If a secret is not set, you will get a compilation error notifying you that a secret value is missing.
+
+Once you've provided values for all secrets, you can just use them in your application like a regular variable. For example:
+
+```go
+func callGitHub(ctx context.Context) {
+ req, _ := http.NewRequestWithContext(ctx, "GET", "https://api.github.com/user", nil)
+ req.Header.Add("Authorization", "token " + secrets.GitHubAPIToken)
+ resp, err := http.DefaultClient.Do(req)
+ // ... handle err and resp
+}
+```
+
+
+
+Secret keys are globally unique for your whole application. If multiple services use the same secret name they both receive the same secret value at runtime.
+
+
+
+## Storing secret values
+
+### Using the Encore Cloud dashboard
+
+The simplest way to set up secrets is with the Secrets Manager in the Encore Cloud dashboard. Open your app in [app.encore.cloud](https://app.encore.cloud), go to **Settings** in the main navigation, and then click on **Secrets** in the settings menu.
+
+From here you can create secrets, save secret values, and configure different values for different environments.
+
+
+
+### Using the CLI
+
+If you prefer, you can also set up secrets from the CLI using: `encore secret set --type <types> <secret-name>`
+
+`<types>` defines which environment types the secret value applies to. Use a comma-separated list of `production`, `development`, `preview`, and `local`. Shorthands: `prod`, `dev`, `pr`.
+
+For example `encore secret set --type prod SSHPrivateKey` sets the secret value for production environments, and `encore secret set --type dev,preview,local GitHubAPIToken` sets the secret value for development, preview, and local environments.
+
+In some cases, it can be useful to define a secret for a specific environment instead of an environment type.
+You can do so with `encore secret set --env <env-name> <secret-name>`. Secret values for specific environments
+take precedence over values for environment types.
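+
+For example, `encore secret set --env staging SSHPrivateKey` would set the value only for an environment named `staging` (a hypothetical environment name).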
+
+### Environment settings
+
+Each secret can only have one secret value for each environment type. For example: If you have a secret value that's shared between `development`, `preview` and `local`, and you want to override the value for `local`, you must first edit the existing secret and remove `local` using the Secrets Manager in the [Encore Cloud dashboard](https://app.encore.cloud). You can then add a new secret value for `local`. The end result should look something like the picture below.
+
+
+
+## How it works: Where secrets are stored
+
+When you store a secret Encore stores it encrypted using Google Cloud Platform's [Key Management Service](https://cloud.google.com/security-key-management) (KMS).
+
+- **Production / Your own cloud:** When you deploy to production using your own cloud account on GCP or AWS, Encore provisions a secrets manager in your account (using either KMS or AWS Secrets Manager) and replicates your secrets to it. The secrets are then injected into the container using secret environment variables.
+- **Local:** For local secrets Encore automatically replicates them to developers' machines when running `encore run`.
+- **Development / Encore Cloud:** Environments on Encore's development cloud (running on GCP under the hood) work the same as self-hosted GCP environments, using GCP Secrets Manager.
+
+### Overriding local secrets
+
+When setting secrets via the `encore secret set` command, they are automatically synced to all developers
+working on the same application, courtesy of Encore Cloud.
+
+In some cases, however, you want to override a secret only for your local machine.
+This can be done by creating a file named `.secrets.local.cue` in the root of your Encore application,
+next to the `encore.app` file.
+
+The file contains key-value pairs of secret names to secret values. For example:
+
+```cue
+GitHubAPIToken: "my-local-override-token"
+SSHPrivateKey: "custom-ssh-private-key"
+```
diff --git a/docs/go/primitives/service-structs.md b/docs/go/primitives/service-structs.md
new file mode 100644
index 0000000000..97bdd09c43
--- /dev/null
+++ b/docs/go/primitives/service-structs.md
@@ -0,0 +1,122 @@
+---
+seotitle: Service Structs
+seodesc: Learn how to use service structs to define APIs as methods.
+title: Service structs
+lang: go
+---
+
+Encore lets you define a type, called a service struct, to represent your running service. This lets you define an initialization function (similar to the `main` function in regular Go programs).
+
+You can also define API endpoints as methods on the service struct type, enabling you to use [dependency injection](/docs/go/how-to/dependency-injection) for testing purposes.
+
+It works by defining a struct type of your choice (typically called `Service`)
+and declaring it with `//encore:service`.
+Then, you can define a special function named `initService`
+(or `initWhatever` if you named the type `Whatever`)
+that gets called by Encore to initialize your service when it starts up.
+
+It looks like this:
+```go
+//encore:service
+type Service struct {
+ // Add your dependencies here
+}
+
+func initService() (*Service, error) {
+ // Write your service initialization code here.
+ return &Service{}, nil
+}
+
+//encore:api public
+func (s *Service) MyAPI(ctx context.Context) error {
+ // ...
+}
+```
+
+
+
+## Calling APIs defined on service structs
+
+When using a service struct like above, Encore will create a file named `encore.gen.go`
+in your service directory. This file contains package-level functions for the APIs defined
+as methods on the service struct. In the example above, you would see:
+
+```go
+// Code generated by encore. DO NOT EDIT.
+
+package email
+
+import "context"
+
+// These functions are automatically generated and maintained by Encore
+// to simplify calling them from other services, as they were implemented as methods.
+// They are automatically updated by Encore whenever your API endpoints change.
+
+func Send(ctx context.Context, p *SendParams) error {
+ // The implementation is elided here, and generated at compile-time by Encore.
+ return nil
+}
+```
+
+These functions are generated in order to allow other services to keep calling your
+APIs as package-level functions, in the same way as before: `email.Send(...)`.
+This means other services do not need to care about whether you're using Dependency Injection
+internally. You must always use these generated package-level functions for making API calls.
+
+
+
+Encore will automatically generate these files and keep them up to date
+whenever your code changes. There is no need to manually invoke anything
+to regenerate this code.
+
+
+
+Encore adds all `encore.gen.go` files to your `.gitignore` since you typically
+don't want to commit them to your repository; doing so ends up creating
+a lot of unnecessary merge conflicts.
+
+However, in some cases when running third-party linters in a CI/CD environment
+it can be helpful to generate these wrappers to make the linter happy.
+You can do that by invoking `encore gen wrappers`.
+
+## Graceful Shutdown
+
+When defining a service struct, Encore supports notifying
+your service when it's time to gracefully shut down. This works
+by having your service struct implement the method
+`func (s *Service) Shutdown(force context.Context)`.
+
+If that method exists, Encore will call it when it's time to begin
+gracefully shutting down. Initially the shutdown is in "graceful mode",
+which means that you have a few seconds to complete ongoing work.
+
+The provided `force` context is canceled when the graceful shutdown window
+is over, and it's time to forcefully shut down. How much time you have
+from when `Shutdown` is called to when forceful shutdown begins depends on the
+cloud provider and the underlying infrastructure. Typically it's in the range 5-30 seconds.
+
+
+
+Encore automatically handles graceful shutdown of all Encore-managed
+functionality, such as HTTP servers, database connection pools,
+Pub/Sub message receivers, distributed tracing recorders, and so on.
+
+The graceful shutdown functionality is provided for cases where you have additional,
+non-Encore-related resources that need to be shut down gracefully.
+
+
+
+Note that graceful shutdown in Encore is *cooperative*: Encore will wait indefinitely
+for your `Shutdown` method to return. If your `Shutdown` method does not return promptly
+after the `force` context is closed, the underlying infrastructure at your cloud provider
+will typically force-kill your service, which can lead to lingering connections and other
+such issues.
+
+In summary, when your `Shutdown(force context.Context)` function is called:
+
+- Immediately begin gracefully shutting down
+- When the `force` context is canceled, you should forcefully shut down
+ the resources that haven't yet completed their shutdown
+- Wait until the shutdown is complete before returning from the `Shutdown` function
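+
+Putting this together, a hedged sketch of a `Shutdown` method for a service that owns a hypothetical background worker (the `worker` field and its methods are assumptions for illustration):
+
+```go
+func (s *Service) Shutdown(force context.Context) {
+ done := make(chan struct{})
+ go func() {
+ s.worker.Drain() // hypothetical: finish in-flight work
+ close(done)
+ }()
+
+ select {
+ case <-done:
+ // Graceful shutdown completed within the window.
+ case <-force.Done():
+ // The graceful window is over; forcefully release remaining work.
+ s.worker.Kill() // hypothetical: abort remaining work
+ }
+}
+```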
diff --git a/docs/go/primitives/services.md b/docs/go/primitives/services.md
new file mode 100644
index 0000000000..82151e77de
--- /dev/null
+++ b/docs/go/primitives/services.md
@@ -0,0 +1,42 @@
+---
+seotitle: Defining services with Encore.go
+seodesc: Learn how to create microservices and define APIs for your cloud backend application using Go and Encore. The easiest way of building cloud backends.
+title: Defining Services
+subtitle: Simplifying (micro-)service development
+lang: go
+---
+
+Encore.go makes it simple to build applications with one or many services, without needing to manually handle the typical complexity of developing microservices.
+
+## Defining a service
+
+With Encore.go you define a service by [defining at least one API](/docs/go/primitives/defining-apis) within a regular Go package. Encore recognizes this as a service, and uses the package name as the service name.
+
+On disk it might look like this:
+
+```
+/my-app
+├── encore.app // ... and other top-level project files
+│
+├── hello // hello service (a Go package)
+│ ├── hello.go // hello service code
+│ └── hello_test.go // tests for hello service
+│
+└── world // world service (a Go package)
+ └── world.go // world service code
+```
+
+
+This means building a microservices architecture is as simple as creating multiple Go packages within your application.
+See the [app structure documentation](/docs/go/primitives/app-structure) for more details.
+
+
+
+## Service Initialization
+
+Under the hood Encore automatically generates a `main` function that initializes all your infrastructure resources when the application starts up. This means you don't write a `main` function for your Encore application.
+
+If you want to customize the initialization behavior of your service, you can define a service struct and define custom initialization logic with that. See the [service struct docs](/docs/go/primitives/service-structs) for more info.
diff --git a/docs/go/primitives/share-db-between-services.md b/docs/go/primitives/share-db-between-services.md
new file mode 100644
index 0000000000..fc1f5c0b33
--- /dev/null
+++ b/docs/go/primitives/share-db-between-services.md
@@ -0,0 +1,64 @@
+---
+seotitle: How to share SQL databases between services
+seodesc: Learn how to share a SQL database between multiple Go backend services using Encore.
+title: Share SQL databases between services
+lang: go
+---
+
+By default, each service in an Encore app has its own database. This approach has many benefits:
+- Which database is used and how it works is abstracted away from other services
+- The database is more isolated, making changes to it smaller and safer
+- Making the services more independent improves reliability, since your application can more gracefully handle partial outages, such as when a database is temporarily overloaded or offline.
+
+But like everything else in software engineering, there are trade-offs involved, and sometimes it's simpler and more reliable to use a single database that's accessed by multiple services. Encore makes this easy to do.
+
+Each database in Encore is defined within a service. That service's name becomes the name of the database. Other services can then access that database by creating a database reference with `sqldb.Named("dbname")`.
+
+## Example
+
+Let's say you have a simple `todo` service, with only one table:
+
+**`todo/migrations/1_create_table.up.sql`**
+
+```sql
+CREATE TABLE todo_item (
+ id BIGSERIAL PRIMARY KEY,
+ title TEXT NOT NULL,
+ done BOOLEAN NOT NULL DEFAULT FALSE
+);
+```
+
+You want to create a `report` service that produces various reports for internal business processes, but for simplicity you decide it makes sense to directly access the `todo` database. All that's needed is to define the `todoDB` variable like so:
+
+**`report/report.go`**
+
+```go
+package report
+
+import (
+ "context"
+
+ "encore.dev/storage/sqldb"
+)
+
+// todoDB connects to the "todo" service's database.
+var todoDB = sqldb.Named("todo")
+
+type ReportResponse struct {
+ Total int
+}
+
+// CountCompletedTodos generates a report with the number of completed todo items.
+//encore:api method=GET path=/report/todo
+func CountCompletedTodos(ctx context.Context) (*ReportResponse, error) {
+ var report ReportResponse
+ err := todoDB.QueryRow(ctx, `
+ SELECT COUNT(*)
+ FROM todo_item
+ WHERE done = TRUE
+ `).Scan(&report.Total)
+ return &report, err
+}
+```
+
+With that, Encore understands that the `report` service depends on the `todo` service's database, and orchestrates the necessary connections to make that happen. And like everything else with Encore, it works exactly the same regardless of where it's running: for local development as well as in the cloud.
diff --git a/docs/go/quick-start.mdx b/docs/go/quick-start.mdx
new file mode 100644
index 0000000000..6e591a3b2a
--- /dev/null
+++ b/docs/go/quick-start.mdx
@@ -0,0 +1,227 @@
+---
+seotitle: Quick Start Guide – Learn how to build backends with Encore.go
+seodesc: See how to build and ship a cloud-based backend application using Go and Encore. Install Encore and build a REST API in just a few minutes.
+title: Quick Start Guide
+subtitle: Build your first Encore.go app in 5 minutes
+lang: go
+---
+
+In this short guide, you'll learn key concepts and experience the Encore workflow.
+It should only take about 5 minutes to complete and by the end you'll have an API running in Encore's free development Cloud (Encore Cloud).
+
+To make it easy to follow along, we've laid out a trail of croissants to guide your way.
+Whenever you see a 🥐 it means there's something for you to do.
+
+## 1. Install the Encore CLI
+To develop with Encore, you need the Encore CLI. It provisions your local environment, and runs your local
+development dashboard complete with tracing and API documentation.
+
+🥐 Install by running the appropriate command for your system:
+
+
+
+## 2. Create your app
+🥐 Create your app by running:
+```shell
+$ encore app create
+```
+If this is the first time you're using Encore, you'll be asked if you wish to create a free account.
+This is needed when you want Encore to manage functionality like secrets and handle cloud deployments (which we'll use later on in the tutorial).
+
+🥐 Continue by picking a name for your app and select the `Hello World` template.
+
+This will create an example application, with a simple REST API, in a new folder using the app name you picked.
+
+### Optional: Add Encore LLM instructions
+
+To help LLM powered tools like Cursor and GitHub Copilot understand how to use Encore, you can add pre-made instructions to your app.
+
+🥐 Download the [go_llm_instructions.txt](https://github.com/encoredev/encore/blob/main/go_llm_instructions.txt) file.
+
+ **How to use:**
+ - Cursor: Rename the file to `.cursorrules`.
+ - GitHub Copilot: Paste content in `.github/copilot-instructions.md`.
+ - For other tools, place the file in your app root.
+
+### Let's take a look at the code
+
+Part of what makes Encore different is the simple developer experience when building distributed systems.
+Let's look at the code to better understand how to build applications with Encore.
+
+🥐 Open the `hello.go` file in your code editor. It's located in the folder: `your-app-name/hello/`.
+
+You should see this:
+
+```go
+-- hello/hello.go --
+// Service hello implements a simple hello world REST API.
+package hello
+
+import (
+ "context"
+)
+
+// This is a simple REST API that responds with a personalized greeting.
+//
+//encore:api public path=/hello/:name
+func World(ctx context.Context, name string) (*Response, error) {
+ msg := "Hello, " + name + "!"
+ return &Response{Message: msg}, nil
+}
+
+type Response struct {
+ Message string
+}
+```
+
+As you can see, it's all standard Go code except for a few lines specific to Encore's Backend Framework.
+
+One such element is the API annotation:
+
+```
+//encore:api public path=/hello/:name
+```
+
+This annotation is all that's needed for Encore to understand that the Go package `hello` is a service, and
+the `World` function is a public API endpoint.
+
+To create more services and endpoints, you simply create new Go packages and define endpoints using
+the `//encore:api` annotation. _If you're curious, you can read more about [defining APIs](/docs/go/primitives/defining-apis)._
+
+Encore.go provides several other declarative ways of using backend
+primitives, such as databases, Pub/Sub, and scheduled tasks. All defined in your application code.
+
+## 3. Start your app & Explore the Local Development Dashboard
+
+🥐 Run your app locally:
+
+```shell
+$ cd your-app-name # replace with the app name you picked
+$ encore run
+```
+
+You should see this:
+
+
+
+
+
+That means your local development environment is up and running!
+Encore takes care of setting up all the necessary infrastructure for your applications, even including databases and Pub/Sub.
+
+### Open the Local Development Dashboard
+
+You can now start using your [Local Development Dashboard](/docs/go/observability/dev-dash).
+
+🥐 Open [http://localhost:9400](http://localhost:9400) in your browser to access it.
+
+
+
+
+
+The Local Development Dashboard is a powerful tool to help you move faster when you're developing new features.
+
+It comes with an API explorer, a Service Catalog with automatically generated documentation, and powerful observability features
+like [distributed tracing](/docs/go/observability/tracing).
+
+Through the Local Development Dashboard you also have access to [Encore Flow](/docs/go/observability/encore-flow),
+a visual representation of your microservice architecture that updates in real-time as you develop your application.
+
+### Call your API
+
+🥐 While you keep the app running, call your API from the API Explorer:
+
+
+
+You can also open a separate terminal to call your API endpoint:
+
+```shell
+$ curl http://localhost:4000/hello/world
+{"Message": "Hello, world!"}
+```
+
+If you see this JSON response, you've successfully made an API call to your very first Encore application. Well done, you're on your way!
+
+### Review a trace of the request
+
+You can now take a look at the trace for the request you just made by clicking on it in the right column in the local dashboard.
+
+
+
+With such a simple API, there's not much to it, just a simple request and response.
+
+However, just imagine how powerful it is to have tracing when you're developing a more complex system with multiple services, Pub/Sub, and databases.
+(Learn more about Encore's tracing capabilities in the [tracing docs](/docs/go/observability/tracing).)
+
+## 4. Make a code change
+
+Let's put our mark on this API and make our first code change.
+
+🥐 Head back to your code editor and look at the `hello.go` file again.
+If you can't come up with a creative change yourself, why not simply change the "Hello" message to a more sassy "Howdy"?
+
+🥐 Once you've made your change, save the file.
+
+When you save, the daemon run by the Encore CLI instantly detects the change and automatically recompiles your application and reloads your local development environment.
+
+The output where you're running your app will look something like this:
+
+```output
+Changes detected, recompiling...
+Reloaded successfully.
+INF registered endpoint endpoint=World path=/hello/:name service=hello
+INF listening for incoming HTTP requests
+```
+
+🥐 Test your change by calling your API again.
+
+```shell
+$ curl http://localhost:4000/hello/world
+{"Message": "Howdy, world!"}
+```
+
+Great job, you made a change and your app was reloaded automatically.
+
+Now you're ready to head to the cloud!
+
+## 5. Deploy your app
+
+### Generating Docker image
+
+One option is to deploy by generating a Docker image for your app using:
+```shell
+$ encore build docker MY-IMAGE:TAG
+```
+
+This will compile your application using the host machine and then produce a Docker image containing the compiled application.
+You can now deploy this anywhere you like. Learn more in the [self-host docs](/docs/go/self-host/docker-build).
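+
+For example, you can then run the image locally with Docker (the image listens on port 8080 by default):
+
+```shell
+$ docker run -p 8080:8080 MY-IMAGE:TAG
+```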
+
+### Deploy using Encore Cloud
+Optionally, you can use [Encore Cloud](https://encore.dev/use-cases/devops-automation) to automatically deploy your application.
+It comes with built-in free development hosting, and for production offers fully automated deployment to your own cloud on AWS or GCP.
+
+🥐 To deploy, simply push your changes to Encore:
+
+```shell
+$ git add -A .
+$ git commit -m 'Initial commit'
+$ git push encore
+```
+
+Encore Cloud will now build and test your app, provision the needed infrastructure, and deploy your application to a staging environment.
+
+After triggering the deployment, you will see a URL where you can view its progress in the Encore Cloud dashboard.
+It will look something like: `https://app.encore.cloud/$APP_ID/deploys/...`
+
+🥐 Open the URL to access the Encore Cloud dashboard and check the progress of your deployment.
+
+You can now use the Cloud Dashboard to view production [traces](/docs/go/observability/tracing), [connect your cloud account](/docs/platform/deploy/own-cloud), [integrate with GitHub](/docs/platform/integrations/github), and much more.
+
+
+
+
+
+## What's next?
+
+- Check out the [REST API tutorial](/docs/go/tutorials/rest-api) to learn how to create endpoints, use databases, and more.
+- Join the friendly community on [Discord](/discord) to ask questions and meet other Encore developers.
diff --git a/docs/go/self-host/ci-cd.md b/docs/go/self-host/ci-cd.md
new file mode 100644
index 0000000000..f961e3ec5e
--- /dev/null
+++ b/docs/go/self-host/ci-cd.md
@@ -0,0 +1,85 @@
+---
+seotitle: Integrate with your CI/CD pipeline
+seodesc: Learn how to integrate Encore.go with your CI/CD pipeline.
+title: Integrate with your CI/CD pipeline
+lang: go
+---
+
+Encore seamlessly integrates with any CI/CD pipeline through its CLI tools. You can automate Docker image creation using the `encore build` command as part of your deployment workflow.
+
+## Integrating with CI/CD Platforms
+
+While every CI/CD pipeline is unique, integrating Encore follows a straightforward process. Here are the key steps:
+
+1. Install the Encore CLI in your CI environment
+2. Use `encore build docker` to create Docker images
+3. Push the images to your container registry
+4. Deploy to your infrastructure
+
+Refer to your CI/CD platform's documentation for more details on how to integrate CLI tools like `encore build`.
+
+### GitHub Actions example
+
+This example shows how to build, push, and deploy an Encore Docker image to DigitalOcean using GitHub Actions.
+The DigitalOcean application is set up to re-deploy the application every time an image with the tag `latest` is uploaded.
+
+```yaml
+name: Build, Push and Deploy an Encore Docker Image to DigitalOcean
+
+on:
+ push:
+ branches: [ main ]
+
+permissions:
+ contents: read
+ packages: write
+
+jobs:
+ build-push-deploy-image:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Download Encore CLI script
+ uses: sozo-design/curl@v1.0.2
+ with:
+ args: --output install.sh -L https://encore.dev/install.sh
+
+ - name: Install Encore CLI
+ run: bash install.sh
+
+ - name: Log in to DigitalOcean container registry
+ run: docker login registry.digitalocean.com -u my-email@gmail.com -p ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }}
+
+ - name: Build Docker image
+ run: /home/runner/.encore/bin/encore build docker myapp
+
+ - name: Tag Docker image
+ run: docker tag myapp registry.digitalocean.com/<registry>/<image>:latest
+
+ - name: Push Docker image
+ run: docker push registry.digitalocean.com/<registry>/<image>:latest
+```
+
+## Building Docker Images
+
+The `encore build docker` command provides several options to customize your builds:
+
+```bash
+# Build specific services and gateways
+encore build docker --services=service1,service2 --gateways=api-gateway MY-IMAGE:TAG
+
+# Customize the base image
+encore build docker --base=node:18-alpine MY-IMAGE:TAG
+```
+
+The image defaults to listening on port 8080, but you can customize this by setting the `PORT` environment variable when starting your image.
+
+```bash
+docker run -e PORT=8081 -p 8081:8081 MY-IMAGE:TAG
+```
+
+Learn more about the `encore build docker` command in the [build Docker images](/docs/go/self-host/docker-build) guide.
+
+Continue to learn how to [configure infrastructure](/docs/go/self-host/configure-infra).
diff --git a/docs/go/self-host/configure-infra.md b/docs/go/self-host/configure-infra.md
new file mode 100644
index 0000000000..454c2ce647
--- /dev/null
+++ b/docs/go/self-host/configure-infra.md
@@ -0,0 +1,546 @@
+---
+title: Configure Infrastructure
+seotitle: Configure Infrastructure
+seodesc: Learn how to configure infrastructure resources for your Encore app.
+lang: go
+---
+If your app uses infrastructure resources, such as SQL databases, Pub/Sub, or metrics, you need to provide the Docker image with the corresponding runtime configuration.
+The `encore build docker` command lets you provide this by specifying a path to a config file using the `--config` flag.
+
+```bash
+encore build docker --config path/to/infra-config.json MY-IMAGE:TAG
+```
+
+The configuration file should be a JSON file using the [Encore Infra Config](https://encore.dev/schemas/infra.schema.json) schema.
+
+This supports configuring things like:
+
+- How to access infrastructure resources (what provider to use, what credentials to use, etc.)
+- How to call other services over the network ("service discovery"), most notably their base URLs
+- Observability configuration (where to export metrics, etc.)
+- Metadata about the environment the application is running in, to power Encore's metadata APIs
+- The values for any application-defined secrets
+
+This configuration is necessary for the application to behave correctly.
+
+## Example
+
+Here's an example configuration file you can use.
+
+```json
+{
+ "$schema": "https://encore.dev/schemas/infra.schema.json",
+ "metadata": {
+ "app_id": "my-app",
+ "env_name": "my-env",
+ "env_type": "production",
+ "cloud": "gcp",
+ "base_url": "https://my-app.com"
+ },
+ "sql_servers": [
+ {
+ "host": "my-db-host:5432",
+ "databases": {
+ "my-db": {
+ "username": "my-db-owner",
+ "password": {"$env": "DB_PASSWORD"}
+ }
+ }
+ }
+ ],
+ "service_discovery": {
+ "myservice": {
+ "base_url": "https://myservice:8044"
+ }
+ },
+ "redis": {
+ "my-redis": {
+ "database_index": 0,
+ "auth": {
+ "type": "acl",
+ "username": "encoreredis",
+ "password": {"$env": "REDIS_PASSWORD"}
+ },
+ "host": "my-redis-host",
+ }
+ },
+ "metrics": {
+ "type": "prometheus",
+ "remote_write_url": "https://my-remote-write-url"
+ },
+ "graceful_shutdown": {
+ "total": 30
+ },
+ "auth": [
+ {
+ "type": "key",
+ "id": 1,
+ "key": {"$env": "SVC_TO_SVC_KEY"}
+ }
+ ],
+ "secrets": {
+ "AppSecret": {"$env": "APP_SECRET"}
+ },
+ "pubsub": [
+ {
+ "type": "gcp_pubsub",
+ "project_id": "my-project",
+ "topics": {
+ "my-topic": {
+ "name": "gcp-topic-name",
+ "subscriptions": {
+ "encore-subscription": {
+ "name": "gcp-subscription-name"
+ }
+ }
+ }
+ }
+ }
+ ],
+ "object_storage": [
+ {
+ "type": "gcs",
+ "buckets": {
+ "my-gcs-bucket": {
+ "name": "my-gcs-bucket",
+ }
+ }
+ }
+ ]
+}
+```
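+
+Values referenced with `{"$env": ...}` in the config are read from the container's environment at runtime, so they must be provided when starting the image. A sketch, using the environment variables from the example above and the default port:
+
+```bash
+docker run \
+  -e DB_PASSWORD=... \
+  -e REDIS_PASSWORD=... \
+  -e SVC_TO_SVC_KEY=... \
+  -e APP_SECRET=... \
+  -p 8080:8080 \
+  MY-IMAGE:TAG
+```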
+
+## Configuring Infrastructure
+To use infrastructure resources, you must add configuration so that Encore knows how to access each infrastructure resource.
+See below for examples of each type of infrastructure resource.
+
+### 1. Basic Environment Metadata Configuration
+
+```json
+{
+ "metadata": {
+ "app_id": "my-encore-app",
+ "env_name": "production",
+ "env_type": "production",
+ "cloud": "aws",
+ "base_url": "https://api.myencoreapp.com"
+ }
+}
+```
+
+- `app_id`: The ID of your Encore application.
+- `env_name`: The environment name, such as `production`, `staging`, or `development`.
+- `env_type`: Specifies the type of environment (`production`, `test`, `development`, or `ephemeral`).
+- `cloud`: The cloud provider hosting the infrastructure (e.g., `aws`, `gcp`, or `azure`).
+- `base_url`: The base URL for services in the environment.
+
+### 2. Graceful Shutdown Configuration
+
+```json
+{
+ "graceful_shutdown": {
+ "total": 30,
+ "shutdown_hooks": 10,
+ "handlers": 20
+ }
+}
+```
+
+- `total`: The total time allowed for the shutdown process, in seconds.
+- `shutdown_hooks`: The time allowed for executing shutdown hooks, in seconds.
+- `handlers`: The time allocated for processing request handlers during shutdown, in seconds.
+
+### 3. Authentication Methods Configuration
+Private endpoints do not require authentication if no authentication methods are specified. This is typically fine when services are deployed on a private network such as a VPC. However, if services need to communicate over the public internet, you'll want to ensure private endpoints are only accessible to other backend services. To do that, you can configure authentication methods.
+Encore currently supports authentication through a shared key, which you specify in your infrastructure configuration file.
+```json
+{
+ "auth": [
+ {
+ "type": "key",
+ "id": 1,
+ "key": {
+ "$env": "SERVICE_API_KEY"
+ }
+ }
+ ]
+}
+```
+
+- `type`: The authentication method type (e.g., `key`).
+- `id`: The ID associated with the authentication method.
+- `key`: The authentication key, which can be set using an environment variable reference.
+
+### 4. Service Discovery Configuration
+Service discovery is used to access other services over the network. You can configure service discovery in the infrastructure configuration file.
+If all your services run in the same Docker image, you don't need to configure service discovery; it is configured
+automatically when the services start.
+
+```json
+{
+ "service_discovery": {
+ "myservice": {
+ "base_url": "https://myservice.myencoreapp.com",
+ "auth": [
+ {
+ "type": "key",
+ "id": 1,
+ "key": {
+ "$env": "MY_SERVICE_API_KEY"
+ }
+ }
+ ]
+ }
+ }
+}
+```
+- `myservice`: This is the name of the service as it is declared in your Encore app.
+- `base_url`: The base URL for the service.
+- `auth`: Authentication methods used for accessing the service. If no authentication methods are specified, the service will use the auth methods defined in the `auth` section.
+
+### 5. Metrics Configuration
+Similar to other infrastructure resources, Encore supports configurable metrics export to:
+
+* Prometheus
+* Datadog
+* GCP Cloud Monitoring
+* AWS CloudWatch
+
+This is configured by setting the `metrics` field. Below are examples for each of the supported metrics providers:
+#### 5.1. Prometheus Configuration
+
+```json
+{
+ "metrics": {
+ "type": "prometheus",
+ "collection_interval": 15,
+ "remote_write_url": {
+ "$env": "PROMETHEUS_REMOTE_WRITE_URL"
+ }
+ }
+}
+```
+
+#### 5.2. Datadog Configuration
+
+```json
+{
+ "metrics": {
+ "type": "datadog",
+ "collection_interval": 30,
+ "site": "datadoghq.com",
+ "api_key": {
+ "$env": "DATADOG_API_KEY"
+ }
+ }
+}
+```
+
+#### 5.3. GCP Cloud Monitoring Configuration
+
+```json
+{
+ "metrics": {
+ "type": "gcp_cloud_monitoring",
+ "collection_interval": 60,
+ "project_id": "my-gcp-project",
+ "monitored_resource_type": "gce_instance",
+ "monitored_resource_labels": {
+ "instance_id": "1234567890",
+ "zone": "us-central1-a"
+ },
+ "metric_names": {
+ "cpu_usage": "compute.googleapis.com/instance/cpu/usage_time"
+ }
+ }
+}
+```
+
+#### 5.4. AWS CloudWatch Configuration
+
+```json
+{
+ "metrics": {
+ "type": "aws_cloudwatch",
+ "collection_interval": 60,
+ "namespace": "MyAppMetrics"
+ }
+}
+```
+
+### 6. SQL Database Configuration
+The SQL databases you've declared in your Encore app must be configured in the infrastructure configuration file.
+There must be exactly one database configuration for each declared database. You can configure multiple SQL servers if needed.
+
+```json
+{
+ "sql_servers": [
+ {
+ "host": "db.myencoreapp.com:5432",
+ "tls_config": {
+ "disabled": false,
+ "ca": "---BEGIN CERTIFICATE---\n...",
+ "disable_tls_hostname_verification": false,
+ "disable_ca_verification": false
+ },
+ "databases": {
+ "my-database": {
+ "name": "my-postgres-db-name",
+ "max_connections": 100,
+ "min_connections": 10,
+ "username": "db_user",
+ "password": {
+ "$env": "DB_PASSWORD"
+ }
+ }
+ }
+ }
+ ]
+}
+```
+
+- `my-database`: This is the name of the database as it is declared in your Encore app.
+- `name`: The name of the database on the database server. Defaults to the declared Encore name.
+- `host`: SQL server host, optionally including the port.
+- `tls_config`: TLS configuration for secure connections. If the server uses TLS with a non-system CA root, or requires a client certificate, specify the appropriate fields as PEM-encoded strings. Otherwise, they can be left empty.
+- `databases`: Map of databases, keyed by the name declared in your Encore app, each with connection settings.
+
+### 7. Secrets Configuration
+
+#### 7.1. Using Direct Secrets
+You can set the secret value directly in the configuration file, or use an environment variable reference.
+
+```json
+{
+ "secrets": {
+ "API_TOKEN": "embedded-secret-value",
+ "DB_PASSWORD": {
+ "$env": "DB_PASSWORD"
+ }
+ }
+}
+```
+
+- `API_TOKEN`: This is the name of a secret as it is declared in your Encore app.
+
+#### 7.2. Using Environment Reference
+As an alternative, you can use a single environment variable reference to set all secret values. The environment variable must be set in the
+environment where the application is running, and its content should be a JSON string where each key is a secret name and each value is the corresponding secret value.
+
+```json
+{
+ "secrets": {
+ "$env": "SECRET_JSON"
+ }
+}
+```
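+
+For example — a sketch of what the referenced environment variable might contain, using the secret names from the earlier example:
+
+```bash
+export SECRET_JSON='{"API_TOKEN": "some-token-value", "DB_PASSWORD": "some-password"}'
+```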
+
+### 8. Redis Configuration
+
+```json
+{
+ "redis": {
+ "my-redis": {
+ "host": "redis.myencoreapp.com:6379",
+ "database_index": 0,
+ "auth": {
+ "type": "auth",
+ "auth_string": {
+ "$env": "REDIS_AUTH_STRING"
+ }
+ },
+ "max_connections": 50,
+ "min_connections": 5
+ }
+ }
+}
+```
+
+- `my-redis`: This is the name of the redis resource as it is declared in your Encore app.
+- `host`: Redis server host, optionally including the port.
+- `auth`: Authentication configuration for the Redis server.
+- `key_prefix`: An optional prefix applied to all keys.
+
+### 9. Pub/Sub Configuration
+Encore currently supports the following Pub/Sub providers:
+- `nsq` for [NSQ](https://nsq.io/)
+- `gcp` for [Google Cloud Pub/Sub](https://cloud.google.com/pubsub)
+- `aws` for AWS [SNS](https://aws.amazon.com/sns/) + [SQS](https://aws.amazon.com/sqs/)
+- `azure` for [Azure Service Bus](https://azure.microsoft.com/en-us/products/service-bus)
+
+The configuration for each provider is different. Below are examples for each provider.
+#### 9.1. GCP Pub/Sub
+
+```json
+{
+ "pubsub": [
+ {
+ "type": "gcp_pubsub",
+ "project_id": "my-gcp-project",
+ "topics": {
+ "my-topic": {
+ "name": "my-topic",
+ "project_id": "my-gcp-project",
+ "subscriptions": {
+ "my-subscription": {
+ "name": "my-subscription",
+ "push_config": {
+ "id": "my-push",
+ "service_account": "service-account@my-gcp-project.iam.gserviceaccount.com"
+ }
+ }
+ }
+ }
+ }
+ }
+ ]
+}
+```
+
+- `my-topic`: This is the name of the topic as it is declared in your Encore app.
+- `my-subscription`: This is the name of the subscription as it is declared in your Encore app.
+- `project_id`: The default GCP project ID. This can be overridden by setting the `project_id` field in the topic or subscription.
+- `name`: The name of the topic or subscription.
+- `push_config/id`: The id is appended to `/__encore/pubsub/push/` to form the full push path of your service, e.g. `/__encore/pubsub/push/my-push` for the example above. This is the path your service expects to receive push messages on.
+- `push_config/service_account`: The service account configured for the push subscription.
+
+#### 9.2. AWS SNS/SQS
+
+```json
+{
+ "pubsub": [
+ {
+ "type": "aws_sns_sqs",
+ "topics": {
+ "my-topic": {
+ "arn": "arn:aws:sns:us-east-1:123456789012:my-topic",
+ "subscriptions": {
+ "my-queue": {
+ "arn": "arn:aws:sqs:us-east-1:123456789012:my-queue"
+ }
+ }
+ }
+ }
+ }
+ ]
+}
+```
+
+- `my-topic`: This is the name of the topic as it is declared in your Encore app.
+- `my-queue`: This is the name of the queue as it is declared in your Encore app.
+- `arn`: The ARN of the SNS topic or SQS queue.
+
+#### 9.3. NSQ Configuration
+
+```json
+{
+ "pubsub": [
+ {
+ "type": "nsq",
+ "hosts": "nsq.myencoreapp.com:4150",
+ "topics": {
+ "my-topic": {
+ "name": "my-topic",
+ "subscriptions": {
+ "my-subscription": {
+ "name": "my-subscription"
+ }
+ }
+ }
+ }
+ }
+ ]
+}
+```
+
+- `my-topic`: This is the name of the topic as it is declared in your Encore app.
+- `my-subscription`: This is the name of the subscription as it is declared in your Encore app.
+
+### 10. Object Storage Configuration
+Encore currently supports the following object storage providers:
+- `gcs` for [Google Cloud Storage](https://cloud.google.com/storage)
+- `s3` for [AWS S3](https://aws.amazon.com/s3/) or a custom S3-compatible provider
+
+#### 10.1. GCS Configuration
+
+```json
+{
+ "object_storage": [
+ {
+ "type": "gcs",
+ "buckets": {
+ "my-gcs-bucket": {
+ "name": "my-gcs-bucket",
+ "key_prefix": "my-optional-prefix/",
+ "public_base_url": "https://my-gcs-bucket-cdn.example.com/my-optional-prefix"
+ }
+ }
+ }
+ ]
+}
+```
+
+- `my-gcs-bucket`: This is the name of the bucket as it is declared in your Encore app.
+- `name`: The full name of the GCS bucket.
+- `key_prefix`: An optional prefix to apply to all keys in the bucket.
+- `public_base_url`: A URL to use for public access to the bucket. This field is required if you configure your bucket to be public. Encore will append the object key to this URL when generating public URLs. The optional prefix will not be appended.
+
+#### 10.2. S3 Configuration
+
+```json
+{
+ "object_storage": [
+ {
+ "type": "s3",
+ "region": "us-east-1",
+ "buckets": {
+ "my-s3-bucket": {
+ "name": "my-s3-bucket",
+ "key_prefix": "my-optional-prefix/",
+ "public_base_url": "https://my-gcs-bucket-cdn.example.com/my-optional-prefix"
+ }
+ }
+ }
+ ]
+}
+```
+
+- `my-s3-bucket`: This is the name of the bucket as it is declared in your Encore app.
+- `region`: The AWS region where the bucket is located.
+- `name`: The full name of the S3 bucket.
+- `key_prefix`: An optional prefix to apply to all keys in the bucket.
+- `public_base_url`: A URL to use for public access to the bucket. This field is required if you configure your bucket to be public. Encore will append the object key to this URL when generating public URLs. The optional prefix will not be appended.
+
+#### 10.3. Custom S3 Provider Configuration
+You can also configure a custom S3 provider by specifying the endpoint, access key ID, and secret access key. Custom S3 providers are useful if you are using an S3-compatible storage provider such as [Cloudflare R2](https://developers.cloudflare.com/r2/).
+```json
+{
+ "object_storage": [
+ {
+ "type": "s3",
+ "region": "auto",
+ "endpoint": "https://...",
+ "access_key_id": "...",
+ "secret_access_key": {
+ "$env": "BUCKET_SECRET_ACCESS_KEY"
+ },
+ "buckets": {
+ "my-custom-bucket": {
+ "name": "my-custom-bucket",
+ "key_prefix": "my-optional-prefix/",
+ "public_base_url": "https://my-gcs-bucket-cdn.example.com/my-optional-prefix"
+ }
+ }
+ }
+ ]
+}
+```
+
+- `my-custom-bucket`: This is the name of the bucket as it is declared in your Encore app.
+- `region`: The region where the bucket is located.
+- `name`: The full name of the bucket.
+- `key_prefix`: An optional prefix to apply to all keys in the bucket.
+- `public_base_url`: A URL to use for public access to the bucket. This field is required if you configure your bucket to be public. Encore will append the object key to this URL when generating public URLs. The optional prefix will not be appended.
+
+This guide covers typical infrastructure configurations. Adjust according to your specific requirements to optimize your Encore app's infrastructure setup.
diff --git a/docs/go/self-host/deploy-to-digital-ocean-wip.md b/docs/go/self-host/deploy-to-digital-ocean-wip.md
new file mode 100644
index 0000000000..474ea39e3e
--- /dev/null
+++ b/docs/go/self-host/deploy-to-digital-ocean-wip.md
@@ -0,0 +1,197 @@
+---
+seotitle: How to deploy an Encore app to DigitalOcean
+seodesc: Learn how to deploy an Encore application to DigitalOcean's App Platform using Docker.
+title: Deploy to DigitalOcean
+lang: go
+---
+
+If you prefer manual deployment over the automation offered by Encore's Platform, Encore simplifies the process of deploying your app to the cloud provider of your choice. This guide will walk you through deploying an Encore app to DigitalOcean's App Platform using Docker.
+
+### Prerequisites
+1. **DigitalOcean Account**: Make sure you have a DigitalOcean account. If not, you can [sign up here](https://www.digitalocean.com/).
+2. **Docker Installed**: Ensure Docker is installed on your local machine. You can download it from the [Docker website](https://www.docker.com/get-started).
+3. **Encore CLI**: Install the Encore CLI if you haven’t already. You can follow the installation instructions from the [Encore documentation](https://encore.dev/docs/go/install).
+4. **DigitalOcean CLI (Optional)**: You can install the DigitalOcean CLI for more flexibility and automation, but it’s not necessary for this tutorial.
+
+### Step 1: Create an Encore App
+1. **Create a New Encore App**:
+ - If you haven’t already, create a new Encore app using the Encore CLI.
+ - You can use the following command to create a new app:
+ ```bash
+ encore app create myapp
+ ```
+ - Select the `Hello World` template.
+ - Follow the prompts to create the app.
+
+2. **Build a Docker image**:
+ - Build the Encore app to generate the Docker image for deployment:
+ ```bash
+ encore build docker myapp
+ ```
+### Step 2: Push the Docker Image to a Container Registry
+To deploy your Docker image to DigitalOcean, you need to push it to a container registry. DigitalOcean provides
+its own container registry, but you can also use Docker Hub or other registries. Here’s how to push the image to DigitalOcean’s registry:
+
+1. **Create a DigitalOcean Container Registry**:
+ - Go to the [DigitalOcean Control Panel](https://cloud.digitalocean.com/registries) and create a new container registry.
+ - Follow the instructions to set it up.
+
+2. **Login to DigitalOcean's registry**:
+ Use the login command provided by DigitalOcean, which will look something like this:
+ ```bash
+ doctl registry login
+ ```
+ You’ll need the DigitalOcean CLI for this, which can be installed from [DigitalOcean CLI documentation](https://docs.digitalocean.com/reference/doctl/how-to/install/).
+
+3. **Tag your Docker image**:
+ Tag your image to match the registry’s URL.
+ ```bash
+ docker tag myapp registry.digitalocean.com/YOUR_REGISTRY_NAME/myapp:latest
+ ```
+
+4. **Push your Docker image to the registry**:
+ ```bash
+ docker push registry.digitalocean.com/YOUR_REGISTRY_NAME/myapp:latest
+ ```
+
+### Step 3: Deploy the Docker Image to DigitalOcean App Platform
+1. **Navigate to the App Platform**:
+ Go to [DigitalOcean's App Platform](https://cloud.digitalocean.com/apps).
+
+2. **Create a New App**:
+ - Click on **"Create App"**.
+ - Choose the **"DigitalOcean Container Registry"** option.
+
+3. **Select the Docker Image Source**:
+ - Select the image you pushed earlier.
+
+4. **Configure the App Settings**:
+ - **Set up scaling options**: Configure the number of containers, CPU, and memory settings.
+ - **Environment variables**: Add any environment variables your application might need.
+ - **Choose the region**: Pick a region close to your users for better performance.
+
+5. **Deploy the App**:
+ - Click **"Next"**, review the settings, and click **"Create Resources"**.
+ - DigitalOcean will take care of provisioning the infrastructure, pulling the Docker image, and starting the application.
+
+### Step 4: Monitor and Manage the App
+1. **Access the Application**:
+ - Once deployed, you will get a public URL to access your application.
+ - Test the app to ensure it’s running as expected, e.g.
+ ```bash
+ curl https://myapp.ondigitalocean.app/hello/world
+ ```
+
+2. **View Logs and Metrics**:
+ - Go to the **"Runtime Logs"** tab in the App Platform to view logs
+ - Go to the **"Insights"** tab to view performance metrics.
+
+3. **Manage Scaling and Deployment Settings**:
+ - You can change the app configuration, such as scaling settings, deployment region, or environment variables.
+
+### Step 5: Add a Database to Your App
+
+DigitalOcean’s App Platform provides managed databases, allowing you to add a database to your app easily. Here’s how to set up a managed database for your app:
+
+1. **Navigate to the DigitalOcean Control Panel**:
+ - Go to [DigitalOcean Control Panel](https://cloud.digitalocean.com/).
+ - Click on **"Databases"** in the left-hand sidebar.
+
+2. **Create a New Database Cluster**:
+ - Click **"Create Database Cluster"**.
+ - Choose **PostgreSQL**.
+ - Select the **database version**, **data center region**, and **cluster configuration** (e.g., development or production settings based on your needs).
+ - **Name the database** and configure other settings if necessary, then click **"Create Database Cluster"**.
+
+3. **Configure the Database Settings**:
+ - Once the database is created, go to the **"Connection Details"** tab of the database dashboard.
+ - Copy the **connection string** or individual settings (host, port, username, password, database name). You will need these details to connect your app to the database.
+ - Download the **CA certificate**.
+
+4. **Create a Database**
+ - Connect to the database cluster using the connection details provided by DigitalOcean (the default database is `defaultdb`).
+ ```bash
+ psql -h mydb.db.ondigitalocean.com -U doadmin -d defaultdb -p 25060
+ ```
+ - Create the `mydb` database and connect to it
+ ```sql
+ CREATE DATABASE mydb;
+ \c mydb
+ ```
+ - Create a table
+ ```sql
+ CREATE TABLE users (
+ id SERIAL PRIMARY KEY,
+ name VARCHAR(50)
+ );
+ INSERT INTO users (name) VALUES ('Alice');
+ ```
+
+5. **Declare a Database in your Encore app**:
+ - Open your Encore app’s codebase.
+ - Add the `mydb` database to your app ([Encore Database Documentation](https://encore.dev/docs/go/primitives/databases))
+ ```go
+ package user
+
+ import (
+     "context"
+
+     "encore.dev/storage/sqldb"
+ )
+
+ // Declare the "mydb" database and its migrations directory.
+ var mydb = sqldb.NewDatabase("mydb", sqldb.DatabaseConfig{
+     Migrations: "./migrations",
+ })
+
+ type User struct {
+     ID   int64  `json:"id"`
+     Name string `json:"name"`
+ }
+
+ //encore:api public method=GET path=/names/:id
+ func GetUser(ctx context.Context, id int64) (*User, error) {
+     var u User
+     err := mydb.QueryRow(ctx, "SELECT id, name FROM users WHERE id = $1", id).Scan(&u.ID, &u.Name)
+     return &u, err
+ }
+ ```
+
+6. **Create an Encore Infrastructure config**
+ - Create a file named `infra.config.json` in the root of your Encore app.
+ - Add the **CA certificate** and the connection details to the file:
+ ```json
+ {
+ "$schema": "https://encore.dev/schemas/infra.schema.json",
+ "sql_servers": [
+ {
+ "host": "mydb.db.ondigitalocean.com:25060",
+ "tls_config": {
+ "ca": "-----BEGIN CERTIFICATE-----\n..."
+ },
+ "databases": {
+ "mydb": {
+ "username": "doadmin",
+ "password": {"$env": "DB_PASSWORD"}
+ }
+ }
+ }]
+ }
+ ```
+
+7. **Set Up Environment Variables (Optional)**:
+ - Go to the DigitalOcean App Platform dashboard.
+ - Select your app.
+ - In the **"Settings"** section, go to **"App-Level Environment Variables"**
+ - Add the database password as an encrypted environment variable called `DB_PASSWORD`.
+
+8. **Build and push the Docker image**:
+ - Build the Docker image with the updated configuration.
+ ```bash
+ encore build docker --config infra.config.json myapp
+ ```
+ - Tag and push the Docker image to the DigitalOcean container registry.
+ ```bash
+ docker tag myapp registry.digitalocean.com/YOUR_REGISTRY_NAME/myapp:latest
+ docker push registry.digitalocean.com/YOUR_REGISTRY_NAME/myapp:latest
+ ```
+
+9. **Test the Database Connection**:
+ - Redeploy the app on DigitalOcean to apply the changes.
+ - Test the database connection by calling the API
+ ```bash
+ curl https://myapp.ondigitalocean.app/names/1
+ ```
+
+### Troubleshooting Tips
+- **Deployment Failures**: Check the build logs for any errors. Make sure the Docker image is correctly tagged and pushed to the registry.
+- **App Not Accessible**: Verify that the correct port is exposed in the Dockerfile and the App Platform configuration.
+- **Database Connection Issues**: Ensure the database connection details are correct and the database is accessible from the app.
+
+### Conclusion
+That’s it! You’ve successfully deployed an Encore app to DigitalOcean’s App Platform using Docker. You can now scale your app, monitor its performance, and manage it easily through the DigitalOcean dashboard. If you encounter any issues, refer to the DigitalOcean documentation or the Encore community for help. Happy coding!
\ No newline at end of file
diff --git a/docs/go/self-host/self-host.md b/docs/go/self-host/self-host.md
new file mode 100644
index 0000000000..285c605dd4
--- /dev/null
+++ b/docs/go/self-host/self-host.md
@@ -0,0 +1,33 @@
+---
+seotitle: Build Docker Images
+seodesc: Learn how to build Docker images for your Encore application, which can be self-hosted on your own infrastructure.
+title: Build Docker Images
+lang: go
+---
+
+Encore supports building Docker images directly from the CLI, which can then be self-hosted on your own infrastructure of choice.
+
+This can be a good option if Encore Cloud isn't a good fit for your use case, or if you want to [migrate away](/docs/go/migration/migrate-away).
+
+## Building your own Docker image
+
+To build your own Docker image, use `encore build docker MY-IMAGE:TAG` from the CLI.
+
+This will compile your application using the host machine and then produce a Docker image containing the compiled application. The base image defaults to `scratch` for Go apps and `node:slim` for TypeScript apps, but can be customized with `--base`.
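+
+For example, to build with a different base image (`alpine` here is just an illustration; any image your app can run on works):
+
+```bash
+encore build docker --base=alpine:latest MY-IMAGE:TAG
+```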
+
+This is exactly the same code path that Encore's CI system uses to build Docker images, ensuring compatibility.
+
+By default, all your services are included and started by the Docker image. If you only want to include specific services and gateways, you can use the `--services` and `--gateways` flags.
+
+```bash
+encore build docker --services=service1,service2 --gateways=api-gateway MY-IMAGE:TAG
+```
+
+The image defaults to listening on port 8080, but you can customize this by setting the `PORT` environment variable when starting your image.
+
+```bash
+docker run -e PORT=8081 -p 8081:8081 MY-IMAGE:TAG
+```
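+
+Once built, the image can be pushed to any container registry and run on your platform of choice. A sketch with a placeholder registry URL:
+
+```bash
+docker tag MY-IMAGE:TAG registry.example.com/MY-IMAGE:TAG
+docker push registry.example.com/MY-IMAGE:TAG
+```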
+
+Congratulations, you've built your own Docker image! 🎉
+Continue to learn how to [configure infrastructure](/docs/go/self-host/configure-infra).
\ No newline at end of file
diff --git a/docs/go/tutorials/booking-system.mdx b/docs/go/tutorials/booking-system.mdx
new file mode 100644
index 0000000000..cc2397f65c
--- /dev/null
+++ b/docs/go/tutorials/booking-system.mdx
@@ -0,0 +1,977 @@
+---
+title: Building a Booking System
+subtitle: Learn how to build your own appointment booking system with both user facing and admin functionality
+seotitle: How to build an Appointment Booking System in Go
+seodesc: Learn how to build an appointment booking tool using Go and Encore. Get your entire application running in the cloud in 30 minutes!
+lang: go
+---
+
+In this tutorial we'll build a booking system with a user facing UI (see available slots and book appointments) and an admin dashboard (manage scheduled appointments and set availability). You will learn how to:
+
+* Create API endpoints using Encore (both public and authenticated).
+* Work with PostgreSQL databases using [sqlc](https://sqlc.dev/) and [pgx](https://github.com/jackc/pgx).
+* Scrub sensitive user data from traces.
+* Work with dates and times in Go.
+* Authenticate requests using an auth handler.
+* Send emails using a SendGrid integration.
+
+[Demo version of the app](https://prod-booking-system-teti.encr.app/frontend)
+
+The final result will look like this:
+
+
+
+
+
+If you want to skip ahead you can view the final project here: [https://github.com/encoredev/examples/tree/main/booking-system](https://github.com/encoredev/examples/tree/main/booking-system)
+
+## 1. Create your Encore application
+
+
+
+To make it easier to follow along, we've laid out a trail of croissants to guide your way.
+Whenever you see a 🥐 it means there's something for you to do.
+
+Make sure you have [Docker](https://docker.com) installed and running; Encore uses it to run PostgreSQL databases locally.
+
+
+🥐 Create a new Encore application, using this tutorial project's starting-point branch. This gives you a ready-to-go frontend to use.
+
+```shell
+$ encore app create booking-system --example=github.com/encoredev/example-booking-system/tree/starting-point
+```
+
+
+🥐 Check that your frontend works:
+
+```shell
+$ cd booking-system
+$ encore run
+```
+
+Then visit [http://localhost:4000/frontend/](http://localhost:4000/frontend/) to see the frontend.
+It won't function yet since we haven't built the backend, so let's do just that!
+
+When we're done we'll have a backend with this [architecture](/docs/go/observability/encore-flow):
+
+
+
+## 2. Create booking service
+
+Let's start by creating the functionality to view bookable slots.
+
+With Encore you define a service by [defining one or more APIs](/docs/go/primitives/defining-apis) within a regular Go package. Encore recognizes this as a service, and uses the package name as the service name. When deploying, Encore will automatically [provision the required infrastructure](/docs/platform/infrastructure/infra) for each service.
+
+We already have a Go package named `booking`, let's turn that into an Encore service.
+
+🥐 Inside the `booking` folder, create a file named `slots.go`.
+
+```shell
+$ touch booking/slots.go
+```
+
+🥐 Add an Encore API endpoint named `GetBookableSlots` that takes a date as input. The endpoint will return a list of bookable slots from the supplied date and six days forward (so that we can show a week view calendar in the UI).
+
+```go
+-- booking/slots.go --
+// Service booking keeps track of bookable slots in the calendar.
+package booking
+
+import (
+ "context"
+ "github.com/jackc/pgx/v5/pgtype"
+ "time"
+)
+
+const DefaultBookingDuration = 1 * time.Hour
+
+type BookableSlot struct {
+ Start time.Time `json:"start"`
+ End time.Time `json:"end"`
+}
+
+type SlotsParams struct{}
+
+type SlotsResponse struct{ Slots []BookableSlot }
+
+//encore:api public method=GET path=/slots/:from
+func GetBookableSlots(ctx context.Context, from string) (*SlotsResponse, error) {
+ fromDate, err := time.Parse("2006-01-02", from)
+ if err != nil {
+ return nil, err
+ }
+
+ const numDays = 7
+
+ var slots []BookableSlot
+ for i := 0; i < numDays; i++ {
+ date := fromDate.AddDate(0, 0, i)
+ daySlots, err := bookableSlotsForDay(date)
+ if err != nil {
+ return nil, err
+ }
+ slots = append(slots, daySlots...)
+ }
+
+ return &SlotsResponse{Slots: slots}, nil
+}
+
+func bookableSlotsForDay(date time.Time) ([]BookableSlot, error) {
+ // 09:00
+ availStartTime := pgtype.Time{
+ Valid: true,
+ Microseconds: int64(9*3600) * 1e6,
+ }
+ // 17:00
+ availEndTime := pgtype.Time{
+ Valid: true,
+ Microseconds: int64(17*3600) * 1e6,
+ }
+
+ availStart := date.Add(time.Duration(availStartTime.Microseconds) * time.Microsecond)
+ availEnd := date.Add(time.Duration(availEndTime.Microseconds) * time.Microsecond)
+
+ // Compute the bookable slots in this day, based on availability.
+ var slots []BookableSlot
+ start := availStart
+ for {
+ end := start.Add(DefaultBookingDuration)
+ if end.After(availEnd) {
+ break
+ }
+ slots = append(slots, BookableSlot{
+ Start: start,
+ End: end,
+ })
+ start = end
+ }
+
+ return slots, nil
+}
+```
+
+The availability is currently hardcoded to be 09:00 - 17:00 for each day. Later we'll add the functionality to set it for each day of the week.
+We are also returning time slots that have already passed. Don't worry, we'll come back and fix that later on.
+
+🥐 Let's try it! Open up the Local Development Dashboard running at [http://localhost:9400](http://localhost:9400) and try calling
+the `booking.GetBookableSlots` endpoint, passing in `2024-12-01`.
+
+If you prefer the terminal, run `curl http://localhost:4000/slots/2024-12-01` in
+a new terminal window. Either way you should see the response:
+
+```json
+{
+ "Slots": [
+ {
+ "start": "2024-12-01T09:00:00Z",
+ "end": "2024-12-01T10:00:00Z"
+ },
+ {
+ "start": "2024-12-01T10:00:00Z",
+ "end": "2024-12-01T11:00:00Z"
+ },
+ {
+ "start": "2024-12-01T11:00:00Z",
+ "end": "2024-12-01T12:00:00Z"
+ },
+ ...
+ ]
+}
+```
+
+## 3. Book an appointment
+
+Next, we want to make it possible to book an appointment. We'll need a database to store the bookings in. Encore makes it really simple to [create and use databases](/docs/go/primitives/databases) (both for local and cloud environments), but for this example we will also make use of [sqlc](https://sqlc.dev/) that will compile our SQL queries into type-safe Go code that we can use in our application.
+
+🥐 Let's create a SQL database for our booking service and the required sqlc scaffolding. Create the following file structure:
+
+```
+/my-app
+└── booking // booking service (a Go package)
+ ├── db // (New) db related files (directory)
+ │ ├── migrations // (New) db migrations (directory)
+ │ │ └── 1_create_tables.up.sql // (New) db migration schema
+ │ └── query.sql // (New) SQL queries
+ ├── sqlc.yaml // (New) sqlc config file
+ ├── slots.go // booking service code
+ └── helpers.go // booking service code
+```
+
+🥐 The naming of the database migration file is important: it must look something like `1_<name>.up.sql`, i.e. a sequence number, an underscore, a descriptive name, and the `.up.sql` suffix.
+
+Add the following contents to the migration file:
+
+```sql
+-- booking/db/migrations/1_create_tables.up.sql --
+CREATE TABLE booking (
+ id BIGSERIAL PRIMARY KEY,
+ start_time TIMESTAMP NOT NULL,
+ end_time TIMESTAMP NOT NULL,
+ email TEXT NOT NULL,
+ created_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+```
+
+🥐 Next, install the sqlc library:
+
+```shell
+$ go install github.com/sqlc-dev/sqlc/cmd/sqlc@latest
+```
+
+🥐 Next, we need to configure sqlc. Add the following contents to `sqlc.yaml`:
+
+```yaml
+-- booking/sqlc.yaml --
+version: "2"
+sql:
+ - engine: "postgresql"
+ queries: "db/query.sql"
+ schema: "./db/migrations"
+ gen:
+ go:
+ package: "db"
+ out: "db"
+ sql_package: "pgx/v5"
+```
+
+This instructs sqlc to generate Go code from the queries in `db/query.sql` and models from the schemas in the `db/migrations` folder.
+
+🥐 Let's create our first SQL queries. Add the following contents to `db/query.sql`:
+
+```sql
+-- name: InsertBooking :one
+INSERT INTO booking (start_time, end_time, email)
+VALUES ($1, $2, $3)
+RETURNING *;
+
+-- name: ListBookingsBetween :many
+SELECT * FROM booking
+WHERE start_time >= $1 AND end_time <= $2;
+
+-- name: ListBookings :many
+SELECT * FROM booking;
+
+-- name: DeleteBooking :exec
+DELETE FROM booking WHERE id = $1;
+
+```
+
+🥐 It's time for sqlc to shine! Run the following command in your terminal:
+
+```shell
+$ cd booking
+$ sqlc generate
+```
+
+Three files should now have been generated inside the `db` folder: `query.sql.go`, `db.go` and `models.go`. These files contain generated Go code and should not be edited manually. Later we'll add more queries to `db/query.sql` and re-run `sqlc generate` to update the generated Go code.
+
+Now let's create an endpoint that makes use of one of these queries.
+
+🥐 Create `booking/booking.go` with the contents:
+
+```go
+-- booking/booking.go --
+package booking
+
+import (
+ "context"
+ "time"
+
+ "encore.app/booking/db"
+ "github.com/jackc/pgx/v5/pgtype"
+ "github.com/jackc/pgx/v5/pgxpool"
+
+ "encore.dev/beta/errs"
+ "encore.dev/storage/sqldb"
+)
+
+var (
+ bookingDB = sqldb.NewDatabase("booking", sqldb.DatabaseConfig{
+ Migrations: "./db/migrations",
+ })
+
+ pgxdb = sqldb.Driver[*pgxpool.Pool](bookingDB)
+ query = db.New(pgxdb)
+)
+
+type Booking struct {
+ ID int64 `json:"id"`
+ Start time.Time `json:"start"`
+ End time.Time `json:"end"`
+ Email string `encore:"sensitive"`
+}
+
+type BookParams struct {
+ Start time.Time `json:"start"`
+ Email string `encore:"sensitive"`
+}
+
+//encore:api public method=POST path=/booking
+func Book(ctx context.Context, p *BookParams) error {
+ eb := errs.B()
+
+ now := time.Now()
+ if p.Start.Before(now) {
+ return eb.Code(errs.InvalidArgument).Msg("start time must be in the future").Err()
+ }
+
+ tx, err := pgxdb.Begin(ctx)
+ if err != nil {
+ return eb.Cause(err).Code(errs.Unavailable).Msg("failed to start transaction").Err()
+ }
+ defer tx.Rollback(context.Background()) // committed explicitly on success
+
+ _, err = query.InsertBooking(ctx, db.InsertBookingParams{
+ StartTime: pgtype.Timestamp{Time: p.Start, Valid: true},
+ EndTime: pgtype.Timestamp{Time: p.Start.Add(DefaultBookingDuration), Valid: true},
+ Email: p.Email,
+ })
+ if err != nil {
+ return eb.Cause(err).Code(errs.Unavailable).Msg("failed to insert booking").Err()
+ }
+
+ if err := tx.Commit(ctx); err != nil {
+ return eb.Cause(err).Code(errs.Unavailable).Msg("failed to commit transaction").Err()
+ }
+ return nil
+}
+```
+
+We are now using the generated type-safe `query.InsertBooking` function to perform the database operation.
+
+Notice the `encore:"sensitive"` tag on the `Email` field. This tells Encore to scrub this field so that the data is not viewable in the traces for deployed environments. This is useful for fields that contain [sensitive data](/docs/go/primitives/defining-apis#sensitive-data) such as email addresses, passwords, etc.
+
+🥐 Restart `encore run` to cause the database to be created, and then call the `booking.Book` endpoint:
+
+```shell
+$ curl -X POST 'http://localhost:4000/booking' -d '{"start": "2024-12-11T09:00:00Z", "email": "test@example.com"}'
+```
+
+Congratulations, you have now booked your first appointment!
+
+## 4. Authentication
+
+To provide an admin dashboard for our booking system, we need to add authentication to our application so that we can have protected endpoints.
+
+Keep in mind, in this tutorial we'll only include a very basic implementation.
+
+🥐 Let's start by creating a new service named `user`:
+
+```shell
+$ mkdir user
+$ touch user/auth.go
+```
+
+🥐 Add the following contents to `user/auth.go`:
+
+```go
+-- user/auth.go --
+// Service user authenticates users.
+package user
+
+import (
+ "context"
+ "encore.dev/beta/auth"
+ "encore.dev/beta/errs"
+)
+
+type Data struct {
+ Email string
+}
+
+type AuthParams struct {
+ Authorization string `header:"Authorization"`
+}
+
+//encore:authhandler
+func AuthHandler(ctx context.Context, p *AuthParams) (auth.UID, *Data, error) {
+ if p.Authorization != "" {
+ return "test", &Data{}, nil
+ }
+ return "", nil, errs.B().Code(errs.Unauthenticated).Msg("no auth header").Err()
+}
+
+```
+
+This function is our [auth handler](/docs/go/develop/auth#the-auth-handler). An Encore application can designate a special function to handle authentication
+by annotating it with `//encore:authhandler`. This annotation tells Encore to run the function whenever an
+incoming API call contains authentication data.
+
+The auth handler is responsible for validating the incoming authentication data and returning an `auth.UID` (a string type representing a user id).
+The `auth.UID` can be whatever you wish, but in practice it usually maps directly to the primary key stored in a user table (either defined in the Encore service or in an external service like Firebase or Okta).
+
+In order to keep this example simple, we'll just approve any request containing a token that is not empty.
+
+Next we'll implement endpoints that require authentication, making use of our newly created auth handler.
+
+## 5. Setting availability
+
+Right now the availability is hardcoded to 9:00 - 17:00. Let's add the functionality to let our admin users customize this.
+
+Let's start by adding another migration file, this time to create an `availability` table.
+
+🥐 Create a file called `2_add_availability.up.sql` inside the `booking/db/migrations` folder. Add the following contents to that file:
+
+```sql
+-- booking/db/migrations/2_add_availability.up.sql --
+CREATE TABLE availability (
+ weekday SMALLINT NOT NULL PRIMARY KEY, -- Sunday=0, Monday=1, etc.
+ start_time TIME NULL, -- null indicates not available
+ end_time TIME NULL -- null indicates not available
+);
+
+-- Add some placeholder availability to get started
+INSERT INTO availability (weekday, start_time, end_time) VALUES
+ (0, '09:30', '17:00'),
+ (1, '09:00', '17:00'),
+ (2, '09:00', '18:00'),
+ (3, '08:30', '18:00'),
+ (4, '09:00', '17:00'),
+ (5, '09:00', '17:00'),
+ (6, '09:30', '16:30');
+```
+
+🥐 We can now add two queries to `booking/db/query.sql` so that we can store and retrieve availability:
+
+```sql
+-- booking/db/query.sql --
+-- name: GetAvailability :many
+SELECT * FROM availability
+ORDER BY weekday;
+
+-- name: UpdateAvailability :exec
+INSERT INTO availability (weekday, start_time, end_time)
+VALUES (@weekday, @start_time, @end_time)
+ON CONFLICT (weekday) DO UPDATE
+SET start_time = @start_time, end_time = @end_time;
+```
+
+🥐 Run `sqlc generate` to update the generated Go code.
+
+🥐 Create a new file in the `booking` service named `availability.go`:
+
+```shell
+$ touch booking/availability.go
+```
+
+🥐 Add the following to that file:
+
+```go
+-- booking/availability.go --
+package booking
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "encore.app/booking/db"
+ "github.com/jackc/pgx/v5/pgtype"
+
+ "encore.dev/beta/errs"
+ "encore.dev/rlog"
+)
+
+type Availability struct {
+ Start *string `json:"start" encore:"optional"`
+ End *string `json:"end" encore:"optional"`
+}
+
+type GetAvailabilityResponse struct {
+ Availability []Availability
+}
+
+//encore:api public method=GET path=/availability
+func GetAvailability(ctx context.Context) (*GetAvailabilityResponse, error) {
+ rows, err := query.GetAvailability(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ availability := make([]Availability, 7)
+ for _, row := range rows {
+ day := row.Weekday
+ if day < 0 || day > 6 {
+ rlog.Error("invalid week day in availability table", "row", row)
+ continue
+ }
+
+ // These never fail
+ start, _ := row.StartTime.TimeValue()
+ end, _ := row.EndTime.TimeValue()
+ availability[day] = Availability{
+ Start: timeToStr(start),
+ End: timeToStr(end),
+ }
+ }
+
+ return &GetAvailabilityResponse{Availability: availability}, nil
+}
+
+type SetAvailabilityParams struct {
+ Availability []Availability
+}
+
+//encore:api auth method=POST path=/availability
+func SetAvailability(ctx context.Context, params SetAvailabilityParams) error {
+ eb := errs.B()
+ tx, err := pgxdb.Begin(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback(context.Background()) // committed explicitly on success
+
+ qry := query.WithTx(tx)
+ for weekday, a := range params.Availability {
+ if weekday > 6 {
+ return eb.Code(errs.InvalidArgument).Msgf("invalid weekday %d", weekday).Err()
+ }
+
+ start, err1 := strToTime(a.Start)
+ end, err2 := strToTime(a.End)
+ if err := errors.Join(err1, err2); err != nil {
+ return eb.Cause(err).Code(errs.InvalidArgument).Msg("invalid start/end time").Err()
+ } else if start.Valid != end.Valid {
+ return eb.Code(errs.InvalidArgument).Msg("both start/stop must be set, or both null").Err()
+ } else if start.Valid && start.Microseconds > end.Microseconds {
+ return eb.Code(errs.InvalidArgument).Msg("start must be before end").Err()
+ }
+
+ err = qry.UpdateAvailability(ctx, db.UpdateAvailabilityParams{
+ Weekday: int16(weekday),
+ StartTime: start,
+ EndTime: end,
+ })
+ if err != nil {
+ return eb.Cause(err).Code(errs.Unavailable).Msg("failed to update availability").Err()
+ }
+ }
+
+ err = tx.Commit(ctx)
+ return errs.WrapCode(err, errs.Unavailable, "failed to commit transaction")
+}
+```
+
+This file contains two endpoints, a setter and a getter. The `SetAvailability` endpoint is declared with the `auth` access level, which means the caller must be authenticated. The `GetAvailability` endpoint is public and can be called without authentication.
+
+🥐 Let's set the availability for each day of the week. Open the Development Dashboard at [http://localhost:9400](http://localhost:9400) and select the `booking.SetAvailability` endpoint in the API Explorer. For the request body, paste the following:
+
+```json
+{
+ "Availability": [{
+ "start": "09:30",
+ "end": "17:00"
+ },{
+ "start": "09:00",
+ "end": "17:00"
+ },{
+ "start": "09:00",
+ "end": "18:00"
+ },{
+ "start": "08:30",
+ "end": "18:00"
+ },{
+ "start": "09:00",
+ "end": "17:00"
+ },{
+ "start": "09:00",
+ "end": "17:00"
+ },{
+ "start": "09:30",
+ "end": "16:30"
+ }]
+}
+```
+
+
+
+Don't leave the auth token empty; the auth handler will reject requests without one. You can use any value for the auth token.
+
+
+
+Now try retrieving the availability by calling the `booking.GetAvailability` endpoint through the API Explorer in the Development Dashboard.
+
+🥐 Add the following functions inside the `booking` package, and import the `slices` package:
+
+```go
+func listBookingsBetween(
+ ctx context.Context,
+ start, end time.Time,
+) ([]*Booking, error) {
+ rows, err := query.ListBookingsBetween(ctx, db.ListBookingsBetweenParams{
+ StartTime: pgtype.Timestamp{Time: start, Valid: true},
+ EndTime: pgtype.Timestamp{Time: end, Valid: true},
+ })
+ if err != nil {
+ return nil, err
+ }
+ var bookings []*Booking
+ for _, row := range rows {
+ bookings = append(bookings, &Booking{
+ ID: row.ID,
+ Start: row.StartTime.Time,
+ End: row.EndTime.Time,
+ Email: row.Email,
+ })
+ }
+ return bookings, nil
+}
+
+func filterBookableSlots(
+ slots []BookableSlot,
+ now time.Time,
+ bookings []*Booking,
+) []BookableSlot {
+ // Remove slots for which the start time has already passed.
+ slots = slices.DeleteFunc(slots, func(s BookableSlot) bool {
+ // Has the slot already passed?
+ if s.Start.Before(now) {
+ return true
+ }
+
+ // Is there a booking that overlaps with this slot?
+ for _, b := range bookings {
+ if b.Start.Before(s.End) && b.End.After(s.Start) {
+ return true
+ }
+ }
+
+ return false
+ })
+ return slots
+}
+```
+
+We'll use these functions to figure out which slots are bookable, and which are not, to avoid double bookings.
+
+🥐 Now we can update the `Book` endpoint inside `booking.go` and make use of these new functions:
+
+```go
+HL booking/booking.go 15:27
+-- booking/booking.go --
+//encore:api public method=POST path=/booking
+func Book(ctx context.Context, p *BookParams) error {
+ eb := errs.B()
+
+ now := time.Now()
+ if p.Start.Before(now) {
+ return eb.Code(errs.InvalidArgument).Msg("start time must be in the future").Err()
+ }
+
+ tx, err := pgxdb.Begin(ctx)
+ if err != nil {
+ return eb.Cause(err).Code(errs.Unavailable).Msg("failed to start transaction").Err()
+ }
+ defer tx.Rollback(context.Background()) // committed explicitly on success
+
+ // Get the bookings for this day.
+ startOfDay := time.Date(p.Start.Year(), p.Start.Month(), p.Start.Day(), 0, 0, 0, 0, p.Start.Location())
+ bookings, err := listBookingsBetween(ctx, startOfDay, startOfDay.AddDate(0, 0, 1))
+ if err != nil {
+ return eb.Cause(err).Code(errs.Unavailable).Msg("failed to list bookings").Err()
+ }
+
+ // Is this slot bookable?
+ slot := BookableSlot{Start: p.Start, End: p.Start.Add(DefaultBookingDuration)}
+ if len(filterBookableSlots([]BookableSlot{slot}, now, bookings)) == 0 {
+ return eb.Code(errs.InvalidArgument).Msg("slot is unavailable").Err()
+ }
+
+ _, err = query.InsertBooking(ctx, db.InsertBookingParams{
+ StartTime: pgtype.Timestamp{Time: p.Start, Valid: true},
+ EndTime: pgtype.Timestamp{Time: p.Start.Add(DefaultBookingDuration), Valid: true},
+ Email: p.Email,
+ })
+ if err != nil {
+ return eb.Cause(err).Code(errs.Unavailable).Msg("failed to insert booking").Err()
+ }
+
+ if err := tx.Commit(ctx); err != nil {
+ return eb.Cause(err).Code(errs.Unavailable).Msg("failed to commit transaction").Err()
+ }
+ return nil
+}
+```
+
+🥐 Inside `slots.go`, update the `GetBookableSlots` endpoint and the `bookableSlotsForDay` functions to look like this:
+
+```go
+HL booking/slots.go 7:12
+HL booking/slots.go 18:23
+HL booking/slots.go 29:36
+HL booking/slots.go 39:48
+-- booking/slots.go --
+//encore:api public method=GET path=/slots/:from
+func GetBookableSlots(ctx context.Context, from string) (*SlotsResponse, error) {
+ fromDate, err := time.Parse("2006-01-02", from)
+ if err != nil {
+ return nil, err
+ }
+
+ availabilityResp, err := GetAvailability(ctx)
+ if err != nil {
+ return nil, err
+ }
+ availability := availabilityResp.Availability
+
+ const numDays = 7
+
+ var slots []BookableSlot
+ for i := 0; i < numDays; i++ {
+ date := fromDate.AddDate(0, 0, i)
+ weekday := int(date.Weekday())
+ if len(availability) <= weekday {
+ break
+ }
+ daySlots, err := bookableSlotsForDay(date, &availability[weekday])
+ if err != nil {
+ return nil, err
+ }
+ slots = append(slots, daySlots...)
+ }
+
+ // Get bookings for the next 7 days.
+ activeBookings, err := listBookingsBetween(ctx, fromDate, fromDate.AddDate(0, 0, numDays))
+ if err != nil {
+ return nil, err
+ }
+
+ slots = filterBookableSlots(slots, time.Now(), activeBookings)
+ return &SlotsResponse{Slots: slots}, nil
+}
+
+func bookableSlotsForDay(date time.Time, avail *Availability) ([]BookableSlot, error) {
+ if avail.Start == nil || avail.End == nil {
+ return nil, nil
+ }
+ availStartTime, err1 := strToTime(avail.Start)
+ availEndTime, err2 := strToTime(avail.End)
+ if err := errors.Join(err1, err2); err != nil {
+ return nil, err
+ }
+
+ availStart := date.Add(time.Duration(availStartTime.Microseconds) * time.Microsecond)
+ availEnd := date.Add(time.Duration(availEndTime.Microseconds) * time.Microsecond)
+
+ // Compute the bookable slots in this day, based on availability.
+ var slots []BookableSlot
+ start := availStart
+ for {
+ end := start.Add(DefaultBookingDuration)
+ if end.After(availEnd) {
+ break
+ }
+ slots = append(slots, BookableSlot{
+ Start: start,
+ End: end,
+ })
+ start = end
+ }
+
+ return slots, nil
+}
+```
+
+## 6. Managing scheduled bookings
+
+To display the scheduled bookings in the admin dashboard, we need to add the functionality to list all bookings. While we're at it, we'll also make it possible to delete bookings.
+
+🥐 Add two new endpoints to `booking/booking.go`:
+
+```go
+-- booking/booking.go --
+type ListBookingsResponse struct {
+ Booking []*Booking `json:"bookings"`
+}
+
+//encore:api auth method=GET path=/booking
+func ListBookings(ctx context.Context) (*ListBookingsResponse, error) {
+ rows, err := query.ListBookings(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ var bookings []*Booking
+ for _, row := range rows {
+ bookings = append(bookings, &Booking{
+ ID: row.ID,
+ Start: row.StartTime.Time,
+ End: row.EndTime.Time,
+ Email: row.Email,
+ })
+ }
+ return &ListBookingsResponse{Booking: bookings}, nil
+}
+
+//encore:api auth method=DELETE path=/booking/:id
+func DeleteBooking(ctx context.Context, id int64) error {
+ return query.DeleteBooking(ctx, id)
+}
+```
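+
+You can also try the authenticated endpoints from the terminal. Since our simple auth handler accepts any non-empty `Authorization` header, any placeholder token works:
+
+```shell
+$ curl -H 'Authorization: Bearer dummy-token' http://localhost:4000/booking
+```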
+
+That's it! We now have all the backend endpoints in place to be able to supply the frontend with data. 🎉
+
+## 7. Running the React frontend
+
+The frontend should now be working as expected.
+
+🥐 Go to [http://localhost:4000/frontend/](http://localhost:4000/frontend/) and try out your new booking system.
+
+The frontend is built using [React](https://react.dev/) and [Tailwind CSS](https://tailwindcss.com/). It uses Encore's ability to generate type-safe [request clients](https://encore.dev/docs/go/cli/client-generation). This means you don't need to manually keep the request/response objects in sync on the frontend. To generate a client:
+
+```bash
+$ encore gen client --output=./src/client.ts --env=<env-name>
+```
+
+While developing, you'll want to run this command quite often (whenever you change your endpoints), so having it as an `npm` script is a good idea. Take a look at the scripts in the `package.json` file:
+
+```json
+{
+...
+"scripts": {
+ ...
+ "gen": "encore gen client --output=./src/lib/client.ts --env=staging",
+ "gen:local": "encore gen client --output=./src/lib/client.ts --env=local"
+ },
+}
+```
+
+For this frontend we use the request client together with [TanStack Query](https://tanstack.com/query/latest). When building something a bit more complex, you will likely need to deal with caching, refetching, and data going stale. [TanStack Query](https://tanstack.com/query/latest) is a popular library that was built to solve exactly these problems and works great with the Encore request client.
+
+See the docs page about [integrating with a web frontend](/docs/how-to/integrate-frontend) to learn more.
+
+## 8. Deploy to Encore's development cloud
+
+Let's deploy the project to Encore's free development cloud.
+
+Encore comes with built-in CI/CD, and the deployment process is as simple as a `git push`.
+(You can also integrate with GitHub to activate per Pull Request Preview Environments, learn more in the [CI/CD docs](/docs/platform/deploy/deploying).)
+
+🥐 Now, let's deploy your app to Encore's free development cloud by running:
+
+```shell
+$ git add -A .
+$ git commit -m 'Initial commit'
+$ git push encore
+```
+
+Encore will now build and test your app, provision the needed infrastructure, and deploy your application to the cloud.
+
+After triggering the deployment, you will see a URL where you can view its progress in the [Encore Cloud dashboard](https://app.encore.cloud). It will look something like: `https://app.encore.cloud/$APP_ID/deploys/...`
+
+From there you can also see metrics, traces, link your app to a GitHub repo to get automatic deploys on new commits, and connect your own AWS or GCP account to use for production deployment.
+
+🥐 When the deploy has finished, you can try out your booking system by going to `https://staging-$APP_ID.encr.app/frontend/`.
+
+*You now have an Appointment Booking System running in the cloud, well done!*
+
+## 9. Sending confirmation emails using SendGrid
+
+In order for users to get a confirmation email when they book an appointment, we need to add an email integration.
+
+Conveniently for us, there is a ready-to-use SendGrid integration available as an [Encore Bit](https://github.com/encoredev/examples?tab=readme-ov-file#bits).
+
+🥐 [Follow the instructions](https://github.com/encoredev/examples/tree/main/bits/sendgrid) to add the SendGrid integration to your project.
+
+Next, we need to call our new `sendgrid` service when an appointment is booked.
+
+🥐 Add a call to `sendgrid.Send` in the `Book` endpoint:
+
+```go
+HL booking/booking.go 41:59
+-- booking/booking.go --
+//encore:api public method=POST path=/booking
+func Book(ctx context.Context, p *BookParams) error {
+ eb := errs.B()
+
+ now := time.Now()
+ if p.Start.Before(now) {
+ return eb.Code(errs.InvalidArgument).Msg("start time must be in the future").Err()
+ }
+
+ tx, err := pgxdb.Begin(ctx)
+ if err != nil {
+ return eb.Cause(err).Code(errs.Unavailable).Msg("failed to start transaction").Err()
+ }
+ defer tx.Rollback(context.Background()) // committed explicitly on success
+
+ // Get the bookings for this day.
+ startOfDay := time.Date(p.Start.Year(), p.Start.Month(), p.Start.Day(), 0, 0, 0, 0, p.Start.Location())
+ bookings, err := listBookingsBetween(ctx, startOfDay, startOfDay.AddDate(0, 0, 1))
+ if err != nil {
+ return eb.Cause(err).Code(errs.Unavailable).Msg("failed to list bookings").Err()
+ }
+
+ // Is this slot bookable?
+ slot := BookableSlot{Start: p.Start, End: p.Start.Add(DefaultBookingDuration)}
+ if len(filterBookableSlots([]BookableSlot{slot}, now, bookings)) == 0 {
+ return eb.Code(errs.InvalidArgument).Msg("slot is unavailable").Err()
+ }
+
+ _, err = query.InsertBooking(ctx, db.InsertBookingParams{
+ StartTime: pgtype.Timestamp{Time: p.Start, Valid: true},
+ EndTime: pgtype.Timestamp{Time: p.Start.Add(DefaultBookingDuration), Valid: true},
+ Email: p.Email,
+ })
+ if err != nil {
+ return eb.Cause(err).Code(errs.Unavailable).Msg("failed to insert booking").Err()
+ }
+
+ if err := tx.Commit(ctx); err != nil {
+ return eb.Cause(err).Code(errs.Unavailable).Msg("failed to commit transaction").Err()
+ }
+
+ // Send confirmation email using SendGrid
+ formattedTime := p.Start.Format("2006-01-02 15:04")
+ _, err = sendgrid.Send(ctx, &sendgrid.SendParams{
+ From: sendgrid.Address{
+ Name: "",
+ Email: "",
+ },
+ To: sendgrid.Address{
+ Email: p.Email,
+ },
+ Subject: "Booking Confirmation",
+ Text: "Thank you for your booking!\nWe look forward to seeing you soon at " + formattedTime,
+ Html: "",
+ })
+
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+```
+
+
+
+The `From` address needs to go through SendGrid's sender verification process before it can be used to send emails. You can read more about it in the [SendGrid sender verification docs](https://sendgrid.com/docs/ui/sending-email/sender-verification/).
+
+The default behaviour of the SendGrid integration is to only send emails in production environments. You can create production environments through the Encore Cloud dashboard.
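+
+If you also want to guard against accidental sends in your own code, you can check the environment at runtime. Here is a minimal sketch using `encore.dev`'s runtime metadata (a hypothetical addition, not part of the integration itself), which you could call before `sendgrid.Send`:
+
+```go
+import "encore.dev"
+
+// isProduction reports whether the app is running in a production environment.
+func isProduction() bool {
+	return encore.Meta().Environment.Type == encore.EnvProduction
+}
+```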
+
+
+
+## 9. Deploy your finished Booking System
+
+Now you're ready to deploy your finished Booking System, complete with a SendGrid integration.
+
+🥐 As before, deploying your app to the cloud is as simple as running:
+
+```shell
+$ git add -A .
+$ git commit -m 'Add sendgrid integration'
+$ git push encore
+```
+
+### Celebrate with fireworks
+
+Now that your app is running in the cloud, let's celebrate with some fireworks:
+
+🥐 In the Cloud Dashboard, open the Command Menu by pressing **Cmd + K** (Mac) or **Ctrl + K** (Windows/Linux).
+
+_From here you can easily access all Cloud Dashboard features and, for example, jump straight to specific services in the Service Catalog or view Traces for specific endpoints._
+
+🥐 Type `fireworks` in the Command Menu and press enter. Sit back and enjoy the show!
+
+
diff --git a/docs/go/tutorials/graphql.mdx b/docs/go/tutorials/graphql.mdx
new file mode 100644
index 0000000000..674ffc666a
--- /dev/null
+++ b/docs/go/tutorials/graphql.mdx
@@ -0,0 +1,332 @@
+---
+title: Building a GraphQL API
+subtitle: Learn how to build a GraphQL API using Encore.go
+seotitle: How to build a GraphQL API using Encore.go
+seodesc: Learn how to build a microservices backend in Go, powered by GraphQL and Encore.
+lang: go
+---
+
+Encore has great support for GraphQL with its type-safe approach to building APIs.
+
+Encore's automatic tracing also makes it easy to find and fix
+performance issues that often arise in GraphQL APIs (like the [N+1 problem](https://hygraph.com/blog/graphql-n-1-problem)).
+
+The best way to use GraphQL with Encore is using [gqlgen](https://gqlgen.com/), which
+shares Encore's goals: type-safe APIs, minimal boilerplate, and code generation.
+
+The final code will look like this:
+
+
+
+
+
+## 1. Create your Encore application
+
+This tutorial uses the [REST API](/docs/go/tutorials/rest-api) tutorial as a starting point.
+
+You can either follow that tutorial first, or you can create a new Encore application
+using the `url-shortener` template by running:
+
+```shell
+$ encore app create --example=url-shortener
+```
+
+## 2. Initialize gqlgen
+
+To get started, initialize gqlgen by creating a `tools.go` file in the application root:
+
+```go
+-- tools.go --
+//go:build tools
+
+package tools
+
+import (
+ _ "github.com/99designs/gqlgen"
+ _ "github.com/99designs/gqlgen/graphql/introspection"
+)
+```
+
+Then run `go mod tidy` to download the dependencies.
+
+Next, create a `gqlgen.yml` file in the application root containing:
+
+```yaml
+-- gqlgen.yml --
+# Where are all the schema files located? globs are supported eg src/**/*.graphqls
+schema:
+ - graphql/*.graphqls
+
+# Where should the generated server code go?
+exec:
+ filename: graphql/generated/generated.go
+ package: generated
+
+# Where should any generated models go?
+model:
+ filename: graphql/model/models_gen.go
+ package: model
+
+# Where should the resolver implementations go?
+resolver:
+ layout: follow-schema
+ dir: graphql
+ package: graphql
+
+# gqlgen will search for any type names in the schema in these go packages
+# if they match it will use them, otherwise it will generate them.
+autobind:
+ - "encore.app/url"
+
+# This section declares type mapping between the GraphQL and go type systems
+#
+# The first line in each type will be used as defaults for resolver arguments and
+# modelgen, the others will be allowed when binding to fields. Configure them to
+# your liking
+models:
+ ID:
+ model:
+ - github.com/99designs/gqlgen/graphql.ID
+ - github.com/99designs/gqlgen/graphql.Int
+ - github.com/99designs/gqlgen/graphql.Int64
+ - github.com/99designs/gqlgen/graphql.Int32
+ Int:
+ model:
+ - github.com/99designs/gqlgen/graphql.Int
+ - github.com/99designs/gqlgen/graphql.Int64
+ - github.com/99designs/gqlgen/graphql.Int32
+```
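+
+The `autobind` entry tells gqlgen to reuse matching Go types from the `encore.app/url` package instead of generating new models. For reference, the `URL` type it binds to (from the url-shortener starter) looks like this:
+
+```go
+type URL struct {
+	ID  string // short-form URL id
+	URL string // complete URL, in long form
+}
+```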
+
+## 3. Create Encore service
+
+Now it's time to create our Encore service that will provide the GraphQL API.
+
+First generate the gqlgen boilerplate:
+
+```shell
+$ mkdir -p graphql/generated graphql/model
+$ echo "package model" > graphql/model/model.go
+$ go run github.com/99designs/gqlgen generate
+```
+
+This will create a bunch of files in the `graphql` directory.
+
+Next, create a `graphql/service.go` file containing:
+
+```go
+-- graphql/service.go --
+// Service graphql exposes a GraphQL API.
+package graphql
+
+import (
+ "net/http"
+
+ "encore.app/graphql/generated"
+ "encore.dev"
+ "github.com/99designs/gqlgen/graphql/handler"
+ "github.com/99designs/gqlgen/graphql/playground"
+)
+
+//go:generate go run github.com/99designs/gqlgen generate
+
+//encore:service
+type Service struct {
+ srv *handler.Server
+ playground http.Handler
+}
+
+func initService() (*Service, error) {
+ srv := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: &Resolver{}}))
+ pg := playground.Handler("GraphQL Playground", "/graphql")
+ return &Service{srv: srv, playground: pg}, nil
+}
+
+//encore:api public raw path=/graphql
+func (s *Service) Query(w http.ResponseWriter, req *http.Request) {
+ s.srv.ServeHTTP(w, req)
+}
+
+//encore:api public raw path=/graphql/playground
+func (s *Service) Playground(w http.ResponseWriter, req *http.Request) {
+ // Disable playground in production
+ if encore.Meta().Environment.Type == encore.EnvProduction {
+ http.Error(w, "Playground disabled", http.StatusNotFound)
+ return
+ }
+
+ s.playground.ServeHTTP(w, req)
+}
+```
+
+This creates an Encore service that exposes the `/graphql` and `/graphql/playground` endpoints.
+
+It also adds a `//go:generate` directive that lets you re-run the gqlgen code generation
+by running `go generate ./graphql`.
+
+## 4. Add GraphQL schema
+
+Now it's time to define the GraphQL schema. Create a `graphql/url.graphqls` file containing:
+
+```graphql
+-- graphql/url.graphqls --
+type Query {
+ urls: [URL!]!
+ get(id: ID!): URL!
+}
+
+type Mutation {
+ shorten(input: String!): URL!
+}
+
+type URL {
+ id: ID! # shortened id
+ url: String! # full URL
+}
+```
+
+Then, re-run the code generation to generate the resolver stubs:
+
+```shell
+$ go generate ./graphql
+```
+The stubs will be written to `graphql/url.resolvers.go` and will contain a bunch of unimplemented resolver methods
+that look something like this:
+
+```go
+// Shorten is the resolver for the shorten field.
+func (r *mutationResolver) Shorten(ctx context.Context, input string) (*url.URL, error) {
+ panic(fmt.Errorf("not implemented: Shorten - shorten"))
+}
+```
+
+## 5. Implement resolvers
+
+Now, modify the resolvers to call the `url` service. Since the GraphQL API uses the same types
+as the Encore API exposes (thanks to the `autobind` directive in `gqlgen.yml`), we can call the
+endpoints directly. Implement the resolvers in `graphql/url.resolvers.go` like this:
+
+```go
+-- graphql/url.resolvers.go --
+// Shorten is the resolver for the shorten field.
+func (r *mutationResolver) Shorten(ctx context.Context, input string) (*url.URL, error) {
+ return url.Shorten(ctx, &url.ShortenParams{URL: input})
+}
+
+// Urls is the resolver for the urls field.
+func (r *queryResolver) Urls(ctx context.Context) ([]*url.URL, error) {
+ resp, err := url.List(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return resp.URLs, nil
+}
+
+// Get is the resolver for the get field.
+func (r *queryResolver) Get(ctx context.Context, id string) (*url.URL, error) {
+ return url.Get(ctx, id)
+}
+```
+
+As you can see, the resolvers are just thin wrappers around the Encore API endpoints themselves.
+
+## 6. Trying it out
+
+With that, the GraphQL API is done! Try it out by running `encore run` and opening up [the playground](http://localhost:4000/graphql/playground).
+
+Enter the query:
+```graphql
+mutation {
+ shorten(input: "https://encore.dev") {
+ id
+ }
+}
+```
+
+You should get back an id like `MnTWA8Jo`. Pass the id you got (it will be something different) to a `get` query:
+
+```graphql
+query {
+ get(id: "") {
+ url
+ }
+}
+```
+
+And you should get back `https://encore.dev`.
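+
+You can also exercise the raw `/graphql` endpoint without the playground. Here is a minimal sketch (assuming the app is running locally via `encore run`) that posts a query using only the standard library:
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+)
+
+func main() {
+	// gqlgen's handler accepts a JSON body with a "query" field.
+	query := []byte(`{"query": "query { urls { id url } }"}`)
+	resp, err := http.Post("http://localhost:4000/graphql", "application/json", bytes.NewReader(query))
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	body, _ := io.ReadAll(resp.Body)
+	fmt.Println(string(body)) // e.g. {"data":{"urls":[...]}}
+}
+```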
+
+
+## 7. Deploy
+
+
+
+### Self-hosting
+
+Encore supports building Docker images directly from the CLI, which can then be self-hosted on your own infrastructure of choice.
+
+If your app is using infrastructure resources, such as SQL databases, Pub/Sub, or metrics, you will need to supply a [runtime configuration](/docs/go/self-host/configure-infra) for your Docker image.
+
+🥐 Build a Docker image by running `encore build docker graphql:v1.0`.
+
+This will compile your application using the host machine and then produce a Docker image containing the compiled application.
+
+🥐 Upload the Docker image to the cloud provider of your choice and run it.
+
+
+
+
+
+### Deploy to Encore Cloud
+
+Encore Cloud provides automated infrastructure and DevOps. Deploy to a free development environment or to your own cloud account on AWS or GCP.
+
+### Create account
+
+Before deploying with Encore Cloud, you need to have a free Encore Cloud account and link your app to the platform. If you already have an account, you can move on to the next step.
+
+If you don’t have an account, the simplest way to get set up is by running `encore app create` and selecting **Y** when prompted to create a new account. Once your account is set up, continue creating a new app, selecting the `empty app` template.
+
+After creating the app, copy your project files into the new app directory, ensuring that you do not replace the `encore.app` file (this file holds a unique id which links your app to the platform).
+
+### Commit changes
+
+The final step before you deploy is to commit all changes to the project repo.
+
+
+Push your changes and deploy your application to Encore's free development cloud by running:
+
+```shell
+$ git add -A .
+$ git commit -m 'Initial commit'
+$ git push encore
+```
+
+Encore will now build and test your app, provision the needed infrastructure, and deploy your application to the cloud.
+
+After triggering the deployment, you will see a URL where you can view its progress in the [Encore Cloud dashboard](https://app.encore.cloud). It will look something like: `https://app.encore.cloud/$APP_ID/deploys/...`
+
+From there you can also see metrics and traces, link your app to a GitHub repo to get automatic deploys on new commits, and connect your own AWS or GCP account to use for production deployment.
+
+### Celebrate with fireworks
+
+Now that your app is running in the cloud, let's celebrate with some fireworks:
+
+🥐 In the Cloud Dashboard, open the Command Menu by pressing **Cmd + K** (Mac) or **Ctrl + K** (Windows/Linux).
+
+_From here you can easily access all Cloud Dashboard features and, for example, jump straight to specific services in the Service Catalog or view Traces for specific endpoints._
+
+🥐 Type `fireworks` in the Command Menu and press enter. Sit back and enjoy the show!
+
+
+
+
+
+## Conclusion
+
+We've now built a GraphQL API gateway that forwards requests to the application's
+underlying Encore services in a type-safe way with minimal boilerplate.
+
+Note that the concepts discussed here are general and can be easily adapted to any GraphQL schema.
+
+Whenever you make a change to the schema or configuration, re-run `go generate ./graphql` to
+regenerate the GraphQL boilerplate. And for more information on how to use `gqlgen`,
+see the [gqlgen documentation](https://gqlgen.com/).
diff --git a/docs/go/tutorials/incident-management-tool.md b/docs/go/tutorials/incident-management-tool.md
new file mode 100644
index 0000000000..2c96f8daa8
--- /dev/null
+++ b/docs/go/tutorials/incident-management-tool.md
@@ -0,0 +1,854 @@
+---
+seotitle: How to build an Incident Management Tool with Go
+seodesc: Learn how to build an incident management tool like PagerDuty using Go and Encore. Get a working app running in the cloud in 30 minutes!
+title: Building an Incident Management Tool
+subtitle: Set up your own PagerDuty from zero-to-production in just 30 minutes
+social_card: /assets/docs/incident-og-image.png
+lang: go
+---
+
+In this tutorial, we're going to walk through how to build our very own Incident Management Tool like [Incident.io](https://incident.io) or [PagerDuty](https://pagerduty.com). We'll have our own on-call schedule that can be rotated between many users, and have incoming incidents assigned according to that schedule!
+
+
+
+In about 30 minutes, your application will be able to support:
+
+- Creating users, as well as schedules for when users will be on call
+- Creating incidents, with Slack reminders every 10 minutes for unacknowledged incidents
+- Auto-assigning unassigned incidents as soon as the next user comes on call
+
+_Sounds good? Let's dig in!_
+
+Or if you'd rather watch a video of this tutorial, you can do that below.
+
+VIDEO
+
+View full project on [GitHub](https://github.com/encoredev/example-app-oncall)
+
+
+
+To make it easier to follow along, we've laid out a trail of croissants to guide your way.
+Whenever you see a 🥐 it means there's something for you to do.
+
+
+
+## 1. Create your Encore application
+
+🥐 Create a new Encore application by running `encore app create`, select `Empty app` as the template and name it `oncall-tutorial`.
+
+## 2. Integrate with Slack
+
+🥐 Follow [this guide to create your own Incoming Webhook](https://api.slack.com/messaging/webhooks) for your Slack workspace. Incoming webhooks cannot read messages, and can only post to a specific channel of your choice.
+
+
+
+🥐 Once you have your webhook URL (it starts with `https://hooks.slack.com/services/...`), run the following commands to save it as a secret. We recommend using a different webhook/channel for development and production.
+
+```shell
+$ encore secret set --type dev,local,pr SlackWebhookURL
+$ encore secret set --type prod SlackWebhookURL
+```
+
+🥐 Next, let's create our `slack` service that contains the logic for calling the Webhook URL in order to post notifications to our Slack. To do this we need to implement our code in `slack/slack.go`:
+
+```go
+// Service slack calls a webhook to post notifications to Slack.
+package slack
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "encore.dev/beta/errs"
+ "io"
+ "net/http"
+)
+
+type NotifyParams struct {
+ Text string `json:"text"`
+}
+
+//encore:api private
+func Notify(ctx context.Context, p *NotifyParams) error {
+ eb := errs.B()
+ reqBody, err := json.Marshal(p)
+ if err != nil {
+ return err
+ }
+ req, err := http.NewRequestWithContext(ctx, "POST", secrets.SlackWebhookURL, bytes.NewReader(reqBody))
+ if err != nil {
+ return err
+ }
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= 400 {
+ body, _ := io.ReadAll(resp.Body)
+ return eb.Code(errs.Unavailable).Msgf("notify slack: %s: %s", resp.Status, body).Err()
+ }
+ return nil
+}
+
+var secrets struct {
+ SlackWebhookURL string
+}
+```
+
+
+
+The `slack` service can be reused across any of your Encore apps. All you need is the `slack/slack.go` code and the `SlackWebhookURL` secret to be defined. Then you can call the following method signature anywhere in your app:
+
+```go
+slack.Notify(ctx, &slack.NotifyParams{Text: "Send a Slack notification"})
+```
+
+
+
+## 3. Create a service to manage users
+
+With an Incident Management Tool (or usually any tool, for that matter) we need a service for users.
+This will allow us to figure out who we should assign incoming incidents to!
+
+To get started, we need to create a `users` service with the following resources:
+
+| # | Type | Description / Filename |
+| --- | ------------------------------------ | ---------------------------------------------------------------------------------------- |
+| #1  | SQL Migration                        | Our PostgreSQL schema for user data `users/migrations/1_create_users.up.sql`              |
+| #2 | HTTP Endpoint `POST /users` | Create a new User `users/users.go` |
+| #3 | HTTP Endpoint `GET /users/:id` | Get an existing User `users/users.go` |
+
+For #1, let's design our database schema for a User in our system. For now, let's store a first and last name, as well as a Slack handle, so we can notify users about incidents assigned to or acknowledged by them.
+
+🥐 Let's create our migration file in `users/migrations/1_create_users.up.sql`:
+
+```sql
+CREATE TABLE users (
+ id BIGSERIAL PRIMARY KEY,
+ first_name VARCHAR(255) NOT NULL,
+ last_name VARCHAR(255) NOT NULL,
+ slack_handle VARCHAR(255) NOT NULL
+);
+```
+
+🥐 Then, we need to write the code implementing the HTTP endpoints listed in #2 (for creating a user) and #3 (for getting a user), which belongs in `users/users.go`. The code splits into two parts: our structs (i.e. data models) and our methods.
+
+```go
+// Service users manages users and assigns incidents.
+package users
+
+import (
+ "context"
+ "encore.dev/storage/sqldb"
+)
+
+// This is a Go struct representing our PostgreSQL schema for `users`
+type User struct {
+ Id int32
+ FirstName string
+ LastName string
+ SlackHandle string
+}
+
+// Define a database named 'users', using the database migrations
+// in the "./migrations" folder. Encore automatically provisions,
+// migrates, and connects to the database.
+var db = sqldb.NewDatabase("users", sqldb.DatabaseConfig{
+ Migrations: "./migrations",
+})
+
+//encore:api public method=POST path=/users
+func Create(ctx context.Context, params CreateParams) (*User, error) {
+ user := User{}
+ err := db.QueryRow(ctx, `
+ INSERT INTO users (first_name, last_name, slack_handle)
+ VALUES ($1, $2, $3)
+ RETURNING id, first_name, last_name, slack_handle
+ `, params.FirstName, params.LastName, params.SlackHandle).Scan(&user.Id, &user.FirstName, &user.LastName, &user.SlackHandle)
+ if err != nil {
+ return nil, err
+ }
+ return &user, nil
+}
+
+// This is what JSON params our POST /users endpoint will accept
+type CreateParams struct {
+ FirstName string
+ LastName string
+ SlackHandle string
+}
+
+//encore:api public method=GET path=/users/:id
+func Get(ctx context.Context, id int32) (*User, error) {
+ user := User{}
+ err := db.QueryRow(ctx, `
+ SELECT id, first_name, last_name, slack_handle
+ FROM users
+ WHERE id = $1
+ `, id).Scan(&user.Id, &user.FirstName, &user.LastName, &user.SlackHandle)
+ if err != nil {
+ return nil, err
+ }
+ return &user, nil
+}
+```
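+
+Since Encore API endpoints are plain Go functions, other services can call them with a regular import, and Encore turns the call into a traced, type-safe service-to-service request. A small sketch of what that looks like (this is exactly how the `schedules` service will look up users later in this tutorial):
+
+```go
+import (
+	"context"
+
+	"encore.app/users"
+)
+
+// Somewhere in another service:
+func lookupSlackHandle(ctx context.Context, id int32) (string, error) {
+	user, err := users.Get(ctx, id) // type-safe call into the users service
+	if err != nil {
+		return "", err
+	}
+	return user.SlackHandle, nil
+}
+```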
+
+🥐 Next, type `encore run` in your Terminal and in a separate window run the command under **cURL Request** (feel free to edit the values!) to create our first user:
+
+```bash
+curl -d '{
+ "FirstName":"Katy",
+ "LastName":"Smith",
+ "SlackHandle":"katy"
+}' http://localhost:4000/users
+
+# Example JSON response
+# {
+# "Id":1,
+# "FirstName":"Katy",
+# "LastName":"Smith",
+# "SlackHandle":"katy"
+# }
+```
+
+Fantastic, we now have a user system in our app! Next we need a list of start and end times for each scheduled rotation, so we know who to assign incoming incidents to (as well as notify them on Slack!).
+
+## 4. Add scheduling
+
+A good incident management tool should be able to spread the workload of diagnosing and fixing incidents across multiple users in a team. Knowing the correct person to assign an incident to is very important; otherwise our incidents might not get resolved quickly!
+
+In order to achieve this, let's create a new service called `schedules`:
+
+| # | Type | Description / Filename |
+| --- | ----------------------------------------------- | ------------------------------------------------------------------------------------------ |
+| #1  | SQL Migration                                   | Our PostgreSQL schema for scheduling data `schedules/migrations/1_create_schedules.up.sql`  |
+| #2 | HTTP Endpoint `GET /schedules` | Get list of schedules between time range `schedules/schedules.go` |
+| #3 | HTTP Endpoint `POST /users/:id/schedules` | Create a new Schedule `schedules/schedules.go` |
+| #4 | HTTP Endpoint `GET /scheduled/:timestamp` | Get Schedule at specific time `schedules/schedules.go` |
+
+
+For the SQL migration in #1, we need to create both a table and an index. For every rotation we need a new entry containing the user it is for, as well as the start and end times of the scheduled rotation.
+
+🥐 Let's create our migration file in `schedules/migrations/1_create_schedules.up.sql`:
+
+```sql
+CREATE TABLE schedules
+(
+ id BIGSERIAL PRIMARY KEY,
+ user_id INTEGER NOT NULL,
+ start_time TIMESTAMP NOT NULL,
+ end_time TIMESTAMP NOT NULL
+);
+
+CREATE INDEX schedules_range_index ON schedules (start_time, end_time);
+```
+
+
+
+
+Table indexes are used to optimize lookups without having to search every row in the table. In this case, looking up rows against both `start_time` and `end_time` will be faster _with the index_ as the dataset grows. [Learn more about PostgreSQL indexes here](https://www.tutorialspoint.com/postgresql/postgresql_indexes.htm).
+
+
+
+🥐 Next, let's implement the HTTP endpoints for #2 (listing schedules), #3 (creating a schedule) and #4 (getting the schedule/user at a specific time) in `schedules/schedules.go`:
+
+```go
+// Service schedules implements schedules to answer who should be assigned to an incident.
+package schedules
+
+import (
+ "context"
+ "errors"
+ "time"
+
+ "encore.app/users"
+ "encore.dev/beta/errs"
+ "encore.dev/storage/sqldb"
+)
+
+// Define a database named 'schedules', using the database migrations
+// in the "./migrations" folder. Encore automatically provisions,
+// migrates, and connects to the database.
+var db = sqldb.NewDatabase("schedules", sqldb.DatabaseConfig{
+ Migrations: "./migrations",
+})
+
+// This struct holds multiple Schedule structs
+type Schedules struct {
+ Items []Schedule
+}
+
+// This is a Go struct representing our PostgreSQL schema for `schedules`
+type Schedule struct {
+ Id int32
+ User users.User
+ Time TimeRange
+}
+
+// As we use time ranges in our schedule, we created a generic TimeRange struct
+type TimeRange struct {
+ Start time.Time
+ End time.Time
+}
+
+//encore:api public method=POST path=/users/:userId/schedules
+func Create(ctx context.Context, userId int32, timeRange TimeRange) (*Schedule, error) {
+ eb := errs.B().Meta("userId", userId, "timeRange", timeRange)
+ // check for existing overlapping schedules
+	if schedule, err := ScheduledAt(ctx, timeRange.Start.Format(time.RFC3339)); schedule != nil && err == nil {
+ return nil, eb.Code(errs.InvalidArgument).Cause(err).Msg("schedule already exists within this start timestamp").Err()
+ }
+	if schedule, err := ScheduledAt(ctx, timeRange.End.Format(time.RFC3339)); schedule != nil && err == nil {
+ return nil, eb.Code(errs.InvalidArgument).Cause(err).Msg("schedule already exists within this end timestamp").Err()
+ }
+
+ // check user exists
+ user, err := users.Get(ctx, userId)
+ if err != nil {
+ return nil, eb.Code(errs.Unavailable).Cause(err).Msg("failed to get user").Err()
+ }
+
+ schedule := Schedule{User: *user, Time: TimeRange{}}
+ err = db.QueryRow(
+ ctx,
+ `INSERT INTO schedules (user_id, start_time, end_time) VALUES ($1, $2, $3) RETURNING id, start_time, end_time`,
+ userId, timeRange.Start, timeRange.End,
+ ).Scan(&schedule.Id, &schedule.Time.Start, &schedule.Time.End)
+ if err != nil {
+ return nil, eb.Code(errs.Unavailable).Cause(err).Msg("failed to insert schedule").Err()
+ }
+
+ return &schedule, nil
+}
+
+//encore:api public method=GET path=/scheduled
+func ScheduledNow(ctx context.Context) (*Schedule, error) {
+ return scheduled(ctx, time.Now())
+}
+
+//encore:api public method=GET path=/scheduled/:timestamp
+func ScheduledAt(ctx context.Context, timestamp string) (*Schedule, error) {
+ eb := errs.B().Meta("timestamp", timestamp)
+ parsedtime, err := time.Parse(time.RFC3339, timestamp)
+ if err != nil {
+ return nil, eb.Code(errs.InvalidArgument).Msg("timestamp is not in a valid format").Err()
+ }
+
+ return scheduled(ctx, parsedtime)
+}
+
+func scheduled(ctx context.Context, timestamp time.Time) (*Schedule, error) {
+ eb := errs.B().Meta("timestamp", timestamp)
+ schedule, err := RowToSchedule(ctx, db.QueryRow(ctx, `
+ SELECT id, user_id, start_time, end_time
+ FROM schedules
+ WHERE start_time <= $1
+ AND end_time >= $1
+ `, timestamp.UTC()))
+	if errors.Is(err, sqldb.ErrNoRows) {
+ return nil, eb.Code(errs.NotFound).Msg("no schedule found").Err()
+ }
+ if err != nil {
+ return nil, err
+ }
+ return schedule, nil
+}
+
+//encore:api public method=GET path=/schedules
+func ListByTimeRange(ctx context.Context, timeRange TimeRange) (*Schedules, error) {
+ rows, err := db.Query(ctx, `
+ SELECT id, user_id, start_time, end_time
+ FROM schedules
+ WHERE start_time >= $1
+ AND end_time <= $2
+ ORDER BY start_time ASC
+ `, timeRange.Start, timeRange.End)
+ if err != nil {
+ return nil, err
+ }
+
+ defer rows.Close()
+
+ var schedules []Schedule
+ for rows.Next() {
+ schedule, err := RowToSchedule(ctx, rows)
+ if err != nil {
+ return nil, err
+ }
+ schedules = append(schedules, *schedule)
+ }
+
+ return &Schedules{Items: schedules}, nil
+}
+
+//encore:api public method=DELETE path=/schedules
+func DeleteByTimeRange(ctx context.Context, timeRange TimeRange) (*Schedules, error) {
+ schedules, err := ListByTimeRange(ctx, timeRange)
+ if err != nil {
+ return nil, err
+ }
+ _, err = db.Exec(ctx, `DELETE FROM schedules WHERE start_time >= $1 AND end_time <= $2`, timeRange.Start, timeRange.End)
+ if err != nil {
+ return nil, err
+ }
+
+ return schedules, err
+}
+
+// Helper function to convert a Row object to a Schedule
+func RowToSchedule(ctx context.Context, row interface {
+ Scan(dest ...interface{}) error
+}) (*Schedule, error) {
+ var schedule = &Schedule{Time: TimeRange{}}
+ var userId int32
+
+ err := row.Scan(&schedule.Id, &userId, &schedule.Time.Start, &schedule.Time.End)
+ if err != nil {
+ return nil, err
+ }
+
+ user, err := users.Get(ctx, userId)
+ if err != nil {
+ return nil, err
+ }
+
+ schedule.User = *user
+ return schedule, nil
+}
+```
+
+🥐 Next, type `encore run` in your Terminal and in a separate window run the command under **cURL Request** (also feel free to edit the values!) to create our first schedule against the user we created earlier:
+
+```bash
+curl -d '{
+ "Start":"2023-11-28T10:00:00Z",
+ "End":"2023-11-30T10:00:00Z"
+}' "http://localhost:4000/users/1/schedules"
+
+# Example JSON response
+# {
+# "Id":1,
+# "User":{
+# "Id":1,
+# "FirstName":"Katy",
+# "LastName":"Smith",
+# "SlackHandle":"katy"
+# },
+# "Time":{
+# "Start":"2023-11-28T10:00:00Z",
+# "End":"2023-11-30T10:00:00Z"
+# }
+# }
+```
+
+## 5. Create a service to manage incidents
+
+So we have users, and with the introduction of the `schedules` service we know who should be notified (if anyone) at any given time. The only thing we're missing is the ability to report, assign and acknowledge incidents!
+
+The flow we're going to implement is: when an incident comes in, we either leave it unassigned or auto-assign it based on the `schedules` service, and every incident has to be acknowledged. Until an incident is acknowledged, a reminder is posted on Slack every 10 minutes.
+
+To start with, we need to create a new `incidents` service with the following resources:
+
+
+| # | Type | Description / Filename |
+| --- | ---------------------------------------------------- | -------------------------------------------------------------------------------------------------- |
+| #1 | SQL Migration | Our PostgreSQL schema for storing incidents `incidents/migrations/1_create_incidents.up.sql` |
+| #2 | HTTP Endpoint `GET /incidents` | Get list of all unacknowledged incidents `incidents/incidents.go` |
+| #3 | HTTP Endpoint `PUT /incidents/:id/acknowledge` | Acknowledge an incident `incidents/incidents.go` |
+| #4  | HTTP Endpoint `POST /incidents`                      | Create (and auto-assign) an incident `incidents/incidents.go`                                        |
+
+For the SQL migration in #1, we need to create the table for our incidents. We need a one-to-many relationship between a user and an incident: an incident can only be assigned to a single user, but a single user can be assigned many incidents.
+
+🥐 Let's create our migration file in `incidents/migrations/1_create_incidents.up.sql`:
+
+```sql
+CREATE TABLE incidents
+(
+ id BIGSERIAL PRIMARY KEY,
+ assigned_user_id INTEGER,
+ body TEXT NOT NULL,
+ created_at TIMESTAMP NOT NULL DEFAULT NOW(),
+ acknowledged_at TIMESTAMP
+);
+```
+
+🥐 Next, add the following code to `incidents/incidents.go` to support incidents:
+
+```go
+// Service incidents reports, assigns and acknowledges incidents.
+package incidents
+
+import (
+ "context"
+ "encore.app/schedules"
+ "encore.app/slack"
+ "encore.app/users"
+ "encore.dev/beta/errs"
+ "encore.dev/storage/sqldb"
+ "fmt"
+ "time"
+)
+
+// Define a database named 'incidents', using the database migrations
+// in the "./migrations" folder. Encore automatically provisions,
+// migrates, and connects to the database.
+var db = sqldb.NewDatabase("incidents", sqldb.DatabaseConfig{
+ Migrations: "./migrations",
+})
+
+// This struct holds multiple Incidents structs
+type Incidents struct {
+ Items []Incident
+}
+
+// This is a Go struct representing our PostgreSQL schema for `incidents`
+type Incident struct {
+ Id int32
+ Body string
+ CreatedAt time.Time
+ Acknowledged bool
+ AcknowledgedAt *time.Time
+ Assignee *users.User
+}
+
+//encore:api public method=GET path=/incidents
+func List(ctx context.Context) (*Incidents, error) {
+ rows, err := db.Query(ctx, `
+ SELECT id, assigned_user_id, body, created_at, acknowledged_at
+ FROM incidents
+ WHERE acknowledged_at IS NULL
+ `)
+ if err != nil {
+ return nil, err
+ }
+ return RowsToIncidents(ctx, rows)
+}
+
+//encore:api public method=PUT path=/incidents/:id/acknowledge
+func Acknowledge(ctx context.Context, id int32) (*Incident, error) {
+ eb := errs.B().Meta("incidentId", id)
+ rows, err := db.Query(ctx, `
+ UPDATE incidents
+ SET acknowledged_at = NOW()
+ WHERE acknowledged_at IS NULL
+ AND id = $1
+ RETURNING id, assigned_user_id, body, created_at, acknowledged_at
+ `, id)
+ if err != nil {
+ return nil, err
+ }
+
+ incidents, err := RowsToIncidents(ctx, rows)
+ if err != nil {
+ return nil, err
+ }
+ if incidents.Items == nil {
+ return nil, eb.Code(errs.NotFound).Msg("no incident found").Err()
+ }
+
+ incident := &incidents.Items[0]
+ _ = slack.Notify(ctx, &slack.NotifyParams{
+ Text: fmt.Sprintf("Incident #%d assigned to %s %s <@%s> has been acknowledged:\n%s", incident.Id, incident.Assignee.FirstName, incident.Assignee.LastName, incident.Assignee.SlackHandle, incident.Body),
+ })
+
+ return incident, err
+}
+
+//encore:api public method=POST path=/incidents
+func Create(ctx context.Context, params *CreateParams) (*Incident, error) {
+	// check who is on-call (if this errors, nobody is on call and the incident is left unassigned)
+ schedule, err := schedules.ScheduledNow(ctx)
+
+ incident := Incident{}
+ if schedule != nil {
+ incident.Assignee = &schedule.User
+ }
+
+	var row *sqldb.Row
+ if schedule != nil {
+ // Someone is on-call
+ row = db.QueryRow(ctx, `
+ INSERT INTO incidents (assigned_user_id, body)
+ VALUES ($1, $2)
+ RETURNING id, body, created_at
+ `, &schedule.User.Id, params.Body)
+ } else {
+ // Nobody is on-call
+ row = db.QueryRow(ctx, `
+ INSERT INTO incidents (body)
+ VALUES ($1)
+ RETURNING id, body, created_at
+ `, params.Body)
+ }
+
+ if err = row.Scan(&incident.Id, &incident.Body, &incident.CreatedAt); err != nil {
+ return nil, err
+ }
+
+ var text string
+ if incident.Assignee != nil {
+ text = fmt.Sprintf("Incident #%d created and assigned to %s %s <@%s>\n%s", incident.Id, incident.Assignee.FirstName, incident.Assignee.LastName, incident.Assignee.SlackHandle, incident.Body)
+ } else {
+ text = fmt.Sprintf("Incident #%d created and unassigned\n%s", incident.Id, incident.Body)
+ }
+ _ = slack.Notify(ctx, &slack.NotifyParams{Text: text})
+
+ return &incident, nil
+}
+
+type CreateParams struct {
+ Body string
+}
+
+// Helper to take a sqldb.Rows instance and convert it into a list of Incidents
+func RowsToIncidents(ctx context.Context, rows *sqldb.Rows) (*Incidents, error) {
+ eb := errs.B()
+
+ defer rows.Close()
+
+ var incidents []Incident
+ for rows.Next() {
+ var incident = Incident{}
+ var assignedUserId *int32
+ if err := rows.Scan(&incident.Id, &assignedUserId, &incident.Body, &incident.CreatedAt, &incident.AcknowledgedAt); err != nil {
+ return nil, eb.Code(errs.Unknown).Msgf("could not scan: %v", err).Err()
+ }
+ if assignedUserId != nil {
+ user, err := users.Get(ctx, *assignedUserId)
+ if err != nil {
+ return nil, eb.Code(errs.NotFound).Msgf("could not retrieve user for incident %v", assignedUserId).Err()
+ }
+ incident.Assignee = user
+ }
+ incident.Acknowledged = incident.AcknowledgedAt != nil
+ incidents = append(incidents, incident)
+ }
+
+ return &Incidents{Items: incidents}, nil
+}
+```
+
+Fantastic! We have an _almost_ working application. The main two things we're missing are:
+
+1. For unacknowledged incidents, we need to post a reminder on Slack every 10 minutes until they have been acknowledged.
+2. Whenever a user is currently on call, we should assign all previously unassigned incidents to them.
+
+🥐 To achieve this, we'll need to create two [Cron Jobs](/docs/develop/cron-jobs), which thankfully Encore makes incredibly simple. Let's go ahead and create the first one, reminding us every 10 minutes about incidents we haven't acknowledged. Add the code below to our `incidents/incidents.go` file:
+
+```go
+// Track unacknowledged incidents
+var _ = cron.NewJob("unacknowledged-incidents-reminder", cron.JobConfig{
+ Title: "Notify on Slack about incidents which are not acknowledged",
+ Every: 10 * cron.Minute,
+ Endpoint: RemindUnacknowledgedIncidents,
+})
+
+//encore:api private
+func RemindUnacknowledgedIncidents(ctx context.Context) error {
+ incidents, err := List(ctx) // we never query for acknowledged incidents
+ if err != nil {
+ return err
+ }
+ if incidents == nil {
+ return nil
+ }
+
+ var items = []string{"These incidents have not been acknowledged yet. Please acknowledge them otherwise you will be reminded every 10 minutes:"}
+ for _, incident := range incidents.Items {
+ var assignee string
+ if incident.Assignee != nil {
+ assignee = fmt.Sprintf("%s %s (<@%s>)", incident.Assignee.FirstName, incident.Assignee.LastName, incident.Assignee.SlackHandle)
+ } else {
+ assignee = "Unassigned"
+ }
+
+ items = append(items, fmt.Sprintf("[%s] [#%d] %s", assignee, incident.Id, incident.Body))
+ }
+
+ if len(incidents.Items) > 0 {
+ _ = slack.Notify(ctx, &slack.NotifyParams{Text: strings.Join(items, "\n")})
+ }
+
+ return nil
+}
+```
+
+And for our second cron job: when someone goes on call, we need to automatically assign the previously unassigned incidents to them. We don't have an HTTP endpoint for assigning incidents, so we need to implement a `PUT /incidents/:id/assign` endpoint.
+
+🥐 Let's add that endpoint, as well as the cron job code, to our `incidents/incidents.go` file:
+
+```go
+//encore:api public method=PUT path=/incidents/:id/assign
+func Assign(ctx context.Context, id int32, params *AssignParams) (*Incident, error) {
+ eb := errs.B().Meta("params", params)
+ rows, err := db.Query(ctx, `
+ UPDATE incidents
+ SET assigned_user_id = $1
+ WHERE acknowledged_at IS NULL
+ AND id = $2
+ RETURNING id, assigned_user_id, body, created_at, acknowledged_at
+ `, params.UserId, id)
+ if err != nil {
+ return nil, err
+ }
+
+ incidents, err := RowsToIncidents(ctx, rows)
+ if err != nil {
+ return nil, err
+ }
+ if incidents.Items == nil {
+ return nil, eb.Code(errs.NotFound).Msg("no incident found").Err()
+ }
+
+ incident := &incidents.Items[0]
+ _ = slack.Notify(ctx, &slack.NotifyParams{
+ Text: fmt.Sprintf("Incident #%d is re-assigned to %s %s <@%s>\n%s", incident.Id, incident.Assignee.FirstName, incident.Assignee.LastName, incident.Assignee.SlackHandle, incident.Body),
+ })
+
+ return incident, err
+}
+
+type AssignParams struct {
+ UserId int32
+}
+
+var _ = cron.NewJob("assign-unassigned-incidents", cron.JobConfig{
+ Title: "Assign unassigned incidents to user on-call",
+ Every: 1 * cron.Minute,
+ Endpoint: AssignUnassignedIncidents,
+})
+
+//encore:api private
+func AssignUnassignedIncidents(ctx context.Context) error {
+	// look up who is on call; if nobody is scheduled right now, there is nobody to assign to, so stop here
+ schedule, err := schedules.ScheduledNow(ctx)
+ if err != nil {
+ return err
+ }
+
+ incidents, err := List(ctx) // we never query for acknowledged incidents
+ if err != nil {
+ return err
+ }
+
+ for _, incident := range incidents.Items {
+ if incident.Assignee != nil {
+ continue // this incident has already been assigned
+ }
+
+ _, err := Assign(ctx, incident.Id, &AssignParams{UserId: schedule.User.Id})
+ if err == nil {
+ rlog.Info("OK assigned unassigned incident", "incident", incident, "user", schedule.User)
+ } else {
+ rlog.Error("FAIL to assign unassigned incident", "incident", incident, "user", schedule.User, "err", err)
+ return err
+ }
+ }
+
+ return nil
+}
+```
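+
+Note that these two snippets use a few packages that aren't in the import block we started with. For them to compile, extend the imports at the top of `incidents/incidents.go`:
+
+```go
+import (
+	// ...existing imports...
+	"strings"
+
+	"encore.dev/cron"
+	"encore.dev/rlog"
+)
+```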
+
+🥐 Next, run `encore run` in your Terminal and, in a separate window, run the command under **cURL Request** (also feel free to edit the values!) to trigger our first incident. It will most likely be unassigned, unless the schedule you created in the previous cURL request overlaps with the current time:
+
+```bash
+curl -d '{
+ "Body":"An unexpected error happened on example-website.com on line 38. It needs addressing now!"
+}' http://localhost:4000/incidents
+
+# Example JSON response
+# {
+# "Id":1,
+# "Body":"An unexpected error happened on example-website.com on line 38. It needs addressing now!",
+# "CreatedAt":"2022-09-28T15:09:00Z",
+# "Acknowledged":false,
+# "AcknowledgedAt":null,
+# "Assignee":null
+# }
+```
+
+## 6. Try your app and deploy
+
+Congratulations! Our application is ready for others to try: we have our `users`, `schedules`, `incidents` and `slack` services, along with 3 database tables and 2 cron jobs. Even better, all of the deployment and maintenance is taken care of by Encore!
+
+🥐 To try out your application, type `encore run` in your Terminal and run the following cURL commands:
+
+```bash
+# Step 1: Create a User and copy the User ID to your clipboard
+curl -d '{
+ "FirstName":"Katy",
+ "LastName":"Smith",
+ "SlackHandle":"katy"
+}' http://localhost:4000/users
+
+# Step 2: Create a schedule for the user we just created
+curl -d '{
+ "Start":"2022-09-28T10:00:00Z",
+ "End":"2022-09-29T10:00:00Z"
+}' "http://localhost:4000/users/1/schedules"
+
+# Step 3: Trigger an incident
+curl -d '{
+ "Body":"An unexpected error happened on example-website.com on line 38. It needs addressing now!"
+}' http://localhost:4000/incidents
+
+# Step 4: Acknowledge the Incident
+curl -X PUT "http://localhost:4000/incidents/1/acknowledge"
+```
+
+And if you don't acknowledge incoming incidents in Step 4, you will be reminded on Slack every 10 minutes:
+
+
+
+### Deploy to the cloud
+
+🥐 Push your changes and deploy your application to Encore's free development cloud by running:
+
+```shell
+$ git add -A .
+$ git commit -m 'Initial commit'
+$ git push encore
+```
+
+Encore will now build and test your app, provision the needed infrastructure, and deploy your application to the cloud.
+
+After triggering the deployment, you will see a URL where you can view its progress in the [Encore Cloud dashboard](https://app.encore.cloud). It will look something like: `https://app.encore.cloud/$APP_ID/deploys/...`
+
+From there you can also see metrics and traces, link your app to a GitHub repo to get automatic deploys on new commits, and connect your own AWS or GCP account to use for production deployment.
+
+### Celebrate with fireworks
+
+Now that your app is running in the cloud, let's celebrate with some fireworks:
+
+🥐 In the Cloud Dashboard, open the Command Menu by pressing **Cmd + K** (Mac) or **Ctrl + K** (Windows/Linux).
+
+_From here you can easily access all Cloud Dashboard features and, for example, jump straight to specific services in the Service Catalog or view Traces for specific endpoints._
+
+🥐 Type `fireworks` in the Command Menu and press enter. Sit back and enjoy the show!
+
+
+
+### Architecture Diagram
+
+Take a look at the [Encore Flow](/docs/go/observability/encore-flow) diagram that was automatically generated for our new application too!
+
+
+
+### GitHub Repository
+
+🥐 Check out the `example-app-oncall` repository on GitHub for this example which includes additional features and tests:
+[https://github.com/encoredev/example-app-oncall](https://github.com/encoredev/example-app-oncall)
+
+Alternatively, you can clone our example application by running this in your Terminal:
+
+```shell
+$ encore app create --example https://github.com/encoredev/example-app-oncall
+```
+
+### Feedback
+
+🥐 We'd love to hear your thoughts about this tutorial and learn about what you're building next.
+Let us know by [tweeting your experience](https://twitter.com/encoredotdev), blog about it, or talk to us about it on [Discord](https://encore.dev/discord).
diff --git a/docs/go/tutorials/meeting-notes.mdx b/docs/go/tutorials/meeting-notes.mdx
new file mode 100644
index 0000000000..6e2c8b0328
--- /dev/null
+++ b/docs/go/tutorials/meeting-notes.mdx
@@ -0,0 +1,361 @@
+---
+title: Building a Meeting Notes app
+subtitle: Learn how to set up a web app backend (with database) in less than 100 lines of code
+seotitle: How to build a Meeting Notes app in Go & React
+seodesc: Learn how to set up a free & production-ready web app backend in Go (with database) in less than 100 lines
+lang: go
+---
+
+In this tutorial, we will create a backend in less than 100 lines of code. The backend will:
+
+- Store data in a cloud SQL database
+- Make API calls to a third-party service
+- Deploy to the cloud and be publicly available
+
+The example app we will build is a markdown meeting notes app, but it's trivial to replace the specifics if you have another idea in mind (again, in less than 100 lines of code).
+
+**[Demo version of the app](https://encoredev.github.io/meeting-notes)**
+
+
+
+
+
+This is the end result:
+
+
+
+
+## Create your Encore application
+
+Create a new app from the meeting-notes example. This will start you off with everything described in this tutorial:
+
+```shell
+$ encore app create my-app --example=meeting-notes
+```
+
+
+
+Before running the project locally, make sure you have [Docker](https://www.docker.com/products/docker-desktop/) installed and running. Docker is needed for Encore to create databases for locally running projects. Also, if you want to try the photo search functionality you will need an API key from [pexels.com/api/](https://www.pexels.com/api/) (more on that below).
+
+
+
+To run the backend locally:
+
+```shell
+$ cd your-app-name # replace with the app name you picked
+$ encore run
+```
+
+You should see the following:
+
+
+
+
+
+That means your local development backend is up and running! Encore takes care of setting up all the necessary infrastructure for your application, including databases. Encore also starts the local development dashboard which is a tool to help you move faster when you're developing new features.
+
+
+
+
+
+To start the front-end, run the following commands in another terminal window:
+
+```shell
+$ cd your-app-name/frontend
+$ npm install
+$ npm run dev
+```
+
+You can now open http://localhost:5173/example-meeting-notes/ in your browser 🔥
+
+## Storing and retrieving from an SQL database
+
+Let's take a look at the backend code. There are essentially only three files of interest; let's start by looking at `note.go`. This file contains two endpoints and one interface, all standard Go code except for a few lines specific to Encore.
+
+The `Note` type represents our data structure:
+
+```go
+type Note struct {
+ ID string `json:"id"`
+ Text string `json:"text"`
+ CoverURL string `json:"cover_url"`
+}
+```
+
+Every note will have an `ID` (a UUID created on the frontend), `Text` (Markdown text content), and `CoverURL` (background image URL).
+
+The `SaveNote` function handles storing a meeting note:
+
+```go
+//encore:api public method=POST path=/note
+func SaveNote(ctx context.Context, note *Note) (*Note, error) {
+ // Save the note to the database.
+ // If the note already exists (i.e. CONFLICT), we update the notes text and the cover URL.
+ _, err := sqldb.Exec(ctx, `
+ INSERT INTO note (id, text, cover_url) VALUES ($1, $2, $3)
+ ON CONFLICT (id) DO UPDATE SET text=$2, cover_url=$3
+ `, note.ID, note.Text, note.CoverURL)
+
+ // If there was an error saving to the database, then we return that error.
+ if err != nil {
+ return nil, err
+ }
+
+ // Otherwise, we return the note to indicate that the save was successful.
+ return note, nil
+}
+```
+
+The comment above the function tells Encore that this is a public endpoint that should be reachable by POST on `/note`. The second argument to the function (`Note`) is the POST body and the function returns a `Note` and an `error` (a `nil` error means a 200 response).
+
+The `GetNote` function takes care of fetching a meeting note from our database given an `id`:
+
+```go
+//encore:api public method=GET path=/note/:id
+func GetNote(ctx context.Context, id string) (*Note, error) {
+ note := &Note{ID: id}
+
+ // We use the note ID to query the database for the note's text and cover URL.
+ err := sqldb.QueryRow(ctx, `
+ SELECT text, cover_url FROM note
+ WHERE id = $1
+	`, id).Scan(&note.Text, &note.CoverURL)
+
+ // If the note doesn't exist, we return an error.
+ if err != nil {
+ return nil, err
+ }
+
+ // Otherwise, we return the note.
+ return note, nil
+}
+```
+
+Here we have a public GET endpoint with a dynamic path parameter which is the `id` of the meeting note to fetch. The second argument, in this case, is the dynamic path parameter, a request to this endpoint will look like `/note/123-abc` where `id` will be set to `123-abc`.
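+
+As written, `GetNote` returns the raw database error when a note doesn't exist, which surfaces as a generic error response. If you'd rather return a proper not-found error, one option is Encore's `errs` package. A minimal sketch of the check you could add after the `Scan` call (a hypothetical refinement, not part of the example app; it also needs the `errors` and `encore.dev/beta/errs` imports):
+
+```go
+// Map a missing row to a NotFound error instead of a generic one.
+if errors.Is(err, sqldb.ErrNoRows) {
+	return nil, &errs.Error{Code: errs.NotFound, Message: "note not found"}
+}
+if err != nil {
+	return nil, err
+}
+```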
+
+Both `SaveNote` and `GetNote` make use of a SQL database table named `note`; let's look at how that table is defined.
+
+## Defining a SQL database
+
+To create a SQL database using Encore we first create a folder named `migrations` and inside that folder a migration file named `1_create_tables.up.sql`. The file name is important (it must look something like `1_name.up.sql`). Our migration file is only five lines long and looks like this:
+
+```sql
+CREATE TABLE note (
+ id TEXT PRIMARY KEY,
+ text TEXT,
+ cover_url TEXT
+);
+```
+
+When Encore detects this file, it will create a `note` table with three columns: `id`, `text` and `cover_url`. The `id` column is the primary key, used to identify specific meeting notes.
+
+## Making requests to a third-party API
+
+Let's look at how we can use an Encore endpoint to proxy requests to a third-party service (in this example the photo service [pexels.com](http://www.pexels.com/), but the idea would be the same for any other third-party API).
+
+The file `pexels.go` only has one endpoint, `SearchPhoto`:
+
+```go
+//encore:api public method=GET path=/images/:query
+func SearchPhoto(ctx context.Context, query string) (*SearchResponse, error) {
+ // Create a new http client to proxy the request to the Pexels API.
+ URL := "https://api.pexels.com/v1/search?query=" + query
+ client := &http.Client{}
+ req, _ := http.NewRequest("GET", URL, nil)
+
+ // Add authorization header to the req with the API key.
+ req.Header.Set("Authorization", secrets.PexelsApiKey)
+
+ // Make the request, and close the response body when we're done.
+ res, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+
+ if res.StatusCode >= 400 {
+ return nil, fmt.Errorf("Pexels API error: %s", res.Status)
+ }
+
+ // Decode the data into the searchResponse struct.
+ var searchResponse *SearchResponse
+ err = json.NewDecoder(res.Body).Decode(&searchResponse)
+ if err != nil {
+ return nil, err
+ }
+
+ return searchResponse, nil
+}
+```
+
+Again a GET endpoint with a dynamic path parameter which this time represents the query text we want to send to the Pexels API.
+
+The type we use to decode the response from the Pexels API looks like this:
+
+```go
+type SearchResponse struct {
+ Photos []struct {
+ Id int `json:"id"`
+ Src struct {
+ Medium string `json:"medium"`
+ Landscape string `json:"landscape"`
+ } `json:"src"`
+ Alt string `json:"alt"`
+ } `json:"photos"`
+}
+```
+
+We get a lot more data from Pexels but here we only pick the fields that we want to propagate to our frontend.
+
+The [Pexels API](https://www.pexels.com/api/) requires an API key, as most open APIs do. The API key is added as a header to the requests (from the `SearchPhoto` function above):
+
+```go
+req.Header.Set("Authorization", secrets.PexelsApiKey)
+```
+
+Here we could have hardcoded the API key, but that would have made it readable by everyone with access to our repo. Instead, we make use of Encore's built-in [secrets management](https://encore.dev/docs/go/primitives/secrets). To set this secret, run the following command in your project folder and follow the prompt:
+
+```shell
+encore secret set --type dev,prod,local,pr PexelsApiKey
+```
+
+## Creating a request client
+
+Encore is able to generate frontend [request clients](https://encore.dev/docs/go/cli/client-generation) (TypeScript or JavaScript). This means that you do not need to manually keep the request/response objects in sync on the frontend, which is a huge time saver. To generate a client, run:
+
+```shell
+$ encore gen client --output=./src/client.ts --env=<env-name>
+```
+
+You are going to want to run this command quite often (whenever you make a change to your endpoints) so having it as an `npm` script is a good idea:
+
+```json
+{
+...
+"scripts": {
+ ...
+ "generate-client:staging": "encore gen client --output=./src/client.ts --env=staging",
+ "generate-client:local": "encore gen client --output=./src/client.ts --env=local"
+ },
+}
+```
+
+After that you are ready to use the request client in your code. Here is an example of calling the `GetNote` endpoint:
+
+```tsx
+import Client, { Environment, Local } from "src/client.ts";
+
+// Making request to locally running backend...
+const client = new Client(Local);
+// or to a specific deployed environment
+const client = new Client(Environment("staging"));
+
+// Calling APIs as typesafe functions 🌟
+const response = await client.note.GetNote("note-uuid");
+console.log(response.id);
+console.log(response.cover_url);
+console.log(response.text);
+```
+
+## Deploying the backend to the cloud
+
+It’s deploy time! To get your backend deployed in the cloud, all you need to do is commit your code and push it to the `encore` remote:
+
+```shell
+$ git add -A .
+$ git commit -m 'Initial commit'
+$ git push encore
+```
+
+When running `git push encore` you will get a link to the Encore Cloud dashboard where you can view the deploy for your app, and after about a minute you'll have a backend running in the cloud ☁️
+
+
+
+
+
+## Hosting the frontend
+
+The frontend can be deployed to any static site hosting platform. The example project is pre-configured to deploy the frontend to [GitHub Pages](https://docs.github.com/en/pages/getting-started-with-github-pages/creating-a-github-pages-site). Take a look at `.github/workflows/node.yml` to see the GitHub Actions workflow being triggered on new commits to the repo:
+
+```yaml
+name: Build and Deploy
+
+on: [push]
+
+permissions:
+ contents: write
+
+jobs:
+ build-and-deploy:
+ concurrency: ci-${{ github.ref }}
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ working-directory: frontend
+
+ steps:
+ - name: Checkout 🛎️
+ uses: actions/checkout@v3
+
+ - name: Use Node.js
+ uses: actions/setup-node@v3
+ with:
+ node-version: "16.15.1"
+
+ - name: Install and Build 🔧
+ run: |
+ npm install
+ npm run build
+
+ - name: Deploy 🚀
+ uses: JamesIves/github-pages-deploy-action@v4.3.3
+ with:
+ branch: gh-pages
+ folder: frontend/dist
+```
+
+The interesting part is towards the bottom where we build the frontend code and make use of the [github-pages-deploy-action](https://github.com/JamesIves/github-pages-deploy-action) step to automatically make a new commit with the compiled frontend code to a `gh-pages` branch.
+
+**Steps to deploy to GitHub pages:**
+
+1. Create a repo on GitHub
+2. In the `vite.config.js` file, set the `base` property to the name of your repo:
+
+```js
+base: "/my-repo-name/",
+```
+
+3. Push your code to GitHub and wait for the GitHub Actions workflow to finish.
+4. Go to _Settings_ → _Pages_ for your repo on GitHub and set _Branch_ to `gh-pages`.
+
+## Celebrate with fireworks
+
+Now that your app is running in the cloud, let's celebrate with some fireworks:
+
+🥐 In the Cloud Dashboard, open the Command Menu by pressing **Cmd + K** (Mac) or **Ctrl + K** (Windows/Linux).
+
+_From here you can easily access all Cloud Dashboard features and, for example, jump straight to specific services in the Service Catalog or view Traces for specific endpoints._
+
+🥐 Type `fireworks` in the Command Menu and press enter. Sit back and enjoy the show!
+
+
+
+## Wrapping up
+
+You’ve learned how to build and deploy a Go backend using Encore, store data in an SQL database, and make API calls to an external service. All of this in under 100 lines of code.
diff --git a/docs/go/tutorials/rest-api.mdx b/docs/go/tutorials/rest-api.mdx
new file mode 100644
index 0000000000..28740e71f3
--- /dev/null
+++ b/docs/go/tutorials/rest-api.mdx
@@ -0,0 +1,413 @@
+---
+seotitle: How to build a REST API
+seodesc: Learn how to build and ship a REST API in just a few minutes with Encore.go. Go from zero to running API with this tutorial.
+title: Building a REST API
+subtitle: Learn how to build a URL shortener with a REST API and PostgreSQL database
+lang: go
+---
+
+In this tutorial you will create a REST API for a URL Shortener service. In a few short minutes, you'll learn how to:
+
+* Create REST APIs with Encore
+* Use PostgreSQL databases
+* Use the local development dashboard to test your app
+* Create and run tests
+
+This is the end result:
+
+
+
+
+
+
+To make it easier to follow along, we've laid out a trail of croissants to guide your way.
+Whenever you see a 🥐 it means there's something for you to do.
+
+
+
+## 1. Create a service and endpoint
+
+If you haven't already, create a new application by running `encore app create` and select `Empty app` as the template.
+
+If this is the first time you're using Encore, you'll be asked if you wish to create a free account.
+This is needed when you want Encore to manage functionality like secrets and handle cloud deployments (which we'll use later on in the tutorial).
+
+Now let's create a new `url` service.
+
+🥐 In your application's root folder, create a new folder `url` and create a new file `url.go` that looks like this:
+
+```go
+-- url/url.go --
+// Service url takes URLs, generates random short IDs, and stores the URLs in a database.
+package url
+
+import (
+ "context"
+ "crypto/rand"
+ "encoding/base64"
+)
+
+type URL struct {
+ ID string // short-form URL id
+ URL string // complete URL, in long form
+}
+
+type ShortenParams struct {
+ URL string // the URL to shorten
+}
+
+// Shorten shortens a URL.
+//encore:api public method=POST path=/url
+func Shorten(ctx context.Context, p *ShortenParams) (*URL, error) {
+ id, err := generateID()
+ if err != nil {
+ return nil, err
+ }
+ return &URL{ID: id, URL: p.URL}, nil
+}
+
+// generateID generates a random short ID.
+func generateID() (string, error) {
+ var data [6]byte // 6 bytes of entropy
+ if _, err := rand.Read(data[:]); err != nil {
+ return "", err
+ }
+ return base64.RawURLEncoding.EncodeToString(data[:]), nil
+}
+```
+
+This sets up the `POST /url` endpoint (see the `//encore:api` annotation on the `Shorten` function).
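+
+(A quick sanity check on the ID length: 6 bytes is 48 bits of entropy, and base64 encodes 6 bits per character, so the generated IDs are 48 / 6 = 8 characters long, like the `5cJpBVRp` example below.)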
+
+🥐 Let’s see if it works! Start your app by running `encore run`.
+
+You should see this:
+
+```output
+Your API is running at: http://localhost:4000
+Development Dashboard URL: http://localhost:9400
+4:19PM INF registered endpoint path=/url service=url endpoint=Shorten
+```
+
+🥐 Next, call your endpoint from the Local Development Dashboard at [http://localhost:9400](http://localhost:9400) and view a trace of the response.
+It should look like this:
+
+
+
+
+
+You can also call it from the terminal:
+
+```shell
+$ curl http://localhost:4000/url -d '{"URL": "https://encore.dev"}'
+```
+
+And you should see this:
+
+```json
+{
+ "ID": "5cJpBVRp",
+ "URL": "https://encore.dev"
+}
+```
+
+It works! There’s just one problem...
+
+Right now, we’re not actually storing the URL anywhere. That means we can generate shortened IDs but there’s no way to get back to the original URL! We need to store a mapping from the short ID to the complete URL.
+
+## 2. Save URLs in a database
+Fortunately, Encore makes it really easy to set up a PostgreSQL database to store our data. To do so, we first define a **database schema**, in the form of a migration file.
+
+🥐 Create a new folder named `migrations` inside the `url` folder. Then, inside the `migrations` folder, create an initial database migration file named `1_create_tables.up.sql`. The file name format is important (it must start with `1_` and end in `.up.sql`).
+
+🥐 Add the following contents to the file:
+
+```sql
+-- url/migrations/1_create_tables.up.sql --
+CREATE TABLE url (
+ id TEXT PRIMARY KEY,
+ original_url TEXT NOT NULL
+);
+```
+
+🥐 Next, go back to the `url/url.go` file and import the `encore.dev/storage/sqldb` package by modifying the import statement to become:
+
+```go
+HL url/url.go 5:5
+-- url/url.go --
+import (
+ "context"
+ "crypto/rand"
+ "encoding/base64"
+
+ "encore.dev/storage/sqldb"
+)
+```
+
+🥐 Then let's define our database object by adding the following to `url/url.go`:
+
+```go
+-- url/url.go --
+// Define a database named 'url', using the database
+// migrations in the "./migrations" folder.
+var db = sqldb.NewDatabase("url", sqldb.DatabaseConfig{
+ Migrations: "./migrations",
+})
+```
+
+🥐 Now, to insert data into our database, let’s create a helper function `insert`:
+
+```go
+-- url/url.go --
+// insert inserts a URL into the database.
+func insert(ctx context.Context, id, url string) error {
+ _, err := db.Exec(ctx, `
+ INSERT INTO url (id, original_url)
+ VALUES ($1, $2)
+ `, id, url)
+ return err
+}
+```
+
+🥐 Lastly, we can update our `Shorten` function to insert into the database:
+
+```go
+-- url/url.go --
+func Shorten(ctx context.Context, p *ShortenParams) (*URL, error) {
+ id, err := generateID()
+ if err != nil {
+ return nil, err
+ } else if err := insert(ctx, id, p.URL); err != nil {
+ return nil, err
+ }
+ return &URL{ID: id, URL: p.URL}, nil
+}
+```
+
+
+
+Before running your application, make sure you have [Docker](https://www.docker.com) installed and running. It's required to locally run Encore applications with databases.
+
+
+
+🥐 Next, start the application again with `encore run` and Encore automatically sets up your database.
+
+(If your application won't run, check the [databases troubleshooting guide](/docs/develop/databases#troubleshooting).)
+
+You can verify that the database was created by opening the **Infra** tab in the local development dashboard at [localhost:9400](http://localhost:9400), which should look something like this:
+
+
+
+🥐 Now let's call the API again from the local development dashboard, or from the terminal:
+
+```shell
+$ curl http://localhost:4000/url -d '{"URL": "https://encore.dev"}'
+```
+
+🥐 Finally, let's verify that it was saved in the database. You can do this by checking the trace in the local development dashboard, or by running `encore db shell url` from the app root directory and entering `select * from url;`:
+
+```shell
+$ encore db shell url
+psql (13.1, server 11.12)
+Type "help" for help.
+
+url=# select * from url;
+ id | original_url
+----------+--------------------
+ zr6RmZc4 | https://encore.dev
+(1 row)
+```
+
+That was easy!
+
+## 3. Add endpoint to retrieve URLs
+To complete our URL shortener API, let’s add the endpoint to retrieve a URL given its short id.
+
+🥐 Add this endpoint to `url/url.go`:
+
+```go
+-- url/url.go --
+// Get retrieves the original URL for the id.
+//encore:api public method=GET path=/url/:id
+func Get(ctx context.Context, id string) (*URL, error) {
+ u := &URL{ID: id}
+ err := db.QueryRow(ctx, `
+ SELECT original_url FROM url
+ WHERE id = $1
+ `, id).Scan(&u.URL)
+ return u, err
+}
+```
+
+Encore uses the `path=/url/:id` syntax to represent a path with a parameter. The `id` name corresponds to the parameter name in the function signature. In this case it is of type `string`, but you can also use other built-in types like `int` or `bool` if you want to restrict the values.
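+
+For example, here's a hypothetical endpoint (not part of this tutorial) that declares its path parameter as an `int`. A request like `GET /example/42` parses `42` into the parameter, while `GET /example/abc` is rejected before the function runs:
+
+```go
+type NumberResponse struct {
+ Num int // the parsed path parameter
+}
+
+// GetByNumber illustrates a typed path parameter.
+//encore:api public method=GET path=/example/:num
+func GetByNumber(ctx context.Context, num int) (*NumberResponse, error) {
+ return &NumberResponse{Num: num}, nil
+}
+```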
+
+🥐 We can make sure it works by reviewing the endpoint in the Service Catalog in the local development dashboard, where we can call it using the `id` you got in the previous step:
+
+
+
+
+
+You can also call it directly from the terminal:
+
+```shell
+$ curl http://localhost:4000/url/zr6RmZc4
+```
+
+You should now see this:
+
+```json
+{
+ "ID": "zr6RmZc4",
+ "URL": "https://encore.dev"
+}
+```
+
+It works! That's how you build REST APIs and use PostgreSQL databases in Encore.
+
+## 4. Add a test
+
+Before deploying, it is good practice to write tests to ensure that
+the service works properly. With Encore, tests that access the
+database are easy to write.
+
+We've prepared a test that checks the whole cycle: shortening
+a URL, storing it, and then retrieving the original URL.
+
+🥐 Save this in a separate file `url/url_test.go`.
+
+```go
+-- url/url_test.go --
+package url
+
+import (
+ "context"
+ "testing"
+)
+
+// TestShortenAndRetrieve - test that the shortened URL is stored and retrieved from database.
+func TestShortenAndRetrieve(t *testing.T) {
+ testURL := "https://github.com/encoredev/encore"
+ sp := ShortenParams{URL: testURL}
+ resp, err := Shorten(context.Background(), &sp)
+ if err != nil {
+ t.Fatal(err)
+ }
+ wantURL := testURL
+ if resp.URL != wantURL {
+ t.Errorf("got %q, want %q", resp.URL, wantURL)
+ }
+
+ firstURL := resp
+ gotURL, err := Get(context.Background(), firstURL.ID)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if *gotURL != *firstURL {
+ t.Errorf("got %v, want %v", *gotURL, *firstURL)
+ }
+}
+```
+
+🥐 Now run `encore test ./...` to verify that it's working.
+
+If you use the local development dashboard ([localhost:9400](http://localhost:9400)), you can even see traces for tests.
+
+## 5. Deploy
+
+
+
+### Self-hosting
+
+Encore supports building Docker images directly from the CLI, which can then be self-hosted on your own infrastructure of choice.
+
+If your app is using infrastructure resources, such as SQL databases, Pub/Sub, or metrics, you will need to provide your Docker image with the necessary configuration.
+Our URL shortener makes use of a PostgreSQL database, so we'll need to supply a [runtime configuration](/docs/go/self-host/configure-infra) so that our app knows how to connect to the database in the cloud.
+
+🥐 Create a new file `infra-config.json` in the root of your project with the following contents:
+
+```json
+{
+ "$schema": "https://encore.dev/schemas/infra.schema.json",
+ "sql_servers": [
+ {
+ "host": "my-db-host:5432",
+ "databases": {
+ "url": {
+ "username": "my-db-owner",
+ "password": {"$env": "DB_PASSWORD"}
+ }
+ }
+ }
+ ]
+}
+```
+
+The values in this configuration are just examples; you will need to replace them with the correct values for your database.
+
+🥐 Build a Docker image by running `encore build docker url-shortener:v1.0`.
+
+This will compile your application using the host machine and then produce a Docker image containing the compiled application.
+
+🥐 Upload the Docker image to the cloud provider of your choice and run it.
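+
+For example, to run the image locally with Docker you can supply the password referenced by `{"$env": "DB_PASSWORD"}` as an environment variable (the port mapping below assumes the image's default listen port of 8080):
+
+```shell
+$ docker run -e DB_PASSWORD=my-db-password -p 8080:8080 url-shortener:v1.0
+```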
+
+
+
+
+
+### Encore Cloud (free)
+
+Encore Cloud provides automated infrastructure and DevOps. Deploy to a free development environment or to your own cloud account on AWS or GCP.
+
+### Create account
+
+Before deploying with Encore Cloud, you need to have a free Encore Cloud account and link your app to the platform. If you already have an account, you can move on to the next step.
+
+If you don’t have an account, the simplest way to get set up is by running `encore app create` and selecting **Y** when prompted to create a new account. Once your account is set up, continue creating a new app, selecting the `empty app` template.
+
+After creating the app, copy your project files into the new app directory, ensuring that you do not replace the `encore.app` file (this file holds a unique id which links your app to the platform).
+
+### Commit changes
+
+The final step before you deploy is to commit all changes to the project repo.
+
+🥐 Commit the new files to the project's git repo and trigger a deploy to Encore's free development cloud by running:
+
+```shell
+$ git add -A .
+$ git commit -m 'Initial commit'
+$ git push encore
+```
+
+Encore will now build and test your app, provision the needed infrastructure, and deploy your application to the cloud.
+
+After triggering the deployment, you will see a URL where you can view its progress in the [Encore Cloud dashboard](https://app.encore.cloud). It will look something like: `https://app.encore.cloud/$APP_ID/deploys/...`
+
+
+
+
+
+From there you can also see metrics, traces, and connect your own AWS or GCP account to use for production deployment.
+
+*Now you have a fully fledged backend running in the cloud, well done!*
+
+### Celebrate with fireworks
+
+Now that your app is running in the cloud, let's celebrate with some fireworks:
+
+🥐 In the Cloud Dashboard, open the Command Menu by pressing **Cmd + K** (Mac) or **Ctrl + K** (Windows/Linux).
+
+_From here you can easily access all Cloud Dashboard features and, for example, jump straight to specific services in the Service Catalog or view Traces for specific endpoints._
+
+🥐 Type `fireworks` in the Command Menu and press enter. Sit back and enjoy the show!
+
+
+
+🥐 A great next step is to [integrate with GitHub](/docs/platform/integrations/github). Once you've linked with GitHub, Encore will automatically start building and running tests against your Pull Requests.
+
+
+
+## What's next
+
+Now that you know how to build a backend with a database, you're ready to let your creativity flow and begin building your next great idea!
+
+We're excited to hear what you're going to build with Encore. Join the pioneering developer community on [Discord](/discord) and share your story.
diff --git a/docs/go/tutorials/slack-bot.md b/docs/go/tutorials/slack-bot.md
new file mode 100644
index 0000000000..fae54573af
--- /dev/null
+++ b/docs/go/tutorials/slack-bot.md
@@ -0,0 +1,312 @@
+---
+seotitle: Tutorial – How to build a Slack bot
+seodesc: Learn how to build a Slack bot with Encore.go, and get it running in the cloud in just a few minutes.
+title: Building a Slack bot
+subtitle: Learn how to build a Slack bot with an Encore backend
+lang: go
+---
+
+In this tutorial you will create a Slack bot that brings the greatness of the `cowsay` utility to Slack!
+
+
+
+This is the end result:
+
+
+
+
+
+
+To make it easier to follow along, we've laid out a trail of croissants to guide your way.
+Whenever you see a 🥐 it means there's something for you to do.
+
+
+
+## 1. Create your Encore application
+
+🥐 Create a new Encore application by running `encore app create` and select `Empty app` as the template.
+**Take a note of your app id, we'll need it in the next step.**
+
+## 2. Create a Slack app
+
+🥐 The first step is to create a new Slack app:
+
+1. Head over to [Slack's API site](https://api.slack.com/apps) and create a new app.
+2. When prompted, choose to create the app **from an app manifest**.
+3. Choose a workspace to install the app in.
+
+🥐 Enter the following manifest (replace `$APP_ID` in the URL below with your app id from above):
+
+```yaml
+_metadata:
+ major_version: 1
+display_information:
+ name: Encore Bot
+ description: Cowsay for the cloud age.
+features:
+ slash_commands:
+ - command: /cowsay
+ # Replace $APP_ID below
+ url: https://staging-$APP_ID.encr.app/cowsay
+ description: Say things with a flair!
+ usage_hint: your message here
+ should_escape: false
+ bot_user:
+ display_name: encore-bot
+ always_online: true
+oauth_config:
+ scopes:
+ bot:
+ - commands
+ - chat:write
+ - chat:write.public
+settings:
+ org_deploy_enabled: false
+ socket_mode_enabled: false
+ token_rotation_enabled: false
+```
+
+Once created, we're ready to move on to implementing our Encore endpoint!
+
+## 3. Implement the Slack endpoint
+
+Since Slack sends custom HTTP headers that we need to pay attention to, we're going to
+use a raw endpoint in Encore. For more information on this, check out Slack's documentation
+on [Enabling interactivity with Slash Commands](https://api.slack.com/interactivity/slash-commands).
+
+🥐 In your Encore app, create a new directory named `slack` and create a file `slack/slack.go` with the following contents:
+
+```go
+-- slack/slack.go --
+// Service slack implements a cowsay Slack bot.
+package slack
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+)
+
+// cowart is the formatting string for printing the cow art.
+const cowart = "Moo! %s"
+
+//encore:api public raw path=/cowsay
+func Cowsay(w http.ResponseWriter, req *http.Request) {
+ text := req.FormValue("text")
+ data, _ := json.Marshal(map[string]string{
+ "response_type": "in_channel",
+ "text": fmt.Sprintf(cowart, text),
+ })
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(200)
+ w.Write(data)
+}
+```
+
+Let's try it out locally.
+
+🥐 Start your app with `encore run` and then call it in another terminal:
+
+```shell
+$ curl http://localhost:4000/cowsay -d 'text=Eat your greens!'
+{"response_type":"in_channel","text":"Moo! Eat your greens!"}
+```
+
+Looks great!
+
+🥐 Next, let's deploy it to the cloud:
+
+```shell
+$ git add -A .
+$ git commit -m 'Initial commit'
+$ git push encore
+```
+
+Once deployed, we're ready to try our Slack command!
+
+🥐 Head over to the workspace you installed the app in and run `/cowsay Hello there`.
+You should see something like this:
+
+")
+
+And just like that we have a fully working Slack integration.
+
+## 4. Secure the webhook endpoint
+
+In order to get up and running quickly, we ignored one important aspect of a production-ready Slack app:
+verifying that the webhook requests are actually coming from Slack. Let's do that now!
+
+The Slack documentation covers this really well on the [Verifying requests from Slack](https://api.slack.com/authentication/verifying-requests-from-slack) page.
+
+In short, what we need to do is:
+
+1. Save a shared secret that Slack provides us
+2. Use the secret to verify that the request comes from Slack, using HMAC (Hash-based Message Authentication Code).
+
+### Save the shared secret
+
+Let's define a secret using Encore's secrets management functionality.
+
+🥐 Add this to your `slack.go` file:
+
+```go
+-- slack/slack.go --
+var secrets struct {
+ SlackSigningSecret string
+}
+```
+
+🥐 Head over to the configuration section for your Slack app (go to [Your Apps](https://api.slack.com/apps) → select your app → Basic Information).
+
+🥐 Copy the **Signing Secret** and then run `encore secret set --type prod SlackSigningSecret` and paste the secret.
+
+🥐 For development you will also want to set `encore secret set --type dev,local,pr SlackSigningSecret`.
+You can use the same secret value or a placeholder value.
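+
+The output should look something like this:
+
+```shell
+$ encore secret set --type dev,local,pr SlackSigningSecret
+Enter secret value: *****
+Successfully updated development secret SlackSigningSecret.
+```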
+
+### Compute the HMAC
+
+Go makes computing HMAC very straightforward, but it's still a fair amount of code.
+
+🥐 Add a few more imports to your file, so that it reads:
+
+```go
+-- slack/slack.go --
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "encore.dev/beta/errs"
+ "encore.dev/rlog"
+)
+```
+
+🥐 Next, we'll add the `verifyRequest` function:
+
+```go
+-- slack/slack.go --
+// verifyRequest verifies that a request is coming from Slack.
+func verifyRequest(req *http.Request) (body []byte, err error) {
+ eb := errs.B().Code(errs.InvalidArgument)
+ body, err = io.ReadAll(req.Body)
+ if err != nil {
+ return nil, eb.Cause(err).Err()
+ }
+
+ // Compare timestamps to prevent replay attack
+ ts := req.Header.Get("X-Slack-Request-Timestamp")
+ threshold := int64(5 * 60)
+ n, _ := strconv.ParseInt(ts, 10, 64)
+ if diff := time.Now().Unix() - n; diff > threshold || diff < -threshold {
+ return body, eb.Msg("message not recent").Err()
+ }
+
+ // Compare HMAC signature
+ sig := req.Header.Get("X-Slack-Signature")
+ prefix := "v0="
+ if !strings.HasPrefix(sig, prefix) {
+ return body, eb.Msg("invalid signature").Err()
+ }
+ gotMac, _ := hex.DecodeString(sig[len(prefix):])
+
+ mac := hmac.New(sha256.New, []byte(secrets.SlackSigningSecret))
+ fmt.Fprintf(mac, "v0:%s:", ts)
+ mac.Write(body)
+ expectedMac := mac.Sum(nil)
+ if !hmac.Equal(gotMac, expectedMac) {
+ return body, eb.Msg("bad mac").Err()
+ }
+ return body, nil
+}
+```
+
+
+
+As you can see, this function needs to consume the whole HTTP body in order to compute the HMAC.
+
+This breaks the use of `req.FormValue("text")` that we used earlier, since it relies on reading the HTTP body. That's the reason we're returning the body from `verifyRequest`, so that we can parse the form values from that directly instead.
+
+
+
+We're now ready to verify the signature.
+
+🥐 Update the `Cowsay` function to look like this:
+
+```go
+-- slack/slack.go --
+//encore:api public raw path=/cowsay
+func Cowsay(w http.ResponseWriter, req *http.Request) {
+ body, err := verifyRequest(req)
+ if err != nil {
+ errs.HTTPError(w, err)
+ return
+ }
+ q, _ := url.ParseQuery(string(body))
+ text := q.Get("text")
+ data, _ := json.Marshal(map[string]string{
+ "response_type": "in_channel",
+ "text": fmt.Sprintf(cowart, text),
+ })
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(200)
+ w.Write(data)
+}
+```
+
+## 5. Put it all together and deploy
+
+Finally we're ready to put it all together.
+
+🥐 Update the `cowart` constant like so:
+
+```go
+-- slack/slack.go --
+const cowart = `
+ ________________________________________
+< %- 38s >
+ ----------------------------------------
+ \ ^__^
+ \ (oo)\_______
+ (__)\ )\/\
+ ||----w |
+ || ||
+`
+```
+
+🥐 Finally, let's commit our changes and deploy it:
+
+```shell
+$ git add -A .
+$ git commit -m 'Verify webhook requests and improve art'
+$ git push encore
+```
+
+🥐 Once deployed, head back to Slack and run `/cowsay Hello there`.
+
+If everything is set up correctly, you should see:
+
+
+
+And there we go, a production-ready Slack bot in less than 100 lines of code.
+
+Well done!
+
+### Celebrate with fireworks
+
+Now that your app is running in the cloud, let's celebrate with some fireworks:
+
+🥐 In the Cloud Dashboard, open the Command Menu by pressing **Cmd + K** (Mac) or **Ctrl + K** (Windows/Linux).
+
+_From here you can easily access all Cloud Dashboard features and, for example, jump straight to specific services in the Service Catalog or view Traces for specific endpoints._
+
+🥐 Type `fireworks` in the Command Menu and press enter. Sit back and enjoy the show!
+
+
diff --git a/docs/go/tutorials/uptime.md b/docs/go/tutorials/uptime.md
new file mode 100644
index 0000000000..f4384479af
--- /dev/null
+++ b/docs/go/tutorials/uptime.md
@@ -0,0 +1,955 @@
+---
+title: Building an Uptime Monitor
+subtitle: Learn how to build an event-driven uptime monitoring system
+seotitle: How to build an event-driven Uptime Monitoring System using Encore.go
+seodesc: Learn how to build an event-driven uptime monitoring tool using Go and Encore. Get your application running in the cloud in 30 minutes!
+lang: go
+---
+
+Want to be notified when your website goes down so you can fix it before your users notice?
+
+You need an uptime monitoring system. Sounds daunting? Don't worry,
+we'll build it with Encore in 30 minutes!
+
+The app will use an event-driven architecture and the final result will look like this:
+
+
+
+
+
+
+
+## 1. Create your Encore application
+
+
+
+To make it easier to follow along, we've laid out a trail of croissants to guide your way.
+Whenever you see a 🥐 it means there's something for you to do.
+
+
+
+🥐 Create a new Encore application, using this tutorial project's starting-point branch. This gives you a ready-to-go frontend to use.
+
+```shell
+$ encore app create uptime --example=github.com/encoredev/example-app-uptime/tree/starting-point
+```
+
+If this is the first time you're using Encore, you'll be asked if you wish to create a free account. This is needed when you want Encore to manage functionality like secrets and handle cloud deployments (which we'll use later on in the tutorial).
+
+When we're done we'll have a backend with an event-driven architecture, as seen below in the [automatically generated diagram](/docs/go/observability/encore-flow) where white boxes are services and black boxes are Pub/Sub topics:
+
+
+
+## 2. Create monitor service
+
+Let's start by creating the functionality to check if a website is currently up or down.
+Later we'll store this result in a database so we can detect when the status changes and
+send alerts.
+
+🥐 Create an Encore service named `monitor` containing a file named `ping.go`.
+
+```shell
+$ mkdir monitor
+$ touch monitor/ping.go
+```
+
+🥐 Add an Encore API endpoint named `Ping` that takes a URL as input and returns a response
+indicating whether the site is up or down.
+
+```go
+-- monitor/ping.go --
+// Service monitor checks if a website is up or down.
+package monitor
+
+import (
+ "context"
+ "net/http"
+ "strings"
+)
+
+// PingResponse is the response from the Ping endpoint.
+type PingResponse struct {
+ Up bool `json:"up"`
+}
+
+// Ping pings a specific site and determines whether it's up or down right now.
+//
+//encore:api public path=/ping/*url
+func Ping(ctx context.Context, url string) (*PingResponse, error) {
+ // If the url does not start with "http:" or "https:", default to "https:".
+ if !strings.HasPrefix(url, "http:") && !strings.HasPrefix(url, "https:") {
+ url = "https://" + url
+ }
+
+ // Make an HTTP request to check if it's up.
+ req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return &PingResponse{Up: false}, nil
+ }
+ resp.Body.Close()
+
+ // 2xx and 3xx status codes are considered up
+ up := resp.StatusCode < 400
+ return &PingResponse{Up: up}, nil
+}
+```
+
+🥐 Let's try it! Run `encore run` in your terminal and you should see the service start up.
+
+Then open up the Local Development Dashboard at [http://localhost:9400](http://localhost:9400) and try calling the `monitor.ping` endpoint from the API Explorer, passing in `google.com` as the URL.
+
+You can then see the response, logs, and view a trace of the request. It will look like this:
+
+
+
+
+
+If you prefer to use the terminal, run `curl http://localhost:4000/ping/google.com` in
+a new terminal instead. Either way you should see the response:
+
+```json
+{"up": true}
+```
+
+You can also try with `httpstat.us/400` and `some-non-existing-url.com` and it should respond with `{"up": false}`.
+(It's always a good idea to test the negative case as well.)
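+
+For example (the wildcard `*url` path segment lets you pass a URL that contains slashes):
+
+```shell
+$ curl http://localhost:4000/ping/httpstat.us/400
+{"up": false}
+```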
+
+### Add a test
+
+🥐 Let's write an automated test so we don't break this endpoint over time. Create the file `monitor/ping_test.go`
+with the content:
+
+```go
+-- monitor/ping_test.go --
+package monitor
+
+import (
+ "context"
+ "testing"
+)
+
+func TestPing(t *testing.T) {
+ ctx := context.Background()
+ tests := []struct {
+ URL string
+ Up bool
+ }{
+ {"encore.dev", true},
+ {"google.com", true},
+ // Test both with and without "https://"
+ {"httpbin.org/status/200", true},
+ {"https://httpbin.org/status/200", true},
+
+ // 4xx and 5xx should be considered down.
+ {"httpbin.org/status/400", false},
+ {"https://httpbin.org/status/500", false},
+ // Invalid URLs should be considered down.
+ {"invalid://scheme", false},
+ }
+
+ for _, test := range tests {
+ resp, err := Ping(ctx, test.URL)
+ if err != nil {
+ t.Errorf("url %s: unexpected error: %v", test.URL, err)
+ } else if resp.Up != test.Up {
+ t.Errorf("url %s: got up=%v, want %v", test.URL, resp.Up, test.Up)
+ }
+ }
+}
+```
+
+🥐 Run `encore test ./...` to check that it all works as expected. You should see something like:
+
+```shell
+$ encore test ./...
+9:38AM INF starting request endpoint=Ping service=monitor test=TestPing
+9:38AM INF request completed code=ok duration=71.861792 endpoint=Ping http_code=200 service=monitor test=TestPing
+[... lots more lines ...]
+PASS
+ok encore.app/monitor 1.660s
+```
+
+And if you open the local development dashboard at [localhost:9400](http://localhost:9400), you can also see traces for the tests.
+
+## 3. Create site service
+
+Next, we want to keep track of a list of websites to monitor.
+
+Since most of these APIs will be simple "CRUD" (Create/Read/Update/Delete) endpoints, let's build this service using [GORM](https://gorm.io/), an ORM
+library that makes building CRUD endpoints really simple.
+
+🥐 Let's create a new service named `site` with a SQL database. To do so, create a new directory `site` in the application root, with a `migrations` folder inside it:
+
+```shell
+$ mkdir site
+$ mkdir site/migrations
+```
+
+🥐 Add a database migration file inside that folder, named `1_create_tables.up.sql`.
+The file name format is important (it must start with `1_` and end in `.up.sql`).
+
+Add the following contents:
+
+```sql
+-- site/migrations/1_create_tables.up.sql --
+CREATE TABLE sites (
+ id BIGSERIAL PRIMARY KEY,
+ url TEXT NOT NULL
+);
+```
+
+🥐 Next, install the GORM library and PostgreSQL driver:
+
+```shell
+$ go get -u gorm.io/gorm gorm.io/driver/postgres
+```
+
+Now let's create the `site` service itself. To do this we'll use Encore's support for [dependency injection](https://encore.dev/docs/go/how-to/dependency-injection) to inject the GORM database connection.
+
+🥐 Create `site/service.go` with the contents:
+
+```go
+-- site/service.go --
+// Service site keeps track of which sites to monitor.
+package site
+
+import (
+ "encore.dev/storage/sqldb"
+ "gorm.io/driver/postgres"
+ "gorm.io/gorm"
+)
+
+//encore:service
+type Service struct {
+ db *gorm.DB
+}
+
+// Define a database named 'site', using the database migrations
+// in the "./migrations" folder. Encore automatically provisions,
+// migrates, and connects to the database.
+var db = sqldb.NewDatabase("site", sqldb.DatabaseConfig{
+ Migrations: "./migrations",
+})
+
+// initService initializes the site service.
+// It is automatically called by Encore on service startup.
+func initService() (*Service, error) {
+ db, err := gorm.Open(postgres.New(postgres.Config{
+ Conn: db.Stdlib(),
+ }))
+ if err != nil {
+ return nil, err
+ }
+ return &Service{db: db}, nil
+}
+```
+
+🥐 With that, we're now ready to create our CRUD endpoints. Create the following files:
+
+```go
+-- site/get.go --
+package site
+
+import "context"
+
+// Site describes a monitored site.
+type Site struct {
+ // ID is a unique ID for the site.
+ ID int `json:"id"`
+ // URL is the site's URL.
+ URL string `json:"url"`
+}
+
+// Get gets a site by id.
+//
+//encore:api public method=GET path=/site/:siteID
+func (s *Service) Get(ctx context.Context, siteID int) (*Site, error) {
+ var site Site
+ if err := s.db.Where("id = $1", siteID).First(&site).Error; err != nil {
+ return nil, err
+ }
+ return &site, nil
+}
+-- site/add.go --
+package site
+
+import "context"
+
+// AddParams are the parameters for adding a site to be monitored.
+type AddParams struct {
+ // URL is the URL of the site. If it doesn't contain a scheme
+ // (like "http:" or "https:") it defaults to "https:".
+ URL string `json:"url"`
+}
+
+// Add adds a new site to the list of monitored websites.
+//
+//encore:api public method=POST path=/site
+func (s *Service) Add(ctx context.Context, p *AddParams) (*Site, error) {
+ site := &Site{URL: p.URL}
+ if err := s.db.Create(site).Error; err != nil {
+ return nil, err
+ }
+ return site, nil
+}
+-- site/list.go --
+package site
+
+import "context"
+
+type ListResponse struct {
+ // Sites is the list of monitored sites.
+ Sites []*Site `json:"sites"`
+}
+
+// List lists the monitored websites.
+//
+//encore:api public method=GET path=/site
+func (s *Service) List(ctx context.Context) (*ListResponse, error) {
+ var sites []*Site
+ if err := s.db.Find(&sites).Error; err != nil {
+ return nil, err
+ }
+ return &ListResponse{Sites: sites}, nil
+}
+-- site/delete.go --
+package site
+
+import "context"
+
+// Delete deletes a site by id.
+//
+//encore:api public method=DELETE path=/site/:siteID
+func (s *Service) Delete(ctx context.Context, siteID int) error {
+ return s.db.Delete(&Site{ID: siteID}).Error
+}
+```
+
+🥐 Now make sure you have [Docker](https://docker.com) installed and running, and then restart `encore run` to cause the `site` database to be created by Encore.
+
+You can verify that the database was created by looking at your application's Flow architecture diagram in the local development dashboard at [localhost:9400](http://localhost:9400), and then use the Service Catalog to call the `site.Add` endpoint:
+
+
+
+
+
+Or you can call `site.Add` from the terminal:
+
+```shell
+$ curl -X POST 'http://localhost:4000/site' -d '{"url": "https://encore.dev"}'
+{
+ "id": 1,
+ "url": "https://encore.dev"
+}
+```
+
+## 4. Record uptime checks
+
+In order to notify when a website goes down or comes back up, we need to track the previous state it was in.
+
+🥐 To do so, let's add a database to the `monitor` service as well.
+Create the directory `monitor/migrations` and the file `monitor/migrations/1_create_tables.up.sql`:
+
+```sql
+-- monitor/migrations/1_create_tables.up.sql --
+CREATE TABLE checks (
+ id BIGSERIAL PRIMARY KEY,
+ site_id BIGINT NOT NULL,
+ up BOOLEAN NOT NULL,
+ checked_at TIMESTAMP WITH TIME ZONE NOT NULL
+);
+```
+
+We'll insert a database row every time we check if a site is up.
+
+🥐 Add a new endpoint `Check` to the `monitor` service, that
+takes in a Site ID, pings the site, and inserts a database row
+in the `checks` table.
+
+For this service we'll use Encore's [`sqldb` package](https://encore.dev/docs/go/primitives/databases#querying-databases)
+instead of GORM (in order to showcase both approaches).
+
+```go
+-- monitor/check.go --
+package monitor
+
+import (
+ "context"
+
+ "encore.app/site"
+ "encore.dev/storage/sqldb"
+)
+
+// Check checks a single site.
+//
+//encore:api public method=POST path=/check/:siteID
+func Check(ctx context.Context, siteID int) error {
+ site, err := site.Get(ctx, siteID)
+ if err != nil {
+ return err
+ }
+ result, err := Ping(ctx, site.URL)
+ if err != nil {
+ return err
+ }
+ _, err = db.Exec(ctx, `
+ INSERT INTO checks (site_id, up, checked_at)
+ VALUES ($1, $2, NOW())
+ `, site.ID, result.Up)
+ return err
+}
+
+// Define a database named 'monitor', using the database migrations
+// in the "./migrations" folder. Encore automatically provisions,
+// migrates, and connects to the database.
+var db = sqldb.NewDatabase("monitor", sqldb.DatabaseConfig{
+ Migrations: "./migrations",
+})
+```
+
+
+🥐 Restart `encore run` to cause the `monitor` database to be created.
+
+We can again verify that the database was created in the Flow diagram, and also see the dependency between the `monitor` service and the `site` service that we just added.
+
+We can then call the `monitor.Check` endpoint using the id `1` that we got in the last step, and view the trace where we see the database interactions.
+
+It will look something like this:
+
+
+
+
+
+🥐 You can also inspect the database using `encore db shell <database-name>` to make sure everything worked:
+
+```shell
+$ encore db shell monitor
+psql (14.4, server 14.2)
+Type "help" for help.
+
+monitor=> SELECT * FROM checks;
+ id | site_id | up | checked_at
+----+---------+----+-------------------------------
+ 1 | 1 | t | 2022-10-21 09:58:30.674265+00
+```
+
+If that's what you see, everything's working great!
+
+### Add a cron job to check all sites
+
+We now want to regularly check all the tracked sites so we can
+respond in case any of them go down.
+
+We'll create a new `CheckAll` API endpoint in the `monitor` service
+that will list all the tracked sites and check all of them.
+
+🥐 Let's extract some of the functionality we wrote for the
+`Check` endpoint into a separate function, like so:
+
+```go
+-- monitor/check.go --
+// Check checks a single site.
+//
+//encore:api public method=POST path=/check/:siteID
+func Check(ctx context.Context, siteID int) error {
+ site, err := site.Get(ctx, siteID)
+ if err != nil {
+ return err
+ }
+ return check(ctx, site)
+}
+
+func check(ctx context.Context, site *site.Site) error {
+ result, err := Ping(ctx, site.URL)
+ if err != nil {
+ return err
+ }
+ _, err = db.Exec(ctx, `
+ INSERT INTO checks (site_id, up, checked_at)
+ VALUES ($1, $2, NOW())
+ `, site.ID, result.Up)
+ return err
+}
+```
+
+Now we're ready to create our new `CheckAll` endpoint.
+
+🥐 Create the new `CheckAll` endpoint inside `monitor/check.go`:
+
+```go
+-- monitor/check.go --
+import "golang.org/x/sync/errgroup"
+
+// CheckAll checks all sites.
+//
+//encore:api public method=POST path=/checkall
+func CheckAll(ctx context.Context) error {
+ // Get all the tracked sites.
+ resp, err := site.List(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Check up to 8 sites concurrently.
+ g, ctx := errgroup.WithContext(ctx)
+ g.SetLimit(8)
+ for _, site := range resp.Sites {
+ site := site // capture for closure
+ g.Go(func() error {
+ return check(ctx, site)
+ })
+ }
+ return g.Wait()
+}
+```
+
+This uses [an errgroup](https://pkg.go.dev/golang.org/x/sync/errgroup) to check up to 8 sites concurrently, aborting early if
+we encounter any error. (Note that a website being down is
+not treated as an error.)
+
+🥐 Run `go get golang.org/x/sync/errgroup` to install that dependency.
+
+🥐 Now that we have a `CheckAll` endpoint, define a [cron job](https://encore.dev/docs/go/primitives/cron-jobs) to automatically call it every 1 hour (since this is an example, we don't need to go too crazy and check every minute):
+
+```go
+-- monitor/check.go --
+import "encore.dev/cron"
+
+// Check all tracked sites every 1 hour.
+var _ = cron.NewJob("check-all", cron.JobConfig{
+ Title: "Check all sites",
+ Endpoint: CheckAll,
+ Every: 1 * cron.Hour,
+})
+```
+
+
+
+Cron jobs are not triggered when running the application locally but work when deploying the application to a cloud environment.
+
+
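+During local development you can trigger the endpoint manually instead, for example from the terminal:
+
+```shell
+$ curl -X POST http://localhost:4000/checkall
+```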
+
+The frontend needs a way to list all sites and display whether they are up or down.
+
+🥐 Add a file in the `monitor` service and name it `status.go`. Add the following code:
+
+```go
+-- monitor/status.go --
+package monitor
+
+import (
+ "context"
+ "time"
+)
+
+// SiteStatus describes the current status of a site
+// and when it was last checked.
+type SiteStatus struct {
+ Up bool `json:"up"`
+ CheckedAt time.Time `json:"checked_at"`
+}
+
+// StatusResponse is the response type from the Status endpoint.
+type StatusResponse struct {
+ // Sites contains the current status of all sites,
+ // keyed by the site ID.
+ Sites map[int]SiteStatus `json:"sites"`
+}
+
+// Status checks the current up/down status of all monitored sites.
+//
+//encore:api public method=GET path=/status
+func Status(ctx context.Context) (*StatusResponse, error) {
+ rows, err := db.Query(ctx, `
+ SELECT DISTINCT ON (site_id) site_id, up, checked_at
+ FROM checks
+ ORDER BY site_id, checked_at DESC
+ `)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ result := make(map[int]SiteStatus)
+ for rows.Next() {
+ var siteID int
+ var status SiteStatus
+ if err := rows.Scan(&siteID, &status.Up, &status.CheckedAt); err != nil {
+ return nil, err
+ }
+ result[siteID] = status
+ }
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+ return &StatusResponse{Sites: result}, nil
+}
+```
+
+Now try visiting http://localhost:4000/frontend in your browser again. This time you should see a working frontend that lists all sites and their current status.
+
+## 5. Deploy
+
+To try out your uptime monitor for real, let's deploy it to the cloud.
+
+
+
+### Self-hosting
+
+Encore supports building Docker images directly from the CLI, which can then be self-hosted on your own infrastructure of choice.
+
+If your app is using infrastructure resources, such as SQL databases, Pub/Sub, or metrics, you will need to supply a [runtime configuration](/docs/go/self-host/configure-infra) for your Docker image.
+
+🥐 Create a new file `infra-config.json` in the root of your project with the following contents:
+
+```json
+{
+ "$schema": "https://encore.dev/schemas/infra.schema.json",
+ "sql_servers": [
+ {
+ "host": "my-db-host:5432",
+ "databases": {
+ "monitor": {
+ "username": "my-db-owner",
+ "password": {"$env": "DB_PASSWORD"}
+ },
+ "site": {
+ "username": "my-db-owner",
+ "password": {"$env": "DB_PASSWORD"}
+ }
+ }
+ }
+ ]
+}
+```
+
+The values in this configuration are just examples; you will need to replace them with the correct values for your database.
+
+🥐 Build a Docker image by running `encore build docker uptime:v1.0`.
+
+This will compile your application using the host machine and then produce a Docker image containing the compiled application.
+
+🥐 Upload the Docker image to the cloud provider of your choice and run it.
+
+
+
+
+
+### Encore Cloud (free)
+
+Encore Cloud provides automated infrastructure and DevOps. Deploy to a free development environment or to your own cloud account on AWS or GCP.
+
+### Create account
+
+Before deploying with Encore Cloud, you need to have a free Encore Cloud account and link your app to the platform. If you already have an account, you can move on to the next step.
+
+If you don’t have an account, the simplest way to get set up is by running `encore app create` and selecting **Y** when prompted to create a new account. Once your account is set up, continue creating a new app, selecting the `empty app` template.
+
+After creating the app, copy your project files into the new app directory, ensuring that you do not replace the `encore.app` file (this file holds a unique id which links your app to the platform).
+
+### Commit changes
+
+Encore comes with built-in CI/CD, and the deployment process is as simple as a `git push`.
+(You can also integrate with GitHub to activate per Pull Request Preview Environments, learn more in the [CI/CD docs](/docs/platform/deploy/deploying).)
+
+🥐 Now, let's deploy your app to Encore's free development cloud by running:
+
+```shell
+$ git add -A .
+$ git commit -m 'Initial commit'
+$ git push encore
+```
+
+Encore will now build and test your app, provision the needed infrastructure, and deploy your application to the cloud.
+
+After triggering the deployment, you will see a URL where you can view its progress in the [Encore Cloud dashboard](https://app.encore.cloud). It will look something like: `https://app.encore.cloud/$APP_ID/deploys/...`
+
+From the Cloud Dashboard you can also see metrics, trigger Cron Jobs, see traces, and later connect your own AWS or GCP account to use for deployment.
+
+
+
+
+
+🥐 When the deploy has finished, you can try out your uptime monitor by going to `https://staging-$APP_ID.encr.app/frontend`.
+
+*You now have an Uptime Monitor running in the cloud, well done!*
+
+
+
+## 6. Publish Pub/Sub events when a site goes down
+
+Hold on, an uptime monitoring system isn't very useful if it doesn't
+actually notify you when a site goes down.
+
+To do so let's add a [Pub/Sub topic](https://encore.dev/docs/go/primitives/pubsub) on which we'll publish a message every time a site transitions from being up to being down, or vice versa.
+
+🥐 Define the topic using Encore's Pub/Sub package in a new file, `monitor/alerts.go`:
+
+```go
+-- monitor/alerts.go --
+package monitor
+
+import "encore.dev/pubsub"
+
+// TransitionEvent describes a transition of a monitored site
+// from up->down or from down->up.
+type TransitionEvent struct {
+ // Site is the monitored site in question.
+ Site *site.Site `json:"site"`
+ // Up specifies whether the site is now up or down (the new value).
+ Up bool `json:"up"`
+}
+
+// TransitionTopic is a pubsub topic with transition events for when a monitored site
+// transitions from up->down or from down->up.
+var TransitionTopic = pubsub.NewTopic[*TransitionEvent]("uptime-transition", pubsub.TopicConfig{
+ DeliveryGuarantee: pubsub.AtLeastOnce,
+})
+```
+
+Now let's publish a message on the `TransitionTopic` if a site's up/down
+state differs from the previous measurement.
+
+🥐 Create a `getPreviousMeasurement` function to report the last up/down state:
+
+```go
+-- monitor/alerts.go --
+import (
+ "context"
+ "errors"
+
+ "encore.dev/storage/sqldb"
+)
+
+// getPreviousMeasurement reports whether the given site was
+// up or down in the previous measurement.
+func getPreviousMeasurement(ctx context.Context, siteID int) (up bool, err error) {
+ err = db.QueryRow(ctx, `
+ SELECT up FROM checks
+ WHERE site_id = $1
+ ORDER BY checked_at DESC
+ LIMIT 1
+ `, siteID).Scan(&up)
+
+ if errors.Is(err, sqldb.ErrNoRows) {
+ // There was no previous ping; treat this as if the site was up before
+ return true, nil
+ } else if err != nil {
+ return false, err
+ }
+ return up, nil
+}
+```
+
+🥐 Now add a function to conditionally publish a message if the up/down state differs:
+
+```go
+-- monitor/alerts.go --
+import "encore.app/site"
+
+func publishOnTransition(ctx context.Context, site *site.Site, isUp bool) error {
+ wasUp, err := getPreviousMeasurement(ctx, site.ID)
+ if err != nil {
+ return err
+ }
+ if isUp == wasUp {
+ // Nothing to do
+ return nil
+ }
+ _, err = TransitionTopic.Publish(ctx, &TransitionEvent{
+ Site: site,
+ Up: isUp,
+ })
+ return err
+}
+```
+
+🥐 Finally, modify the `check` function to call `publishOnTransition`:
+
+```go
+-- monitor/check.go --
+func check(ctx context.Context, site *site.Site) error {
+ result, err := Ping(ctx, site.URL)
+ if err != nil {
+ return err
+ }
+
+ // Publish a Pub/Sub message if the site transitions
+ // from up->down or from down->up.
+ if err := publishOnTransition(ctx, site, result.Up); err != nil {
+ return err
+ }
+
+ _, err = db.Exec(ctx, `
+ INSERT INTO checks (site_id, up, checked_at)
+ VALUES ($1, $2, NOW())
+ `, site.ID, result.Up)
+ return err
+}
+```
+
+Now the monitoring system will publish messages on the `TransitionTopic`
+whenever a monitored site transitions from up->down or from down->up.
+It doesn't know or care who actually listens to these messages.
+
+The truth is, right now nobody does. So let's fix that by adding
+a Pub/Sub subscriber that posts these events to Slack.
+
+## 7. Send Slack notifications when a site goes down
+
+🥐 Start by creating a Slack service containing the following:
+
+```go
+-- slack/slack.go --
+package slack
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+)
+
+type NotifyParams struct {
+ // Text is the Slack message text to send.
+ Text string `json:"text"`
+}
+
+// Notify sends a Slack message to a pre-configured channel using a
+// Slack Incoming Webhook (see https://api.slack.com/messaging/webhooks).
+//
+//encore:api private
+func Notify(ctx context.Context, p *NotifyParams) error {
+ reqBody, err := json.Marshal(p)
+ if err != nil {
+ return err
+ }
+ req, err := http.NewRequestWithContext(ctx, "POST", secrets.SlackWebhookURL, bytes.NewReader(reqBody))
+ if err != nil {
+ return err
+ }
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= 400 {
+ body, _ := io.ReadAll(resp.Body)
+ return fmt.Errorf("notify slack: %s: %s", resp.Status, body)
+ }
+ return nil
+}
+
+var secrets struct {
+ // SlackWebhookURL defines the Slack webhook URL to send
+ // uptime notifications to.
+ SlackWebhookURL string
+}
+```
+
+🥐 Now go to a Slack community of your choice where you have the permission
+to create a new Incoming Webhook.
+
+🥐 Once you have the Webhook URL, set it as an Encore secret:
+
+```shell
+$ encore secret set --type dev,local,pr SlackWebhookURL
+Enter secret value: *****
+Successfully updated development secret SlackWebhookURL.
+```
+
+🥐 Test the `slack.Notify` endpoint by calling it via cURL:
+
+```shell
+$ curl 'http://localhost:4000/slack.Notify' -d '{"Text": "Testing Slack webhook"}'
+```
+You should see the *Testing Slack webhook* message appear in the Slack channel you designated for the webhook.
+
+🥐 When it works, it's time to add a Pub/Sub subscriber to automatically notify Slack when a monitored site goes up or down. Add the following:
+
+```go
+-- slack/slack.go --
+import (
+ "encore.dev/pubsub"
+ "encore.app/monitor"
+)
+
+var _ = pubsub.NewSubscription(monitor.TransitionTopic, "slack-notification", pubsub.SubscriptionConfig[*monitor.TransitionEvent]{
+ Handler: func(ctx context.Context, event *monitor.TransitionEvent) error {
+ // Compose our message.
+ msg := fmt.Sprintf("*%s is down!*", event.Site.URL)
+ if event.Up {
+ msg = fmt.Sprintf("*%s is back up.*", event.Site.URL)
+ }
+
+ // Send the Slack notification.
+ return Notify(ctx, &NotifyParams{Text: msg})
+ },
+})
+```
+
+## 8. Deploy your finished Uptime Monitor
+
+Now you're ready to deploy your finished Uptime Monitor, complete with a Slack integration.
+
+
+
+### Self-hosting
+
+Because we have added more infrastructure to our app, we need to [update the configuration](/docs/go/self-host/configure-infra) in our `infra-config.json` to include the new Pub/Sub topic and subscription, and to specify how the `SlackWebhookURL` secret should be set.
+
+🥐 Update your `infra-config.json` to reflect the new infrastructure.
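+
+As an illustration, the additions might look something like the sketch below, assuming GCP Pub/Sub and an environment variable for the webhook secret (the values are placeholders; see the [infrastructure configuration docs](/docs/go/self-host/configure-infra) for the exact schema):
+
+```json
+{
+ "$schema": "https://encore.dev/schemas/infra.schema.json",
+ "pubsub": [
+ {
+ "type": "gcp_pubsub",
+ "project_id": "my-gcp-project",
+ "topics": {
+ "uptime-transition": {
+ "name": "uptime-transition",
+ "subscriptions": {
+ "slack-notification": {"name": "slack-notification"}
+ }
+ }
+ }
+ }
+ ],
+ "secrets": {
+ "SlackWebhookURL": {"$env": "SLACK_WEBHOOK_URL"}
+ }
+}
+```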
+
+🥐 Build a Docker image by running `encore build docker uptime:v2.0`.
+
+🥐 Upload the Docker image to the cloud provider and run it.
+
+
+
+
+
+### Encore Cloud (free)
+
+🥐 As before, deploying your app to the cloud is as simple as running:
+
+```shell
+$ git add -A .
+$ git commit -m 'Add slack integration'
+$ git push encore
+```
+
+### Celebrate with fireworks
+
+Now that your app is running in the cloud, let's celebrate with some fireworks:
+
+🥐 In the Cloud Dashboard, open the Command Menu by pressing **Cmd + K** (Mac) or **Ctrl + K** (Windows/Linux).
+
+_From here you can easily access all Cloud Dashboard features and, for example, jump straight to specific services in the Service Catalog or view Traces for specific endpoints._
+
+🥐 Type `fireworks` in the Command Menu and press enter. Sit back and enjoy the show!
+
+
+
+
+
+## Conclusion
+
+We've now built a fully functioning uptime monitoring system.
+
+If we may say so ourselves (and we may; it's our documentation after all),
+it's pretty remarkable how much we've accomplished in so little code:
+
+* We've built three different services (`site`, `monitor`, and `slack`)
+* We've added two databases (to the `site` and `monitor` services) for tracking monitored sites and the monitoring results
+* We've added a cron job for automatically checking the sites every hour
+* We've set up a Pub/Sub topic to decouple the monitoring system from the Slack notifications
+* We've added a Slack integration, using secrets to securely store the webhook URL, listening to a Pub/Sub subscription for up/down transition events
+
+All of this in just a bit over 300 lines of code. It's time to lean back
+and take a sip of your favorite beverage, safe in the knowledge you'll
+never be caught unaware of a website going down suddenly.
diff --git a/docs/menu.cue b/docs/menu.cue
new file mode 100644
index 0000000000..8ab16661ea
--- /dev/null
+++ b/docs/menu.cue
@@ -0,0 +1,1301 @@
+#Menu: #RootMenu | #SubMenu
+
+#RootMenu: {
+ kind: "rootmenu"
+ items: [...#MenuItem]
+}
+
+#SubMenu: {
+ kind: "submenu"
+ // Menu title to display when this submenu is active.
+ title: string
+
+ // ID for the submenu, used for tracking active menu in frontend.
+ id: string
+
+ // Additional presentation options for the menu item.
+ presentation?: #Presentation
+
+ back: {
+ // Text to display in the back button.
+ text: string
+
+ // Path to the page to navigate to when the back button is clicked.
+ path: string
+ }
+
+ items: [...#MenuItem]
+}
+
+// Represents an item in a menu.
+#MenuItem: #SectionMenuItem | #BasicMenuItem | #NavMenuItem | #AccordionMenuItem
+
+#SectionMenuItem: {
+ // Represents a menu section that can't be navigated to.
+ kind: "section"
+
+ // The text to display in the menu.
+ text: string
+
+ // Menu items to show for this section.
+ items: [...#MenuItem]
+}
+
+#BasicMenuItem: {
+ // Represents a basic page that can be navigated to.
+ kind: "basic"
+
+ // The text to display in the menu.
+ text: string
+
+ // The URL path to the page.
+ path: string
+
+ // The file to render when viewing this page.
+ file: string
+
+ // Inline menu to show when viewing this page.
+ inline_menu?: [...#MenuItem]
+
+ // hidden, if true, indicates the page exists but is hidden in the menu.
+ // It can be navigated to directly, and will be shown as "next page"/"prev page"
+ // in the per-page navigation.
+ hidden?: true
+}
+
+#NavMenuItem: {
+ // Represents a page that can be navigated to, that has a menu
+ // that replaces the navigation when viewing this page.
+ kind: "nav"
+
+ // The text to display in the menu.
+ text: string
+
+ // The URL path to the page.
+ path: string
+
+ // The file to render when viewing this page.
+ file: string
+
+ // The items to display in the submenu.
+ submenu: #SubMenu
+
+ // Additional presentation options for the menu item.
+ presentation?: #Presentation
+}
+
+#Presentation: {
+ // Icon to display next to the menu item.
+ icon?: string
+ style: "card" | *"basic"
+}
+
+#AccordionMenuItem: {
+ kind: "accordion"
+ text: string
+ // If the accordion is open by default.
+ defaultExpanded: bool | *false
+
+ // The items to display in the accordion.
+ accordion: [...#MenuItem]
+}
+
+// The root object is a #RootMenu.
+#RootMenu
+{
+ items: [
+ {
+ kind: "nav"
+ text: "Encore.go"
+ path: "/go"
+ file: "go/overview"
+ submenu: #EncoreGO
+ presentation: {
+ icon: "golang"
+ style: "card"
+ }
+ }, {
+ kind: "nav"
+ text: "Encore.ts"
+ path: "/ts"
+ file: "ts/overview"
+ submenu: #EncoreTS
+ presentation: {
+ icon: "typescript"
+ style: "card"
+ }
+ }, {
+ kind: "nav"
+ text: "Encore Cloud"
+ path: "/platform"
+ file: "platform/overview"
+ submenu: #EncorePlatform
+ presentation: {
+ icon: "typescript"
+ style: "card"
+ }
+ },
+ ]
+}
+
+#EncoreGO: #SubMenu & {
+ title: "Encore.go"
+ id: "go"
+ presentation: {
+ icon: "golang"
+ }
+ back: {
+ text: ""
+ path: ""
+ }
+ items: [
+ {
+ kind: "section"
+ text: "Get Started"
+ items: [{
+ kind: "basic"
+ text: "Installation"
+ path: "/go/install"
+ file: "go/install"
+ }, {
+ kind: "basic"
+ text: "Quick Start"
+ path: "/go/quick-start"
+ file: "go/quick-start"
+ }, {
+ kind: "basic"
+ text: "FAQ"
+ path: "/go/faq"
+ file: "go/faq"
+ }]
+ },
+ {
+ kind: "section"
+ text: "Concepts"
+ items: [{
+ kind: "basic"
+ text: "Benefits"
+ path: "/go/concepts/benefits"
+ file: "go/concepts/benefits"
+ }, {
+ kind: "basic"
+ text: "Application Model"
+ path: "/go/concepts/application-model"
+ file: "go/concepts/application-model"
+ }]
+ },
+ {
+ kind: "section"
+ text: "Tutorials"
+ items: [{
+ kind: "basic"
+ text: "Building a REST API"
+ path: "/go/tutorials/rest-api"
+ file: "go/tutorials/rest-api"
+ }, {
+ kind: "basic"
+ text: "Building an Uptime Monitor"
+ path: "/go/tutorials/uptime"
+ file: "go/tutorials/uptime"
+ }, {
+ kind: "basic"
+ text: "Building a GraphQL API"
+ path: "/go/tutorials/graphql"
+ file: "go/tutorials/graphql"
+ }, {
+ kind: "basic"
+ text: "Building a Slack bot"
+ path: "/go/tutorials/slack-bot"
+ file: "go/tutorials/slack-bot"
+ }, {
+ kind: "basic"
+ text: "Building a Meeting Notes app"
+ path: "/go/tutorials/meeting-notes"
+ file: "go/tutorials/meeting-notes"
+ }, {
+ kind: "basic"
+ text: "Building a Booking System"
+ path: "/go/tutorials/booking-system"
+ file: "go/tutorials/booking-system"
+ }, {
+ kind: "basic"
+ text: "Building an Incident Management tool"
+ path: "/go/tutorials/incident-management-tool"
+ file: "go/tutorials/incident-management-tool"
+ hidden: true
+ }]
+ },
+ {
+ kind: "section"
+ text: "Primitives"
+ items: [{
+ kind: "basic"
+ text: "App Structure"
+ path: "/go/primitives/app-structure"
+ file: "go/primitives/app-structure"
+ }, {
+ kind: "basic"
+ text: "Services"
+ path: "/go/primitives/services"
+ file: "go/primitives/services"
+ }, {
+ kind: "accordion"
+ text: "APIs"
+ accordion: [{
+ kind: "basic"
+ text: "Defining APIs"
+ path: "/go/primitives/defining-apis"
+ file: "go/primitives/defining-apis"
+ }, {
+ kind: "basic"
+ text: "API Calls"
+ path: "/go/primitives/api-calls"
+ file: "go/primitives/api-calls"
+ }, {
+ kind: "basic"
+ text: "Raw Endpoints"
+ path: "/go/primitives/raw-endpoints"
+ file: "go/primitives/raw-endpoints"
+ }, {
+ kind: "basic"
+ text: "Service Structs"
+ path: "/go/primitives/service-structs"
+ file: "go/primitives/service-structs"
+ }, {
+ kind: "basic"
+ text: "API Errors"
+ path: "/go/primitives/api-errors"
+ file: "go/primitives/api-errors"
+ }]
+ }, {
+ kind: "accordion"
+ text: "Databases"
+ accordion: [{
+ kind: "basic"
+ text: "Using SQL databases"
+ path: "/go/primitives/databases"
+ file: "go/primitives/databases"
+ }, {
+ kind: "basic"
+ text: "Change SQL database schema"
+ path: "/go/primitives/change-db-schema"
+ file: "go/primitives/change-db-schema"
+ }, {
+ kind: "basic"
+ text: "Integrate with existing databases"
+ path: "/go/primitives/connect-existing-db"
+ file: "go/primitives/connect-existing-db"
+ }, {
+ kind: "basic"
+ text: "Insert test data in a database"
+ path: "/go/primitives/insert-test-data-db"
+ file: "go/primitives/insert-test-data-db"
+ }, {
+ kind: "basic"
+ text: "Share databases between services"
+ path: "/go/primitives/share-db-between-services"
+ file: "go/primitives/share-db-between-services"
+ }, {
+ kind: "basic"
+ text: "PostgreSQL Extensions"
+ path: "/go/primitives/databases/extensions"
+ file: "go/primitives/database-extensions"
+ }, {
+ kind: "basic"
+ text: "Troubleshooting"
+ path: "/go/primitives/databases/troubleshooting"
+ file: "go/primitives/database-troubleshooting"
+ }]
+ }, {
+ kind: "basic"
+ text: "Object Storage"
+ path: "/go/primitives/object-storage"
+ file: "go/primitives/object-storage"
+ }, {
+ kind: "basic"
+ text: "Cron Jobs"
+ path: "/go/primitives/cron-jobs"
+ file: "go/primitives/cron-jobs"
+ }, {
+ kind: "basic"
+ text: "Pub/Sub"
+ path: "/go/primitives/pubsub"
+ file: "go/primitives/pubsub"
+ }, {
+ kind: "basic"
+ text: "Caching"
+ path: "/go/primitives/caching"
+ file: "go/primitives/caching"
+ }, {
+ kind: "basic"
+ text: "Secrets"
+ path: "/go/primitives/secrets"
+ file: "go/primitives/secrets"
+ }, {
+ kind: "basic"
+ text: "Code Snippets"
+ path: "/go/primitives/code-snippets"
+ file: "go/primitives/code-snippets"
+ }]
+ }, {
+ kind: "section"
+ text: "Development"
+ items: [{
+ kind: "basic"
+ text: "Authentication"
+ path: "/go/develop/auth"
+ file: "go/develop/auth"
+ }, {
+ kind: "basic"
+ text: "Configuration"
+ path: "/go/develop/config"
+ file: "go/develop/config"
+ }, {
+ kind: "basic"
+ text: "CORS"
+ path: "/go/develop/cors"
+ file: "go/develop/cors"
+ }, {
+ kind: "basic"
+ text: "Metadata"
+ path: "/go/develop/metadata"
+ file: "go/develop/metadata"
+ }, {
+ kind: "basic"
+ text: "Middleware"
+ path: "/go/develop/middleware"
+ file: "go/develop/middleware"
+ }, {
+ kind: "basic"
+ text: "Testing"
+ path: "/go/develop/testing"
+ file: "go/develop/testing"
+ }, {
+ kind: "basic"
+ text: "Mocking"
+ path: "/go/develop/testing/mocking"
+ file: "go/develop/mocking"
+ }, {
+ kind: "basic"
+ text: "Validation"
+ path: "/go/develop/validation"
+ file: "go/develop/validation"
+ }]
+ },
+ {
+ kind: "section"
+ text: "CLI"
+ items: [{
+ kind: "basic"
+ text: "CLI Reference"
+ path: "/go/cli/cli-reference"
+ file: "go/cli/cli-reference"
+ }, {
+ kind: "basic"
+ text: "Client Generation"
+ path: "/go/cli/client-generation"
+ file: "go/cli/client-generation"
+ }, {
+ kind: "basic"
+ text: "Infra Namespaces"
+ path: "/go/cli/infra-namespaces"
+ file: "go/cli/infra-namespaces"
+ }, {
+ kind: "basic"
+ text: "CLI Configuration"
+ path: "/go/cli/config-reference"
+ file: "go/cli/config-reference"
+ }, {
+ kind: "basic"
+ text: "Telemetry"
+ path: "/go/cli/telemetry"
+ file: "go/cli/telemetry"
+ }, {
+ kind: "basic"
+ text: "MCP"
+ path: "/go/cli/mcp"
+ file: "go/cli/mcp"
+ }]
+ },
+ {
+ kind: "section"
+ text: "Observability"
+ items: [{
+ kind: "basic"
+ text: "Development Dashboard"
+ path: "/go/observability/dev-dash"
+ file: "go/observability/dev-dash"
+ }, {
+ kind: "basic"
+ text: "Distributed Tracing"
+ path: "/go/observability/tracing"
+ file: "go/observability/tracing"
+ }, {
+ kind: "basic"
+ text: "Flow Architecture Diagram"
+ path: "/go/observability/encore-flow"
+ file: "go/observability/encore-flow"
+ }, {
+ kind: "basic"
+ text: "Service Catalog"
+ path: "/go/observability/service-catalog"
+ file: "go/observability/service-catalog"
+ }, {
+ kind: "basic"
+ text: "Logging"
+ path: "/go/observability/logging"
+ file: "go/observability/logging"
+ }]
+ },
+ {
+ kind: "section"
+ text: "Self Hosting"
+ items: [
+ {
+ kind: "basic"
+ text: "CI/CD"
+ path: "/go/self-host/ci-cd"
+ file: "go/self-host/ci-cd"
+ },
+ {
+ kind: "basic"
+ text: "Build Docker Images"
+ path: "/go/self-host/docker-build"
+ file: "go/self-host/self-host"
+ }, {
+ kind: "basic"
+ text: "Configure Infrastructure"
+ path: "/go/self-host/configure-infra"
+ file: "go/self-host/configure-infra"
+ }]
+ },
+ {
+ kind: "section"
+ text: "How to guides"
+ items: [{
+ kind: "basic"
+ text: "Break a monolith into microservices"
+ path: "/go/how-to/break-up-monolith"
+ file: "go/how-to/break-up-monolith"
+ }, {
+ kind: "basic"
+ text: "Integrate with a web frontend"
+ path: "/go/how-to/integrate-frontend"
+ file: "go/how-to/integrate-frontend"
+ }, {
+ kind: "basic"
+ text: "Use Temporal with Encore"
+ path: "/go/how-to/temporal"
+ file: "go/how-to/temporal"
+ }, {
+ kind: "basic"
+ text: "Build with cgo"
+ path: "/go/how-to/cgo"
+ file: "go/how-to/cgo"
+ }, {
+ kind: "basic"
+ text: "Debug with Delve"
+ path: "/go/how-to/debug"
+ file: "go/how-to/debug"
+ }, {
+ kind: "basic"
+ text: "Receive regular HTTP requests & Use websockets"
+ path: "/go/how-to/http-requests"
+ file: "go/how-to/http-requests"
+ }, {
+ kind: "basic"
+ text: "Use Atlas + GORM for database migrations"
+ path: "/go/how-to/atlas-gorm"
+ file: "go/how-to/atlas-gorm"
+ }, {
+ kind: "basic"
+ text: "Use the ent ORM for migrations"
+ path: "/go/how-to/entgo-orm"
+ file: "go/how-to/entgo-orm"
+ }, {
+ kind: "basic"
+ text: "Use Connect for gRPC communication"
+ path: "/go/how-to/grpc-connect"
+ file: "go/how-to/grpc-connect"
+ }, {
+ kind: "basic"
+ text: "Use a Pub/Sub Transactional Outbox"
+ path: "/go/how-to/pubsub-outbox"
+ file: "go/how-to/pubsub-outbox"
+ }, {
+ kind: "basic"
+ text: "Use Dependency Injection"
+ path: "/go/how-to/dependency-injection"
+ file: "go/how-to/dependency-injection"
+ }, {
+ kind: "basic"
+ text: "Use Auth0 Authentication"
+ path: "/go/how-to/auth0-auth"
+ file: "go/how-to/auth0-auth"
+ }, {
+ kind: "basic"
+ text: "Use Clerk Authentication"
+ path: "/go/how-to/clerk-auth"
+ file: "go/how-to/clerk-auth"
+ }, {
+ kind: "basic"
+ text: "Use Firebase Authentication"
+ path: "/go/how-to/firebase-auth"
+ file: "go/how-to/firebase-auth"
+ }, {
+ kind: "basic"
+ text: "Use Logto Authentication"
+ path: "/go/how-to/logto-auth"
+ file: "go/how-to/logto-auth"
+ }]
+ },
+ {
+ kind: "section"
+ text: "Migration guides"
+ items: [{
+ kind: "basic"
+ text: "Migrate away from Encore"
+ path: "/go/migration/migrate-away"
+ file: "go/migration/migrate-away"
+ }]
+ },
+ {
+ kind: "section"
+ text: "Community"
+ items: [{
+ kind: "basic"
+ text: "Get Involved"
+ path: "/go/community/get-involved"
+ file: "go/community/get-involved"
+ }, {
+ kind: "basic"
+ text: "Contribute"
+ path: "/go/community/contribute"
+ file: "go/community/contribute"
+ }, {
+ kind: "basic"
+ text: "Open Source"
+ path: "/go/community/open-source"
+ file: "go/community/open-source"
+ }, {
+ kind: "basic"
+ text: "Principles"
+ path: "/go/community/principles"
+ file: "go/community/principles"
+ }, {
+ kind: "basic"
+ text: "Submit Template"
+ path: "/go/community/submit-template"
+ file: "go/community/submit-template"
+ }]
+ },
+ ]
+}
+
+#EncoreTS: #SubMenu & {
+ title: "Encore.ts"
+ id: "ts"
+ presentation: {
+ icon: "typescript"
+ }
+ back: {
+ text: ""
+ path: ""
+ }
+ items: [
+ {
+ kind: "section"
+ text: "Get started"
+ items: [{
+ kind: "basic"
+ text: "Installation"
+ path: "/ts/install"
+ file: "ts/install"
+ }, {
+ kind: "basic"
+ text: "Quick Start"
+ path: "/ts/quick-start"
+ file: "ts/quick-start"
+ }, {
+ kind: "basic"
+ text: "FAQ"
+ path: "/ts/faq"
+ file: "ts/faq"
+ }]
+ },
+ {
+ kind: "section"
+ text: "Concepts"
+ items: [{
+ kind: "basic"
+ text: "Benefits"
+ path: "/ts/concepts/benefits"
+ file: "ts/concepts/benefits"
+ }, {
+ kind: "basic"
+ text: "Application Model"
+ path: "/ts/concepts/application-model"
+ file: "ts/concepts/application-model"
+ }, {
+ kind: "basic"
+ text: "Hello World"
+ path: "/ts/concepts/hello-world"
+ file: "ts/concepts/hello-world"
+ }]
+ },
+ {
+ kind: "section"
+ text: "Tutorials"
+ items: [{
+ kind: "basic"
+ text: "Building a REST API"
+ path: "/ts/tutorials/rest-api"
+ file: "ts/tutorials/rest-api"
+ }, {
+ kind: "basic"
+ text: "Building an Uptime Monitor"
+ path: "/ts/tutorials/uptime"
+ file: "ts/tutorials/uptime"
+ }, {
+ kind: "basic"
+ text: "Building a GraphQL API"
+ path: "/ts/tutorials/graphql"
+ file: "ts/tutorials/graphql"
+ }, {
+ kind: "basic"
+ text: "Building a Slack bot"
+ path: "/ts/tutorials/slack-bot"
+ file: "ts/tutorials/slack-bot"
+ }]
+ },
+ {
+ kind: "section"
+ text: "Primitives"
+ items: [{
+ kind: "basic"
+ text: "App Structure"
+ path: "/ts/primitives/app-structure"
+ file: "ts/primitives/app-structure"
+ }, {
+ kind: "basic"
+ text: "Services"
+ path: "/ts/primitives/services"
+ file: "ts/primitives/services"
+ }, {
+ kind: "accordion"
+ text: "APIs"
+ accordion: [{
+ kind: "basic"
+ text: "Defining APIs"
+ path: "/ts/primitives/defining-apis"
+ file: "ts/primitives/defining-apis"
+ }, {
+ kind: "basic"
+ text: "Validation"
+ path: "/ts/primitives/validation"
+ file: "ts/primitives/validation"
+ }, {
+ kind: "basic"
+ text: "API Calls"
+ path: "/ts/primitives/api-calls"
+ file: "ts/primitives/api-calls"
+ }, {
+ kind: "basic"
+ text: "Raw Endpoints"
+ path: "/ts/primitives/raw-endpoints"
+ file: "ts/primitives/raw-endpoints"
+ }, {
+ kind: "basic"
+ text: "GraphQL"
+ path: "/ts/primitives/graphql"
+ file: "ts/primitives/graphql"
+ }, {
+ kind: "basic"
+ text: "Streaming APIs"
+ path: "/ts/primitives/streaming-apis"
+ file: "ts/primitives/streaming-apis"
+ }, {
+ kind: "basic"
+ text: "API Errors"
+ path: "/ts/primitives/errors"
+ file: "ts/primitives/errors"
+ }, {
+ kind: "basic"
+ text: "Static Assets"
+ path: "/ts/primitives/static-assets"
+ file: "ts/primitives/static-assets"
+ }, {
+ kind: "basic"
+ text: "Cookies"
+ path: "/ts/primitives/cookies"
+ file: "ts/primitives/cookies"
+ }]
+ }, {
+ kind: "basic"
+ text: "Databases"
+ path: "/ts/primitives/databases"
+ file: "ts/primitives/databases"
+ }, {
+ kind: "basic"
+ text: "PostgreSQL Extensions"
+ path: "/ts/primitives/databases-extensions"
+ file: "ts/primitives/database-extensions"
+ }, {
+ kind: "basic"
+ text: "Object Storage"
+ path: "/ts/primitives/object-storage"
+ file: "ts/primitives/object-storage"
+ }, {
+ kind: "basic"
+ text: "Cron Jobs"
+ path: "/ts/primitives/cron-jobs"
+ file: "ts/primitives/cron-jobs"
+ }, {
+ kind: "basic"
+ text: "Pub/Sub"
+ path: "/ts/primitives/pubsub"
+ file: "ts/primitives/pubsub"
+ }, {
+ kind: "basic"
+ text: "Secrets"
+ path: "/ts/primitives/secrets"
+ file: "ts/primitives/secrets"
+ }]
+ }, {
+ kind: "section"
+ text: "Development"
+ items: [{
+ kind: "basic"
+ text: "Authentication"
+ path: "/ts/develop/auth"
+ file: "ts/develop/auth"
+ }, {
+ kind: "accordion"
+ text: "ORMs"
+ accordion: [{
+ kind: "basic"
+ text: "Overview"
+ path: "/ts/develop/orms"
+ file: "ts/develop/orms/overview"
+ }, {
+ kind: "basic"
+ text: "Knex.js"
+ path: "/ts/develop/orms/knex"
+ file: "ts/develop/orms/knex"
+ }, {
+ kind: "basic"
+ text: "Prisma"
+ path: "/ts/develop/orms/prisma"
+ file: "ts/develop/orms/prisma"
+ }, {
+ kind: "basic"
+ text: "Drizzle"
+ path: "/ts/develop/orms/drizzle"
+ file: "ts/develop/orms/drizzle"
+ }, {
+ kind: "basic"
+ text: "Sequelize"
+ path: "/ts/develop/orms/sequelize"
+ file: "ts/develop/orms/sequelize"
+ }]
+ }, {
+ kind: "basic"
+ text: "Metadata"
+ path: "/ts/develop/metadata"
+ file: "ts/develop/metadata"
+ }, {
+ kind: "basic"
+ text: "Testing"
+ path: "/ts/develop/testing"
+ file: "ts/develop/testing"
+ }, {
+ kind: "basic"
+ text: "Debugging"
+ path: "/ts/develop/debug"
+ file: "ts/develop/debug"
+ }, {
+ kind: "basic"
+ text: "Middleware"
+ path: "/ts/develop/middleware"
+ file: "ts/develop/middleware"
+ }, {
+ kind: "basic"
+ text: "Multithreading"
+ path: "/ts/develop/multithreading"
+ file: "ts/develop/multithreading"
+ },{
+ kind: "basic"
+ text: "Running Scripts"
+ path: "/ts/develop/running-scripts"
+ file: "ts/develop/running-scripts"
+ }]
+ },
+ {
+ kind: "section"
+ text: "CLI"
+ items: [{
+ kind: "basic"
+ text: "CLI Reference"
+ path: "/ts/cli/cli-reference"
+ file: "ts/cli/cli-reference"
+ }, {
+ kind: "basic"
+ text: "Client Generation"
+ path: "/ts/cli/client-generation"
+ file: "ts/cli/client-generation"
+ }, {
+ kind: "basic"
+ text: "Infra Namespaces"
+ path: "/ts/cli/infra-namespaces"
+ file: "ts/cli/infra-namespaces"
+ }, {
+ kind: "basic"
+ text: "CLI Configuration"
+ path: "/ts/cli/config-reference"
+ file: "ts/cli/config-reference"
+ }, {
+ kind: "basic"
+ text: "Telemetry"
+ path: "/ts/cli/telemetry"
+ file: "ts/cli/telemetry"
+ }, {
+ kind: "basic"
+ text: "MCP"
+ path: "/ts/cli/mcp"
+ file: "ts/cli/mcp"
+ }]
+ },
+ {
+ kind: "section"
+ text: "Frontend"
+ items: [{
+ kind: "basic"
+ text: "Hosting"
+ path: "/ts/frontend/hosting"
+ file: "ts/frontend/hosting"
+ }, {
+ kind: "basic"
+ text: "CORS"
+ path: "/ts/frontend/cors"
+ file: "ts/frontend/cors"
+ }, {
+ kind: "basic"
+ text: "Request Client"
+ path: "/ts/frontend/request-client"
+ file: "ts/frontend/request-client"
+ }, {
+ kind: "basic"
+ text: "Template Engine"
+ path: "/ts/frontend/template-engine"
+ file: "ts/frontend/template-engine"
+ }, {
+ kind: "basic"
+ text: "Mono vs Multi Repo"
+ path: "/ts/frontend/mono-vs-multi-repo"
+ file: "ts/frontend/mono-vs-multi-repo"
+ }]
+ },
+ {
+ kind: "section"
+ text: "Observability"
+ items: [{
+ kind: "basic"
+ text: "Development Dashboard"
+ path: "/ts/observability/dev-dash"
+ file: "ts/observability/dev-dash"
+ }, {
+ kind: "basic"
+ text: "Logging"
+ path: "/ts/observability/logging"
+ file: "ts/observability/logging"
+ }, {
+ kind: "basic"
+ text: "Distributed Tracing"
+ path: "/ts/observability/tracing"
+ file: "ts/observability/tracing"
+ }, {
+ kind: "basic"
+ text: "Flow Architecture Diagram"
+ path: "/ts/observability/flow"
+ file: "ts/observability/flow"
+ }, {
+ kind: "basic"
+ text: "Service Catalog"
+ path: "/ts/observability/service-catalog"
+ file: "ts/observability/service-catalog"
+ }]
+ },
+ {
+ kind: "section"
+ text: "Self Hosting"
+ items: [
+ {
+ kind: "basic"
+ text: "CI/CD"
+ path: "/ts/self-host/ci-cd"
+ file: "ts/self-host/ci-cd"
+ },
+ {
+ kind: "basic"
+ text: "Build Docker Images"
+ path: "/ts/self-host/build"
+ file: "ts/self-host/build"
+ }, {
+ kind: "basic"
+ text: "Configure Infrastructure"
+ path: "/ts/self-host/configure-infra"
+ file: "ts/self-host/configure-infra"
+ }, {
+ kind: "basic"
+ text: "Deploy to DigitalOcean"
+ path: "/ts/self-host/deploy-digitalocean"
+ file: "ts/self-host/deploy-to-digital-ocean"
+ }, {
+ kind: "basic"
+ text: "Deploy to Railway"
+ path: "/ts/self-host/deploy-railway"
+ file: "ts/self-host/deploy-to-railway"
+ }]
+ },
+ {
+ kind: "section"
+ text: "How to guides"
+ items: [{
+ kind: "basic"
+ text: "Handle file uploads"
+ path: "/ts/how-to/file-uploads"
+ file: "ts/how-to/file-uploads"
+ }, {
+ kind: "basic"
+ text: "Use NestJS with Encore"
+ path: "/ts/how-to/nestjs"
+ file: "ts/how-to/nestjs"
+ }]
+ }, {
+ kind: "section"
+ text: "Migration guides"
+ items: [{
+ kind: "basic"
+ text: "Migrate away from Encore"
+ path: "/ts/migration/migrate-away"
+ file: "ts/migration/migrate-away"
+ }, {
+ kind: "basic"
+ text: "Migrate from Express.js"
+ path: "/ts/migration/express-migration"
+ file: "ts/migration/express-migration"
+ }]
+ },
+ {
+ kind: "section"
+ text: "Community"
+ items: [{
+ kind: "basic"
+ text: "Get Involved"
+ path: "/ts/community/get-involved"
+ file: "ts/community/get-involved"
+ }, {
+ kind: "basic"
+ text: "Contribute"
+ path: "/ts/community/contribute"
+ file: "ts/community/contribute"
+ }, {
+ kind: "basic"
+ text: "Open Source"
+ path: "/ts/community/open-source"
+ file: "ts/community/open-source"
+ }, {
+ kind: "basic"
+ text: "Principles"
+ path: "/ts/community/principles"
+ file: "ts/community/principles"
+ }, {
+ kind: "basic"
+ text: "Submit Template"
+ path: "/ts/community/submit-template"
+ file: "ts/community/submit-template"
+ }]
+ },
+ ]
+}
+
+#EncorePlatform: #SubMenu & {
+ title: "Encore Cloud"
+ id: "platform"
+ presentation: {
+ icon: ""
+ }
+ back: {
+ text: ""
+ path: ""
+ }
+ items: [
+ {
+ kind: "section"
+ text: "Concepts"
+ items: [{
+ kind: "basic"
+ text: "Introduction"
+ path: "/platform/introduction"
+ file: "platform/introduction"
+ }]
+ },
+ {
+ kind: "section"
+ text: "Deployment"
+ items: [{
+ kind: "basic"
+ text: "Deploying & CI/CD"
+ path: "/platform/deploy/deploying"
+ file: "platform/deploy/deploying"
+ }, {
+ kind: "basic"
+ text: "Connect your cloud account"
+ path: "/platform/deploy/own-cloud"
+ file: "platform/deploy/own-cloud"
+ }, {
+ kind: "basic"
+ text: "Environments"
+ path: "/platform/deploy/environments"
+ file: "platform/deploy/environments"
+ }, {
+ kind: "basic"
+ text: "Preview Environments"
+ path: "/platform/deploy/preview-environments"
+ file: "platform/deploy/preview-environments"
+ }, {
+ kind: "basic"
+ text: "Application Security"
+ path: "/platform/deploy/security"
+ file: "platform/deploy/security"
+ }]
+ },
+ {
+ kind: "section"
+ text: "Infrastructure"
+ items: [{
+ kind: "basic"
+ text: "Provisioning & Environments"
+ path: "/platform/infrastructure/infra"
+ file: "platform/infrastructure/infra"
+ }, {
+ kind: "basic"
+ text: "Infrastructure Configuration"
+ path: "/platform/infrastructure/configuration"
+ file: "platform/infrastructure/configuration"
+ },
+ {
+ kind: "accordion"
+ text: "GCP Infrastructure"
+ accordion: [{
+ kind: "basic"
+ text: "Overview"
+ path: "/platform/infrastructure/gcp"
+ file: "platform/infrastructure/gcp"
+ }, {
+ kind: "basic"
+ text: "Import Cloud SQL"
+ path: "/platform/infrastructure/gcp/import-cloud-sql"
+ file: "platform/infrastructure/import-cloud-sql"
+ }, {
+ kind: "basic"
+ text: "Import Project"
+ path: "/platform/infrastructure/gcp/import-project"
+ file: "platform/infrastructure/import-project"
+ }, {
+ kind: "basic"
+ text: "Configure Network"
+ path: "/platform/infrastructure/configure-network"
+ file: "platform/infrastructure/configure-network"
+ }]
+ }, {
+ kind: "accordion"
+ text: "AWS Infrastructure"
+ accordion: [{
+ kind: "basic"
+ text: "Overview"
+ path: "/platform/infrastructure/aws"
+ file: "platform/infrastructure/aws"
+ },{
+ kind: "basic"
+ text: "Import RDS Database"
+ path: "/platform/infrastructure/aws/import-rds"
+ file: "platform/infrastructure/import-rds"
+ }, {
+ kind: "basic"
+ text: "Configure Network"
+ path: "/platform/infrastructure/configure-network"
+ file: "platform/infrastructure/configure-network"
+ }]
+ }, {
+ kind: "accordion"
+ text: "Kubernetes deployment"
+ accordion: [{
+ kind: "basic"
+ text: "Deploying to a new cluster"
+ path: "/platform/infrastructure/kubernetes"
+ file: "platform/infrastructure/kubernetes"
+ }, {
+ kind: "basic"
+ text: "Import an existing cluster"
+ path: "/platform/infrastructure/import-kubernetes-cluster"
+ file: "platform/infrastructure/import-kubernetes-cluster"
+ }, {
+ kind: "basic"
+ text: "Configure kubectl"
+ path: "/platform/infrastructure/configure-kubectl"
+ file: "platform/infrastructure/configure-kubectl"
+ }]
+ }, {
+ kind: "basic"
+ text: "Neon Postgres"
+ path: "/platform/infrastructure/neon"
+ file: "platform/infrastructure/neon"
+ }, {
+ kind: "basic"
+ text: "Cloudflare R2"
+ path: "/platform/infrastructure/cloudflare"
+ file: "platform/infrastructure/cloudflare"
+ }, {
+ kind: "basic"
+ text: "Managing database users"
+ path: "/platform/infrastructure/manage-db-users"
+ file: "platform/infrastructure/manage-db-users"
+ }]
+ }, {
+ kind: "section"
+ text: "Observability"
+ items: [{
+ kind: "basic"
+ text: "Metrics"
+ path: "/platform/observability/metrics"
+ file: "platform/observability/metrics"
+ }, {
+ kind: "basic"
+ text: "Distributed Tracing"
+ path: "/platform/observability/tracing"
+ file: "platform/observability/tracing"
+ }, {
+ kind: "basic"
+ text: "Flow Architecture Diagram"
+ path: "/platform/observability/encore-flow"
+ file: "platform/observability/encore-flow"
+ }, {
+ kind: "basic"
+ text: "Service Catalog"
+ path: "/platform/observability/service-catalog"
+ file: "platform/observability/service-catalog"
+ }]
+ },
+ {
+ kind: "section"
+ text: "Integrations"
+ items: [{
+ kind: "basic"
+ text: "GitHub"
+ path: "/platform/integrations/github"
+ file: "platform/integrations/github"
+ }, {
+ kind: "basic"
+ text: "Custom Domains"
+ path: "/platform/integrations/custom-domains"
+ file: "platform/integrations/custom-domains"
+ }, {
+ kind: "basic"
+ text: "Webhooks"
+ path: "/platform/integrations/webhooks"
+ file: "platform/integrations/webhooks"
+ }, {
+ kind: "basic"
+ text: "OAuth Clients"
+ path: "/platform/integrations/oauth-clients"
+ file: "platform/integrations/oauth-clients"
+ }, {
+ kind: "basic"
+ text: "Auth Keys"
+ path: "/platform/integrations/auth-keys"
+ file: "platform/integrations/auth-keys"
+ }, {
+ kind: "basic"
+ text: "API Reference"
+ path: "/platform/integrations/api-reference"
+ file: "platform/integrations/api-reference"
+ }, {
+ kind: "basic"
+ text: "Terraform"
+ path: "/platform/integrations/terraform"
+ file: "platform/integrations/terraform"
+ }]
+ },
+ {
+ kind: "section"
+ text: "Migration guides"
+ items: [{
+ kind: "basic"
+ text: "Migrate to Encore"
+ path: "/platform/migration/migrate-to-encore"
+ file: "platform/migration/migrate-to-encore"
+ }, {
+ kind: "basic"
+ text: "Migrate away from Encore"
+ path: "/platform/migration/migrate-away"
+ file: "platform/migration/migrate-away"
+ }]
+ },
+ {
+ kind: "section"
+ text: "Management & Billing"
+ items: [{
+ kind: "basic"
+ text: "Compliance & Security"
+ path: "/platform/management/compliance"
+ file: "platform/management/compliance"
+ }, {
+ kind: "basic"
+ text: "Plans & billing"
+ path: "/platform/management/billing"
+ file: "platform/management/billing"
+ }, {
+ kind: "basic"
+ text: "Telemetry"
+ path: "/platform/management/telemetry"
+ file: "platform/management/telemetry"
+ }, {
+ kind: "basic"
+ text: "Roles & Permissions"
+ path: "/platform/management/permissions"
+ file: "platform/management/permissions"
+ }, {
+ kind: "basic"
+ text: "Usage limits"
+ path: "/platform/management/usage"
+ file: "platform/management/usage"
+ }]
+ },
+ {
+ kind: "section"
+ text: "Other"
+ items: [
+ {
+ kind: "accordion"
+ text: "Product comparisons"
+ accordion: [{
+ kind: "basic"
+ text: "Encore vs. Heroku"
+ path: "/platform/other/vs-heroku"
+ file: "platform/other/vs-heroku"
+ }, {
+ kind: "basic"
+ text: "Encore vs. Supabase / Firebase"
+ path: "/platform/other/vs-supabase"
+ file: "platform/other/vs-supabase"
+ }, {
+ kind: "basic"
+ text: "Encore vs. Terraform / Pulumi"
+ path: "/platform/other/vs-terraform"
+ file: "platform/other/vs-terraform"
+ }]
+ }]
+ },
+ ]
+}
diff --git a/docs/platform/deploy/deploying.md b/docs/platform/deploy/deploying.md
new file mode 100644
index 0000000000..57524ba5f9
--- /dev/null
+++ b/docs/platform/deploy/deploying.md
@@ -0,0 +1,153 @@
+---
+seotitle: Deploying your Encore application is as simple as git push
+seodesc: Learn how to deploy your backend application built with Encore with a single command, while Encore manages your entire CI/CD process.
+title: Deploying Applications with Encore Cloud
+subtitle: Encore Cloud automates the deployment and infrastructure provisioning process
+lang: platform
+---
+
+Encore Cloud makes deploying your application as simple as pushing to a git repository, removing the need for manual steps.
+
+## Deploying your application
+
+### Step 1: Create account & application
+
+Before deploying, ensure that you have an **Encore Cloud account** and have created an **Encore application**.
+
+You can create both an account and an application by running the following command:
+
+```shell
+$ encore app create
+```
+
+You will be asked to create a free Encore Cloud account first, and then proceed to create a new Encore application.
+
+#### Already created an application locally?
+
+Follow these steps if you've already created an app and want to link it to an account on Encore Cloud:
+
+**1. Ensure you are logged in with the CLI**
+
+```bash
+encore auth signup # If you haven't created an Encore Cloud account
+encore auth login # If you've already created an Encore Cloud account
+```
+
+**2. Link your local app to Encore Cloud**
+
+Run this command from your application's root folder:
+
+```bash
+encore app init
+```
+
+**3. Set up Encore's git remote to enable pushing directly to Encore Cloud**
+
+Run this command from your application's root folder:
+
+```bash
+git remote add encore encore://
+```
+
+
+### Step 2: Integrate with GitHub (Optional)
+
+When creating an Encore application, Encore will automatically create a new Encore managed git repository. If you are just trying out Encore Cloud, you can use this and skip the rest of this step.
+
+For production applications we recommend integrating with GitHub instead of using the built-in Encore managed git:
+
+#### **Connecting your GitHub account**
+
+Open your app in the **[Encore Cloud dashboard](https://app.encore.cloud/) > (Select your app) > App Settings > Integrations > GitHub**.
+Click the **Connect Account to GitHub** button, which will open GitHub where you can grant access to the relevant repository (or repositories).
+
+[See the full docs](/docs/platform/integrations/github) on integrating with GitHub to learn how to configure different repository structures.
+
+Once connected to GitHub, pushing code will trigger deployments automatically. Encore Cloud Pro users get [Preview Environments](/docs/platform/deploy/preview-environments) for each pull request.
+
+### Step 3: Connect your AWS / GCP account (Optional)
+
+Deploy to your own cloud on AWS or GCP by connecting your cloud account to Encore Cloud.
+
+If you're just trying out Encore Cloud, skip this step to deploy to a free development environment using Encore Cloud's hosting, subject to [fair use limits](/docs/platform/management/usage).
+
+#### **Connecting your cloud account**
+
+Open your app in the **[Encore Cloud dashboard](https://app.encore.cloud/) > (Select your app) > App Settings > Integrations > Connect Cloud**.
+
+Learn more in the [connecting your cloud docs](/docs/platform/deploy/own-cloud).
+
+### Step 4: Push to deploy
+
+Deploy your application by pushing your code to the connected Git repository.
+
+- **Using Encore Cloud's managed git**:
+
+```shell
+$ git add -A .
+$ git commit -m 'Commit message'
+$ git push encore
+```
+
+- **If you have connected your GitHub account:**
+
+```shell
+$ git add -A .
+$ git commit -m 'Commit message'
+$ git push origin
+```
+
+This will trigger Encore Cloud's deployment process, consisting of the following phases:
+* A build & test phase
+* An infrastructure provisioning phase
+* A deployment phase
+
+Once you've pushed your code, you can monitor the progress in the **[Encore Cloud dashboard](https://app.encore.cloud/) > (Select your app) > Deployments**.
+
+## Configuring deploy trigger
+
+When using GitHub, you can configure Encore Cloud to automatically trigger deploys when you push to a specific branch name.
+
+To configure which branch name is used to trigger deploys, open your app in the [Encore Cloud dashboard](https://app.encore.cloud) and go to the **Overview** page for your intended environment. Click on **Settings**, then in the **Branch Push** section configure the `Branch name` and hit **Save**.
+
+### Integrating using Encore Cloud's API
+
+You can trigger deployments using Encore Cloud's API, learn more in the [API reference](/docs/platform/integrations/api-reference).
+
+## Configuring custom build settings
+
+If you want, you can override certain aspects of the CI/CD process in the `encore.app` file:
+
+* The Docker base image to use when deploying
+* Whether to build with Cgo enabled
+* Whether to bundle the source code in the docker image (useful for [Sentry stack traces](https://docs.sentry.io/platforms/go/usage/serverless/))
+
+Below are the available build settings configurable in the `encore.app` file,
+with their default values:
+
+```cue
+{
+ "build": {
+ // Enables cgo when building the application and running tests
+ // in Encore's CI/CD system.
+ "cgo_enabled": false,
+
+ // Docker-related configuration
+ "docker": {
+ // The Docker base image to use when deploying the application.
+ // It must be a publicly accessible image, and defaults to "scratch".
+ "base_image": "scratch",
+
+ // Whether to bundle the source code in the docker image.
+ // The source code will be copied into /workspace as part
+ // of the build process. This is primarily useful for tools like
+ // Sentry that need access to the source code to generate stack traces.
+ "bundle_source": false,
+
+ // The working directory to start the docker image in.
+ // If empty it defaults to "/workspace" if the source code is bundled, and to "/" otherwise.
+ "working_dir": ""
+ }
+ }
+}
+```
diff --git a/docs/platform/deploy/environments.md b/docs/platform/deploy/environments.md
new file mode 100644
index 0000000000..6766044999
--- /dev/null
+++ b/docs/platform/deploy/environments.md
@@ -0,0 +1,77 @@
+---
+seotitle: Environments – Creating local, preview, and prod environments
+seodesc: Learn how to create all the environments you need for your backend application, local, preview, testing and production. Here's how you keep them in sync!
+title: Creating & configuring environments
+subtitle: Get the environments you need, without the work
+lang: platform
+---
+
+Encore automatically sets up and manages different environments for your application (local, preview, testing, and production). Each environment is:
+- Fully isolated
+- Automatically provisioned
+- Always in sync with your codebase
+- Configured with appropriate infrastructure for its purpose
+
+## Environment Types
+
+Encore has four types of environments:
+- `production`
+- `development`
+- `preview`
+- `local`
+
+Some environment types differ in how infrastructure is provisioned:
+- `local` is provisioned by Encore's Open Source CLI using local versions of infrastructure.
+- `preview` environments are provisioned in Encore Cloud hosting and are optimized to be cost-efficient and fast to provision.
+- `production` and `development` environments are provisioned by Encore Cloud, either in your [cloud account](/docs/platform/deploy/own-cloud) or using Encore Cloud's free development hosting. Both environment types offer the same infrastructure options when deployed using your own cloud account.
+
+Environment type is also used for [Secrets management](/docs/ts/primitives/secrets), allowing you to configure different secrets for different environment types, such as separate values for your `production` and `development` environments.
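+
+A minimal sketch of how this looks with the Encore CLI (assuming the `--type` flag of `encore secret set`; check `encore secret set --help` for the exact syntax):
+
+```shell
+# Prompts for the secret value and stores it for production environments only.
+encore secret set --type production MyApiKey
+
+# Store a different value for development environments.
+encore secret set --type development MyApiKey
+```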
+
+## Creating environments
+
+1. Open your app in the [Encore Cloud dashboard](https://app.encore.cloud)
+2. Go to **Environments** > **Create env**
+3. Configure your environment:
+ - Name your environment
+ - Choose type: **Production** or **Development** (see [Environment Types](#environment-types))
+ - Set deploy trigger: Git branch or manual
+ - Configure infrastructure approval: automatic or manual
+ - Select cloud provider
+ - Choose process allocation: single or separate processes
+
+
+
+### Configuring deploy trigger
+
+When using GitHub, you can configure Encore Cloud to automatically trigger deploys when you push to a specific branch name.
+
+To configure which branch name is used to trigger deploys, open your app in the [Encore Cloud dashboard](https://app.encore.cloud) and go to the **Overview** page for your intended environment. Click on **Settings**, then in the **Branch Push** section configure the `Branch name` and hit **Save**.
+
+### Configuring infrastructure approval
+
+For some environments you may want to enforce infrastructure approval before deploying. You can configure this in the **Settings** > **Infrastructure Approval** section for your environment.
+
+When infrastructure approval is enabled, an application **Admin** will need to manually approve the infrastructure changes before the deployment can proceed.
+
+### Configuring process allocation
+
+Encore Cloud offers flexible process allocation options:
+- **Single process**: All services run in one process (simpler, lower cost)
+- **Separate processes**: Each service runs independently (better isolation, independent scaling)
+
+Choose your preferred deployment model when creating each environment. You can use different models for production and development environments without changing any code.
+
+
+
+## Setting a Primary environment
+
+Every Encore app has a configurable Primary environment that serves as the default for:
+- App insights in the Encore Cloud dashboard
+- API documentation
+- CLI functionality (like API client generation)
+
+**Configuring your Primary environment:**
+1. Open your app in the [Encore Cloud dashboard](https://app.encore.cloud)
+2. Navigate to **Settings** > **General** > **Primary Environment**
+3. Select your desired environment from the dropdown
+4. Click **Update**
diff --git a/docs/platform/deploy/own-cloud.md b/docs/platform/deploy/own-cloud.md
new file mode 100644
index 0000000000..9928b009cf
--- /dev/null
+++ b/docs/platform/deploy/own-cloud.md
@@ -0,0 +1,75 @@
+---
+seotitle: Connect your cloud account to deploy to any cloud
+seodesc: Learn how to deploy your backend application to all the major cloud providers (AWS or GCP) using Encore.
+title: Connect your cloud account
+subtitle: Whatever cloud you prefer is fine by us
+lang: platform
+---
+
+Encore Cloud lets you deploy your application to any of the major cloud providers, using your own cloud account.
+This lets you use Encore to improve your experience and productivity, while keeping the reliability of a major cloud provider.
+
+Each [environment](/docs/platform/deploy/environments) can be configured to use a different cloud provider, and you can have as many environments as you wish.
+This also lets you easily deploy a hybrid or multi-cloud application, as you see fit.
+
+
+
+Encore Cloud will provision infrastructure in your cloud account, but for safety reasons it does not automatically destroy infrastructure once it's no longer required. If you disconnect your app from your cloud provider, or delete an environment within Encore, you need to explicitly approve the deletion of the associated infrastructure in your Encore Cloud dashboard.
+
+
+
+## Google Cloud Platform (GCP)
+
+Encore Cloud provides a GCP Service Account for each Encore Cloud application, letting you grant Encore Cloud access to provision all the necessary infrastructure directly in your own GCP Organization account.
+
+To find your app's Service Account email and configure GCP deployments, head over to the Connect Cloud page by going to the **[Encore Cloud dashboard](https://app.encore.cloud/) > (Select your app) > App Settings > Integrations > Connect Cloud**.
+
+
+
+### Troubleshooting
+
+**I can't access/edit the `Policy for Domain restricted sharing` page**
+
+To edit Organization policies, you need to have the `Organization Policy Administrator` role. If you don't have this role, you can ask your GCP Organization Administrator to grant you the necessary permissions.
+If you're a GCP Organization Administrator, you can grant yourself the necessary permissions by following the steps below:
+
+1. Go to the [IAM & Admin page](https://console.cloud.google.com/iam-admin/iam) in the GCP Console.
+2. Find your user account in the list of members.
+3. Click the pencil icon to edit your user account.
+4. Add the `Organization Policy Administrator` role to your user account.
+5. Click Save.
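+
+If you prefer the command line, the same grant can be made with `gcloud` (a sketch; substitute your organization ID and user email):
+
+```shell
+# Grant the Organization Policy Administrator role to your own account.
+gcloud organizations add-iam-policy-binding ORGANIZATION_ID \
+  --member="user:you@example.com" \
+  --role="roles/orgpolicy.policyAdmin"
+```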
+
+**I can't grant access to the Encore Cloud service account**
+
+If you're unable to grant access to the Encore Cloud service account, you may have failed to add Encore Cloud to your `Domain restricted sharing` policy.
+Make sure you've followed all the steps in the Connect Cloud page to add Encore Cloud to the policy.
+If you're using several GCP accounts, make sure you're logged in with the correct account and that the correct organization is selected in the GCP Console.
+
+**Encore Cloud returns "Could not find Organization ID"**
+
+If you see this error message, it means that Encore Cloud was unable to connect to your GCP Organization. Make sure you've followed all the steps in the Connect Cloud page to grant Encore Cloud access to your GCP Organization.
+If you're using several GCP accounts, make sure you're logged in with the correct account and that the correct organization is selected in the GCP Console.
+
+Still having issues? Drop us an email at [support@encore.dev](mailto:support@encore.dev) or chat with us in the [Encore Discord](https://encore.dev/discord).
+
+## Amazon Web Services (AWS)
+To configure your Encore Cloud app to deploy to your AWS account, head over to the Connect Cloud page by going to the
+**[Encore Cloud dashboard](https://app.encore.cloud/) > (Select your app) > App Settings > Integrations > Connect Cloud**.
+
+Follow the instructions to create an IAM Role, and then connect the role with Encore Cloud.
+[Learn more in the AWS docs](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user.html).
+
+
+
+
+
+
+For your security, make sure to check `Require external ID` and specify the
+external ID provided in the instructions.
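+
+For illustration, here is roughly what creating such a role looks like with the AWS CLI. The account ID and external ID below are placeholders; use the values shown in the Connect Cloud instructions:
+
+```shell
+# Create the IAM role that Encore Cloud assumes, requiring the external ID.
+# ENCORE_ACCOUNT_ID and EXTERNAL_ID are placeholders from the instructions.
+aws iam create-role \
+  --role-name encore-cloud \
+  --assume-role-policy-document '{
+    "Version": "2012-10-17",
+    "Statement": [{
+      "Effect": "Allow",
+      "Principal": {"AWS": "arn:aws:iam::ENCORE_ACCOUNT_ID:root"},
+      "Action": "sts:AssumeRole",
+      "Condition": {"StringEquals": {"sts:ExternalId": "EXTERNAL_ID"}}
+    }]
+  }'
+```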
+
+
+
+After connecting your app to AWS, you will be asked to choose which region you want Encore Cloud to provision resources in. [Learn more about AWS regions here](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/).
diff --git a/docs/platform/deploy/preview-environments.md b/docs/platform/deploy/preview-environments.md
new file mode 100644
index 0000000000..c21f6ead97
--- /dev/null
+++ b/docs/platform/deploy/preview-environments.md
@@ -0,0 +1,51 @@
+---
+seotitle: Preview Environments – Temporary dev environments per Pull Request
+seodesc: Learn how to use Encore to activate automatic Preview Environments for every Pull Request to simplify testing and collaborating.
+title: Preview Environments
+subtitle: Accelerate development with isolated test environments for each Pull Request
+lang: platform
+---
+
+When using [Encore Cloud Pro](https://encore.cloud/pricing), you automatically get ephemeral Preview Environments for each Pull Request.
+
+Preview Environments are free, fully-managed development environments that run on Encore Cloud. They let you test changes without managing infrastructure or incurring cost.
+
+See the [infra docs](/docs/platform/infrastructure/infra#preview-environments) if you're curious about exactly how Preview Environments are provisioned.
+
+## Using Preview Environments
+
+To use Preview Environments, you first need to [connect your application to GitHub](/docs/platform/integrations/github).
+
+Preview Environments are named after the pull request; for example, PR #72 creates a Preview Environment named `pr:72` with the API base url `https://pr72-$APP_ID.encr.app`.
+
+You can also view the environment in the Encore Cloud dashboard, where the url will be `https://app.encore.cloud/$APP_ID/envs/pr:72`.
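+
+For example, once the Preview Environment is up, you can call its API directly (the endpoint path here is hypothetical):
+
+```shell
+# Call an endpoint in the Preview Environment for PR #72.
+curl https://pr72-$APP_ID.encr.app/hello
+```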
+
+
+
+## Populate databases with test data automatically
+
+Preview Environments can automatically come with pre-populated test data thanks to Neon's database branching feature. Here's how it works:
+
+1. Your main database (typically in staging) contains your test data
+2. When a Preview Environment is created, it gets a fresh database that's an exact copy of your main database
+3. This happens automatically - no manual data copying needed!
+
+#### Setup instructions
+1. Go to [Encore Cloud dashboard](https://app.encore.cloud)
+2. Select your app > App Settings > Preview Environments
+3. Choose which environment's database to copy from (e.g., staging)
+4. Save your changes
+
+**Note:** This feature requires using Neon as your database provider, which is:
+- Default for Encore Cloud environments
+- Optional for AWS and GCP environments
+
+
+
+## Frontend Collaboration
+
+Preview Environments make it really easy to collaborate and test changes with your frontend. Just update your frontend API client to point to the `pr:#` environment.
+This is a one-line change since your API client always specifies the environment name, e.g. `https://<env>-<app-id>.encr.app/`.
+
+If your pull request makes changes to the API, you can [generate a new API client](/docs/ts/cli/client-generation)
+for the new backend API using `encore gen client --env=pr:72 --lang=typescript my-app`, as shown below.
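+
+A minimal sketch (assuming the generated client is written to stdout; check `encore gen client --help` for the exact flags):
+
+```shell
+# Regenerate the TypeScript client against the PR #72 Preview Environment
+# and write it into the frontend codebase (the output path is just an example).
+encore gen client --env=pr:72 --lang=typescript my-app > frontend/src/client.ts
+```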
diff --git a/docs/platform/deploy/security.md b/docs/platform/deploy/security.md
new file mode 100644
index 0000000000..3541a863bf
--- /dev/null
+++ b/docs/platform/deploy/security.md
@@ -0,0 +1,29 @@
+---
+seotitle: Security – How Encore keeps your backend application secure
+seodesc: Encore applications come with built-in security best practices. See how Encore keeps your application secure by default.
+title: Application Security
+subtitle: Encore Cloud makes strong security the default path
+lang: platform
+---
+
+## Built on industry experience
+
+The security practices in Encore Cloud are built on our team's decades of experience designing and operating sensitive systems at companies like Google, Spotify, and Monzo.
+
+## Security by Default
+
+Encore Cloud is designed to make security effortless rather than burdensome:
+
+- **Zero-config security**: Focus on building features while Encore Cloud automatically implements security best practices
+- **Built-in secrets management**: Safely handle sensitive data using the built-in [secrets management system](/docs/ts/primitives/secrets)
+- **Automated IAM management**: Encore Cloud automatically manages IAM policies based on the principle of least privilege
+
+## Security features
+
+When Encore Cloud deploys your application and infrastructure, it takes care of implementing security best practices:
+
+- **Strong encryption**: All communication uses mutual TLSv1.3
+- **Secure databases**: Database access is encrypted with certificate validation and strong security credentials
+- **Cloud security**: Automatic provisioning with security best practices specific to each cloud provider
+ - Learn more about [Google Cloud Platform (GCP)](/docs/platform/infrastructure/gcp)
+ - Learn more about [Amazon Web Services (AWS)](/docs/platform/infrastructure/aws)
diff --git a/docs/platform/infrastructure/aws.md b/docs/platform/infrastructure/aws.md
new file mode 100644
index 0000000000..7613fe608b
--- /dev/null
+++ b/docs/platform/infrastructure/aws.md
@@ -0,0 +1,141 @@
+---
+seotitle: AWS Infrastructure on Encore Cloud
+seodesc: A comprehensive guide to how Encore Cloud provisions and manages AWS infrastructure for your applications
+title: AWS Infrastructure
+subtitle: Understanding your application's AWS infrastructure
+lang: platform
+---
+Encore Cloud simplifies the process of deploying applications by automatically provisioning and managing the necessary AWS infrastructure. This guide provides a detailed look at the components involved and how they work together to support your applications.
+
+## Core Infrastructure Components
+
+### Networking Architecture
+
+Networking is a critical aspect of cloud infrastructure, ensuring secure and efficient communication between different parts of your application. Encore Cloud creates an isolated [Virtual Private Cloud (VPC)][aws-vpc] for each environment, which serves as a secure network boundary.
+
+The network architecture is designed with reliability and security in mind. Each VPC spans two Availability Zones (AZs), providing redundancy and fault tolerance. If one AZ experiences issues, your application can continue running in the other AZ, significantly reducing the risk of downtime. This multi-AZ setup is crucial for maintaining high availability in production environments.
+
+Within the VPC, Encore Cloud implements a three-tier architecture that carefully separates different components of your application into distinct subnet layers. This separation of concerns enhances both security and performance by controlling traffic flow between layers and limiting potential attack vectors. Each tier is configured with specific security groups and network ACLs to enforce these boundaries, creating a robust and secure networking foundation for your application.
+
+#### Subnet Tiers
+
+1. **Public Subnet**
+ The public subnet contains several key components that manage external traffic flow. At the forefront is the Application Load Balancer (ALB), which serves as the entry point for all incoming traffic to your application. The ALB intelligently distributes requests across your application instances, ensuring optimal performance and reliability.
+
+ To enable outbound communication, the subnet includes an Internet Gateway that allows your application components to securely connect to external services and APIs. Working alongside it is a NAT Gateway, which provides a secure pathway for resources in private subnets (like your compute instances) to access the internet while remaining protected from direct external access. This NAT Gateway acts as an intermediary, translating private IP addresses to public ones for outbound traffic while maintaining the security of your internal resources.
+
+2. **Compute Subnet**
+ The compute subnet is where your application's containers run, regardless of whether you're using Fargate or EKS as your container orchestration platform. This subnet is carefully isolated and configured to only accept incoming traffic from the Application Load Balancer in the public subnet. This strict traffic control ensures that your application containers can only be accessed through proper channels, protecting them from unauthorized direct access while still allowing legitimate requests to flow through seamlessly.
+
+3. **Storage Subnet** (provisioned as needed)
+ The storage subnet is a dedicated network segment designed to host your application's databases and caching systems. To maintain the highest level of security, this subnet operates in complete isolation from the internet, with no direct inbound or outbound connectivity. Access to resources within the storage subnet is strictly limited to traffic originating from the compute subnet, creating a secure enclave for your data layer. This architecture ensures that your sensitive data remains protected while still being readily accessible to your application's services running in the compute tier.
+
+### Container Management
+
+Encore Cloud provisions an [Elastic Container Registry (ECR)][aws-ecr] to store your application's Docker images. The registry is seamlessly integrated with your chosen compute platform and provides robust security features. Access to images is tightly controlled through comprehensive access controls, ensuring only authorized users and services can pull or push container images. Additionally, ECR automatically scans all images for known security vulnerabilities as they are pushed to the registry, helping you maintain a secure application environment by identifying potential risks before deployment.
+
+### Secrets Management
+
+Managing sensitive information securely is crucial. Encore Cloud uses [AWS Secrets Manager][aws-secrets] to store and manage secrets, such as API keys and database credentials. Through deep integration with AWS Secrets Manager, Encore Cloud automatically injects secrets directly into your service's environment variables at runtime, making them easily accessible while maintaining strict security controls. All secrets are encrypted both at rest and in transit using industry-standard encryption algorithms, providing comprehensive protection for your sensitive data. The system implements fine-grained access controls, where each service is given precisely scoped permissions to access only the specific secrets it needs. This ensures that even if one service is compromised, the blast radius is contained and other secrets remain secure.
+
+## Compute Options
+
+Encore Cloud provisions one of two compute platforms for running your application containers, based on your choice:
+
+### AWS Fargate
+
+When using Fargate, Encore Cloud configures:
+
+- **Task Definitions**
+ Task definitions are meticulously configured to ensure optimal performance and reliability of your services. Each service's container settings are fine-tuned based on its specific requirements, including memory allocation, CPU utilization, and networking parameters. Comprehensive health check configurations monitor the service's status, enabling quick detection and recovery from any issues. Environment variables are securely injected from AWS Secrets Manager at runtime, providing your services with the credentials and configuration they need while maintaining security. The task definitions are also integrated with AWS Service Discovery, enabling automatic service registration and allowing for seamless service-to-service communication within your application.
+
+- **Fargate Services**
+ Fargate services are configured with sophisticated deployment strategies that ensure zero downtime during updates. When deploying new versions of your services, Encore Cloud orchestrates a rolling update process where new tasks are gradually introduced while old ones are removed, maintaining consistent availability throughout the deployment.
+
+ Each service is automatically integrated with Application Load Balancer target groups, enabling intelligent request routing and load distribution. The load balancer continuously monitors the health of your service instances and automatically routes traffic only to healthy targets.
+
+ To ensure smooth service startup, appropriate health check grace periods are configured. This gives your services adequate time to initialize and warm up before receiving traffic, preventing premature health check failures during deployment or scaling events.
+
+- **IAM Configuration**
+ Encore Cloud implements a comprehensive IAM security model by creating unique execution roles for each task definition. These roles are automatically configured with precisely scoped permissions that enable secure access to required AWS services. The execution roles allow containers to pull images from ECR and write operational logs to CloudWatch for monitoring and debugging. They also grant access to assigned AWS resources like S3 buckets and SQS queues that the service needs to interact with. Additionally, the roles are configured to securely retrieve secrets from AWS Secrets Manager at runtime, enabling safe storage and access of sensitive configuration data. This granular permission model follows security best practices by providing each service with the minimum privileges required for operation.
+
+- **Network Integration**
+Fargate tasks are strategically placed within private compute subnets, ensuring they remain isolated from direct internet access while maintaining the ability to communicate with other application components. The associated security groups are configured with precise rules that govern network traffic. These rules allow inbound traffic exclusively from the Application Load Balancer, ensuring that your services can only be accessed through the properly configured entry point. For outbound connectivity, the security groups permit traffic to flow to your databases and caching layers, enabling your services to interact with these essential backend resources while maintaining a secure network boundary.
+
+### Amazon EKS
+
+When using EKS, Encore Cloud configures:
+
+- **Cluster Setup**
+ Encore Cloud configures the core networking components required for cluster operation. The VPC CNI (Container Network Interface) is configured to enable pod networking within the cluster, allowing pods to communicate efficiently using the underlying AWS VPC networking capabilities. This includes setting up IP address management and network policy enforcement.
+
+ The cluster's internal DNS resolution is handled through CoreDNS, which is configured for optimal service discovery and name resolution within the cluster. CoreDNS settings are tuned to provide fast and reliable DNS lookups while maintaining reasonable cache sizes and query limits.
+
+
+- **Kubernetes Resources**
+ Encore Cloud automatically manages all necessary Kubernetes resources for your application. Each service in your application is deployed as a separate Kubernetes Deployment, allowing for independent scaling and lifecycle management. These deployments are configured with appropriate resource requests, limits, and health checks to ensure reliable operation.
+
+ For authentication and authorization, Encore Cloud implements IAM Roles for Service Accounts (IRSA), providing secure access to AWS services. Each service gets its own service account with precisely scoped IAM roles, following the principle of least privilege.
+
+ For sensitive data like API keys and credentials, Encore Cloud uses Kubernetes Secrets, which are encrypted at rest and only accessible to authorized services.
+
+ To enable network connectivity, Encore Cloud creates Kubernetes Service resources for each of your application's services, providing stable networking endpoints for inter-service communication.
+
+- **Load Balancer Integration**
+ Encore Cloud manages the complete load balancer integration for your EKS cluster. The AWS Load Balancer Controller is automatically installed and configured to handle ingress traffic for your application. This controller works in conjunction with the Application Load Balancer (ALB) to provide intelligent traffic routing and SSL/TLS termination.
+
+ The ALB Ingress Controller is configured to automatically create and manage Application Load Balancers based on your application's needs. It handles the creation and configuration of target groups, ensuring traffic is properly distributed across your service pods. The controller also manages the lifecycle of these resources, automatically cleaning up unused resources to prevent waste.
+
+ Target group binding is automatically configured to map your Kubernetes services to the appropriate ALB target groups. This ensures that traffic is correctly routed to the right pods and that health checks are properly configured to maintain high availability.
+
+ For secure communication, Encore Cloud automatically manages SSL/TLS certificates through AWS Certificate Manager. These certificates are automatically provisioned, renewed, and attached to your load balancers, ensuring all external traffic to your application is encrypted. The system also handles certificate rotation and updates transparently, maintaining secure communication without manual intervention.
+
+- **Monitoring Setup**
+ Encore Cloud automatically aggregates and sends metrics to your configured metrics destination, providing you with real-time visibility into your application's performance.
+
+ In addition to metrics, Encore Cloud configures the CloudWatch Logs agent to capture and forward all container logs. The logs are structured and organized by service name, making it easy to search and analyze application behavior. Log streams are automatically created for each container, and log retention policies are configured to help manage storage costs while maintaining necessary historical data.
+
+- **Service Accounts**
+ Encore Cloud implements a comprehensive service account management system that ensures secure and controlled access to resources. Each service in your application receives its own dedicated Kubernetes service account, providing a unique identity for authentication and authorization purposes.
+
+ To enable secure interaction with AWS services, Encore Cloud maps each Kubernetes service account to a corresponding IAM role using IAM Roles for Service Accounts (IRSA). This mapping allows pods to securely authenticate with AWS services without storing long-lived credentials.
+
+ The IAM roles are automatically configured with the minimum required permissions for each service's needs. This includes access to service-specific S3 buckets for object storage operations, permissions to publish and subscribe to SQS queues and SNS topics, ability to retrieve secrets from AWS Secrets Manager, and secure access to assigned database instances. These permissions are continuously updated as your application evolves, ensuring services always have the access they need while maintaining strong security boundaries.
+
+All of these configurations are automatically maintained and updated by Encore Cloud as you develop your application, ensuring your infrastructure stays aligned with your application's needs.
+
+## Managed Services
+
+### Databases
+Encore Cloud provisions [Amazon RDS][aws-rds] for PostgreSQL databases, providing a robust and scalable database solution. Each database runs the latest PostgreSQL version to ensure compatibility with modern features while maintaining up-to-date security patches. The databases are provisioned on auto-scaling capable instances, starting with db.m5.large configurations that can seamlessly scale up as your application's needs grow.
+
+To protect your data, Encore Cloud configures automated daily backups with a 7-day retention period. Security is paramount, so databases are strategically placed within private subnets and protected by comprehensive access controls. This network isolation combined with strict security rules ensures your data remains secure while still being accessible to your application's services.
+
+#### Database Access
+Database access is managed through a comprehensive security model. At its core, Encore Cloud deploys [Emissary](https://github.com/encoredev/emissary), a secure socks proxy that enables safe database migrations while maintaining strict access controls. Each service in your application is assigned its own dedicated database role, providing granular control over data access and ensuring services can only interact with the data they need. For enhanced security, all databases are placed in private subnets, completely isolated from direct internet access. This multi-layered approach creates a secure foundation for your application's data access needs while maintaining operational flexibility.
+
+### Pub/Sub
+Encore Cloud implements a robust messaging system using [SQS][aws-sqs] and [SNS][aws-sns]. The system automatically configures dead-letter queues to capture failed messages, enabling thorough analysis and debugging of messaging issues. Each service in your application receives precisely scoped IAM permissions to publish and consume messages, ensuring secure communication between components. Encore Cloud fully manages the creation and configuration of subscriptions and topics, streamlining the setup and ongoing maintenance of your messaging infrastructure while maintaining optimal performance and reliability.
+
+### Object Storage
+Encore Cloud leverages [S3][aws-s3] for object storage, providing a comprehensive solution for your application's storage needs. When you declare storage requirements in your application, Encore Cloud automatically provisions dedicated S3 buckets with unique names to ensure global uniqueness across AWS. Each service in your application receives precisely scoped permissions to perform storage operations, following the principle of least privilege. For public buckets, Encore Cloud automatically integrates with CloudFront to create a global content delivery network, significantly improving access speeds for your users worldwide. Each bucket is assigned its own unique domain name, making it simple to manage and access stored content while maintaining a clear organizational structure.
+
+### Caching
+Encore Cloud uses [ElastiCache for Redis][aws-redis] to provide a high-performance caching solution. The service starts with cache.m6g.large instances that can automatically scale up as your application's needs grow. To ensure maximum reliability, caches are configured with Multi-AZ replication across availability zones, providing both high availability and fault tolerance. In the event of any failures, automatic failover capabilities ensure your application experiences no disruption in service.
+
+Security is maintained through Redis Access Control Lists (ACLs), which provide fine-grained control over who can access your cache and what operations they can perform. The entire system is configured for high availability, with monitoring and alerting in place to maintain optimal performance and uptime. This comprehensive setup ensures your application's caching layer remains fast, secure, and always available.
+
+### Cron Jobs
+Encore Cloud provides a streamlined approach to scheduled tasks that prioritizes security and simplicity. Each cron job is executed through authenticated API requests that are cryptographically signed to verify their authenticity. The system performs rigorous source verification to ensure all scheduled tasks originate exclusively from Encore Cloud's cron functionality, preventing unauthorized execution attempts. This elegant implementation requires no additional infrastructure components, making it both cost-effective and easy to maintain while ensuring your scheduled tasks run reliably and securely.
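+
+As a purely illustrative sketch of the general pattern (this is not Encore Cloud's documented signing scheme), verifying a signed request with a shared key could look like this:
+
+```shell
+# Illustrative only: recompute an HMAC-SHA256 over the request body and
+# compare it to the signature sent with the request.
+expected=$(printf '%s' "$REQUEST_BODY" | openssl dgst -sha256 -hmac "$SHARED_KEY" -hex | awk '{print $NF}')
+[ "$expected" = "$SIGNATURE_HEADER" ] && echo "signature ok" || echo "signature mismatch"
+```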
+
+[aws-vpc]: https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html
+[aws-fargate]: https://aws.amazon.com/fargate/
+[aws-eks]: https://aws.amazon.com/eks/
+[aws-secrets]: https://aws.amazon.com/secrets-manager/
+[aws-rds]: https://aws.amazon.com/rds/postgresql/
+[aws-sqs]: https://aws.amazon.com/sqs/
+[aws-sns]: https://aws.amazon.com/sns/
+[aws-s3]: https://aws.amazon.com/s3/
+[aws-redis]: https://aws.amazon.com/elasticache/redis/
+[aws-ecr]: https://aws.amazon.com/ecr/
+[aws-alb]: https://aws.amazon.com/elasticloadbalancing/application-load-balancer/
diff --git a/docs/platform/infrastructure/cloudflare.md b/docs/platform/infrastructure/cloudflare.md
new file mode 100644
index 0000000000..120fb41187
--- /dev/null
+++ b/docs/platform/infrastructure/cloudflare.md
@@ -0,0 +1,57 @@
+---
+seotitle: Cloudflare R2 Infrastructure on Encore Cloud
+seodesc: A comprehensive guide to how Encore Cloud provisions and manages Cloudflare R2 infrastructure for your applications
+title: Cloudflare R2 Buckets
+lang: platform
+---
+
+Encore Cloud simplifies the process of using Cloudflare R2 for object storage by automatically provisioning and managing the necessary infrastructure. This guide provides setup instructions and details on how Encore Cloud manages your Cloudflare R2 infrastructure.
+
+## Setup Process
+
+### 1. Cloudflare Account Connection
+
+To connect your Cloudflare account to Encore Cloud:
+
+1. Create a Cloudflare API token using the **Create Additional Tokens** button in the Cloudflare dashboard
+
+2. Add the following permissions:
+ - Zone > Zone: Read
+ - Zone > DNS: Edit
+ - Account > Workers R2 Storage: Edit
+
+3. Add the token in the Encore Cloud dashboard:
+ - Navigate to App Settings > Integrations > Cloudflare
+ - Click "Connect Account"
+ - Provide an account name and your API token
+
+### 2. Environment Configuration
+
+When creating a new environment:
+
+1. Select your preferred cloud provider
+2. Choose "Cloudflare R2" as the object storage provider
+3. Configure the following R2-specific settings:
+ - Token: Your Cloudflare API token
+ - Account: Your Cloudflare account
+ - Zone: The domain zone for public bucket URLs
+ - Region: Your preferred R2 storage region
+
+## Managed Features
+
+### Bucket Management
+
+Encore Cloud provides comprehensive bucket management capabilities that adapt to your application's needs. When you define storage requirements in your application, Encore Cloud automatically provisions the necessary R2 buckets with appropriate configurations. Each bucket is created with carefully configured policies and access controls to ensure secure yet efficient access to your stored objects.
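+
+As a sketch, declaring the bucket in application code is all that's needed; Encore Cloud provisions the backing R2 bucket when the environment uses Cloudflare R2 (the bucket names here are illustrative):
+
+```typescript
+import { Bucket } from "encore.dev/storage/objects";
+
+// A private bucket, backed by Cloudflare R2 in this environment.
+export const uploads = new Bucket("uploads");
+
+// A public bucket, served through Cloudflare's CDN on its own subdomain
+// (see Public Access Configuration below).
+export const assets = new Bucket("assets", { public: true });
+```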
+
+### Public Access Configuration
+
+When working with public buckets, Encore Cloud handles all aspects of public access configuration automatically. Each bucket is assigned a unique subdomain that is automatically provisioned and configured in your DNS settings. The bucket is seamlessly integrated with Cloudflare's global CDN network, ensuring fast content delivery worldwide. Encore Cloud also configures optimal caching rules to maximize performance while maintaining appropriate cache invalidation policies. This comprehensive setup ensures your public content is served efficiently and securely through Cloudflare's infrastructure.
+
+### Security Controls
+
+Encore Cloud implements a comprehensive multi-layered security model to protect your R2 storage. At the bucket level, fine-grained access controls ensure that only authorized services can perform specific operations on each bucket. Each service in your application receives its own unique set of credentials, preventing any unauthorized cross-service access. These credentials are securely distributed to the appropriate services through Encore Cloud's built-in secrets management system, which handles the entire credential lifecycle.
+
+All these configurations are automatically maintained and updated by Encore Cloud as you develop your application, ensuring your infrastructure stays aligned with your application's needs.
+
\ No newline at end of file
diff --git a/docs/platform/infrastructure/configuration.md b/docs/platform/infrastructure/configuration.md
new file mode 100644
index 0000000000..d44eaefd3b
--- /dev/null
+++ b/docs/platform/infrastructure/configuration.md
@@ -0,0 +1,73 @@
+---
+seotitle: Infrastructure Configuration
+seodesc: Learn how you can configure infrastructure provisioned using Encore Cloud
+title: Infrastructure Configuration
+subtitle: How to configure infrastructure when using Encore Cloud
+lang: platform
+---
+
+Encore Cloud provides a powerful and flexible approach to infrastructure management, ensuring that your cloud resources are efficiently provisioned according to enterprise best practices.
+
+Unlike traditional Infrastructure-as-Code (IaC) tools, when using Encore's declarative infrastructure framework, you do not define any cloud service specifics in code. This ensures your code is cloud-agnostic and portable across clouds, and can be deployed using different infrastructure for each environment according to your priorities (cost, performance, etc.).
+
+Infrastructure is configured in the Encore Cloud dashboard, which provides a controlled workflow, role-based access controls, and an auditable history of changes.
+
+Encore Cloud provisions and manages infrastructure by using your cloud provider's APIs. Learn more in the [Infrastructure](/docs/platform/infrastructure/infra) documentation.
+
+## Infrastructure settings when creating a new environment
+
+When creating a new environment, you can decide the following:
+
+- Which cloud provider to use (AWS or GCP)
+- Which compute hardware to use (e.g. AWS Fargate, GCP Cloud Run, Kubernetes)
+- If using Kubernetes, should a new cluster be created or should an existing cluster be used?
+- Which Kubernetes provider to use (GKE or EKS)
+- Which database to use (e.g. AWS RDS, GCP CloudSQL, Neon Serverless Postgres)
+- Which process allocation strategy to use (more on this below)
+
+## Ongoing infrastructure configuration
+
+### Configuration UI in Encore Cloud
+
+After creating an environment, you can continue to configure the infrastructure via the Encore Cloud dashboard.
+
+The dashboard exposes the most common configuration options, and provides a controlled workflow for making changes, including audit logs and role-based access controls.
+
+
+
+#### Process allocation configuration
+
+Encore provides a powerful configuration option called process allocation. It lets you configure how microservices are deployed on the compute hardware: either all services in one process, or one process per service. All without any code changes.
+
+It's often recommended to deploy all services in one process in order to reduce costs and minimize response times between services, but it depends on your use case.
+Deploying each service as its own process improves scalability and decreases the blast radius if things go wrong; this is typically only recommended for production environments.
+
+
+
+### Manual configuration in your cloud provider's console
+
+Manual configuration is relevant when a configuration option is not yet available in the Encore Cloud dashboard, or when you prefer to make a change yourself. You always have full access to make changes directly in your cloud provider's console.
+
+Encore Cloud tries very hard to ensure that any manual changes made in the cloud provider's console are not overwritten.
+
+Therefore it only makes the minimum necessary modifications to infrastructure when deploying new changes, using the following strategies:
+
+- **PATCH-style updates:** Resources are updated using compare-and-set and similar techniques, modifying only the attributes that require changes.
+
+- **Avoid full syncs:** Unlike Terraform, Encore Cloud updates only the specific resources necessary to accomplish an infrastructure change rather than performing a complete infrastructure refresh.
+
+These behaviors ensure an efficient and predictable workflow, minimizing unintended changes and reducing deployment times, and mean that you can safely use your cloud provider's console to modify the provisioned resources.
+
+This behavior also makes Encore Cloud well-suited for environments where infrastructure is partially managed outside of Encore Cloud, enabling you to deploy Encore applications alongside existing infrastructure (more on this below).
+
+## Working with Existing Infrastructure
+
+One of Encore Cloud’s strengths is its ability to work seamlessly with existing infrastructure. Since it does not enforce a full sync approach, it can:
+
+- Integrate with pre-existing cloud resources without overwriting manual changes
+
+- Deploy to existing Kubernetes clusters
+
+- Co-exist with other IaC tools like Terraform and CloudFormation
+
+Encore Cloud also provides a Terraform Provider to simplify integration with existing Terraform-managed infrastructure. Learn more in the [Terraform Provider](/docs/platform/integrations/terraform) documentation.
diff --git a/docs/platform/infrastructure/configure-kubectl.md b/docs/platform/infrastructure/configure-kubectl.md
new file mode 100644
index 0000000000..ef0e9aa565
--- /dev/null
+++ b/docs/platform/infrastructure/configure-kubectl.md
@@ -0,0 +1,23 @@
+---
+seotitle: Configure kubectl to access your Encore Kubernetes cluster
+seodesc: Learn how to configure kubectl to access your Encore Kubernetes cluster.
+title: Configure kubectl
+lang: platform
+---
+
+Encore Cloud automatically provisions and manages Kubernetes clusters for you, but sometimes it's useful to manually inspect
+clusters using the [kubectl](https://kubernetes.io/docs/reference/kubectl/) CLI. To do this, you need to configure `kubectl` to connect and authenticate through
+Encore. You can do this by running the following command in your app directory:
+
+```shell
+encore kubernetes configure -e <env-name>
+```
+
+Where `<env-name>` is the name of the environment you want to configure `kubectl` for.
+
+This will configure `kubectl` to use `encore` to authenticate with the cluster and proxy your traffic to the correct
+cluster. You can now use `kubectl` as you normally would, for example:
+
+```shell
+kubectl get pods
+```
diff --git a/docs/platform/infrastructure/configure-network.md b/docs/platform/infrastructure/configure-network.md
new file mode 100644
index 0000000000..5d0061fd34
--- /dev/null
+++ b/docs/platform/infrastructure/configure-network.md
@@ -0,0 +1,35 @@
+---
+seotitle: How to configure custom network settings for your Encore environment
+seodesc: Learn how to configure IP ranges when connecting your Encore application to existing networks.
+title: Configure network settings
+subtitle: Customizing IP ranges for network peering
+lang: platform
+---
+
+# Overview
+
+When deploying applications with Encore Cloud, a network is automatically provisioned with default settings. However, if you plan to peer your Encore network with an existing network, you can manually configure the IP range for your environment.
+
+## Benefits
+
+Configuring custom network settings allows you to:
+- Connect your Encore application to existing networks via peering
+- Prevent IP range conflicts with other networks in your organization
+- Plan your network topology with predictable addressing
+
+## Configuring network settings
+
+Follow these steps to configure custom network settings:
+
+1. Navigate to **Create Environment** in the Encore Cloud dashboard
+2. Select the AWS or GCP cloud provider
+3. Expand the **Network** section
+4. Enter your desired IP range
+ - The range must be at least a /16 block to reserve enough IPs for your application to grow
+ - Choose a range that doesn't conflict with your existing networks
+
+Once configured, Encore will use your specified IP range instead of assigning a random private network.
+
+## Default network behavior
+
+By default, Encore will reserve a randomly assigned /16 block in one of the private IP ranges. This is suitable for most deployments that don't require network peering.
diff --git a/docs/platform/infrastructure/gcp.md b/docs/platform/infrastructure/gcp.md
new file mode 100644
index 0000000000..d52dcead6c
--- /dev/null
+++ b/docs/platform/infrastructure/gcp.md
@@ -0,0 +1,111 @@
+---
+seotitle: GCP Infrastructure on Encore Cloud
+seodesc: A comprehensive guide to how Encore Cloud provisions and manages GCP infrastructure for your applications
+title: GCP Infrastructure
+subtitle: Understanding your application's GCP infrastructure
+lang: platform
+---
+
+Encore Cloud simplifies the process of deploying applications by automatically provisioning and managing the necessary GCP infrastructure. This page provides an overview of the components involved and how they work together to support your applications.
+
+_Example of Encore project deployment alongside existing legacy systems on GCP:_
+
+
+
+## Core Infrastructure Components
+
+### Networking Architecture
+
+To ensure maximum security and isolation, Encore Cloud provisions a dedicated GCP Project for each environment. This project isolation prevents any potential cross-environment access and enables granular control over resources and permissions. Within each project, all resources are deployed into a private network configuration, where they can only communicate with other resources inside the VPC. This private networking approach significantly reduces the attack surface by preventing direct access from the public internet, with traffic only flowing through designated ingress points.
+
+### Container Management
+
+Encore Cloud provisions a [Google Container Registry (GCR)][gcp-gcr] to store your application's Docker images.
+
+The registry implements comprehensive access controls to ensure only authorized users and services can access and manage container images. Through integration with GCP's Identity and Access Management (IAM), each service is granted the minimum required permissions needed to pull its container images.
+
+Additionally, GCR performs automated vulnerability scanning on all container images. As new images are pushed to the registry, they are automatically analyzed for known security vulnerabilities in the operating system and application dependencies. This proactive scanning helps identify potential security issues early in the deployment pipeline, allowing you to maintain a secure application environment.
+
+### Secrets Management
+
+Encore Cloud's integration with Secret Manager provides comprehensive security and seamless access to sensitive data. All secrets are automatically injected as environment variables into your services, eliminating the need for manual configuration while maintaining security. The secrets are protected using industry-standard encryption both when stored and during transmission between services. To ensure maximum security, Secret Manager implements strict access controls - each service can only access the specific secrets it needs, and all access attempts are logged and audited.
+
+## Compute Options
+
+Encore Cloud provisions one of two compute platforms for running your application containers, based on your choice:
+
+### Google Cloud Run
+
+When using Cloud Run, Encore Cloud configures:
+
+**Service Deployments**
+Each service is configured with optimized container settings and health check configurations to ensure reliable operation. Environment variables are automatically injected from Secret Manager to securely provide configuration values. Service discovery integration enables seamless communication between services.
+
+**Cloud Run Services**
+Cloud Run services are configured with zero-downtime deployment strategies, ensuring your application remains available during updates. Each service is integrated with a load balancer to distribute traffic efficiently across instances. Health check grace periods are configured to allow containers adequate time to start up before receiving traffic, preventing premature termination of healthy instances.
+
+**IAM Configuration**
+Each deployment receives its own dedicated service account to ensure proper isolation and security. These service accounts are automatically configured with the minimum required permissions needed for operation. This includes access to pull container images from Google Container Registry, write application logs to Cloud Logging, and interact with assigned GCP resources like Cloud Storage buckets and Pub/Sub topics. The service accounts are also granted permission to read secrets from Secret Manager, enabling secure access to sensitive configuration values. This automated permission management ensures your services have exactly the access they need while following security best practices.
+
+### Google Kubernetes Engine
+
+When using GKE, Encore Cloud configures:
+
+- **Cluster Setup**
+ Encore Cloud provisions either GKE Autopilot clusters or standard GKE clusters with managed node pools, both configured to run in private subnets for enhanced security. With Autopilot, GKE automatically manages the underlying infrastructure, while with standard clusters Encore Cloud configures and maintains optimized node pools based on your workload requirements. In both cases, the nodes are placed in private subnets to ensure they're not directly accessible from the internet, with all traffic flowing through the load balancer.
+
+- **Kubernetes Resources**
+ Encore Cloud automatically creates and manages all necessary Kubernetes resources for your application. Each Encore service is deployed as a Kubernetes Deployment, ensuring reliable operation and scaling capabilities. These deployments are backed by service accounts configured with appropriate IAM roles to access GCP resources securely. Sensitive configuration data is stored as Kubernetes Secrets and automatically mounted into the appropriate pods. To enable network connectivity, Encore Cloud provisions Kubernetes Service and Ingress resources that integrate with the Google Cloud Load Balancer, providing secure external access to your application endpoints.
+
+- **Load Balancer Integration**
+ Encore Cloud integrates with Google Cloud Load Balancer to provide secure and reliable access to your applications. The load balancer is configured to distribute traffic across your services while handling SSL/TLS termination. All traffic is automatically encrypted using managed SSL/TLS certificates that are provisioned and renewed automatically. This ensures your application endpoints remain secure and accessible through HTTPS without requiring manual certificate management.
+
+- **Monitoring Setup**
+ Encore Cloud sets up comprehensive monitoring for your GKE clusters by configuring both metrics collection and log management. Container metrics are automatically collected from each pod and exported to your configured monitoring service, providing detailed insights into resource usage, performance, and application behavior. Additionally, all container logs are seamlessly forwarded to Cloud Logging, enabling centralized log aggregation and analysis. This integrated monitoring approach gives you full visibility into your application's health and performance within the Google Cloud ecosystem.
+
+- **Service Accounts**
+ Encore Cloud implements a comprehensive service account management system that ensures secure and controlled access to GCP resources. Each service in your application receives its own dedicated service account, providing fine-grained access control and isolation between services.
+
+ These service accounts are automatically configured with IAM roles that map precisely to the GCP services your application needs to interact with. The permission configuration is handled dynamically based on your application's declared resource usage. For example, if your service needs to access a GCS bucket, Encore Cloud automatically grants the minimum required permissions for those specific storage operations. Similarly, when your service needs to publish or subscribe to Pub/Sub topics, connect to databases, or retrieve secrets, the appropriate IAM roles are configured automatically.
+
+ This automated permission management ensures that each service operates under the principle of least privilege, having access only to the resources it explicitly needs to function. This significantly enhances your application's security posture by minimizing the potential impact of any security breach.
+
+All of these configurations are automatically maintained and updated by Encore Cloud as you develop your application, ensuring your infrastructure stays aligned with your application's needs.
+
+## Managed Services
+
+### Databases
+
+Encore Cloud provisions [GCP Cloud SQL][gcp-cloudsql] for PostgreSQL databases, providing a robust and scalable database solution:
+
+Encore Cloud provisions Cloud SQL instances running the latest PostgreSQL version, ensuring you have access to the newest features and security updates. Each instance starts with the smallest available configuration to optimize costs, while maintaining the ability to automatically scale up resources as your application's needs grow.
+
+Data protection is a key priority, with automated daily backups retained for 7 days and point-in-time recovery capabilities. This allows you to restore your database to any moment within the retention period if needed.
+
+Security is enforced through strategic placement of databases in private subnets, isolating them from direct internet access. Strict access controls ensure that only authorized services and users can connect to the database instances.
+
+### Pub/Sub
+
+Encore Cloud implements a robust messaging system using [GCP Pub/Sub][gcp-pubsub]. The system is designed with reliability and security in mind, automatically configuring dead-letter topics to capture and preserve any failed messages for later analysis and debugging. Each service in your application receives precisely scoped IAM permissions for publishing and consuming messages, ensuring secure communication between components while maintaining the principle of least privilege. Encore Cloud fully manages all subscriptions and topics, handling the complex setup and ongoing maintenance of your messaging infrastructure, allowing you to focus on your application logic rather than infrastructure management.
+
+### Object Storage
+
+Encore Cloud leverages [Google Cloud Storage][gcp-gcs] for object storage needs. When you declare storage buckets in your application, Encore Cloud automatically provisions them with unique names in GCP. Each service that interacts with storage is configured with precisely scoped permissions, ensuring secure access to only the buckets and operations it requires. For public buckets, Encore Cloud integrates with Cloud CDN to optimize content delivery, with each bucket accessible through a unique URL. This comprehensive setup provides secure, efficient, and easily manageable object storage capabilities for your application.
+
+### Caching
+
+Encore Cloud uses [GCP Memorystore for Redis][gcp-redis] to provide a high-performance caching solution. Each Redis instance starts with the smallest available configuration to optimize costs while maintaining the ability to automatically scale up resources as your application's caching needs grow. The instances are configured in a high-availability setup to ensure your cache remains available and performant even during infrastructure updates or zone outages. Access to the cache is secured through Redis authentication, with credentials automatically managed and rotated by Encore Cloud to maintain a strong security posture.
+
+### Cron Jobs
+
+Encore Cloud provides a streamlined approach to scheduled task execution that prioritizes both simplicity and security. Each cron job is executed through authenticated API requests that are cryptographically signed, ensuring that only legitimate, verified requests can trigger your scheduled tasks. The system includes robust source verification that validates all requests originate from Encore Cloud's trusted cron infrastructure. This elegant implementation requires no additional infrastructure components, making it both cost-effective and easy to maintain while providing the reliability and security needed for production workloads.
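+
+For reference, here's a minimal sketch of a cron job declaration (the names and schedule are illustrative). No additional GCP infrastructure is provisioned for it; Encore Cloud calls the endpoint with a signed request on the configured schedule:
+
+```typescript
+import { api } from "encore.dev/api";
+import { CronJob } from "encore.dev/cron";
+
+// The endpoint that Encore Cloud's cron functionality invokes.
+export const cleanup = api({}, async () => {
+  // Clean up stale data here.
+});
+
+// Runs every hour via a cryptographically signed API request.
+const _ = new CronJob("cleanup", {
+  title: "Clean up stale data",
+  every: "1h",
+  endpoint: cleanup,
+});
+```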
+
+[gcp-vpc]: https://cloud.google.com/vpc
+[gcp-cloudrun]: https://cloud.google.com/run
+[gcp-gke]: https://cloud.google.com/kubernetes-engine
+[gcp-secrets]: https://cloud.google.com/secret-manager
+[gcp-pubsub]: https://cloud.google.com/pubsub
+[gcp-gcs]: https://cloud.google.com/storage
+[gcp-cloudsql]: https://cloud.google.com/sql
+[gcp-redis]: https://cloud.google.com/memorystore
+[gcp-gcr]: https://cloud.google.com/container-registry
diff --git a/docs/platform/infrastructure/import-cloud-sql.md b/docs/platform/infrastructure/import-cloud-sql.md
new file mode 100644
index 0000000000..125f6273fd
--- /dev/null
+++ b/docs/platform/infrastructure/import-cloud-sql.md
@@ -0,0 +1,58 @@
+---
+seotitle: How to deploy your Encore application with an existing Cloud SQL instance
+seodesc: Learn how to easily import your existing Cloud SQL instance and connect your Encore application to it.
+title: Import an existing Cloud SQL instance
+subtitle: Using your pre-existing database instead of provisioning a new one
+lang: platform
+---
+
+# Overview
+
+When deploying applications to your own cloud, Encore Cloud can provision all necessary infrastructure—including database instances. However, if you already have a Cloud SQL instance, you can connect your Encore application directly to this existing database.
+
+## Benefits
+
+Using an existing Cloud SQL instance allows you to:
+- Maintain data continuity with your existing systems
+- Preserve specific database configurations
+- Utilize familiar database setups without migration
+
+## Importing a Cloud SQL instance
+
+Follow these steps to import your existing Cloud SQL instance:
+
+1. Navigate to **Create Environment** in the [Encore Cloud dashboard](https://app.encore.cloud)
+2. Select the GCP cloud provider
+3. Choose **Import Existing Cloud SQL Instance**
+4. Add permissions for the Encore Service Account:
+ - Copy the `Encore GCP Service Account` from the cloud dashboard
+ - Go to your project's IAM page in the GCP Console
+ - Grant the `Owner` role to the `Encore GCP Service Account`
+5. Return to the Encore Cloud dashboard
+6. Specify your database's `GCP Project ID` and `Cloud SQL Instance Name`
+7. Click the `Resolve` button to validate the instance
+
+Once validated, you can create the environment. When you deploy to this environment, Encore Cloud will automatically connect your application to your imported Cloud SQL instance rather than provisioning a new database.
+
+## Mapping existing databases to your Encore app
+To access an existing database in your Encore application, you need to specify the name of the existing database when you declare the database in your app. For example, if you have an existing database called `mydb` you can create a reference to it like so:
+
+```typescript
+import { SQLDatabase } from "encore.dev/storage/sqldb";
+
+// Reference the existing database by its name.
+const db = new SQLDatabase("mydb", { migrations: "./migrations" });
+```
+
+```go
+import "encore.dev/storage/sqldb"
+
+// Reference the existing database by its name.
+var db = sqldb.NewDatabase("mydb", sqldb.DatabaseConfig{
+	Migrations: "./migrations",
+})
+```
+
+## Applying migrations to existing databases
+Encore uses a table called `schema_migrations` in the public namespace to keep track of which migrations have been applied. If you import an existing database without that table, Encore will create it for you and apply your migrations in order. If the table already exists, Encore expects it to contain exactly two columns:
+
+```
+version bigint
+dirty boolean
+```
+
+If the table exists but has a different schema, you will not be able to import it with Encore at this time. If the table exists with an existing entry, Encore will apply all higher versions in your `migrations` directory to the database.
diff --git a/docs/platform/infrastructure/import-kubernetes-cluster.md b/docs/platform/infrastructure/import-kubernetes-cluster.md
new file mode 100644
index 0000000000..93b01ccd5c
--- /dev/null
+++ b/docs/platform/infrastructure/import-kubernetes-cluster.md
@@ -0,0 +1,21 @@
+---
+seotitle: How to deploy your Encore application to an existing Kubernetes cluster
+seodesc: Learn how to easily import your existing Kubernetes cluster and deploy your Encore application into it.
+title: Import an existing Kubernetes cluster
+subtitle: Deploying to your pre-existing cluster instead of provisioning a new one
+lang: platform
+---
+
+When you deploy your application to your own cloud, Encore Cloud can provision infrastructure for it in many different ways – including setting up a Kubernetes cluster.
+
+If you already have a Kubernetes cluster, Encore Cloud can deploy your Encore application into this pre-existing cluster. This is often useful if you want to integrate your Encore application with other parts of your system that are not built using Encore.
+
+Kubernetes imports are currently supported on GCP; AWS support is coming soon.
+
+## Importing a cluster
+
+To import your cluster, go to **Create Environment** in the [Encore Cloud dashboard](https://app.encore.cloud), select **Kubernetes: Existing GKE Cluster** as the compute platform, and then specify your cluster's `Project ID`, `Region`, and `Cluster Name`.
+
+When you deploy to this environment, Encore Cloud will use your imported cluster as the compute instance.
+
+
diff --git a/docs/platform/infrastructure/import-project.md b/docs/platform/infrastructure/import-project.md
new file mode 100644
index 0000000000..6c1badb7a0
--- /dev/null
+++ b/docs/platform/infrastructure/import-project.md
@@ -0,0 +1,36 @@
+---
+seotitle: How to deploy your Encore application to an existing GCP project
+seodesc: Learn how to easily import your existing GCP project and connect your Encore application to it.
+title: Import an existing GCP project
+subtitle: Using your pre-existing GCP project instead of provisioning a new one
+lang: platform
+---
+
+# Overview
+
+When deploying applications to your own cloud, Encore Cloud can provision all necessary infrastructure—including new GCP projects. However, if you already have a GCP project, you can deploy your Encore application directly to this existing project.
+
+## Benefits
+
+Using an existing GCP project allows you to:
+- Keep all your infrastructure in a single project
+- Maintain existing IAM policies and permissions
+- Utilize existing billing settings and quotas
+- Consolidate resources for easier management
+
+## Importing a GCP project
+
+Follow these steps to import your existing GCP project:
+
+1. Navigate to **Create Environment** in the [Encore Cloud dashboard](https://app.encore.cloud)
+2. Select the GCP cloud provider
+3. Choose **Import Project**
+4. Add permissions for the Encore Service Account:
+ - Copy the `Encore GCP Service Account` from the cloud dashboard
+ - Go to your project's IAM page in the GCP Console
+ - Grant the `Owner` role to the `Encore GCP Service Account`
+5. Return to the Encore Cloud dashboard
+6. Enter your `Project ID`
+7. Click the `Resolve` button to validate the project
+
+Once validated, you can create the environment. When you deploy to this environment, Encore Cloud will automatically deploy your application to your imported GCP project rather than provisioning a new one.
\ No newline at end of file
diff --git a/docs/platform/infrastructure/import-rds.md b/docs/platform/infrastructure/import-rds.md
new file mode 100644
index 0000000000..7f871a8170
--- /dev/null
+++ b/docs/platform/infrastructure/import-rds.md
@@ -0,0 +1,54 @@
+---
+seotitle: How to deploy your Encore application with an existing AWS RDS instance
+seodesc: Learn how to easily import your existing AWS RDS instance and connect your Encore application to it.
+title: Import an existing AWS RDS instance
+subtitle: Using your pre-existing database instead of provisioning a new one
+lang: platform
+---
+
+# Overview
+
+When deploying applications to your own cloud, Encore Cloud can provision all necessary infrastructure—including database instances. However, if you already have an AWS RDS instance, you can connect your Encore application directly to this existing database.
+
+## Benefits
+
+Using an existing AWS RDS instance allows you to:
+- Maintain data continuity with your existing systems
+- Preserve specific database configurations
+- Utilize familiar database setups without migration
+
+## Importing an AWS RDS instance
+
+Follow these steps to import your existing AWS RDS instance:
+
+1. Navigate to **Create Environment** in the [Encore Cloud dashboard](https://app.encore.cloud)
+2. Select the AWS cloud provider
+3. Pick the `AWS Region` your database resides in
+4. Choose **Import Existing RDS Instance**
+5. Specify your database's `RDS Instance Name`
+6. Click the `Resolve` button to validate the instance
+
+Once validated, you can create the environment. When you deploy to this environment, Encore Cloud will automatically connect your application to your imported AWS RDS instance rather than provisioning a new database.
+
+## Mapping existing databases to your Encore app
+To access an existing database in your Encore application, you need to specify the name of the existing database when you declare the database in your app. For example, if you have an existing database called `mydb` you can create a reference to it like so:
+
+```typescript
+import { SQLDatabase } from "encore.dev/storage/sqldb";
+
+// Reference the existing database by its name.
+const db = new SQLDatabase("mydb", { migrations: "./migrations" });
+```
+
+```go
+import "encore.dev/storage/sqldb"
+
+// Reference the existing database by its name.
+var db = sqldb.NewDatabase("mydb", sqldb.DatabaseConfig{
+	Migrations: "./migrations",
+})
+```
+
+## Applying migrations to existing databases
+Encore uses a table called `schema_migrations` in the public namespace to keep track of which migrations have been applied. If you import an existing database without that table, Encore will create it for you and apply your migrations in order. If the table already exists, Encore expects it to contain exactly two columns:
+
+```
+version bigint
+dirty boolean
+```
+
+If the table exists but has a different schema, you will not be able to import it with Encore at this time. If the table exists with an existing entry, Encore will apply all higher versions in your `migrations` directory to the database.
diff --git a/docs/platform/infrastructure/infra.md b/docs/platform/infrastructure/infra.md
new file mode 100644
index 0000000000..607512366b
--- /dev/null
+++ b/docs/platform/infrastructure/infra.md
@@ -0,0 +1,108 @@
+---
+seotitle: Cloud Infrastructure Provisioning
+seodesc: Learn how to provision appropriate cloud infrastructure depending on the environment type for AWS and GCP.
+title: Infrastructure provisioning & Environments
+subtitle: How Encore Cloud provisions infrastructure for your application
+lang: platform
+---
+
+Encore Cloud automatically provisions all necessary infrastructure, in all environments and across all major cloud providers, without requiring application code changes. You simply [connect your cloud account](/docs/platform/deploy/own-cloud) and create an environment.
+
+
+
+## How it works
+
+This is powered by Encore's open source [backend framework](/docs/ts), which lets you declare infrastructure resources (databases, caches, queues, scheduled jobs, etc.) as type-safe objects in application code.
+
+At compile time, Encore parses the application code to generate an [Application Model](/docs/ts/concepts/application-model), and Encore Cloud uses this metadata to create an infrastructure graph with a high-resolution definition of the infrastructure your application requires.
+
+Encore Cloud then uses this graph to provision and manage the necessary infrastructure in your cloud account (using AWS and GCP APIs), and in development and preview environments hosted by Encore Cloud.
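+
+As an illustration, a couple of such declarations might look like the following sketch (names are illustrative; see the framework docs for the full APIs). Encore parses these objects to build the infrastructure graph:
+
+```typescript
+import { SQLDatabase } from "encore.dev/storage/sqldb";
+import { Topic } from "encore.dev/pubsub";
+
+interface SignupEvent {
+  userID: string;
+}
+
+// Provisioned per environment type: Docker locally,
+// Cloud SQL / RDS / Neon in your cloud account.
+const db = new SQLDatabase("users", { migrations: "./migrations" });
+
+// Provisioned as GCP Pub/Sub or AWS SNS/SQS, depending on the environment.
+export const signups = new Topic<SignupEvent>("signups", {
+  deliveryGuarantee: "at-least-once",
+});
+```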
+
+
+
+
+The approach removes the need for infrastructure configuration files and avoids creating cloud-specific dependencies in your application.
+
+Having an end-to-end integration between application code and infrastructure also enables Encore Cloud to keep environments in sync and track cloud infrastructure, giving you an up-to-date view of your infrastructure to avoid unnecessary cloud costs.
+
+
+
+## Environment types
+
+By default, Encore Cloud provisions infrastructure according to objectives appropriate for each environment type. You retain control over the infrastructure in your cloud account, and can configure it directly both via the Encore Cloud dashboard and your cloud provider's console. Encore Cloud takes care of syncing your changes.
+
+| | Local | Encore Cloud Hosting | GCP / AWS |
+| ---------------------- | ------------------ | -------------------------- | ---------------------------------- |
+| **Environment types:** | Development | Preview, Development | Development, Production |
+| **Objectives:** | Provisioning speed | Provisioning speed, Cost\* | Reliability, Security, Scalability |
+
+\*Encore Cloud Hosting is free to use, subject to Fair Use guidelines and usage limits. [Learn more](/docs/platform/management/usage)
+
+## Development Infrastructure
+
+Encore Cloud provisions infrastructure resources differently for each type of development environment.
+
+| | Local | Preview / Development (Encore Cloud Hosting) | GCP / AWS |
+| ------------------- | --------------------------------- | ------------------------------------------------------------ | -------------------------------------------------------------- |
+| **SQL Databases:** | Docker | Encore Cloud Managed (Kubernetes), [Neon](/docs/deploy/neon) | [See production](/docs/deploy/infra#production-infrastructure) |
+| **Pub/Sub:** | In-memory ([NSQ](https://nsq.io)) | GCP Pub/Sub | [See production](/docs/deploy/infra#production-infrastructure) |
+| **Caches:** | In-memory (Redis) | In-memory (Redis) | [See production](/docs/deploy/infra#production-infrastructure) |
+| **Cron Jobs:** | Disabled | [Encore Cloud Managed](/docs/primitives/cron-jobs) | [See production](/docs/deploy/infra#production-infrastructure) |
+| **Object Storage:** | Local Disk | Encore Cloud Managed | [See production](/docs/deploy/infra#production-infrastructure) |
+
+
+### Local Development
+
+For local development Encore Cloud provisions a combination of Docker and in-memory infrastructure components.
+SQL Databases are provisioned using [Docker](https://docker.com). For Pub/Sub
+and Caching the infrastructure is run in-memory.
+
+When running tests, a separate SQL Database cluster is provisioned that is optimized for high performance
+(using an in-memory filesystem with fsync disabled) at the expense of reduced reliability.
+
+To avoid surprises during development, Cron Jobs are not triggered in local environments.
+They can always be triggered manually by calling the API directly from the [development dashboard](/docs/ts/observability/dev-dash).
+
+The application code itself is compiled and run natively on your machine (without Docker).
+
+### Preview Environments
+
+When you've [connected your application to GitHub](/docs/platform/integrations/github), Encore Cloud automatically provisions a temporary [Preview Environment](/docs/platform/deploy/preview-environments) for each Pull Request.
+
+Preview Environments are created in Encore Cloud Hosting, and are optimized for provisioning speed and cost-effectiveness.
+The Preview Environment is automatically destroyed when the Pull Request is merged or closed.
+
+Preview Environments are named after the pull request, so PR #72 will create an environment named `pr:72`.
+
+### Encore Cloud Hosting
+
+Encore Cloud Hosting is a simple, zero-configuration hosting solution provided by Encore.
+It's perfect for development environments and small-scale use cases that don't require specific SLAs.
+It's also a great way to evaluate Encore Cloud without needing to connect your cloud account.
+
+Encore Cloud Hosting is not designed for business-critical use and does not offer reliability guarantees for persistent storage
+like SQL Databases. Other infrastructure primitives like Pub/Sub and Caching
+are provisioned with small-scale use in mind.
+
+[Learn more about the usage limitations](/docs/platform/management/usage)
+
+## Production Infrastructure
+
+Encore Cloud provisions production infrastructure resources using best-practice guidelines and services for each respective cloud provider.
+
+| | GCP | AWS |
+| ------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------- |
+| **Networking:** | [VPC](/docs/platform/infrastructure/gcp#networking-architecture) | [VPC](/docs/platform/infrastructure/aws#networking-architecture) |
+| **Compute:** | [Cloud Run](/docs/platform/infrastructure/gcp#google-cloud-run), [GKE](/docs/platform/infrastructure/gcp#google-kubernetes-engine) | [Fargate ECS](/docs/platform/infrastructure/aws#aws-fargate), [EKS](/docs/platform/infrastructure/aws#aws-eks) |
+| **SQL Databases:** | [GCP Cloud SQL](/docs/platform/infrastructure/gcp#databases), [Neon](/docs/platform/infrastructure/neon) | [Amazon RDS](/docs/platform/infrastructure/aws#databases), [Neon](/docs/platform/infrastructure/neon) |
+| **Pub/Sub:** | [GCP Pub/Sub](/docs/platform/infrastructure/gcp#pubsub) | [Amazon SQS & Amazon SNS](/docs/platform/infrastructure/aws#pubsub) |
+| **Object Storage:** | [GCS/Cloud CDN](/docs/platform/infrastructure/gcp#object-storage) | [Amazon S3/CloudFront](/docs/platform/infrastructure/aws#object-storage) |
+| **Caches:** | [GCP Memorystore (Redis)](/docs/platform/infrastructure/gcp#caching) | [Amazon ElastiCache (Redis)](/docs/platform/infrastructure/aws#caching) |
+| **Cron Jobs:**      | Encore Cloud Managed                                                                                                                 | Encore Cloud Managed                                                                                             |
+| **Secrets:**        | [Secret Manager](/docs/platform/infrastructure/gcp#secrets-management)                                                              | [AWS Secrets Manager](/docs/platform/infrastructure/aws#secrets-management)                                     |
+
+### Configuration
+
+With Encore you do not define any cloud service specifics in the application code. This means that after deploying, you can safely use your cloud provider's console to modify the provisioned resources, or use the built-in configuration UI in the Encore Cloud dashboard.
+
+Learn more in the [Infrastructure Configuration](/docs/platform/infrastructure/configuration) documentation.
\ No newline at end of file
diff --git a/docs/platform/infrastructure/kubernetes.md b/docs/platform/infrastructure/kubernetes.md
new file mode 100644
index 0000000000..db6c5f71d4
--- /dev/null
+++ b/docs/platform/infrastructure/kubernetes.md
@@ -0,0 +1,71 @@
+---
+seotitle: How to deploy your Encore application to a new Kubernetes cluster
+seodesc: Learn how to automatically deploy your Encore application to a new Kubernetes cluster.
+title: Kubernetes deployment
+subtitle: Deploying your app to a new Kubernetes cluster
+lang: platform
+---
+
+# Deploying Encore Apps to Kubernetes
+
+Encore Cloud gives you flexibility in where you run your applications. You have two options for Kubernetes deployments:
+
+1. **Deploy to a new cluster**: Encore Cloud can automatically provision and manage a new Kubernetes cluster in your cloud account on AWS or GCP.
+2. **Use an existing cluster**: Deploy to your pre-existing Kubernetes cluster ([see instructions here](/docs/platform/infrastructure/import-kubernetes-cluster))
+
+All infrastructure provisioning is automated, and configuration is managed through the [Encore Cloud Dashboard](https://app.encore.cloud), keeping your application code clean and infrastructure-agnostic.
+
+## Deploying to a new Kubernetes cluster
+
+**1. Connect your cloud account:** Ensure your cloud account (Google Cloud Platform or AWS) is connected to Encore Cloud. ([See docs](/docs/platform/deploy/own-cloud))
+
+**2. Create environment:** Open your app in the [Encore Cloud dashboard](https://app.encore.cloud) and go to **Environments**, then click on **Create Environment**.
+
+Next, select your cloud (AWS or GCP) and then specify Kubernetes as the compute platform. Encore Cloud supports deploying to GKE on GCP, and EKS Fargate on AWS.
+
+You can also configure whether to allocate all services in one process or run one process per service.
+
+
+
+**3. Push your code:** To deploy, commit and push your code to the branch you configured as the deployment trigger. You can also trigger a manual deploy from the Cloud Dashboard by going to the **Environment Overview** page and clicking on **Deploy**.
+
+**4. Automatic deployment by Encore Cloud:** Once you've triggered the deploy, Encore Cloud will automatically provision and deploy the necessary infrastructure on Kubernetes, per your environment configuration in the Cloud Dashboard. You can monitor the status of your deploy and view your environment's details through the Encore Cloud Dashboard.
+
+**5. Accessing your cluster with kubectl:** You can access your cluster using the `kubectl` CLI tool. [See the docs](/docs/platform/infrastructure/configure-kubectl) for how to do this.
+
+## Infrastructure Overview
+
+Encore Cloud simplifies the process of deploying applications by automatically provisioning and managing the necessary Kubernetes components. Here's an overview of the components Encore Cloud manages and how they work together to support your applications.
+
+### Namespace Management
+
+Encore Cloud creates a unique namespace for each environment deployed to your Kubernetes cluster, ensuring complete isolation between different environments of your application.
+
+### Secrets Management
+
+Encore Cloud provides comprehensive secrets management through deep integration with Kubernetes Secrets. Application secrets that you configure in Encore Cloud are automatically stored as Kubernetes Secrets and made available to your services at runtime. This includes both application-specific secrets that you define, as well as infrastructure secrets like database credentials that Encore Cloud manages automatically.
+
+Service accounts are automatically bound to the appropriate secrets they need access to, ensuring each service can only access the secrets it requires. This follows the principle of least privilege and helps maintain a strong security posture.
+
+### Ingress Configuration
+
+Encore Cloud provisions and manages ingress for your applications through a cloud provider-specific ingress controller. The ingress controller is automatically configured to handle incoming traffic and route it securely to your application's Encore Gateway service. It manages TLS certificates automatically to ensure all traffic is encrypted, and provides fine-grained control over which services are accessible from the public internet. The controller configuration is optimized for your specific cloud provider to ensure the best possible performance and reliability.
+
+## Service Management
+
+### Deployments
+Encore Cloud manages the deployment configuration for each service in your application. Each service is deployed as a separate Kubernetes deployment, allowing for independent scaling and management. The deployment configurations are automatically generated and optimized based on your service's requirements.
+
+For each service, Encore Cloud configures the pod specifications with appropriate resource requests and limits, health checks, and container settings. Runtime configurations like environment variables and command arguments are automatically set based on your application's needs. The container orchestration is handled seamlessly, with Encore Cloud managing pod scheduling, updates, and scaling to ensure your services run reliably and efficiently.
+
+### Network Configuration
+
+Encore Cloud provides a comprehensive networking setup through Kubernetes Service resources. Each service in your application gets assigned a unique cluster IP address, enabling reliable internal communication between services. This IP allocation works in conjunction with Kubernetes' built-in service discovery mechanism, allowing services to locate and communicate with each other using consistent internal DNS names. The internal service routing ensures that requests are efficiently distributed across all available pods for each service, providing automatic load balancing and failover capabilities.
+
+### Identity and Access
+
+Encore Cloud provides comprehensive service identity management through Kubernetes service accounts. Each pod is assigned its own dedicated service account, which handles authentication with the Kubernetes API and enables secure access to resources. These service accounts are automatically bound to the specific secrets and permissions required by each service.
+
+For cloud provider integration, Encore Cloud maps the service accounts to appropriate IAM roles, enabling secure access to cloud resources like databases and object storage. Following the principle of least privilege, Encore Cloud configures the minimum required permissions for each service account, ensuring services can only access the resources they explicitly need.
+
+All these configurations are automatically maintained and updated by Encore Cloud as you develop your application, ensuring your infrastructure stays aligned with your application's needs.
\ No newline at end of file
diff --git a/docs/platform/infrastructure/manage-db-users.md b/docs/platform/infrastructure/manage-db-users.md
new file mode 100644
index 0000000000..34eb453bae
--- /dev/null
+++ b/docs/platform/infrastructure/manage-db-users.md
@@ -0,0 +1,25 @@
+---
+seotitle: Managing database user credentials
+seodesc: Learn how to manage user credentials for databases created by Encore.
+title: Managing database user credentials
+lang: platform
+---
+
+Encore Cloud provisions your databases automatically, meaning you don't need to manually create database users. However, in some use cases you need access to the database user credentials, so Encore Cloud makes it simple to view them.
+
+As an application **Admin**, open the [Encore Cloud dashboard](https://app.encore.cloud) and go to the **Infrastructure** page for the relevant environment.
+
+In the section for the relevant **Database Cluster**, you will find a **Users** sub-section which lists your database users. Click on the "eye" icon next to each username to decrypt the password.
+
+Note that databases hosted in [Encore Cloud](/docs/platform/infrastructure/infra#encore-cloud-hosting) currently do not expose usernames and passwords.
+To connect to an Encore Cloud-hosted database, use [`encore db shell`](/docs/ts/primitives/databases#connecting-to-databases).
+
+`encore db shell` defaults to read-only permissions. Use `--write`, `--admin` and `--superuser` flags to modify which permissions you connect with.
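+
+For example, to connect to a database named `mydb` in a given environment with write permissions (the database and environment names here are illustrative):
+
+```shell
+encore db shell mydb --env=production --write
+```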
+
+
+
+
+
+Do not change or remove the database users created by Encore, as this will prevent Encore Cloud from maintaining and handling connections to the databases in your application.
+
+
diff --git a/docs/platform/infrastructure/neon.md b/docs/platform/infrastructure/neon.md
new file mode 100644
index 0000000000..81369da328
--- /dev/null
+++ b/docs/platform/infrastructure/neon.md
@@ -0,0 +1,151 @@
+---
+seotitle: Neon Postgres Database
+seodesc: Learn how to configure your environment to provision a Neon Postgres database.
+title: Use Neon Postgres
+lang: platform
+---
+
+[Neon](https://neon.tech/) is a serverless database provider that offers a fully managed, autoscaling
+Postgres database.
+
+You can configure Encore Cloud to provision a Neon Postgres database instead of the default offering for all supported cloud providers.
+
+## Connect your Neon account
+To start using Neon with Encore Cloud, you need to add your Neon API key to your Encore Cloud application. You can sign up for
+a Neon account at [neon.tech](https://neon.tech/). Once you have an account, you can find your API key in the
+[Neon Console](https://neon.tech/docs/manage/api-keys).
+
+Then, head over to the Neon settings page by going to the
+[Encore Cloud dashboard](https://app.encore.cloud) > (Select your app) > App Settings > Integrations > Neon.
+
+Click the "Connect Account" button, give it a name, and enter your API key.
+
+
+
+## Creating environments using Neon
+Neon organizes databases in projects. A project consists of a main branch and any number of feature branches.
+[Branches](https://neon.tech/docs/introduction/branching) in Neon are similar to branches in git, letting you create a new branch for each feature or bug fix to test your changes in isolation.
+
+When configuring your Encore Cloud environment to use Neon, you can choose which project and branch to use. To get started,
+head to the [Encore Cloud dashboard](https://app.encore.cloud) > (Select your app) > Environments > Create Environment. In the Database section, select
+`Neon database`.
+
+
+
+### Create a new Neon project and branch
+If you're starting from a blank slate, you can let Encore Cloud create a new Neon project and branch for you.
+Select `New Neon project` and choose a Neon account and region. We recommend picking a region close to your compute and
+that you use the suggested project and branch names, but you're free to choose any configuration you like.
+
+### Branch from an existing Encore Cloud environment
+If you already have an Encore Cloud environment with Neon, you can branch your database from that environment.
+Simply select `Branch from Encore environment` and choose the environment you want to branch from. This option will
+be disabled if you don't have any environments using Neon.
+
+### Branch from an existing Neon branch
+You can also choose to manually select a Neon branch to branch from. This is useful if you have an existing Neon project,
+but it's not currently being used by any Encore Cloud environments. Select `Branch from Neon project`,
+then choose the account, project and branch you want to use.
+
+### Import an existing Neon branch
+The final option is to import an existing Neon branch. This is useful if you have an existing database you want to use.
+Be aware that this option will not create a new branch but will operate on the existing data. Select `Import Neon branch`,
+then choose the account, project and branch you want to use.
+
+**Note:** You may need to manually adjust the roles; commonly you need to change the database owner to the `db_<database>_admin` role to enable execution of migrations.
+See more in the [Roles](#roles) section below.
+
+## Edit your Neon environment
+Once the environment is created, you can edit the Neon settings by going to the [Encore Cloud dashboard](https://app.encore.cloud) > (Select your app) > Environments > (Select your environment) > Infrastructure.
+Here you can view and edit your Neon account resources. As a safety precaution, we've disabled editing of imported
+resources to prevent accidental changes to shared data.
+
+
+
+### Neon project
+The history retention setting specifies how long Neon keeps changes to your data. The default is 1 day, but depending on your
+Neon plan, you can increase this to up to 30 days.
+
+### Neon endpoint
+Each branch is assigned a unique endpoint, which is essentially the serverless compute instance that handles your database.
+You can edit the endpoint to set the CPU limits and the suspend timeout. The suspend timeout is the time Neon will wait
+before suspending the compute when it's not in use. The default is 5 minutes, but you can increase this to up to a week
+(depending on your Neon plan).
+
+## Use Neon for Preview Environments
+Neon is a great choice for [Preview Environments](/docs/platform/deploy/preview-environments) as it allows you to branch off a populated
+database and test your changes in isolation.
+
+To configure which branch to use for Preview Environments, head to the
+[Encore Cloud dashboard](https://app.encore.cloud) > (Select your app) > App Settings > Preview Environments
+and select the environment with the database you want to branch from. Hit save and you're all done.
+
+Keep in mind that you can only branch from environments that use Neon as the database provider; this is the default for Encore Cloud environments, but is a configurable option when creating AWS and GCP environments.
+
+
+
+## Roles
+
+Encore Cloud automatically implements a structured role hierarchy that ensures a secure, scalable, and efficient management of databases.
+Below is an explanation of how roles are created, utilized, and managed.
+
+### Role hierarchy
+
+#### 1. Initial Superuser Role
+- **Role Name:** `encore_platform`
+ - **Access level:** This role has full privileges and is the foundational user for setting up the role hierarchy.
+ - **Purpose:** The role creates and configures the subsequent roles and then steps back from day-to-day operations.
+
+#### 2. Global Roles
+Three core roles are created to define access levels across all databases:
+
+- `encore_reader`
+ - **Access level:** Provides read-only access.
+ - **Use Case:** Reading data without modifying it.
+- `encore_writer`
+ - **Access level:** Allows read and write access.
+ - **Use Case:** Performing data manipulations and inserts.
+- `encore_admin`
+ - **Access level:** Grants administrative privileges for global database operations.
+ - **Use Case:** Overseeing configurations, managing schemas, and handling elevated tasks.
+
+These global roles are used by Encore's CLI when using the `encore db shell` command.
+Learn more in the [CLI docs](/docs/ts/primitives/databases#using-the-encore-cli).
+
+#### 3. Database-Specific Roles
+For each database within the Neon integration, specific roles are created to provide fine-grained control:
+ - `db_<database>_reader`: Read-only access to that database.
+ - `db_<database>_writer`: Read and write access to that database.
+ - `db_<database>_admin`: Administrative privileges specific to that database.
+
+#### 4. Service-Specific Roles
+For each service in your application, a dedicated role is generated in the format `svc_<service>`. This role is granted the necessary `db_<database>_writer` role for each database the service accesses.
+
+This ensures that each service has the appropriate level of access to perform its operations while maintaining security and separation of concerns.
+
+**Example:** A service named `orders` that writes to the `main` database is assigned the `svc_orders` role, which is granted the `db_main_writer` role.
+
+### Role Setup Workflow
+
+- **1. Superuser Creation:** The `encore_platform` superuser role is created upon integration setup.
+- **2. Global Role Creation:** The `encore_reader`, `encore_writer`, and `encore_admin` roles are established to provide general access control.
+- **3. Database-Specific Roles:** For each database, roles are created in the format `db_<database>_<role>` to manage access specific to that database.
+- **4. Service-Specific Roles:** For each service, roles are created in the format `svc_<service>` and are granted the necessary writer roles for the databases used by each service.
+
+### Viewing credentials
+
+To view database credentials, open your app in the [Encore Cloud dashboard](https://app.encore.cloud), navigate to the **Infrastructure page** for the appropriate **Environment**, and locate the **USERS** section within the relevant **Database Cluster**.
+
+
+### Best Practices
+
+Encore Cloud automatically manages roles according to these security best practices:
+
+- **Role Ownership:** Ensures critical operations, such as migrations, are executed by roles with appropriate permissions (e.g., `db_<database>_admin`).
+- **Access Control:** Assigns the least privilege necessary for each task. Uses specific database roles (e.g., `db_<database>_reader`) to restrict access.
+- **Consistency:** Maintains consistent naming conventions (`db_