From a947cb7119fba441e68883bc550130387360459c Mon Sep 17 00:00:00 2001 From: Wojtek Majewski Date: Mon, 29 Sep 2025 18:02:04 +0200 Subject: [PATCH] docs: Add comprehensive guide for auto-compilation flow development and deployment strategies --- .changeset/auto-compile-http-control.md | 6 + COMPILE_WORKER.md | 331 ++++++++++++++++ PLAN.md | 81 ---- PRD_control-plane-http.md | 203 ++++++++++ package.json | 1 + pkgs/cli/.gitignore | 13 +- .../__tests__/commands/compile/index.test.ts | 328 ++++++++++++++++ .../install/create-edge-function.test.ts | 143 +++++++ pkgs/cli/__tests__/e2e/compile.test.ts | 165 ++++++++ pkgs/cli/__tests__/helpers/process.ts | 215 +++++++++++ pkgs/cli/eslint.config.cjs | 7 +- pkgs/cli/project.json | 33 +- pkgs/cli/scripts/sync-e2e-deps.sh | 81 ++++ pkgs/cli/scripts/test-compile | 109 ++++++ pkgs/cli/src/commands/compile/index.ts | 347 +++++++++-------- .../commands/install/create-edge-function.ts | 132 +++++++ pkgs/cli/src/commands/install/index.ts | 16 +- pkgs/cli/src/deno/internal_compile.js | 55 --- pkgs/cli/supabase/.gitignore | 8 + pkgs/cli/supabase/config.toml | 357 ++++++++++++++++++ pkgs/cli/supabase/config.toml.backup | 357 ++++++++++++++++++ pkgs/cli/supabase/functions/deno.json | 26 ++ pkgs/cli/supabase/functions/pgflow/flows.ts | 9 + pkgs/cli/supabase/functions/pgflow/index.ts | 4 + pkgs/cli/tsconfig.lib.json | 3 + pkgs/client/package.json | 1 - pkgs/core/package.json | 3 +- pkgs/edge-worker/package.json | 3 +- pkgs/edge-worker/src/control-plane/index.ts | 27 ++ pkgs/edge-worker/src/control-plane/server.ts | 150 ++++++++ pkgs/edge-worker/src/index.ts | 6 +- pkgs/edge-worker/supabase/functions/deno.json | 5 + .../supabase/functions/pgflow/index.ts | 19 + .../tests/e2e/control-plane.test.ts | 90 +++++ .../tests/unit/control-plane/server.test.ts | 123 ++++++ pnpm-lock.yaml | 12 +- 36 files changed, 3120 insertions(+), 349 deletions(-) create mode 100644 .changeset/auto-compile-http-control.md create mode 100644 COMPILE_WORKER.md delete mode 100644 PLAN.md create mode 100644 PRD_control-plane-http.md create mode 100644 pkgs/cli/__tests__/commands/compile/index.test.ts create mode 100644 pkgs/cli/__tests__/commands/install/create-edge-function.test.ts create mode 100644 pkgs/cli/__tests__/e2e/compile.test.ts create mode 100644 pkgs/cli/__tests__/helpers/process.ts create mode 100755 pkgs/cli/scripts/sync-e2e-deps.sh create mode 100755 pkgs/cli/scripts/test-compile create mode 100644 pkgs/cli/src/commands/install/create-edge-function.ts delete mode 100644 pkgs/cli/src/deno/internal_compile.js create mode 100644 pkgs/cli/supabase/.gitignore create mode 100644 pkgs/cli/supabase/config.toml create mode 100644 pkgs/cli/supabase/config.toml.backup create mode 100644 pkgs/cli/supabase/functions/deno.json create mode 100644 pkgs/cli/supabase/functions/pgflow/flows.ts create mode 100644 pkgs/cli/supabase/functions/pgflow/index.ts create mode 100644 pkgs/edge-worker/src/control-plane/index.ts create mode 100644 pkgs/edge-worker/src/control-plane/server.ts create mode 100644 pkgs/edge-worker/supabase/functions/pgflow/index.ts create mode 100644 pkgs/edge-worker/tests/e2e/control-plane.test.ts create mode 100644 pkgs/edge-worker/tests/unit/control-plane/server.test.ts diff --git a/.changeset/auto-compile-http-control.md b/.changeset/auto-compile-http-control.md new file mode 100644 index 000000000..788f598a0 --- /dev/null +++ b/.changeset/auto-compile-http-control.md @@ -0,0 +1,6 @@ +--- +'pgflow': minor +'@pgflow/edge-worker': minor +--- + +Add auto-compilation flow and 
HTTP control plane server for edge worker diff --git a/COMPILE_WORKER.md b/COMPILE_WORKER.md new file mode 100644 index 000000000..a014fee28 --- /dev/null +++ b/COMPILE_WORKER.md @@ -0,0 +1,331 @@ +# Auto-Compilation: Simplified Flow Development + +> **Implementation**: This feature is being built in two phases: +> +> - **Phase 1 (MVP)**: Core auto-compilation with conservative behavior +> - **Phase 2 (Enhancement)**: Smart updates that preserve data when possible + +--- + +## ๐Ÿš€ Local Development - No Manual Steps + +### 1. Start Edge Runtime + +```bash +supabase functions serve +``` + +### 2. Start Worker (Triggers Auto-Compilation) + +```bash +curl http://localhost:54321/functions/v1/my-worker +``` + +- Worker detects local environment ([see how](#environment-detection)) +- Auto-creates flow in database +- โœ… Ready to process tasks immediately + +### 3. Edit Flow Code + +Make changes to your flow definition file. + +### 4. Restart Worker (After Code Changes) + +```bash +# Kill `functions serve` (Ctrl+C), then restart +supabase functions serve +``` + +```bash +# Start worker with fresh code +curl http://localhost:54321/functions/v1/my-worker +``` + +- Worker auto-updates flow definition +- โœ… Ready to test immediately + +**What happens automatically:** + +- Worker detects local environment +- Compares flow code with database definition +- Updates database to match your code +- **Phase 1**: Always drops and recreates (fresh state guaranteed) +- **Phase 2**: Preserves test data when only runtime options change + +**No `pgflow compile` commands needed in development! ๐ŸŽ‰** + +--- + +## ๐Ÿ” Environment Detection + +Workers automatically detect whether they're running locally or in production. + +```typescript +// Check for Supabase-specific environment variables +const isLocal = !Boolean( + Deno.env.get('DENO_DEPLOYMENT_ID') || Deno.env.get('SB_REGION') +); +``` + +**How it works:** + +- These environment variables are automatically set by Supabase on hosted deployments +- When running `supabase functions serve` locally, these variables are absent +- Additional DB URL validation warns about unexpected configurations + +**Result:** + +- **Local**: Auto-compilation enabled - worker creates/updates flows automatically +- **Production**: Conservative mode - requires explicit migrations for existing flows + +--- + +## ๐Ÿญ Production Deployment + +### Phase 1: Conservative Approach + +**Behavior**: + +- **New flows**: Auto-created on first deployment โœ… +- **Existing flows**: Worker fails, requires migration โŒ + +#### Deploy New Flow + +```bash +# 1. Deploy worker code +supabase functions deploy my-worker + +# 2. First request auto-creates flow +curl https://your-project.supabase.co/functions/v1/my-worker +# โœ… Ready to handle requests +``` + +#### Update Existing Flow + +```bash +# 1. Generate migration +pgflow compile flows/my-flow.ts + +# 2. Deploy migration +supabase db push + +# 3. 
Deploy worker code +supabase functions deploy my-worker +# โœ… Worker verifies flow matches +``` + +**Phase 1 Benefits**: + +- โœ… Explicit control over production changes +- โœ… Clear audit trail (migrations) +- โœ… Fail-fast protection +- โœ… Simple, predictable behavior + +**Phase 1 Trade-off**: + +- โš ๏ธ Even option-only changes require migration + +--- + +### Phase 2: Smart Updates (Enhancement) + +**Additional Behavior**: + +- **Existing flows with matching structure**: Auto-updates runtime options โœ… +- **Existing flows with structure changes**: Still requires migration โŒ + +#### Update Runtime Options (No Migration Needed!) + +```bash +# 1. Change timeout/maxAttempts in code +# 2. Deploy worker +supabase functions deploy my-worker +# โœ… Options updated automatically (no migration!) +``` + +#### Update Flow Structure (Migration Required) + +```bash +# 1. Add new step or change dependencies +# 2. Generate migration +pgflow compile flows/my-flow.ts + +# 3. Deploy migration + worker +supabase db push +supabase functions deploy my-worker +``` + +**Phase 2 Benefits**: + +- โœ… Faster deploys for option changes +- โœ… Still safe (structure changes require migration) +- โœ… Backward compatible with Phase 1 + +**Phase 2 Addition: Strict Mode** _(Optional)_ + +```bash +# Require migrations even for new flows +PGFLOW_REQUIRE_MIGRATIONS=true +``` + +--- + +## โš™๏ธ Manual Compilation Command + +Generate migration files for explicit deployment control. + +### Basic Usage + +```bash +pgflow compile flows/my-flow.ts +``` + +- Infers worker: `my-flow-worker` (basename + "-worker") +- Checks staleness: compares file mtime with worker startup time +- Returns compiled SQL if worker is fresh + +### Custom Worker Name + +```bash +pgflow compile flows/my-flow.ts --worker custom-worker +``` + +- Use when worker doesn't follow naming convention +- Useful for horizontal scaling or specialized workers + +**Success output:** โœ… + +``` +โœ“ Compiled successfully: my_flow โ†’ SQL migration ready +โœ“ Created: supabase/migrations/20250108120000_create_my_flow.sql +``` + +**If worker needs restart:** โŒ + +``` +Error: Worker code changed since startup +Action: Restart worker and retry +``` + +--- + +## โš ๏ธ Edge Cases & Solutions + +### Multiple Worker Instances (Horizontal Scaling) โœ… + +```bash +# All instances handle the same flow +my-flow-worker-1, my-flow-worker-2, my-flow-worker-3 +``` + +- โœ… **Phase 1**: First instance creates, others fail gracefully and retry +- โœ… **Phase 2**: First instance creates, others detect and continue +- โœ… Advisory locks prevent race conditions + +### Stale Worker (Code Changes) โŒ + +**Problem:** Worker started before code changes. + +#### Solution: Restart Worker + +```bash +# Kill `functions serve` (Ctrl+C), then restart +supabase functions serve +``` + +```bash +# Start worker with fresh code +curl http://localhost:54321/functions/v1/my-worker +``` + +**Detection:** CLI compares file modification time with worker startup time. 
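For illustration, here is a minimal sketch of that staleness check, assuming a Node-style CLI helper; the function name and the way the worker's startup time is obtained (e.g. from worker metadata in the database) are assumptions, not the actual pgflow implementation:

```typescript
import { statSync } from 'fs';

/**
 * Returns true when the flow file changed after the worker started,
 * i.e. the running worker no longer matches the code on disk.
 * `workerStartedAt` is assumed to come from worker metadata.
 */
function isWorkerStale(flowFilePath: string, workerStartedAt: Date): boolean {
  const fileModifiedAt = statSync(flowFilePath).mtime;
  return fileModifiedAt > workerStartedAt;
}

// Example: refuse to compile against a stale worker
// if (isWorkerStale('flows/my-flow.ts', workerStartedAt)) {
//   throw new Error('Worker code changed since startup. Restart worker and retry.');
// }
```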
+ +--- + +### Flow Definition Changes + +#### Local Development โœ… + +**Phase 1**: + +- โœ… Always drops and recreates +- โœ… Guaranteed fresh state +- โš ๏ธ Test data lost on every restart + +**Phase 2**: + +- โœ… Preserves test data when only options change +- โœ… Only drops when structure changes (new steps, changed dependencies) +- โœ… Better developer experience + +--- + +#### Production Deployment + +**Phase 1 - Any Change**: + +``` +Error: Flow 'my_flow' already exists +Action: Deploy migration first or use different slug +``` + +Must generate and deploy migration for any change. + +**Phase 2 - Structure Change**: + +``` +Error: Flow 'my_flow' structure mismatch +- Step 'process' dependencies changed: ['fetch'] โ†’ ['fetch', 'validate'] +- New step 'validate' added +Action: Deploy migration first (pgflow compile flows/my-flow.ts) +``` + +Structure changes still require migration (safe!). + +**Phase 2 - Option Change**: + +``` +โœ“ Runtime options updated for flow 'my_flow' +- Step 'process': timeout 30s โ†’ 60s +``` + +Option changes work automatically (convenient!). + +--- + +## ๐Ÿ“‹ Behavior Summary + +### What Gets Auto-Compiled + +| Change Type | Local (Phase 1) | Local (Phase 2) | Production (Phase 1) | Production (Phase 2) | +| -------------------- | ---------------- | ------------------ | -------------------- | -------------------- | +| **New flow** | โœ… Auto-create | โœ… Auto-create | โœ… Auto-create | โœ… Auto-create | +| **Runtime options** | โœ… Drop+recreate | โœ… **Update only** | โŒ Require migration | โœ… **Update only** | +| **Structure change** | โœ… Drop+recreate | โœ… Drop+recreate | โŒ Require migration | โŒ Require migration | + +**Key Insight**: Phase 2 adds smart updates that preserve data and allow option changes without migrations. + +--- + +## ๐ŸŽฏ When to Use Each Phase + +### Ship Phase 1 When: + +- โœ… You want auto-compilation ASAP +- โœ… You're okay with explicit migrations in production +- โœ… You don't mind losing local test data on restarts +- โœ… You want simple, predictable behavior + +### Upgrade to Phase 2 When: + +- โœ… Phase 1 is stable in production +- โœ… You want better local dev experience (preserve test data) +- โœ… You want faster production deploys (option changes without migrations) +- โœ… You've validated Phase 1 works for your workflows + +--- + +## ๐Ÿ”— See Also + +- **[PLAN_phase1.md](./PLAN_phase1.md)** - Detailed Phase 1 implementation plan +- **[PLAN_phase2.md](./PLAN_phase2.md)** - Detailed Phase 2 enhancement plan diff --git a/PLAN.md b/PLAN.md deleted file mode 100644 index 33f4eebaf..000000000 --- a/PLAN.md +++ /dev/null @@ -1,81 +0,0 @@ -# Plan: Complete pgmq 1.5.0+ Upgrade Documentation and Communication - -## Completed Tasks - -โœ… Core migration changes with compatibility check -โœ… Updated `set_vt_batch` to use RETURNS TABLE (future-proof) -โœ… Added optional `headers` field to TypeScript `PgmqMessageRecord` -โœ… Updated all test mock messages -โœ… Created changeset with breaking change warning -โœ… Manual testing verified migration fails gracefully on pgmq 1.4.4 - -## Remaining Tasks - -### 1. Create News Article - -**File:** `pkgs/website/src/content/docs/news/pgmq-1-5-0-upgrade.mdx` (or similar) - -Create a news article announcing: -- pgflow 0.8.0 requires pgmq 1.5.0+ -- Breaking change details -- Migration instructions -- Benefits of the upgrade (future-proofing against pgmq changes) - -### 2. 
Update Supabase CLI Version Requirements in Docs - -**Files to review and update:** -- `pkgs/website/src/content/docs/get-started/installation.mdx` -- Other getting started guides -- Any tutorial pages mentioning Supabase CLI version - -**Action:** Update minimum Supabase CLI version requirement to the version that includes pgmq 1.5.0+ - -### 3. Update READMEs - -**Files to review and update:** -- Root `README.md` -- `pkgs/core/README.md` -- `pkgs/edge-worker/README.md` -- `pkgs/cli/README.md` -- Any other package READMEs mentioning Supabase versions - -**Action:** Ensure all READMEs mention the pgmq 1.5.0+ requirement - -### 4. Improve Update pgflow Docs Page - -**File:** Look for existing update/upgrade documentation page - -**Actions:** -- Add section about breaking changes in 0.8.0 -- Document migration path from 0.7.x to 0.8.0 -- Include pgmq version check instructions -- Add troubleshooting section for migration failures - -### 5. Review All Docs Pages for Version References - -**Action:** Comprehensive audit of all documentation for: -- Outdated Supabase CLI version numbers -- Missing pgmq version requirements -- Installation/setup instructions that need updating -- Migration guides that need breaking change warnings - -**Files to check:** -- All files in `pkgs/website/src/content/docs/` -- All READMEs across packages -- Any deployment guides -- Troubleshooting pages - -## Testing Checklist - -After documentation updates: -- [ ] Build website locally and verify all pages render correctly -- [ ] Check all internal links still work -- [ ] Verify code examples are still accurate -- [ ] Review for consistency in version numbering - -## Notes - -- Keep documentation aligned with MVP philosophy (concise, clear, actionable) -- Follow Diataxis framework for documentation organization -- Use clear warnings for breaking changes -- Provide migration instructions, not just "upgrade" diff --git a/PRD_control-plane-http.md b/PRD_control-plane-http.md new file mode 100644 index 000000000..05f295348 --- /dev/null +++ b/PRD_control-plane-http.md @@ -0,0 +1,203 @@ +# PRD: ControlPlane HTTP Compilation (Phase 1) + +**Status**: Draft +**Owner**: TBD +**Last Updated**: 2025-11-20 + +--- + +## What We're Building + +**One-liner**: `pgflow compile` calls HTTP endpoint instead of spawning Deno runtime. + +Replace CLI's fragile Deno runtime spawning with HTTP calls to a ControlPlane edge function. This: +- Eliminates deno.json complexity and path resolution bugs +- Establishes pattern for Phase 2 auto-compilation +- Improves developer experience with reliable compilation + +**Alternatives considered**: Per-worker endpoints (code duplication), keep Deno spawning (too unreliable), direct SQL in CLI (wrong packaging model). 
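For orientation, a minimal sketch of the contract this introduces (the `GET /flows/:slug` URL pattern and `{ flowSlug, sql }` response shape are the ones specified in the requirements below; the helper name is illustrative, not the actual CLI function):

```typescript
// Response returned by the ControlPlane for GET /flows/:slug
interface CompileResponse {
  flowSlug: string;
  sql: string[]; // SQL statements the CLI writes into a timestamped migration
}

// Illustrative client call the CLI performs instead of spawning Deno
async function fetchCompiledFlow(
  baseUrl: string, // e.g. http://127.0.0.1:54321/functions/v1/pgflow
  slug: string,
  anonKey: string
): Promise<CompileResponse> {
  const res = await fetch(`${baseUrl}/flows/${slug}`, {
    headers: { Authorization: `Bearer ${anonKey}` },
  });
  if (!res.ok) throw new Error(`ControlPlane returned HTTP ${res.status}`);
  return (await res.json()) as CompileResponse;
}
```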
+ +--- + +## Before & After + +| Aspect | Old (v0.8.0) | New (v0.9.0) | +|--------|--------------|--------------| +| **Command** | `pgflow compile path/to/flow.ts --deno-json=deno.json` | `pgflow compile my-flow` | +| **How it works** | CLI spawns Deno โ†’ imports flow file โ†’ compiles to SQL | CLI calls HTTP โ†’ ControlPlane compiles โ†’ returns SQL | +| **Pain points** | Import map errors, path resolution, Deno version issues | Flow must be registered in ControlPlane first | +| **Setup** | Deno installed locally | Supabase + edge functions running | +| **Rollback** | N/A | `npx pgflow@0.8.0 compile path/to/flow.ts` | + +--- + +## Goals & Success Criteria + +**What success looks like:** +- โœ… ControlPlane pattern established (reusable for Phase 2) +- โœ… HTTP compilation works reliably (<5% users need version pinning) +- โœ… Developer setup simplified (no Deno version management) +- โœ… Clear error messages with rollback option +- โœ… `pgflow compile` uses HTTP (Deno spawn code deleted) +- โœ… `pgflow install` creates ControlPlane edge function +- โœ… Tests passing (80%+ coverage: unit, integration, E2E) +- โœ… Docs updated (installation, compilation, troubleshooting) +- โœ… Changelog complete + +**Metrics:** +- Zero HTTP compilation failures +- Positive feedback on reliability +- ControlPlane API ready for Phase 2 + +--- + +## Requirements + +### ControlPlane Edge Function +- Serve `GET /flows/:slug` โ†’ `{ flowSlug: string, sql: string[] }` +- Registry: `Map` built from flows array +- Validation: Reject duplicate slugs at startup +- Errors: 404 for unknown flows + +### CLI Changes +- Command: `pgflow compile ` (flow slug, not file path) +- HTTP call: `GET /pgflow/flows/:slug` +- URL: Parse from `supabase status` +- Deprecation: Show warning if `--deno-json` used +- Migration: Delete all Deno spawn code + +### Installation +`pgflow install` creates: +- `supabase/functions/pgflow/index.ts` - Calls `ControlPlane.serve(flows)` +- `supabase/functions/pgflow/flows.ts` - User edits, exports flow array +- `supabase/functions/pgflow/deno.json` - Minimal import map template +- Updates `supabase/config.toml` with edge function entry + +### Testing +- **Unit**: ControlPlane registry, CLI helpers, mocked HTTP +- **Integration**: Real HTTP server, endpoint responses +- **E2E**: Full flow (install โ†’ register flow โ†’ compile), error scenarios +- **Coverage**: 80% min for new code, 100% for critical paths + +### Out of Scope (Phase 2) +- โŒ Worker auto-compilation +- โŒ POST /ensure-compiled endpoint +- โŒ Shape comparison, advisory locks +- โŒ Import map auto-generation +- โŒ Flow auto-discovery + +--- + +## Error Handling + +All user-facing errors centralized here: + +| Error Scenario | CLI Output | Fix | +|----------------|------------|-----| +| **--deno-json flag used** | Warning: `--deno-json` is deprecated and has no effect (will be removed in v1.0) | Remove flag | +| **Flow not registered** | Flow 'my-flow' not found. Did you add it to flows.ts? | Add to `flows.ts` | +| **Old path syntax** | Flow 'path/to/flow.ts' not found. Did you add it to flows.ts? | Use slug instead of path | +| **ControlPlane unreachable** | ControlPlane not reachable.
<br/>Fix options:<br/>1. Start Supabase: `supabase start`<br/>2. Start edge functions: `supabase functions serve`<br/>
3. Use previous version: `npx pgflow@0.8.0` | Start services or rollback | +| **SERVICE_ROLE missing** (v1.1) | SERVICE_ROLE key not found. Is Supabase running? (`supabase status`) | Check Supabase status | + +--- + +## Documentation & Versioning + +### Docs to Update +1. **installation.mdx**: Add note "Creates ControlPlane function for flow compilation" +2. **compile-flow.mdx**: + - Remove Deno requirement (no longer user-facing) + - Add prerequisites: Supabase + edge functions running + - Update command examples (file path โ†’ slug) + - Keep immutability note + link to delete-flows.mdx + +### Versioning Strategy +- **Latest best practice**: Single-path documentation (v0.9.0+ only) +- **Escape hatch**: Version pinning (`npx pgflow@0.8.0`) for rollback +- **Optional (v1.1)**: Dedicated troubleshooting page if users request + +### Phase 2 Changes +Move `compile-flow.mdx` to `concepts/` (tutorial โ†’ explanation), remove from getting started. User story: "Compilation is automatic now" + +--- + +## Technical Design + +### Architecture +``` +pgflow compile my-flow + โ”‚ + โ””โ”€> HTTP GET /pgflow/flows/my-flow + โ”‚ + โ–ผ + ControlPlane Edge Function + โ”‚ + โ”œโ”€> flows.get('my-flow') + โ”œโ”€> compileFlow(flow) [reuses existing @pgflow/dsl] + โ””โ”€> { flowSlug: 'my-flow', sql: [...] } + โ”‚ + โ–ผ + CLI generates migration: ${timestamp}_create_${flowSlug}_flow.sql +``` + +### Key Decisions +- **Reuse `compileFlow()`**: No new SQL logic, ControlPlane wraps existing DSL function +- **User owns flows.ts**: Import flows, export array +- **pgflow owns index.ts**: Updated via `pgflow install` +- **Future-proof**: `--control-plane` flag for multi-instance pattern (v1.1) + +**See PLAN.md for**: API specs, code examples, detailed test plan, error patterns + +--- + +## Constraints & Risks + +### Dependencies +- Supabase CLI (any version with `supabase status`) +- Existing `compileFlow()` from @pgflow/dsl (no changes) +- nx monorepo structure + +### Primary Risk: Import Map Complexity +**Risk**: deno.json management becomes worse +**Mitigation**: Minimal template, link to Supabase docs, users manage dependencies +**Detection**: Early testing with real flows +**Escape hatch**: Multiple ControlPlanes (manual) + +### Constraints +- Zero breaking changes to output format +- 10-12 hours effort (implementation 4-5h, tests 6-7h) +- Ship v1.0 within 2 weeks + +--- + +## Release Plan + +### v1.0 (2 weeks) +- ControlPlane.serve() + `GET /flows/:slug` +- Replace `pgflow compile` with HTTP +- `pgflow install` creates edge function templates +- Tests (unit + integration + E2E) +- Docs updated +- Changelog + +### v1.1 (1-2 weeks after v1.0, based on feedback) +- Troubleshooting page (if requested) +- SERVICE_ROLE auth (if not in v1.0) +- `--control-plane` flag +- Better error messages + +--- + +## Appendix + +### Related Documents +- **PLAN.md**: Detailed implementation, API specs, test plan +- **PLAN_orchestration.md**: Phase 2+ auto-compilation vision + +### Changelog +- **2025-11-20**: Major simplification - removed duplication, centralized errors, streamlined structure +- **2025-11-20**: Clarified command signature (path โ†’ slug), deprecated --deno-json +- **2025-01-20**: Made troubleshooting page optional (v1.1) +- **2024-11-19**: Changed to `GET /flows/:slug` with `{ flowSlug, sql }` response +- **2024-11-19**: Initial PRD diff --git a/package.json b/package.json index e89b0babf..57c5e2e79 100644 --- a/package.json +++ b/package.json @@ -43,6 +43,7 @@ "netlify-cli": "^22.1.3", "nx": "21.2.1", "prettier": 
"^2.6.2", + "supabase": "^2.34.3", "tslib": "^2.3.0", "typescript": "5.8.3", "typescript-eslint": "8.34.1", diff --git a/pkgs/cli/.gitignore b/pkgs/cli/.gitignore index 15193f184..59ac17beb 100644 --- a/pkgs/cli/.gitignore +++ b/pkgs/cli/.gitignore @@ -1,2 +1,13 @@ -supabase/ +# Git ignore vendor directory but keep Supabase structure +supabase/functions/_vendor/ + +# Git ignore migrations (copied from core during sync-e2e-deps) +supabase/migrations/* + +# Git ignore generated files +supabase/.temp/ +supabase/.branches/ + +# Keep everything else committed + deno.lock diff --git a/pkgs/cli/__tests__/commands/compile/index.test.ts b/pkgs/cli/__tests__/commands/compile/index.test.ts new file mode 100644 index 000000000..f74653c3c --- /dev/null +++ b/pkgs/cli/__tests__/commands/compile/index.test.ts @@ -0,0 +1,328 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { EventEmitter } from 'events'; +import type { ChildProcess } from 'child_process'; +import { getSupabaseConfig, fetchFlowSQL } from '../../../src/commands/compile'; + +describe('getSupabaseConfig', () => { + let mockProcess: EventEmitter & { + stdout: EventEmitter; + stderr: EventEmitter; + }; + let mockSpawn: ReturnType; + + beforeEach(() => { + // Create fresh mock child process for each test + mockProcess = Object.assign(new EventEmitter(), { + stdout: new EventEmitter(), + stderr: new EventEmitter(), + }); + + // Create mock spawn function that returns our mock process + mockSpawn = vi.fn().mockReturnValue(mockProcess as unknown as ChildProcess); + }); + + it('should parse valid supabase status JSON', async () => { + const validStatus = JSON.stringify({ + API_URL: 'http://127.0.0.1:54321', + ANON_KEY: 'test-anon-key', + SERVICE_ROLE_KEY: 'test-service-key', + }); + + const promise = getSupabaseConfig('/test/supabase', mockSpawn); + + // Simulate successful command execution + mockProcess.stdout.emit('data', Buffer.from(validStatus)); + mockProcess.emit('close', 0); + + const result = await promise; + + expect(result).toEqual({ + apiUrl: 'http://127.0.0.1:54321', + anonKey: 'test-anon-key', + }); + + // Verify spawn was called correctly + expect(mockSpawn).toHaveBeenCalledWith('supabase', ['status', '--output=json'], { + cwd: '/test/supabase', + }); + }); + + it('should error when API_URL is missing', async () => { + const invalidStatus = JSON.stringify({ + ANON_KEY: 'test-anon-key', + }); + + const promise = getSupabaseConfig('/test/supabase', mockSpawn); + + mockProcess.stdout.emit('data', Buffer.from(invalidStatus)); + mockProcess.emit('close', 0); + + await expect(promise).rejects.toThrow('Could not find API_URL'); + await expect(promise).rejects.toThrow('Make sure Supabase is running'); + }); + + it('should error when ANON_KEY is missing', async () => { + const invalidStatus = JSON.stringify({ + API_URL: 'http://127.0.0.1:54321', + }); + + const promise = getSupabaseConfig('/test/supabase', mockSpawn); + + mockProcess.stdout.emit('data', Buffer.from(invalidStatus)); + mockProcess.emit('close', 0); + + await expect(promise).rejects.toThrow('Could not find ANON_KEY'); + await expect(promise).rejects.toThrow('Make sure Supabase is running'); + }); + + it('should handle JSON parse errors', async () => { + const invalidJson = 'not valid json{{{'; + + const promise = getSupabaseConfig('/test/supabase', mockSpawn); + + mockProcess.stdout.emit('data', Buffer.from(invalidJson)); + mockProcess.emit('close', 0); + + await expect(promise).rejects.toThrow('Failed to parse supabase status JSON'); + }); + + 
it('should handle supabase command failure', async () => { + const promise = getSupabaseConfig('/test/supabase', mockSpawn); + + mockProcess.stderr.emit('data', Buffer.from('Error: Supabase not running')); + mockProcess.emit('close', 1); + + await expect(promise).rejects.toThrow('Failed to get Supabase status'); + await expect(promise).rejects.toThrow('exit code 1'); + }); + + it('should handle spawn errors', async () => { + const promise = getSupabaseConfig('/test/supabase', mockSpawn); + + mockProcess.emit('error', new Error('Command not found')); + + await expect(promise).rejects.toThrow('Failed to run supabase status'); + await expect(promise).rejects.toThrow('Command not found'); + await expect(promise).rejects.toThrow('Make sure Supabase CLI is installed'); + }); + + it('should handle multi-chunk stdout data', async () => { + const chunk1 = '{"API_URL": "http://127'; + const chunk2 = '.0.0.1:54321", "ANON_KEY": '; + const chunk3 = '"test-anon-key"}'; + + const promise = getSupabaseConfig('/test/supabase', mockSpawn); + + mockProcess.stdout.emit('data', Buffer.from(chunk1)); + mockProcess.stdout.emit('data', Buffer.from(chunk2)); + mockProcess.stdout.emit('data', Buffer.from(chunk3)); + mockProcess.emit('close', 0); + + const result = await promise; + + expect(result).toEqual({ + apiUrl: 'http://127.0.0.1:54321', + anonKey: 'test-anon-key', + }); + }); +}); + +describe('fetchFlowSQL', () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + it('should fetch flow SQL successfully', async () => { + const mockResponse = { + ok: true, + status: 200, + json: async () => ({ + flowSlug: 'test_flow', + sql: [ + "SELECT pgflow.create_flow('test_flow');", + "SELECT pgflow.add_step('test_flow', 'step1');", + ], + }), + }; + + global.fetch = vi.fn().mockResolvedValue(mockResponse); + + const result = await fetchFlowSQL( + 'test_flow', + 'http://127.0.0.1:54321', + 'test-anon-key' + ); + + expect(result).toEqual({ + flowSlug: 'test_flow', + sql: [ + "SELECT pgflow.create_flow('test_flow');", + "SELECT pgflow.add_step('test_flow', 'step1');", + ], + }); + + expect(global.fetch).toHaveBeenCalledWith( + 'http://127.0.0.1:54321/functions/v1/pgflow/flows/test_flow', + { + headers: { + Authorization: 'Bearer test-anon-key', + 'Content-Type': 'application/json', + }, + } + ); + }); + + it('should handle 404 with helpful error message', async () => { + const mockResponse = { + ok: false, + status: 404, + json: async () => ({ + error: 'Flow Not Found', + message: "Flow 'unknown_flow' not found. 
Did you add it to flows.ts?", + }), + }; + + global.fetch = vi.fn().mockResolvedValue(mockResponse); + + await expect( + fetchFlowSQL('unknown_flow', 'http://127.0.0.1:54321', 'test-anon-key') + ).rejects.toThrow("Flow 'unknown_flow' not found"); + await expect( + fetchFlowSQL('unknown_flow', 'http://127.0.0.1:54321', 'test-anon-key') + ).rejects.toThrow('Add your flow to supabase/functions/pgflow/flows.ts'); + }); + + it('should handle ECONNREFUSED with startup instructions', async () => { + global.fetch = vi + .fn() + .mockRejectedValue(new Error('fetch failed: ECONNREFUSED')); + + await expect( + fetchFlowSQL('test_flow', 'http://127.0.0.1:54321', 'test-anon-key') + ).rejects.toThrow('Could not connect to ControlPlane'); + await expect( + fetchFlowSQL('test_flow', 'http://127.0.0.1:54321', 'test-anon-key') + ).rejects.toThrow('Start Supabase: supabase start'); + await expect( + fetchFlowSQL('test_flow', 'http://127.0.0.1:54321', 'test-anon-key') + ).rejects.toThrow('npx pgflow@0.8.0'); + }); + + it('should handle generic HTTP errors', async () => { + const mockResponse = { + ok: false, + status: 500, + text: async () => 'Internal Server Error', + }; + + global.fetch = vi.fn().mockResolvedValue(mockResponse); + + await expect( + fetchFlowSQL('test_flow', 'http://127.0.0.1:54321', 'test-anon-key') + ).rejects.toThrow('HTTP 500: Internal Server Error'); + }); + + it('should handle fetch timeout errors', async () => { + global.fetch = vi.fn().mockRejectedValue(new Error('fetch failed')); + + await expect( + fetchFlowSQL('test_flow', 'http://127.0.0.1:54321', 'test-anon-key') + ).rejects.toThrow('Could not connect to ControlPlane'); + }); + + it('should validate response format', async () => { + const mockResponse = { + ok: true, + status: 200, + json: async () => ({ + flowSlug: 'test_flow', + sql: [ + "SELECT pgflow.create_flow('test_flow');", + "SELECT pgflow.add_step('test_flow', 'step1');", + ], + }), + }; + + global.fetch = vi.fn().mockResolvedValue(mockResponse); + + const result = await fetchFlowSQL( + 'test_flow', + 'http://127.0.0.1:54321', + 'test-anon-key' + ); + + expect(result).toHaveProperty('flowSlug'); + expect(result).toHaveProperty('sql'); + expect(Array.isArray(result.sql)).toBe(true); + expect(result.sql.length).toBeGreaterThan(0); + }); + + it('should handle empty error messages in 404 response', async () => { + const mockResponse = { + ok: false, + status: 404, + json: async () => ({ + error: 'Flow Not Found', + }), + }; + + global.fetch = vi.fn().mockResolvedValue(mockResponse); + + await expect( + fetchFlowSQL('unknown_flow', 'http://127.0.0.1:54321', 'test-anon-key') + ).rejects.toThrow("Flow 'unknown_flow' not found"); + await expect( + fetchFlowSQL('unknown_flow', 'http://127.0.0.1:54321', 'test-anon-key') + ).rejects.toThrow('Did you add it to flows.ts'); + }); + + it('should construct correct URL with flow slug', async () => { + const mockResponse = { + ok: true, + status: 200, + json: async () => ({ + flowSlug: 'my_complex_flow_123', + sql: ["SELECT pgflow.create_flow('my_complex_flow_123');"], + }), + }; + + global.fetch = vi.fn().mockResolvedValue(mockResponse); + + await fetchFlowSQL( + 'my_complex_flow_123', + 'http://127.0.0.1:54321', + 'test-anon-key' + ); + + expect(global.fetch).toHaveBeenCalledWith( + 'http://127.0.0.1:54321/functions/v1/pgflow/flows/my_complex_flow_123', + expect.any(Object) + ); + }); + + it('should pass anon key in Authorization header', async () => { + const mockResponse = { + ok: true, + status: 200, + json: async () => ({ + 
flowSlug: 'test_flow', + sql: ["SELECT pgflow.create_flow('test_flow');"], + }), + }; + + global.fetch = vi.fn().mockResolvedValue(mockResponse); + + await fetchFlowSQL( + 'test_flow', + 'http://127.0.0.1:54321', + 'my-special-anon-key' + ); + + expect(global.fetch).toHaveBeenCalledWith(expect.any(String), { + headers: { + Authorization: 'Bearer my-special-anon-key', + 'Content-Type': 'application/json', + }, + }); + }); +}); diff --git a/pkgs/cli/__tests__/commands/install/create-edge-function.test.ts b/pkgs/cli/__tests__/commands/install/create-edge-function.test.ts new file mode 100644 index 000000000..a89efefee --- /dev/null +++ b/pkgs/cli/__tests__/commands/install/create-edge-function.test.ts @@ -0,0 +1,143 @@ +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import fs from 'fs'; +import path from 'path'; +import os from 'os'; +import { createEdgeFunction } from '../../../src/commands/install/create-edge-function'; + +describe('createEdgeFunction', () => { + let tempDir: string; + let supabasePath: string; + let pgflowFunctionDir: string; + + beforeEach(() => { + // Create a temporary directory for testing + tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pgflow-test-')); + supabasePath = path.join(tempDir, 'supabase'); + pgflowFunctionDir = path.join(supabasePath, 'functions', 'pgflow'); + }); + + afterEach(() => { + // Clean up the temporary directory + fs.rmSync(tempDir, { recursive: true, force: true }); + }); + + it('should create all three files when none exist', async () => { + const result = await createEdgeFunction({ + supabasePath, + autoConfirm: true, + }); + + // Should return true because files were created + expect(result).toBe(true); + + // Verify directory was created + expect(fs.existsSync(pgflowFunctionDir)).toBe(true); + + // Verify all files exist + const indexPath = path.join(pgflowFunctionDir, 'index.ts'); + const flowsPath = path.join(pgflowFunctionDir, 'flows.ts'); + const denoJsonPath = path.join(pgflowFunctionDir, 'deno.json'); + + expect(fs.existsSync(indexPath)).toBe(true); + expect(fs.existsSync(flowsPath)).toBe(true); + expect(fs.existsSync(denoJsonPath)).toBe(true); + + // Verify index.ts content + const indexContent = fs.readFileSync(indexPath, 'utf8'); + expect(indexContent).toContain("import { ControlPlane } from '@pgflow/edge-worker'"); + expect(indexContent).toContain("import { flows } from './flows.ts'"); + expect(indexContent).toContain('ControlPlane.serve(flows)'); + + // Verify flows.ts content + const flowsContent = fs.readFileSync(flowsPath, 'utf8'); + expect(flowsContent).toContain('export const flows = ['); + expect(flowsContent).toContain('// Import your flows here'); + + // Verify deno.json content + const denoJsonContent = fs.readFileSync(denoJsonPath, 'utf8'); + expect(denoJsonContent).toContain('"imports"'); + expect(denoJsonContent).toContain('@pgflow/edge-worker'); + expect(denoJsonContent).toContain('@pgflow/dsl'); + }); + + it('should not create files when they already exist', async () => { + // Pre-create the directory and files + fs.mkdirSync(pgflowFunctionDir, { recursive: true }); + + const indexPath = path.join(pgflowFunctionDir, 'index.ts'); + const flowsPath = path.join(pgflowFunctionDir, 'flows.ts'); + const denoJsonPath = path.join(pgflowFunctionDir, 'deno.json'); + + fs.writeFileSync(indexPath, '// existing content'); + fs.writeFileSync(flowsPath, '// existing content'); + fs.writeFileSync(denoJsonPath, '// existing content'); + + const result = await createEdgeFunction({ + supabasePath, + 
autoConfirm: true, + }); + + // Should return false because no changes were needed + expect(result).toBe(false); + + // Verify files still exist with original content + expect(fs.readFileSync(indexPath, 'utf8')).toBe('// existing content'); + expect(fs.readFileSync(flowsPath, 'utf8')).toBe('// existing content'); + expect(fs.readFileSync(denoJsonPath, 'utf8')).toBe('// existing content'); + }); + + it('should create only missing files when some already exist', async () => { + // Pre-create the directory and one file + fs.mkdirSync(pgflowFunctionDir, { recursive: true }); + + const indexPath = path.join(pgflowFunctionDir, 'index.ts'); + const flowsPath = path.join(pgflowFunctionDir, 'flows.ts'); + const denoJsonPath = path.join(pgflowFunctionDir, 'deno.json'); + + // Only create index.ts + fs.writeFileSync(indexPath, '// existing content'); + + const result = await createEdgeFunction({ + supabasePath, + autoConfirm: true, + }); + + // Should return true because some files were created + expect(result).toBe(true); + + // Verify index.ts was not modified + expect(fs.readFileSync(indexPath, 'utf8')).toBe('// existing content'); + + // Verify flows.ts and deno.json were created + expect(fs.existsSync(flowsPath)).toBe(true); + expect(fs.existsSync(denoJsonPath)).toBe(true); + + const flowsContent = fs.readFileSync(flowsPath, 'utf8'); + expect(flowsContent).toContain('export const flows = ['); + + const denoJsonContent = fs.readFileSync(denoJsonPath, 'utf8'); + expect(denoJsonContent).toContain('"imports"'); + }); + + it('should create parent directories if they do not exist', async () => { + // Don't create anything - let the function create it all + expect(fs.existsSync(supabasePath)).toBe(false); + + const result = await createEdgeFunction({ + supabasePath, + autoConfirm: true, + }); + + expect(result).toBe(true); + + // Verify all parent directories were created + expect(fs.existsSync(supabasePath)).toBe(true); + expect(fs.existsSync(path.join(supabasePath, 'functions'))).toBe(true); + expect(fs.existsSync(pgflowFunctionDir)).toBe(true); + + // Verify files exist + expect(fs.existsSync(path.join(pgflowFunctionDir, 'index.ts'))).toBe(true); + expect(fs.existsSync(path.join(pgflowFunctionDir, 'flows.ts'))).toBe(true); + expect(fs.existsSync(path.join(pgflowFunctionDir, 'deno.json'))).toBe(true); + }); +}); diff --git a/pkgs/cli/__tests__/e2e/compile.test.ts b/pkgs/cli/__tests__/e2e/compile.test.ts new file mode 100644 index 000000000..52b240a3c --- /dev/null +++ b/pkgs/cli/__tests__/e2e/compile.test.ts @@ -0,0 +1,165 @@ +import { describe, it, expect, beforeAll, afterAll } from 'vitest'; +import { runCommand } from '../helpers/process'; +import fs from 'fs'; +import path from 'path'; + +/** + * Helper to ensure Supabase is running in CLI package directory + * Checks if Supabase is running and starts it if needed + */ +async function ensureSupabaseRunning(cliDir: string): Promise { + console.log('๐Ÿ” Checking if Supabase is running...'); + + // Check if Supabase is already running + try { + const statusResult = await runCommand('pnpm', ['-C', cliDir, 'exec', 'supabase', 'status'], { + cwd: cliDir, + }); + + if (statusResult.code === 0) { + console.log('โœ“ Supabase is already running'); + return; + } + } catch (error) { + // Status check failed, need to start + } + + // Start Supabase + console.log('๐Ÿš€ Starting Supabase...'); + const startResult = await runCommand('pnpm', ['-C', cliDir, 'exec', 'supabase', 'start'], { + cwd: cliDir, + debug: true, + }); + + if (startResult.code !== 0) { + 
console.error('Start stdout:', startResult.stdout); + console.error('Start stderr:', startResult.stderr); + throw new Error(`Supabase start failed with exit code ${startResult.code}`); + } + + console.log('โœ“ Supabase started successfully'); +} + +/** + * Helper to ensure Edge Functions are ready to accept requests + * Retries HTTP requests until the function responds + */ +async function ensureFunctionReady(baseUrl: string, flowSlug: string): Promise { + console.log('โณ Ensuring Edge Functions are ready...'); + + const maxRetries = 30; + const retryDelayMs = 1000; + + for (let i = 0; i < maxRetries; i++) { + try { + // Try to hit the /flows/:slug endpoint to check if function is running + const response = await fetch(`${baseUrl}/flows/${flowSlug}`); + + // Any response (even 404 or 500) means the function is running + if (response.status > 0) { + console.log('โœ“ Edge Functions are ready'); + return; + } + } catch (error) { + if (i === maxRetries - 1) { + throw new Error(`Edge Functions not ready after ${maxRetries} retries: ${error}`); + } + console.log(`Retry ${i + 1}/${maxRetries}: Edge Functions not ready yet, waiting...`); + await new Promise(resolve => setTimeout(resolve, retryDelayMs)); + } + } +} + +/** + * Helper to get the Supabase Edge Functions base URL from status output + */ +async function getEdgeFunctionUrl(cliDir: string): Promise { + const statusResult = await runCommand('pnpm', ['-C', cliDir, 'exec', 'supabase', 'status'], { + cwd: cliDir, + }); + + // Parse the status output to find the API URL + // Example line: "API URL: http://127.0.0.1:54321" + const apiUrlMatch = statusResult.stdout.match(/API URL:\s*(https?:\/\/[^\s]+)/); + if (!apiUrlMatch) { + throw new Error('Could not find API URL in Supabase status output'); + } + + const apiUrl = apiUrlMatch[1]; + return `${apiUrl}/functions/v1/pgflow`; +} + +describe('pgflow compile (e2e)', () => { + const cliDir = process.cwd(); + const workspaceRoot = path.resolve(cliDir, '..', '..'); + const supabasePath = path.join(cliDir, 'supabase'); + const flowSlug = 'test_flow_e2e'; + + beforeAll(async () => { + // Ensure Supabase is running + await ensureSupabaseRunning(cliDir); + + // Get Edge Function URL + const baseUrl = await getEdgeFunctionUrl(cliDir); + + // Ensure Edge Functions are ready + await ensureFunctionReady(baseUrl, flowSlug); + }, 120000); // 2 minute timeout for setup + + afterAll(async () => { + // Clean up any test migration files + const migrationsDir = path.join(supabasePath, 'migrations'); + if (fs.existsSync(migrationsDir)) { + const testMigrations = fs + .readdirSync(migrationsDir) + .filter(f => f.includes(flowSlug) && f.endsWith('.sql')); + + for (const file of testMigrations) { + const filePath = path.join(migrationsDir, file); + fs.unlinkSync(filePath); + console.log(`๐Ÿ—‘๏ธ Cleaned up test migration: ${file}`); + } + } + }); + + it('should compile flow and create migration', async () => { + // Run pgflow compile command + console.log(`โš™๏ธ Compiling flow '${flowSlug}' via ControlPlane`); + const compileResult = await runCommand( + 'node', + [path.join(cliDir, 'dist', 'index.js'), 'compile', flowSlug, '--supabase-path', supabasePath], + { + cwd: cliDir, + env: { PATH: `${workspaceRoot}/node_modules/.bin:${process.env.PATH}` }, + debug: true, + } + ); + + // Check if compilation was successful + if (compileResult.code !== 0) { + console.error('Compile stdout:', compileResult.stdout); + console.error('Compile stderr:', compileResult.stderr); + throw new Error(`Compilation failed with exit code 
${compileResult.code}`); + } + + console.log('โœ“ Flow compiled successfully'); + + // Verify migration was created + console.log('โœ… Verifying migration file'); + const migrationsDir = path.join(supabasePath, 'migrations'); + const migrationFiles = fs.readdirSync(migrationsDir).filter(f => f.includes(flowSlug) && f.endsWith('.sql')); + + expect(migrationFiles.length).toBe(1); + console.log(`โœ“ Found migration: ${migrationFiles[0]}`); + + // Verify migration contains expected SQL + const migrationPath = path.join(migrationsDir, migrationFiles[0]); + const migrationContent = fs.readFileSync(migrationPath, 'utf-8'); + + expect(migrationContent).toContain(`pgflow.create_flow('${flowSlug}'`); + expect(migrationContent).toContain(`pgflow.add_step('${flowSlug}', 'step1'`); + console.log('โœ“ Migration content is correct'); + + console.log('โœจ Compile test complete'); + }, 60000); // 1 minute timeout for the test +}); diff --git a/pkgs/cli/__tests__/helpers/process.ts b/pkgs/cli/__tests__/helpers/process.ts new file mode 100644 index 000000000..bf976f6a5 --- /dev/null +++ b/pkgs/cli/__tests__/helpers/process.ts @@ -0,0 +1,215 @@ +import { spawn, ChildProcess } from 'child_process'; + +export interface ProcessOptions { + command: string; + args: string[]; + cwd?: string; + readyPattern?: RegExp; + timeout?: number; + env?: Record; + debug?: boolean; +} + +/** + * Runs a callback function while a process is running in the background. + * Waits for the process to be ready (if readyPattern is provided) before executing the callback. + * Automatically cleans up the process when done or on error. + */ +export async function withRunningProcess( + options: ProcessOptions, + callback: () => Promise +): Promise { + const { + command, + args, + cwd, + readyPattern, + timeout = 60000, + env, + debug = false, + } = options; + + let child: ChildProcess | null = null; + let processReady = false; + let output = ''; + let errorOutput = ''; + + return new Promise((resolve, reject) => { + // Spawn the child process + child = spawn(command, args, { + cwd, + env: env ? 
{ ...process.env, ...env } : process.env, + stdio: ['pipe', 'pipe', 'pipe'], + }); + + if (debug) { + console.log(`[process] Starting: ${command} ${args.join(' ')}`); + } + + // Handle process errors + child.on('error', (err) => { + reject( + new Error( + `Failed to start process '${command}': ${err.message}\n` + + `Working directory: ${cwd || process.cwd()}` + ) + ); + }); + + // Handle unexpected process exit + child.on('exit', (code, signal) => { + if (!processReady) { + reject( + new Error( + `Process '${command}' exited unexpectedly with code ${code} and signal ${signal}\n` + + `stdout: ${output}\n` + + `stderr: ${errorOutput}` + ) + ); + } + }); + + // Set up timeout if we're waiting for readiness + let readinessTimeout: NodeJS.Timeout | null = null; + if (readyPattern) { + readinessTimeout = setTimeout(() => { + if (child) { + child.kill('SIGTERM'); + } + reject( + new Error( + `Process '${command}' failed to become ready within ${timeout}ms\n` + + `Looking for pattern: ${readyPattern}\n` + + `stdout: ${output}\n` + + `stderr: ${errorOutput}` + ) + ); + }, timeout); + } + + // Capture stdout + child.stdout?.on('data', (data) => { + const chunk = data.toString(); + output += chunk; + + if (debug) { + process.stdout.write(`[${command}:stdout] ${chunk}`); + } + + // Check for readiness + if (!processReady && readyPattern && readyPattern.test(output)) { + processReady = true; + if (readinessTimeout) { + clearTimeout(readinessTimeout); + } + + if (debug) { + console.log(`[process] Ready: ${command}`); + } + + // Execute callback now that process is ready + executeCallback(); + } + }); + + // Capture stderr + child.stderr?.on('data', (data) => { + const chunk = data.toString(); + errorOutput += chunk; + + if (debug) { + process.stderr.write(`[${command}:stderr] ${chunk}`); + } + }); + + // If no readyPattern, execute callback immediately + if (!readyPattern) { + processReady = true; + executeCallback(); + } + + async function executeCallback() { + try { + const result = await callback(); + resolve(result); + } catch (error) { + reject(error); + } finally { + // Clean up the process + if (child) { + if (debug) { + console.log(`[process] Stopping: ${command}`); + } + child.kill('SIGTERM'); + + // Give process time to clean up gracefully + setTimeout(() => { + if (child && !child.killed) { + if (debug) { + console.log(`[process] Force killing: ${command}`); + } + child.kill('SIGKILL'); + } + }, 2000); + } + } + } + }); +} + +/** + * Helper to run a command and wait for it to complete + */ +export async function runCommand( + command: string, + args: string[], + options: { cwd?: string; env?: Record; debug?: boolean } = {} +): Promise<{ stdout: string; stderr: string; code: number | null }> { + return new Promise((resolve, reject) => { + const { cwd, env, debug = false } = options; + + if (debug) { + console.log(`[command] Running: ${command} ${args.join(' ')}`); + } + + const child = spawn(command, args, { + cwd, + env: env ? 
{ ...process.env, ...env } : process.env, + }); + + let stdout = ''; + let stderr = ''; + + child.stdout?.on('data', (data) => { + const chunk = data.toString(); + stdout += chunk; + if (debug) { + process.stdout.write(`[${command}:stdout] ${chunk}`); + } + }); + + child.stderr?.on('data', (data) => { + const chunk = data.toString(); + stderr += chunk; + if (debug) { + process.stderr.write(`[${command}:stderr] ${chunk}`); + } + }); + + child.on('error', (err) => { + reject( + new Error( + `Failed to run command '${command}': ${err.message}\n` + + `Working directory: ${cwd || process.cwd()}` + ) + ); + }); + + child.on('close', (code) => { + if (debug) { + console.log(`[command] Exited with code: ${code}`); + } + resolve({ stdout, stderr, code }); + }); + }); +} \ No newline at end of file diff --git a/pkgs/cli/eslint.config.cjs b/pkgs/cli/eslint.config.cjs index 87adc5dee..ea7528c3c 100644 --- a/pkgs/cli/eslint.config.cjs +++ b/pkgs/cli/eslint.config.cjs @@ -1,3 +1,8 @@ const baseConfig = require('../../eslint.config.cjs'); -module.exports = [...baseConfig]; +module.exports = [ + ...baseConfig, + { + ignores: ['supabase/functions/_vendor/**'], + }, +]; diff --git a/pkgs/cli/project.json b/pkgs/cli/project.json index 7842b9d4e..c2d2e3bdd 100644 --- a/pkgs/cli/project.json +++ b/pkgs/cli/project.json @@ -14,14 +14,7 @@ "main": "{projectRoot}/src/index.ts", "tsConfig": "{projectRoot}/tsconfig.lib.json", "outputPath": "{projectRoot}/dist", - "rootDir": "{projectRoot}/src", - "assets": [ - { - "input": "{projectRoot}/src/deno", - "glob": "**/*", - "output": "deno" - } - ] + "rootDir": "{projectRoot}/src" } }, "serve": { @@ -69,22 +62,28 @@ "parallel": false } }, - "test:e2e:compile": { + "sync-e2e-deps": { "executor": "nx:run-commands", + "dependsOn": ["^build"], "local": true, - "dependsOn": ["test:e2e:install", "build"], - "inputs": ["default", "^production"], + "inputs": ["^production"], + "outputs": ["{projectRoot}/supabase/functions/_vendor"], "options": { - "commands": [ - "rm -rf supabase/", - "npx -y supabase@latest init --force --with-vscode-settings --with-intellij-settings", - "node dist/index.js compile examples/analyze_website.ts --deno-json examples/deno.json --supabase-path supabase", - "./scripts/assert-flow-compiled" - ], "cwd": "{projectRoot}", + "commands": ["./scripts/sync-e2e-deps.sh"], "parallel": false } }, + "test:e2e:compile": { + "executor": "nx:run-commands", + "local": true, + "dependsOn": ["build", "sync-e2e-deps"], + "inputs": ["default", "^production"], + "options": { + "command": "vitest run __tests__/e2e/compile.test.ts", + "cwd": "{projectRoot}" + } + }, "test:e2e:async-hang-issue-123": { "executor": "nx:run-commands", "local": true, diff --git a/pkgs/cli/scripts/sync-e2e-deps.sh b/pkgs/cli/scripts/sync-e2e-deps.sh new file mode 100755 index 000000000..3be013ab0 --- /dev/null +++ b/pkgs/cli/scripts/sync-e2e-deps.sh @@ -0,0 +1,81 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +CLI_DIR="$(dirname "$SCRIPT_DIR")" +MONOREPO_ROOT="$(cd "$CLI_DIR/../.." && pwd)" +VENDOR_DIR="$CLI_DIR/supabase/functions/_vendor" + +echo "๐Ÿ”„ Syncing edge function dependencies for CLI e2e tests..." + +# Clean and create vendor directory +rm -rf "$VENDOR_DIR" +mkdir -p "$VENDOR_DIR/@pgflow" + +# Verify builds succeeded +if [ ! -d "$MONOREPO_ROOT/pkgs/core/dist" ]; then + echo "โŒ Error: core package build failed - dist directory not found" + exit 1 +fi + +if [ ! 
-d "$MONOREPO_ROOT/pkgs/dsl/dist" ]; then + echo "โŒ Error: dsl package build failed - dist directory not found" + exit 1 +fi + +# Copy core package +echo "๐Ÿ“‹ Copying @pgflow/core..." +mkdir -p "$VENDOR_DIR/@pgflow/core" +cp -r "$MONOREPO_ROOT/pkgs/core/dist/"* "$VENDOR_DIR/@pgflow/core/" +cp "$MONOREPO_ROOT/pkgs/core/package.json" "$VENDOR_DIR/@pgflow/core/" + +# Copy dsl package +echo "๐Ÿ“‹ Copying @pgflow/dsl..." +mkdir -p "$VENDOR_DIR/@pgflow/dsl" +cp -r "$MONOREPO_ROOT/pkgs/dsl/dist/"* "$VENDOR_DIR/@pgflow/dsl/" +cp "$MONOREPO_ROOT/pkgs/dsl/package.json" "$VENDOR_DIR/@pgflow/dsl/" + +# Copy edge-worker source (not built) - preserving directory structure +echo "๐Ÿ“‹ Copying @pgflow/edge-worker..." +mkdir -p "$VENDOR_DIR/@pgflow/edge-worker" +# Copy the entire src directory to maintain relative imports +cp -r "$MONOREPO_ROOT/pkgs/edge-worker/src" "$VENDOR_DIR/@pgflow/edge-worker/" + +# Simple fix: replace .js with .ts in imports +find "$VENDOR_DIR/@pgflow/edge-worker" -name "*.ts" -type f -exec sed -i 's/\.js"/\.ts"/g' {} + +find "$VENDOR_DIR/@pgflow/edge-worker" -name "*.ts" -type f -exec sed -i "s/\.js'/\.ts'/g" {} + + +# Create a redirect index.ts at the root that points to src/index.ts +cat > "$VENDOR_DIR/@pgflow/edge-worker/index.ts" << 'EOF' +// Re-export from the src directory to maintain compatibility +export * from './src/index.ts'; +EOF + +# Create _internal.ts redirect as well since edge-worker exports this path +cat > "$VENDOR_DIR/@pgflow/edge-worker/_internal.ts" << 'EOF' +// Re-export from the src directory to maintain compatibility +export * from './src/_internal.ts'; +EOF + +# Verify key files exist +if [ ! -f "$VENDOR_DIR/@pgflow/core/index.js" ]; then + echo "โš ๏ธ Warning: @pgflow/core/index.js not found after copy" +fi + +if [ ! -f "$VENDOR_DIR/@pgflow/dsl/index.js" ]; then + echo "โš ๏ธ Warning: @pgflow/dsl/index.js not found after copy" +fi + +if [ ! -f "$VENDOR_DIR/@pgflow/edge-worker/src/index.ts" ]; then + echo "โš ๏ธ Warning: @pgflow/edge-worker/src/index.ts not found after copy" +fi + +# Copy migrations from core package +echo "๐Ÿ“‹ Copying migrations from @pgflow/core..." +MIGRATIONS_DIR="$CLI_DIR/supabase/migrations" +mkdir -p "$MIGRATIONS_DIR" +rm -f "$MIGRATIONS_DIR"/*.sql +cp "$MONOREPO_ROOT/pkgs/core/supabase/migrations"/*.sql "$MIGRATIONS_DIR/" + +echo "โœ… Dependencies synced to $VENDOR_DIR" +echo "โœ… Migrations synced to $MIGRATIONS_DIR" diff --git a/pkgs/cli/scripts/test-compile b/pkgs/cli/scripts/test-compile new file mode 100755 index 000000000..65607f0df --- /dev/null +++ b/pkgs/cli/scripts/test-compile @@ -0,0 +1,109 @@ +#!/usr/bin/env bash +set -e + +echo "๐Ÿงช Testing pgflow compile functionality" + +# Colors for output +GREEN='\033[0;32m' +BLUE='\033[0;34m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +# Store the CLI package directory (where pnpm workspace is) +CLI_DIR=$(pwd) +# Get the workspace root directory (two levels up from CLI_DIR) +WORKSPACE_ROOT=$(cd "${CLI_DIR}/../.." && pwd) + +# Create temp directory for testing +TEST_DIR=$(mktemp -d) +echo -e "${BLUE}๐Ÿ“ Created test directory: ${TEST_DIR}${NC}" + +# Cleanup function +cleanup() { + echo -e "${BLUE}๐Ÿงน Cleaning up...${NC}" + cd / + if [ -d "${TEST_DIR}/supabase" ]; then + pnpm -C "${CLI_DIR}" exec supabase stop --no-backup --workdir "${TEST_DIR}" || true + fi + rm -rf "${TEST_DIR}" +} + +# Register cleanup on exit +trap cleanup EXIT + +# Navigate to test directory +cd "${TEST_DIR}" + +# 1. 
Initialize Supabase project +echo -e "${BLUE}๐Ÿ—๏ธ Creating Supabase project${NC}" +# Use --workdir to create supabase directory in TEST_DIR instead of current working directory +pnpm -C "$CLI_DIR" exec supabase init --force --yes --with-intellij-settings --with-vscode-settings --workdir "${TEST_DIR}" + +# Verify supabase directory was created in the test directory +if [ ! -d "${TEST_DIR}/supabase" ]; then + echo -e "${YELLOW}โŒ Error: supabase directory not found at ${TEST_DIR}/supabase${NC}" + echo -e "${YELLOW} supabase init may have created it in the wrong location${NC}" + exit 1 +fi +echo -e "${GREEN}โœ“ Supabase directory created at ${TEST_DIR}/supabase${NC}" + +# 2. Run pgflow install to set up ControlPlane files +echo -e "${BLUE}๐Ÿ“ฆ Installing pgflow${NC}" +(cd "${CLI_DIR}" && PATH="${WORKSPACE_ROOT}/node_modules/.bin:$PATH" node dist/index.js install -y --supabase-path "${TEST_DIR}/supabase") + +# 3. Create a test flow in flows.ts +echo -e "${BLUE}โœ๏ธ Writing test flow to flows.ts${NC}" +cat > "${TEST_DIR}/supabase/functions/pgflow/flows.ts" << 'EOF' +import { Flow } from '@pgflow/dsl'; + +// Simple test flow for e2e testing +const TestFlow = new Flow({ slug: 'test_flow' }) + .step({ slug: 'step1' }, () => ({ result: 'done' })); + +export const flows = [TestFlow]; +EOF + +# 4. Start Supabase (this starts edge functions too) +echo -e "${BLUE}๐Ÿš€ Starting Supabase (including edge functions)${NC}" +pnpm -C "${CLI_DIR}" exec supabase start --workdir "${TEST_DIR}" + +# 5. Run pgflow compile with the new HTTP-based command +echo -e "${BLUE}โš™๏ธ Compiling flow via ControlPlane${NC}" +# Run with PATH including node_modules/.bin so supabase binary is accessible +(cd "${CLI_DIR}" && PATH="${WORKSPACE_ROOT}/node_modules/.bin:$PATH" node dist/index.js compile test_flow --supabase-path "${TEST_DIR}/supabase") + +# 6. Verify migration was created +echo -e "${BLUE}โœ… Verifying migration file${NC}" +MIGRATION_COUNT=$(find "${TEST_DIR}/supabase/migrations" -name "*test_flow.sql" 2>/dev/null | wc -l) + +if [ "$MIGRATION_COUNT" -eq 0 ]; then + echo -e "${YELLOW}โŒ Error: No migration file found${NC}" + exit 1 +fi + +if [ "$MIGRATION_COUNT" -gt 1 ]; then + echo -e "${YELLOW}โŒ Error: Multiple migration files found${NC}" + exit 1 +fi + +MIGRATION_FILE=$(find "${TEST_DIR}/supabase/migrations" -name "*test_flow.sql" | head -1) +echo -e "${GREEN}โœ“ Found migration: $(basename ${MIGRATION_FILE})${NC}" + +# 7. Verify migration contains expected SQL +if ! grep -q "pgflow.create_flow('test_flow'" "${MIGRATION_FILE}"; then + echo -e "${YELLOW}โŒ Error: Migration doesn't contain create_flow statement${NC}" + exit 1 +fi + +if ! grep -q "pgflow.add_step('test_flow', 'step1'" "${MIGRATION_FILE}"; then + echo -e "${YELLOW}โŒ Error: Migration doesn't contain add_step statement${NC}" + exit 1 +fi + +echo -e "${GREEN}โœ“ Migration content is correct${NC}" + +# 8. 
Stop Supabase +echo -e "${BLUE}๐Ÿ›‘ Stopping Supabase${NC}" +pnpm -C "${CLI_DIR}" exec supabase stop --no-backup --workdir "${TEST_DIR}" + +echo -e "${GREEN}โœจ Compile test complete${NC}" diff --git a/pkgs/cli/src/commands/compile/index.ts b/pkgs/cli/src/commands/compile/index.ts index bb4acef76..92e7209c3 100644 --- a/pkgs/cli/src/commands/compile/index.ts +++ b/pkgs/cli/src/commands/compile/index.ts @@ -1,87 +1,176 @@ import { type Command } from 'commander'; import chalk from 'chalk'; -import { intro, log, note, outro } from '@clack/prompts'; +import { intro, log, outro } from '@clack/prompts'; import path from 'path'; import fs from 'fs'; -import { spawn } from 'child_process'; -import { fileURLToPath } from 'url'; - -// Get the directory name in ES modules -const __filename = fileURLToPath(import.meta.url); -const __dirname = path.dirname(__filename); +import { spawn as defaultSpawn } from 'child_process'; +import type { ChildProcess } from 'child_process'; /** - * Formats a command and its arguments for display with syntax highlighting - * Each argument is displayed on a separate line for better readability + * Get Supabase API URL and anon key from `supabase status` command */ -function formatCommand(command: string, args: string[]): string { - const cmd = chalk.cyan(command); - const formattedArgs = args.map((arg) => { - // Highlight config and file paths differently - if (arg.startsWith('--config=')) { - const [flag, value] = arg.split('='); - return ` ${chalk.yellow(flag)}=${chalk.green(value)}`; - } else if (arg.startsWith('--')) { - return ` ${chalk.yellow(arg)}`; - } else if (arg.endsWith('.ts') || arg.endsWith('.json')) { - return ` ${chalk.green(arg)}`; - } - return ` ${chalk.white(arg)}`; - }); +export async function getSupabaseConfig( + supabasePath: string, + spawn: ( + command: string, + args: string[], + options?: { cwd?: string } + ) => ChildProcess = defaultSpawn +): Promise<{ apiUrl: string; anonKey: string }> { + return new Promise((resolve, reject) => { + const supabase = spawn('supabase', ['status', '--output=json'], { + cwd: supabasePath, + }); + + let stdout = ''; + let stderr = ''; - return `$ ${cmd}\n${formattedArgs.join('\n')}`; + supabase.stdout?.on('data', (data) => { + stdout += data.toString(); + }); + + supabase.stderr?.on('data', (data) => { + stderr += data.toString(); + }); + + supabase.on('close', (code) => { + if (code !== 0) { + reject( + new Error( + `Failed to get Supabase status (exit code ${code})${ + stderr ? `\n${stderr}` : '' + }` + ) + ); + return; + } + + try { + // Parse JSON output + const status = JSON.parse(stdout); + + if (!status.API_URL) { + reject( + new Error( + 'Could not find API_URL in supabase status.\n' + + 'Make sure Supabase is running: supabase start' + ) + ); + return; + } + + if (!status.ANON_KEY) { + reject( + new Error( + 'Could not find ANON_KEY in supabase status.\n' + + 'Make sure Supabase is running: supabase start' + ) + ); + return; + } + + resolve({ apiUrl: status.API_URL, anonKey: status.ANON_KEY }); + } catch (parseError) { + reject( + new Error( + `Failed to parse supabase status JSON: ${parseError}\n` + + 'Output: ' + + stdout + ) + ); + } + }); + + supabase.on('error', (err) => { + reject( + new Error( + `Failed to run supabase status: ${err.message}.\n` + + 'Make sure Supabase CLI is installed.' 
+ ) + ); + }); + }); } /** - * Creates a task log entry with a command and its output + * Fetch flow SQL from ControlPlane HTTP endpoint */ -function createTaskLog( - command: string, - args: string[], - output: string -): string { - return [ - chalk.bold('Command:'), - formatCommand(command, args), - '', - chalk.bold('Output:'), - output.trim() ? output.trim() : '(no output)', - ].join('\n'); +export async function fetchFlowSQL( + flowSlug: string, + baseUrl: string, + anonKey: string +): Promise<{ flowSlug: string; sql: string[] }> { + const url = `${baseUrl}/functions/v1/pgflow/flows/${flowSlug}`; + + try { + const response = await fetch(url, { + headers: { + Authorization: `Bearer ${anonKey}`, + 'Content-Type': 'application/json', + }, + }); + + if (response.status === 404) { + const errorData = await response.json(); + throw new Error( + `Flow '${flowSlug}' not found.\n\n` + + `${errorData.message || 'Did you add it to flows.ts?'}\n\n` + + `Fix:\n` + + `1. Add your flow to supabase/functions/pgflow/flows.ts\n` + + `2. Restart edge functions: supabase functions serve` + ); + } + + if (!response.ok) { + const errorText = await response.text(); + throw new Error(`HTTP ${response.status}: ${errorText}`); + } + + return await response.json(); + } catch (error) { + if (error instanceof Error) { + // Check for connection refused errors + if ( + error.message.includes('ECONNREFUSED') || + error.message.includes('fetch failed') + ) { + throw new Error( + 'Could not connect to ControlPlane.\n\n' + + 'Fix options:\n' + + '1. Start Supabase: supabase start\n' + + '2. Start edge functions: supabase functions serve\n\n' + + 'Or use previous version: npx pgflow@0.8.0 compile path/to/flow.ts' + ); + } + + throw error; + } + + throw new Error(`Unknown error: ${String(error)}`); + } } export default (program: Command) => { program .command('compile') - .description('Compiles a TypeScript-defined flow into SQL migration') - .argument('', 'Path to the flow TypeScript file') + .description('Compiles a flow into SQL migration via ControlPlane HTTP') + .argument('', 'Flow slug to compile (e.g., my_flow)') .option( '--deno-json ', - 'Path to deno.json configuration file' + '[DEPRECATED] No longer used. 
Will be removed in v1.0' ) .option('--supabase-path ', 'Path to the Supabase folder') - .action(async (flowPath, options) => { + .action(async (flowSlug, options) => { intro('pgflow - Compile Flow to SQL'); try { - // Resolve paths - const resolvedFlowPath = path.resolve(process.cwd(), flowPath); - - // Only resolve denoJsonPath if it's provided - let resolvedDenoJsonPath: string | undefined; + // Show deprecation warning for --deno-json if (options.denoJson) { - resolvedDenoJsonPath = path.resolve(process.cwd(), options.denoJson); - - // Validate deno.json path if provided - if (!fs.existsSync(resolvedDenoJsonPath)) { - log.error(`deno.json file not found: ${resolvedDenoJsonPath}`); - process.exit(1); - } - } - - // Validate flow path - if (!fs.existsSync(resolvedFlowPath)) { - log.error(`Flow file not found: ${resolvedFlowPath}`); - process.exit(1); + log.warn( + 'The --deno-json flag is deprecated and no longer used.\n' + + 'Flow compilation now happens via HTTP, not local Deno.\n' + + 'This flag will be removed in v1.0' + ); } // Validate Supabase path @@ -102,12 +191,6 @@ export default (program: Command) => { process.exit(1); } - // Find the internal_compile.js script - const internalCompileScript = path.resolve( - __dirname, - '../../deno/internal_compile.js' - ); - // Create migrations directory if it doesn't exist const migrationsDir = path.resolve(supabasePath, 'migrations'); if (!fs.existsSync(migrationsDir)) { @@ -115,6 +198,23 @@ export default (program: Command) => { log.success(`Created migrations directory: ${migrationsDir}`); } + // Get Supabase configuration + log.info('Getting Supabase configuration...'); + const { apiUrl, anonKey } = await getSupabaseConfig(supabasePath); + log.info(`API URL: ${apiUrl}`); + + // Fetch flow SQL from ControlPlane + log.info(`Compiling flow: ${flowSlug}`); + const result = await fetchFlowSQL(flowSlug, apiUrl, anonKey); + + // Validate result + if (!result.sql || result.sql.length === 0) { + throw new Error('ControlPlane returned empty SQL'); + } + + // Join SQL statements + const compiledSql = result.sql.join('\n') + '\n'; + // Generate timestamp for migration file in format YYYYMMDDHHMMSS using UTC const now = new Date(); const timestamp = [ @@ -126,42 +226,13 @@ export default (program: Command) => { String(now.getUTCSeconds()).padStart(2, '0'), ].join(''); - // Run the compilation - log.info(`Compiling flow: ${path.basename(resolvedFlowPath)}`); - const compiledSql = await runDenoCompilation( - internalCompileScript, - resolvedFlowPath, - resolvedDenoJsonPath - ); - - // Extract flow name from the first line of the SQL output using regex - // Looking for pattern: SELECT pgflow.create_flow('flow_name', ...); - const flowNameMatch = compiledSql.match( - /SELECT\s+pgflow\.create_flow\s*\(\s*'([^']+)'/i - ); - - // Use extracted flow name or fallback to the file basename if extraction fails - let flowName; - if (flowNameMatch && flowNameMatch[1]) { - flowName = flowNameMatch[1]; - log.info(`Extracted flow name: ${flowName}`); - } else { - // Fallback to file basename if regex doesn't match - flowName = path.basename( - resolvedFlowPath, - path.extname(resolvedFlowPath) - ); - log.warn( - `Could not extract flow name from SQL, using file basename: ${flowName}` - ); - } - - // Create migration filename in the format: _create__flow.sql - const migrationFileName = `${timestamp}_create_${flowName}_flow.sql`; + // Create migration filename in the format: _create_.sql + const migrationFileName = `${timestamp}_create_${flowSlug}.sql`; const 
migrationFilePath = path.join(migrationsDir, migrationFileName); // Write the SQL to a migration file fs.writeFileSync(migrationFilePath, compiledSql); + // Show the migration file path relative to the current directory const relativeFilePath = path.relative( process.cwd(), @@ -177,7 +248,7 @@ export default (program: Command) => { `- Run ${chalk.cyan('supabase migration up')} to apply the migration`, '', chalk.bold('Continue the setup:'), - chalk.blue.underline('https://pgflow.dev/getting-started/run-flow/') + chalk.blue.underline('https://pgflow.dev/getting-started/run-flow/'), ].join('\n') ); } catch (error) { @@ -192,7 +263,9 @@ export default (program: Command) => { chalk.bold('Compilation failed!'), '', chalk.bold('For troubleshooting help:'), - chalk.blue.underline('https://pgflow.dev/getting-started/compile-to-sql/') + chalk.blue.underline( + 'https://pgflow.dev/getting-started/compile-to-sql/' + ), ].join('\n') ); @@ -200,79 +273,3 @@ export default (program: Command) => { } }); }; - -/** - * Runs the Deno compilation script and returns the compiled SQL - */ -async function runDenoCompilation( - scriptPath: string, - flowPath: string, - denoJsonPath?: string -): Promise { - return new Promise((resolve, reject) => { - // Validate input paths - if (!scriptPath) { - return reject(new Error('Internal script path is required')); - } - - if (!flowPath) { - return reject(new Error('Flow path is required')); - } - - // Build the command arguments array - const args = ['run', '--allow-read', '--allow-net', '--allow-env']; - - // Only add the config argument if denoJsonPath is provided and valid - if (denoJsonPath && typeof denoJsonPath === 'string') { - args.push(`--config=${denoJsonPath}`); - } - - // Add the script path and flow path - args.push(scriptPath, flowPath); - - // Log the command for debugging with colored output - log.info('Running Deno compiler'); - - const deno = spawn('deno', args); - - let stdout = ''; - let stderr = ''; - - deno.stdout.on('data', (data) => { - stdout += data.toString(); - }); - - deno.stderr.on('data', (data) => { - stderr += data.toString(); - }); - - deno.on('close', (code) => { - // Always display the task log with command and output - note(createTaskLog('deno', args, stdout)); - - if (code === 0) { - if (stdout.trim().length === 0) { - reject(new Error('Compilation produced no output')); - } else { - resolve(stdout); - } - } else { - reject( - new Error( - `Deno process exited with code ${code}${ - stderr ? `\n${stderr}` : '' - }` - ) - ); - } - }); - - deno.on('error', (err) => { - reject( - new Error( - `Failed to start Deno process: ${err.message}. 
Make sure Deno is installed.` - ) - ); - }); - }); -} diff --git a/pkgs/cli/src/commands/install/create-edge-function.ts b/pkgs/cli/src/commands/install/create-edge-function.ts new file mode 100644 index 000000000..655b67f34 --- /dev/null +++ b/pkgs/cli/src/commands/install/create-edge-function.ts @@ -0,0 +1,132 @@ +import fs from 'fs'; +import path from 'path'; +import { log, confirm, note } from '@clack/prompts'; +import chalk from 'chalk'; + +const INDEX_TS_TEMPLATE = `import { ControlPlane } from '@pgflow/edge-worker'; +import { flows } from './flows.ts'; + +ControlPlane.serve(flows); +`; + +const FLOWS_TS_TEMPLATE = `// Import your flows here +// import { MyFlow } from '../_flows/my_flow.ts'; + +// Export flows array for ControlPlane +export const flows = [ + // MyFlow, +]; +`; + +const DENO_JSON_TEMPLATE = `{ + "imports": { + "@pgflow/core": "jsr:@pgflow/core@latest", + "@pgflow/core/": "jsr:@pgflow/core/", + "@pgflow/dsl": "jsr:@pgflow/dsl@latest", + "@pgflow/dsl/": "jsr:@pgflow/dsl/", + "@pgflow/dsl/supabase": "jsr:@pgflow/dsl/supabase", + "@pgflow/edge-worker": "jsr:@pgflow/edge-worker@latest", + "@pgflow/edge-worker/": "jsr:@pgflow/edge-worker/", + "@pgflow/edge-worker/_internal": "jsr:@pgflow/edge-worker/_internal" + } +} +`; + +export async function createEdgeFunction({ + supabasePath, + autoConfirm = false, +}: { + supabasePath: string; + autoConfirm?: boolean; +}): Promise<boolean> { + const functionsDir = path.join(supabasePath, 'functions'); + const pgflowFunctionDir = path.join(functionsDir, 'pgflow'); + + const indexPath = path.join(pgflowFunctionDir, 'index.ts'); + const flowsPath = path.join(pgflowFunctionDir, 'flows.ts'); + const denoJsonPath = path.join(pgflowFunctionDir, 'deno.json'); + + // Check what needs to be created + const filesToCreate: Array<{ path: string; name: string }> = []; + + if (!fs.existsSync(indexPath)) { + filesToCreate.push({ path: indexPath, name: 'index.ts' }); + } + + if (!fs.existsSync(flowsPath)) { + filesToCreate.push({ path: flowsPath, name: 'flows.ts' }); + } + + if (!fs.existsSync(denoJsonPath)) { + filesToCreate.push({ path: denoJsonPath, name: 'deno.json' }); + } + + // If all files exist, return success + if (filesToCreate.length === 0) { + log.success('ControlPlane edge function files are already in place'); + + const detailedMsg = [ + 'Existing files:', + ` ${chalk.dim('•')} ${chalk.bold('supabase/functions/pgflow/index.ts')}`, + ` ${chalk.dim('•')} ${chalk.bold('supabase/functions/pgflow/flows.ts')}`, + ` ${chalk.dim('•')} ${chalk.bold('supabase/functions/pgflow/deno.json')}`, + ].join('\n'); + + note(detailedMsg, 'ControlPlane Edge Function'); + + return false; + } + + // Show what will be created + log.info(`Found ${filesToCreate.length} file${filesToCreate.length !== 1 ? 's' : ''} to create`); + + const summaryParts = [`${chalk.green('Files to create:')}\n${filesToCreate + .map((file) => `${chalk.green('+')} ${file.name}`) + .join('\n')}`]; + + note(summaryParts.join('\n'), 'ControlPlane Edge Function'); + + // Get confirmation + let shouldContinue = autoConfirm; + + if (!autoConfirm) { + const confirmResult = await confirm({ + message: `Create ${filesToCreate.length} file${filesToCreate.length !== 1 ?
's' : ''}?`, + }); + + shouldContinue = confirmResult === true; + } + + if (!shouldContinue) { + log.warn('Edge function setup skipped'); + return false; + } + + // Create the directory if it doesn't exist + if (!fs.existsSync(pgflowFunctionDir)) { + fs.mkdirSync(pgflowFunctionDir, { recursive: true }); + } + + // Create files + if (filesToCreate.some((f) => f.path === indexPath)) { + fs.writeFileSync(indexPath, INDEX_TS_TEMPLATE); + } + + if (filesToCreate.some((f) => f.path === flowsPath)) { + fs.writeFileSync(flowsPath, FLOWS_TS_TEMPLATE); + } + + if (filesToCreate.some((f) => f.path === denoJsonPath)) { + fs.writeFileSync(denoJsonPath, DENO_JSON_TEMPLATE); + } + + // Show success message + const detailedSuccessMsg = [ + `Created ${filesToCreate.length} file${filesToCreate.length !== 1 ? 's' : ''}:`, + ...filesToCreate.map((file) => ` ${chalk.bold(file.name)}`), + ].join('\n'); + + log.success(detailedSuccessMsg); + + return true; +} diff --git a/pkgs/cli/src/commands/install/index.ts b/pkgs/cli/src/commands/install/index.ts index cdf874c94..c7b732e77 100644 --- a/pkgs/cli/src/commands/install/index.ts +++ b/pkgs/cli/src/commands/install/index.ts @@ -4,6 +4,7 @@ import chalk from 'chalk'; import { copyMigrations } from './copy-migrations.js'; import { updateConfigToml } from './update-config-toml.js'; import { updateEnvFile } from './update-env-file.js'; +import { createEdgeFunction } from './create-edge-function.js'; import { supabasePathPrompt } from './supabase-path-prompt.js'; export default (program: Command) => { @@ -42,7 +43,17 @@ export default (program: Command) => { }); }, - // Step 4: Update environment variables + // Step 4: Create ControlPlane edge function + edgeFunction: async ({ results: { supabasePath } }) => { + if (!supabasePath) return false; + + return await createEdgeFunction({ + supabasePath, + autoConfirm: options.yes, + }); + }, + + // Step 5: Update environment variables envFile: async ({ results: { supabasePath } }) => { if (!supabasePath) return false; @@ -65,6 +76,7 @@ export default (program: Command) => { const supabasePath = results.supabasePath; const configUpdate = results.configUpdate; const migrations = results.migrations; + const edgeFunction = results.edgeFunction; const envFile = results.envFile; // Exit if supabasePath is null (validation failed or user cancelled) @@ -77,7 +89,7 @@ export default (program: Command) => { const outroMessages = []; // Always start with a bolded acknowledgement - if (migrations || configUpdate || envFile) { + if (migrations || configUpdate || edgeFunction || envFile) { outroMessages.push(chalk.bold('pgflow setup completed successfully!')); } else { outroMessages.push( diff --git a/pkgs/cli/src/deno/internal_compile.js b/pkgs/cli/src/deno/internal_compile.js deleted file mode 100644 index 20170dcea..000000000 --- a/pkgs/cli/src/deno/internal_compile.js +++ /dev/null @@ -1,55 +0,0 @@ -/** - * internal_compile.js - * - * This script is executed by Deno to compile a Flow into SQL statements. - * It takes a path to a flow file as an argument, imports the default export, - * and passes it to compileFlow() from the DSL package. 
- */ - -// Import the compileFlow function directly from @pgflow/dsl -// The import map in deno.json will resolve this import -import { compileFlow } from '@pgflow/dsl'; - -// Get the flow file path from command line arguments -const flowFilePath = Deno.args[0]; - -if (!flowFilePath) { - console.error('Error: No flow file path provided'); - Deno.exit(1); -} - -try { - // Dynamically import the flow file - const flowModule = await import(`file://${flowFilePath}`); - - // Check if there's a default export - if (!flowModule.default) { - console.error(`Error: No default export found in ${flowFilePath}`); - Deno.exit(1); - } - - // Get the flow instance - const flow = flowModule.default; - - // Compile the flow to SQL - const sqlStatements = compileFlow(flow); - - // Output the SQL statements to stdout - console.log(sqlStatements.join('\n')); -} catch (error) { - console.error(`Error compiling flow: ${error.message}`); - - // If the error is related to importing compileFlow, provide more detailed error - if (error.message.includes('@pgflow/dsl')) { - console.error( - 'Failed to import compileFlow from @pgflow/dsl. This might be due to:' - ); - console.error( - '1. The function not being exported correctly from the package' - ); - console.error('2. A version mismatch between the CLI and DSL packages'); - console.error('3. Issues with the Deno import map configuration'); - } - - Deno.exit(1); -} diff --git a/pkgs/cli/supabase/.gitignore b/pkgs/cli/supabase/.gitignore new file mode 100644 index 000000000..ad9264f0b --- /dev/null +++ b/pkgs/cli/supabase/.gitignore @@ -0,0 +1,8 @@ +# Supabase +.branches +.temp + +# dotenvx +.env.keys +.env.local +.env.*.local diff --git a/pkgs/cli/supabase/config.toml b/pkgs/cli/supabase/config.toml new file mode 100644 index 000000000..9c8ffe1a7 --- /dev/null +++ b/pkgs/cli/supabase/config.toml @@ -0,0 +1,357 @@ +# For detailed configuration reference documentation, visit: +# https://supabase.com/docs/guides/local-development/cli/config +# A string used to distinguish different Supabase projects on the same host. Defaults to the +# working directory name when running `supabase init`. +project_id = "cli" + +[api] +enabled = true +# Port to use for the API URL. +port = 54321 +# Schemas to expose in your API. Tables, views and stored procedures in this schema will get API +# endpoints. `public` and `graphql_public` schemas are included by default. +schemas = ["public", "graphql_public"] +# Extra schemas to add to the search_path of every request. +extra_search_path = ["public", "extensions"] +# The maximum number of rows returns from a view, table, or stored procedure. Limits payload size +# for accidental or malicious requests. +max_rows = 1000 + +[api.tls] +# Enable HTTPS endpoints locally using a self-signed certificate. +enabled = false +# Paths to self-signed certificate pair. +# cert_path = "../certs/my-cert.pem" +# key_path = "../certs/my-key.pem" + +[db] +# Port to use for the local database URL. +port = 54322 +# Port used by db diff command to initialize the shadow database. +shadow_port = 54320 +# The database major version to use. This has to be the same as your remote database's. Run `SHOW +# server_version;` on the remote database to check. +major_version = 17 + +[db.pooler] +enabled = true +# Port to use for the local connection pooler. +port = 54329 +# Specifies when a server connection can be reused by other clients. +# Configure one of the supported pooler modes: `transaction`, `session`. 
+pool_mode = "transaction" +# How many server connections to allow per user/database pair. +default_pool_size = 20 +# Maximum number of client connections allowed. +max_client_conn = 100 + +# [db.vault] +# secret_key = "env(SECRET_VALUE)" + +[db.migrations] +# If disabled, migrations will be skipped during a db push or reset. +enabled = true +# Specifies an ordered list of schema files that describe your database. +# Supports glob patterns relative to supabase directory: "./schemas/*.sql" +schema_paths = [] + +[db.seed] +# If enabled, seeds the database after migrations during a db reset. +enabled = true +# Specifies an ordered list of seed files to load during db reset. +# Supports glob patterns relative to supabase directory: "./seeds/*.sql" +sql_paths = ["./seed.sql"] + +[db.network_restrictions] +# Enable management of network restrictions. +enabled = false +# List of IPv4 CIDR blocks allowed to connect to the database. +# Defaults to allow all IPv4 connections. Set empty array to block all IPs. +allowed_cidrs = ["0.0.0.0/0"] +# List of IPv6 CIDR blocks allowed to connect to the database. +# Defaults to allow all IPv6 connections. Set empty array to block all IPs. +allowed_cidrs_v6 = ["::/0"] + +[realtime] +enabled = true +# Bind realtime via either IPv4 or IPv6. (default: IPv4) +# ip_version = "IPv6" +# The maximum length in bytes of HTTP request headers. (default: 4096) +# max_header_length = 4096 + +[studio] +enabled = true +# Port to use for Supabase Studio. +port = 54323 +# External URL of the API server that frontend connects to. +api_url = "http://127.0.0.1" +# OpenAI API Key to use for Supabase AI in the Supabase Studio. +openai_api_key = "env(OPENAI_API_KEY)" + +# Email testing server. Emails sent with the local dev setup are not actually sent - rather, they +# are monitored, and you can view the emails that would have been sent from the web interface. +[inbucket] +enabled = true +# Port to use for the email testing server web interface. +port = 54324 +# Uncomment to expose additional ports for testing user applications that send emails. +# smtp_port = 54325 +# pop3_port = 54326 +# admin_email = "admin@email.com" +# sender_name = "Admin" + +[storage] +enabled = true +# The maximum file size allowed (e.g. "5MB", "500KB"). +file_size_limit = "50MiB" + +# Image transformation API is available to Supabase Pro plan. +# [storage.image_transformation] +# enabled = true + +# Uncomment to configure local storage buckets +# [storage.buckets.images] +# public = false +# file_size_limit = "50MiB" +# allowed_mime_types = ["image/png", "image/jpeg"] +# objects_path = "./images" + +[auth] +enabled = true +# The base URL of your website. Used as an allow-list for redirects and for constructing URLs used +# in emails. +site_url = "http://127.0.0.1:3000" +# A list of *exact* URLs that auth providers are permitted to redirect to post authentication. +additional_redirect_urls = ["https://127.0.0.1:3000"] +# How long tokens are valid for, in seconds. Defaults to 3600 (1 hour), maximum 604,800 (1 week). +jwt_expiry = 3600 +# JWT issuer URL. If not set, defaults to the local API URL (http://127.0.0.1:/auth/v1). +# jwt_issuer = "" +# Path to JWT signing key. DO NOT commit your signing keys file to git. +# signing_keys_path = "./signing_keys.json" +# If disabled, the refresh token will never expire. +enable_refresh_token_rotation = true +# Allows refresh tokens to be reused after expiry, up to the specified interval in seconds. +# Requires enable_refresh_token_rotation = true. 
+refresh_token_reuse_interval = 10 +# Allow/disallow new user signups to your project. +enable_signup = true +# Allow/disallow anonymous sign-ins to your project. +enable_anonymous_sign_ins = false +# Allow/disallow testing manual linking of accounts +enable_manual_linking = false +# Passwords shorter than this value will be rejected as weak. Minimum 6, recommended 8 or more. +minimum_password_length = 6 +# Passwords that do not meet the following requirements will be rejected as weak. Supported values +# are: `letters_digits`, `lower_upper_letters_digits`, `lower_upper_letters_digits_symbols` +password_requirements = "" + +[auth.rate_limit] +# Number of emails that can be sent per hour. Requires auth.email.smtp to be enabled. +email_sent = 2 +# Number of SMS messages that can be sent per hour. Requires auth.sms to be enabled. +sms_sent = 30 +# Number of anonymous sign-ins that can be made per hour per IP address. Requires enable_anonymous_sign_ins = true. +anonymous_users = 30 +# Number of sessions that can be refreshed in a 5 minute interval per IP address. +token_refresh = 150 +# Number of sign up and sign-in requests that can be made in a 5 minute interval per IP address (excludes anonymous users). +sign_in_sign_ups = 30 +# Number of OTP / Magic link verifications that can be made in a 5 minute interval per IP address. +token_verifications = 30 +# Number of Web3 logins that can be made in a 5 minute interval per IP address. +web3 = 30 + +# Configure one of the supported captcha providers: `hcaptcha`, `turnstile`. +# [auth.captcha] +# enabled = true +# provider = "hcaptcha" +# secret = "" + +[auth.email] +# Allow/disallow new user signups via email to your project. +enable_signup = true +# If enabled, a user will be required to confirm any email change on both the old, and new email +# addresses. If disabled, only the new email is required to confirm. +double_confirm_changes = true +# If enabled, users need to confirm their email address before signing in. +enable_confirmations = false +# If enabled, users will need to reauthenticate or have logged in recently to change their password. +secure_password_change = false +# Controls the minimum amount of time that must pass before sending another signup confirmation or password reset email. +max_frequency = "1s" +# Number of characters used in the email OTP. +otp_length = 6 +# Number of seconds before the email OTP expires (defaults to 1 hour). +otp_expiry = 3600 + +# Use a production-ready SMTP server +# [auth.email.smtp] +# enabled = true +# host = "smtp.sendgrid.net" +# port = 587 +# user = "apikey" +# pass = "env(SENDGRID_API_KEY)" +# admin_email = "admin@email.com" +# sender_name = "Admin" + +# Uncomment to customize email template +# [auth.email.template.invite] +# subject = "You have been invited" +# content_path = "./supabase/templates/invite.html" + +# Uncomment to customize notification email template +# [auth.email.notification.password_changed] +# enabled = true +# subject = "Your password has been changed" +# content_path = "./templates/password_changed_notification.html" + +[auth.sms] +# Allow/disallow new user signups via SMS to your project. +enable_signup = false +# If enabled, users need to confirm their phone number before signing in. +enable_confirmations = false +# Template for sending OTP to users +template = "Your code is {{ .Code }}" +# Controls the minimum amount of time that must pass before sending another sms otp. +max_frequency = "5s" + +# Use pre-defined map of phone number to OTP for testing. 
+# [auth.sms.test_otp] +# 4152127777 = "123456" + +# Configure logged in session timeouts. +# [auth.sessions] +# Force log out after the specified duration. +# timebox = "24h" +# Force log out if the user has been inactive longer than the specified duration. +# inactivity_timeout = "8h" + +# This hook runs before a new user is created and allows developers to reject the request based on the incoming user object. +# [auth.hook.before_user_created] +# enabled = true +# uri = "pg-functions://postgres/auth/before-user-created-hook" + +# This hook runs before a token is issued and allows you to add additional claims based on the authentication method used. +# [auth.hook.custom_access_token] +# enabled = true +# uri = "pg-functions:////" + +# Configure one of the supported SMS providers: `twilio`, `twilio_verify`, `messagebird`, `textlocal`, `vonage`. +[auth.sms.twilio] +enabled = false +account_sid = "" +message_service_sid = "" +# DO NOT commit your Twilio auth token to git. Use environment variable substitution instead: +auth_token = "env(SUPABASE_AUTH_SMS_TWILIO_AUTH_TOKEN)" + +# Multi-factor-authentication is available to Supabase Pro plan. +[auth.mfa] +# Control how many MFA factors can be enrolled at once per user. +max_enrolled_factors = 10 + +# Control MFA via App Authenticator (TOTP) +[auth.mfa.totp] +enroll_enabled = false +verify_enabled = false + +# Configure MFA via Phone Messaging +[auth.mfa.phone] +enroll_enabled = false +verify_enabled = false +otp_length = 6 +template = "Your code is {{ .Code }}" +max_frequency = "5s" + +# Configure MFA via WebAuthn +# [auth.mfa.web_authn] +# enroll_enabled = true +# verify_enabled = true + +# Use an external OAuth provider. The full list of providers are: `apple`, `azure`, `bitbucket`, +# `discord`, `facebook`, `github`, `gitlab`, `google`, `keycloak`, `linkedin_oidc`, `notion`, `twitch`, +# `twitter`, `slack`, `spotify`, `workos`, `zoom`. +[auth.external.apple] +enabled = false +client_id = "" +# DO NOT commit your OAuth provider secret to git. Use environment variable substitution instead: +secret = "env(SUPABASE_AUTH_EXTERNAL_APPLE_SECRET)" +# Overrides the default auth redirectUrl. +redirect_uri = "" +# Overrides the default auth provider URL. Used to support self-hosted gitlab, single-tenant Azure, +# or any other third-party OIDC providers. +url = "" +# If enabled, the nonce check will be skipped. Required for local sign in with Google auth. +skip_nonce_check = false +# If enabled, it will allow the user to successfully authenticate when the provider does not return an email address. +email_optional = false + +# Allow Solana wallet holders to sign in to your project via the Sign in with Solana (SIWS, EIP-4361) standard. +# You can configure "web3" rate limit in the [auth.rate_limit] section and set up [auth.captcha] if self-hosting. +[auth.web3.solana] +enabled = false + +# Use Firebase Auth as a third-party provider alongside Supabase Auth. +[auth.third_party.firebase] +enabled = false +# project_id = "my-firebase-project" + +# Use Auth0 as a third-party provider alongside Supabase Auth. +[auth.third_party.auth0] +enabled = false +# tenant = "my-auth0-tenant" +# tenant_region = "us" + +# Use AWS Cognito (Amplify) as a third-party provider alongside Supabase Auth. +[auth.third_party.aws_cognito] +enabled = false +# user_pool_id = "my-user-pool-id" +# user_pool_region = "us-east-1" + +# Use Clerk as a third-party provider alongside Supabase Auth. 
+[auth.third_party.clerk] +enabled = false +# Obtain from https://clerk.com/setup/supabase +# domain = "example.clerk.accounts.dev" + +# OAuth server configuration +[auth.oauth_server] +# Enable OAuth server functionality +enabled = false +# Path for OAuth consent flow UI +authorization_url_path = "/oauth/consent" +# Allow dynamic client registration +allow_dynamic_registration = false + +[edge_runtime] +enabled = true +# Supported request policies: `oneshot`, `per_worker`. +# `per_worker` (default) โ€” enables hot reload during local development. +# `oneshot` โ€” fallback mode if hot reload causes issues (e.g. in large repos or with symlinks). +policy = "per_worker" +# Port to attach the Chrome inspector for debugging edge functions. +inspector_port = 8083 +# The Deno major version to use. +deno_version = 2 + +# [edge_runtime.secrets] +# secret_key = "env(SECRET_VALUE)" + +[analytics] +enabled = true +port = 54327 +# Configure one of the supported backends: `postgres`, `bigquery`. +backend = "postgres" + +# Experimental features may be deprecated any time +[experimental] +# Configures Postgres storage engine to use OrioleDB (S3) +orioledb_version = "" +# Configures S3 bucket URL, eg. .s3-.amazonaws.com +s3_host = "env(S3_HOST)" +# Configures S3 bucket region, eg. us-east-1 +s3_region = "env(S3_REGION)" +# Configures AWS_ACCESS_KEY_ID for S3 bucket +s3_access_key = "env(S3_ACCESS_KEY)" +# Configures AWS_SECRET_ACCESS_KEY for S3 bucket +s3_secret_key = "env(S3_SECRET_KEY)" diff --git a/pkgs/cli/supabase/config.toml.backup b/pkgs/cli/supabase/config.toml.backup new file mode 100644 index 000000000..f6332412e --- /dev/null +++ b/pkgs/cli/supabase/config.toml.backup @@ -0,0 +1,357 @@ +# For detailed configuration reference documentation, visit: +# https://supabase.com/docs/guides/local-development/cli/config +# A string used to distinguish different Supabase projects on the same host. Defaults to the +# working directory name when running `supabase init`. +project_id = "cli" + +[api] +enabled = true +# Port to use for the API URL. +port = 54321 +# Schemas to expose in your API. Tables, views and stored procedures in this schema will get API +# endpoints. `public` and `graphql_public` schemas are included by default. +schemas = ["public", "graphql_public"] +# Extra schemas to add to the search_path of every request. +extra_search_path = ["public", "extensions"] +# The maximum number of rows returns from a view, table, or stored procedure. Limits payload size +# for accidental or malicious requests. +max_rows = 1000 + +[api.tls] +# Enable HTTPS endpoints locally using a self-signed certificate. +enabled = false +# Paths to self-signed certificate pair. +# cert_path = "../certs/my-cert.pem" +# key_path = "../certs/my-key.pem" + +[db] +# Port to use for the local database URL. +port = 54322 +# Port used by db diff command to initialize the shadow database. +shadow_port = 54320 +# The database major version to use. This has to be the same as your remote database's. Run `SHOW +# server_version;` on the remote database to check. +major_version = 17 + +[db.pooler] +enabled = false +# Port to use for the local connection pooler. +port = 54329 +# Specifies when a server connection can be reused by other clients. +# Configure one of the supported pooler modes: `transaction`, `session`. +pool_mode = "transaction" +# How many server connections to allow per user/database pair. +default_pool_size = 20 +# Maximum number of client connections allowed. 
+max_client_conn = 100 + +# [db.vault] +# secret_key = "env(SECRET_VALUE)" + +[db.migrations] +# If disabled, migrations will be skipped during a db push or reset. +enabled = true +# Specifies an ordered list of schema files that describe your database. +# Supports glob patterns relative to supabase directory: "./schemas/*.sql" +schema_paths = [] + +[db.seed] +# If enabled, seeds the database after migrations during a db reset. +enabled = true +# Specifies an ordered list of seed files to load during db reset. +# Supports glob patterns relative to supabase directory: "./seeds/*.sql" +sql_paths = ["./seed.sql"] + +[db.network_restrictions] +# Enable management of network restrictions. +enabled = false +# List of IPv4 CIDR blocks allowed to connect to the database. +# Defaults to allow all IPv4 connections. Set empty array to block all IPs. +allowed_cidrs = ["0.0.0.0/0"] +# List of IPv6 CIDR blocks allowed to connect to the database. +# Defaults to allow all IPv6 connections. Set empty array to block all IPs. +allowed_cidrs_v6 = ["::/0"] + +[realtime] +enabled = true +# Bind realtime via either IPv4 or IPv6. (default: IPv4) +# ip_version = "IPv6" +# The maximum length in bytes of HTTP request headers. (default: 4096) +# max_header_length = 4096 + +[studio] +enabled = true +# Port to use for Supabase Studio. +port = 54323 +# External URL of the API server that frontend connects to. +api_url = "http://127.0.0.1" +# OpenAI API Key to use for Supabase AI in the Supabase Studio. +openai_api_key = "env(OPENAI_API_KEY)" + +# Email testing server. Emails sent with the local dev setup are not actually sent - rather, they +# are monitored, and you can view the emails that would have been sent from the web interface. +[inbucket] +enabled = true +# Port to use for the email testing server web interface. +port = 54324 +# Uncomment to expose additional ports for testing user applications that send emails. +# smtp_port = 54325 +# pop3_port = 54326 +# admin_email = "admin@email.com" +# sender_name = "Admin" + +[storage] +enabled = true +# The maximum file size allowed (e.g. "5MB", "500KB"). +file_size_limit = "50MiB" + +# Image transformation API is available to Supabase Pro plan. +# [storage.image_transformation] +# enabled = true + +# Uncomment to configure local storage buckets +# [storage.buckets.images] +# public = false +# file_size_limit = "50MiB" +# allowed_mime_types = ["image/png", "image/jpeg"] +# objects_path = "./images" + +[auth] +enabled = true +# The base URL of your website. Used as an allow-list for redirects and for constructing URLs used +# in emails. +site_url = "http://127.0.0.1:3000" +# A list of *exact* URLs that auth providers are permitted to redirect to post authentication. +additional_redirect_urls = ["https://127.0.0.1:3000"] +# How long tokens are valid for, in seconds. Defaults to 3600 (1 hour), maximum 604,800 (1 week). +jwt_expiry = 3600 +# JWT issuer URL. If not set, defaults to the local API URL (http://127.0.0.1:/auth/v1). +# jwt_issuer = "" +# Path to JWT signing key. DO NOT commit your signing keys file to git. +# signing_keys_path = "./signing_keys.json" +# If disabled, the refresh token will never expire. +enable_refresh_token_rotation = true +# Allows refresh tokens to be reused after expiry, up to the specified interval in seconds. +# Requires enable_refresh_token_rotation = true. +refresh_token_reuse_interval = 10 +# Allow/disallow new user signups to your project. +enable_signup = true +# Allow/disallow anonymous sign-ins to your project. 
+enable_anonymous_sign_ins = false +# Allow/disallow testing manual linking of accounts +enable_manual_linking = false +# Passwords shorter than this value will be rejected as weak. Minimum 6, recommended 8 or more. +minimum_password_length = 6 +# Passwords that do not meet the following requirements will be rejected as weak. Supported values +# are: `letters_digits`, `lower_upper_letters_digits`, `lower_upper_letters_digits_symbols` +password_requirements = "" + +[auth.rate_limit] +# Number of emails that can be sent per hour. Requires auth.email.smtp to be enabled. +email_sent = 2 +# Number of SMS messages that can be sent per hour. Requires auth.sms to be enabled. +sms_sent = 30 +# Number of anonymous sign-ins that can be made per hour per IP address. Requires enable_anonymous_sign_ins = true. +anonymous_users = 30 +# Number of sessions that can be refreshed in a 5 minute interval per IP address. +token_refresh = 150 +# Number of sign up and sign-in requests that can be made in a 5 minute interval per IP address (excludes anonymous users). +sign_in_sign_ups = 30 +# Number of OTP / Magic link verifications that can be made in a 5 minute interval per IP address. +token_verifications = 30 +# Number of Web3 logins that can be made in a 5 minute interval per IP address. +web3 = 30 + +# Configure one of the supported captcha providers: `hcaptcha`, `turnstile`. +# [auth.captcha] +# enabled = true +# provider = "hcaptcha" +# secret = "" + +[auth.email] +# Allow/disallow new user signups via email to your project. +enable_signup = true +# If enabled, a user will be required to confirm any email change on both the old, and new email +# addresses. If disabled, only the new email is required to confirm. +double_confirm_changes = true +# If enabled, users need to confirm their email address before signing in. +enable_confirmations = false +# If enabled, users will need to reauthenticate or have logged in recently to change their password. +secure_password_change = false +# Controls the minimum amount of time that must pass before sending another signup confirmation or password reset email. +max_frequency = "1s" +# Number of characters used in the email OTP. +otp_length = 6 +# Number of seconds before the email OTP expires (defaults to 1 hour). +otp_expiry = 3600 + +# Use a production-ready SMTP server +# [auth.email.smtp] +# enabled = true +# host = "smtp.sendgrid.net" +# port = 587 +# user = "apikey" +# pass = "env(SENDGRID_API_KEY)" +# admin_email = "admin@email.com" +# sender_name = "Admin" + +# Uncomment to customize email template +# [auth.email.template.invite] +# subject = "You have been invited" +# content_path = "./supabase/templates/invite.html" + +# Uncomment to customize notification email template +# [auth.email.notification.password_changed] +# enabled = true +# subject = "Your password has been changed" +# content_path = "./templates/password_changed_notification.html" + +[auth.sms] +# Allow/disallow new user signups via SMS to your project. +enable_signup = false +# If enabled, users need to confirm their phone number before signing in. +enable_confirmations = false +# Template for sending OTP to users +template = "Your code is {{ .Code }}" +# Controls the minimum amount of time that must pass before sending another sms otp. +max_frequency = "5s" + +# Use pre-defined map of phone number to OTP for testing. +# [auth.sms.test_otp] +# 4152127777 = "123456" + +# Configure logged in session timeouts. +# [auth.sessions] +# Force log out after the specified duration. 
+# timebox = "24h" +# Force log out if the user has been inactive longer than the specified duration. +# inactivity_timeout = "8h" + +# This hook runs before a new user is created and allows developers to reject the request based on the incoming user object. +# [auth.hook.before_user_created] +# enabled = true +# uri = "pg-functions://postgres/auth/before-user-created-hook" + +# This hook runs before a token is issued and allows you to add additional claims based on the authentication method used. +# [auth.hook.custom_access_token] +# enabled = true +# uri = "pg-functions:////" + +# Configure one of the supported SMS providers: `twilio`, `twilio_verify`, `messagebird`, `textlocal`, `vonage`. +[auth.sms.twilio] +enabled = false +account_sid = "" +message_service_sid = "" +# DO NOT commit your Twilio auth token to git. Use environment variable substitution instead: +auth_token = "env(SUPABASE_AUTH_SMS_TWILIO_AUTH_TOKEN)" + +# Multi-factor-authentication is available to Supabase Pro plan. +[auth.mfa] +# Control how many MFA factors can be enrolled at once per user. +max_enrolled_factors = 10 + +# Control MFA via App Authenticator (TOTP) +[auth.mfa.totp] +enroll_enabled = false +verify_enabled = false + +# Configure MFA via Phone Messaging +[auth.mfa.phone] +enroll_enabled = false +verify_enabled = false +otp_length = 6 +template = "Your code is {{ .Code }}" +max_frequency = "5s" + +# Configure MFA via WebAuthn +# [auth.mfa.web_authn] +# enroll_enabled = true +# verify_enabled = true + +# Use an external OAuth provider. The full list of providers are: `apple`, `azure`, `bitbucket`, +# `discord`, `facebook`, `github`, `gitlab`, `google`, `keycloak`, `linkedin_oidc`, `notion`, `twitch`, +# `twitter`, `slack`, `spotify`, `workos`, `zoom`. +[auth.external.apple] +enabled = false +client_id = "" +# DO NOT commit your OAuth provider secret to git. Use environment variable substitution instead: +secret = "env(SUPABASE_AUTH_EXTERNAL_APPLE_SECRET)" +# Overrides the default auth redirectUrl. +redirect_uri = "" +# Overrides the default auth provider URL. Used to support self-hosted gitlab, single-tenant Azure, +# or any other third-party OIDC providers. +url = "" +# If enabled, the nonce check will be skipped. Required for local sign in with Google auth. +skip_nonce_check = false +# If enabled, it will allow the user to successfully authenticate when the provider does not return an email address. +email_optional = false + +# Allow Solana wallet holders to sign in to your project via the Sign in with Solana (SIWS, EIP-4361) standard. +# You can configure "web3" rate limit in the [auth.rate_limit] section and set up [auth.captcha] if self-hosting. +[auth.web3.solana] +enabled = false + +# Use Firebase Auth as a third-party provider alongside Supabase Auth. +[auth.third_party.firebase] +enabled = false +# project_id = "my-firebase-project" + +# Use Auth0 as a third-party provider alongside Supabase Auth. +[auth.third_party.auth0] +enabled = false +# tenant = "my-auth0-tenant" +# tenant_region = "us" + +# Use AWS Cognito (Amplify) as a third-party provider alongside Supabase Auth. +[auth.third_party.aws_cognito] +enabled = false +# user_pool_id = "my-user-pool-id" +# user_pool_region = "us-east-1" + +# Use Clerk as a third-party provider alongside Supabase Auth. 
+[auth.third_party.clerk] +enabled = false +# Obtain from https://clerk.com/setup/supabase +# domain = "example.clerk.accounts.dev" + +# OAuth server configuration +[auth.oauth_server] +# Enable OAuth server functionality +enabled = false +# Path for OAuth consent flow UI +authorization_url_path = "/oauth/consent" +# Allow dynamic client registration +allow_dynamic_registration = false + +[edge_runtime] +enabled = true +# Supported request policies: `oneshot`, `per_worker`. +# `per_worker` (default) โ€” enables hot reload during local development. +# `oneshot` โ€” fallback mode if hot reload causes issues (e.g. in large repos or with symlinks). +policy = "per_worker" +# Port to attach the Chrome inspector for debugging edge functions. +inspector_port = 8083 +# The Deno major version to use. +deno_version = 2 + +# [edge_runtime.secrets] +# secret_key = "env(SECRET_VALUE)" + +[analytics] +enabled = true +port = 54327 +# Configure one of the supported backends: `postgres`, `bigquery`. +backend = "postgres" + +# Experimental features may be deprecated any time +[experimental] +# Configures Postgres storage engine to use OrioleDB (S3) +orioledb_version = "" +# Configures S3 bucket URL, eg. .s3-.amazonaws.com +s3_host = "env(S3_HOST)" +# Configures S3 bucket region, eg. us-east-1 +s3_region = "env(S3_REGION)" +# Configures AWS_ACCESS_KEY_ID for S3 bucket +s3_access_key = "env(S3_ACCESS_KEY)" +# Configures AWS_SECRET_ACCESS_KEY for S3 bucket +s3_secret_key = "env(S3_SECRET_KEY)" diff --git a/pkgs/cli/supabase/functions/deno.json b/pkgs/cli/supabase/functions/deno.json new file mode 100644 index 000000000..2f39179dd --- /dev/null +++ b/pkgs/cli/supabase/functions/deno.json @@ -0,0 +1,26 @@ +{ + "unstable": ["sloppy-imports"], + "lint": { + "rules": { + "exclude": ["no-sloppy-imports"] + } + }, + "imports": { + "@pgflow/core": "./_vendor/@pgflow/core/index.js", + "@pgflow/core/": "./_vendor/@pgflow/core/", + "@pgflow/dsl": "./_vendor/@pgflow/dsl/index.js", + "@pgflow/dsl/": "./_vendor/@pgflow/dsl/", + "@pgflow/dsl/supabase": "./_vendor/@pgflow/dsl/supabase.js", + "@pgflow/edge-worker": "./_vendor/@pgflow/edge-worker/index.ts", + "@pgflow/edge-worker/": "./_vendor/@pgflow/edge-worker/", + "@pgflow/edge-worker/_internal": "./_vendor/@pgflow/edge-worker/_internal.ts", + "@henrygd/queue": "jsr:@henrygd/queue@1.0.7", + "@std/assert": "jsr:@std/assert@0.224.0", + "@std/async": "jsr:@std/async@0.224.0", + "@std/crypto": "jsr:@std/crypto@1.0.5", + "@std/log": "jsr:@std/log@0.224.13", + "@std/testing/mock": "jsr:@std/testing@0.224.0/mock", + "@supabase/supabase-js": "jsr:@supabase/supabase-js@2.49.4", + "postgres": "npm:postgres@3.4.5" + } +} diff --git a/pkgs/cli/supabase/functions/pgflow/flows.ts b/pkgs/cli/supabase/functions/pgflow/flows.ts new file mode 100644 index 000000000..b5dec59be --- /dev/null +++ b/pkgs/cli/supabase/functions/pgflow/flows.ts @@ -0,0 +1,9 @@ +import { Flow } from '@pgflow/dsl'; + +// Test flow for E2E testing of compile command +// This flow is used by the CLI E2E tests to verify that the compile command works correctly +const TestFlow = new Flow({ slug: 'test_flow_e2e' }) + .step({ slug: 'step1' }, () => ({ result: 'done' })); + +// Export flows array for ControlPlane +export const flows = [TestFlow]; diff --git a/pkgs/cli/supabase/functions/pgflow/index.ts b/pkgs/cli/supabase/functions/pgflow/index.ts new file mode 100644 index 000000000..5234ae0d9 --- /dev/null +++ b/pkgs/cli/supabase/functions/pgflow/index.ts @@ -0,0 +1,4 @@ +import { ControlPlane } from 
'@pgflow/edge-worker'; +import { flows } from './flows.ts'; + +ControlPlane.serve(flows); diff --git a/pkgs/cli/tsconfig.lib.json b/pkgs/cli/tsconfig.lib.json index 203c7dbe3..881352899 100644 --- a/pkgs/cli/tsconfig.lib.json +++ b/pkgs/cli/tsconfig.lib.json @@ -1,5 +1,8 @@ { "extends": "./tsconfig.json", + "compilerOptions": { + "lib": ["es2022", "dom", "dom.iterable"] + }, "include": ["src/**/*.ts"], "references": [ { diff --git a/pkgs/client/package.json b/pkgs/client/package.json index 995c77a7d..adcfff2d0 100644 --- a/pkgs/client/package.json +++ b/pkgs/client/package.json @@ -44,7 +44,6 @@ "@pgflow/dsl": "workspace:*", "@types/uuid": "^10.0.0", "postgres": "^3.4.5", - "supabase": "^2.34.3", "terser": "^5.43.0", "vite-plugin-dts": "~3.8.1", "vitest": "1.3.1" diff --git a/pkgs/core/package.json b/pkgs/core/package.json index dcf3880fb..35f433139 100644 --- a/pkgs/core/package.json +++ b/pkgs/core/package.json @@ -19,8 +19,7 @@ } }, "devDependencies": { - "@types/node": "^22.14.1", - "supabase": "^2.34.3" + "@types/node": "^22.14.1" }, "dependencies": { "@pgflow/dsl": "workspace:*", diff --git a/pkgs/edge-worker/package.json b/pkgs/edge-worker/package.json index f4b248a2e..957fa8fe1 100644 --- a/pkgs/edge-worker/package.json +++ b/pkgs/edge-worker/package.json @@ -29,8 +29,7 @@ }, "devDependencies": { "@types/deno": "^2.3.0", - "@types/node": "~18.16.20", - "supabase": "^2.34.3" + "@types/node": "~18.16.20" }, "publishConfig": { "access": "public" diff --git a/pkgs/edge-worker/src/control-plane/index.ts b/pkgs/edge-worker/src/control-plane/index.ts new file mode 100644 index 000000000..0f1107dca --- /dev/null +++ b/pkgs/edge-worker/src/control-plane/index.ts @@ -0,0 +1,27 @@ +/** + * ControlPlane - HTTP API for flow compilation + * + * Provides HTTP endpoints for compiling flows to SQL without requiring + * local Deno runtime or file system access. + * + * @example + * ```typescript + * import { ControlPlane } from '@pgflow/edge-worker'; + * import { flows } from './flows.ts'; + * + * ControlPlane.serve(flows); + * ``` + */ + +import { serveControlPlane } from './server.js'; + +/** + * Main ControlPlane API + */ +export const ControlPlane = { + /** + * Start the ControlPlane HTTP server + * @param flows Array of flow definitions to register + */ + serve: serveControlPlane, +}; diff --git a/pkgs/edge-worker/src/control-plane/server.ts b/pkgs/edge-worker/src/control-plane/server.ts new file mode 100644 index 000000000..417e18448 --- /dev/null +++ b/pkgs/edge-worker/src/control-plane/server.ts @@ -0,0 +1,150 @@ +import type { AnyFlow } from '@pgflow/dsl'; +import { compileFlow } from '@pgflow/dsl'; + +/** + * Response type for the /flows/:slug endpoint + */ +export interface FlowCompilationResponse { + flowSlug: string; + sql: string[]; +} + +/** + * Error response type + */ +export interface ErrorResponse { + error: string; + message: string; +} + +/** + * Builds a flow registry and validates no duplicate slugs + * @param flows Array of flow definitions + * @returns Map of slug to flow + */ +function buildFlowRegistry(flows: AnyFlow[]): Map { + const registry = new Map(); + + for (const flow of flows) { + if (registry.has(flow.slug)) { + throw new Error( + `Duplicate flow slug detected: '${flow.slug}'. 
Each flow must have a unique slug.` + ); + } + registry.set(flow.slug, flow); + } + + return registry; +} + +/** + * Creates a request handler for the ControlPlane HTTP API + * @param flows Array of flow definitions to register + * @returns Request handler function + */ +export function createControlPlaneHandler(flows: AnyFlow[]) { + const registry = buildFlowRegistry(flows); + + return (req: Request): Response => { + const url = new URL(req.url); + + // Supabase Edge Functions always include function name as first segment + // Kong strips /functions/v1/ prefix, so handler receives: /pgflow/flows/slug + // We strip /pgflow to get: /flows/slug + const pathname = url.pathname.replace(/^\/[^/]+/, ''); + + // Handle GET /flows/:slug + const flowsMatch = pathname.match(/^\/flows\/([a-zA-Z0-9_]+)$/); + if (flowsMatch && req.method === 'GET') { + const slug = flowsMatch[1]; + return handleGetFlow(registry, slug); + } + + // 404 for unknown routes + return jsonResponse( + { + error: 'Not Found', + message: `Route ${req.method} ${url.pathname} not found`, + }, + 404 + ); + }; +} + +/** + * Serves the ControlPlane HTTP API for flow compilation + * @param flows Array of flow definitions to register + */ +export function serveControlPlane(flows: AnyFlow[]): void { + const handler = createControlPlaneHandler(flows); + + // Create HTTP server using Deno.serve (follows Supabase Edge Function pattern) + Deno.serve({}, handler); +} + +/** + * Handles GET /flows/:slug requests + */ +function handleGetFlow( + registry: Map, + slug: string +): Response { + try { + const flow = registry.get(slug); + + if (!flow) { + return jsonResponse( + { + error: 'Flow Not Found', + message: `Flow '${slug}' not found. Did you add it to flows.ts?`, + }, + 404 + ); + } + + // Compile the flow to SQL + const sql = compileFlow(flow); + + const response: FlowCompilationResponse = { + flowSlug: slug, + sql, + }; + + return jsonResponse(response, 200); + } catch (error) { + console.error('Error compiling flow:', error); + return jsonResponse( + { + error: 'Compilation Error', + message: error instanceof Error ? 
error.message : 'Unknown error', + }, + 500 + ); + } +} + +/** + * Helper to create JSON responses + */ +function jsonResponse(data: unknown, status: number): Response { + return new Response(JSON.stringify(data), { + status, + headers: { + 'Content-Type': 'application/json', + }, + }); +} + +/** + * ControlPlane class for serving flow compilation HTTP API + */ +export class ControlPlane { + /** + * Serves the ControlPlane HTTP API for flow compilation + * @param flows Array of flow definitions to register + */ + static serve(flows: AnyFlow[]): void { + const handler = createControlPlaneHandler(flows); + Deno.serve({}, handler); + } +} diff --git a/pkgs/edge-worker/src/index.ts b/pkgs/edge-worker/src/index.ts index 432f72424..b75344c97 100644 --- a/pkgs/edge-worker/src/index.ts +++ b/pkgs/edge-worker/src/index.ts @@ -6,6 +6,9 @@ export { EdgeWorker } from './EdgeWorker.js'; export { createFlowWorker } from './flow/createFlowWorker.js'; export { FlowWorkerLifecycle } from './flow/FlowWorkerLifecycle.js'; +// Export ControlPlane for HTTP-based flow compilation +export { ControlPlane } from './control-plane/index.js'; + // Export platform adapters export * from './platform/index.js'; @@ -23,6 +26,3 @@ export type { ILifecycle, IBatchProcessor, } from './core/types.js'; - -// Export context types -export type { Context } from './core/context.js'; diff --git a/pkgs/edge-worker/supabase/functions/deno.json b/pkgs/edge-worker/supabase/functions/deno.json index a8405e35f..e8950dd35 100644 --- a/pkgs/edge-worker/supabase/functions/deno.json +++ b/pkgs/edge-worker/supabase/functions/deno.json @@ -1,5 +1,10 @@ { "unstable": ["sloppy-imports"], + "lint": { + "rules": { + "exclude": ["no-sloppy-imports"] + } + }, "imports": { "@pgflow/core": "./_vendor/@pgflow/core/index.js", "@pgflow/core/": "./_vendor/@pgflow/core/", diff --git a/pkgs/edge-worker/supabase/functions/pgflow/index.ts b/pkgs/edge-worker/supabase/functions/pgflow/index.ts new file mode 100644 index 000000000..25bfb6ed9 --- /dev/null +++ b/pkgs/edge-worker/supabase/functions/pgflow/index.ts @@ -0,0 +1,19 @@ +import { Flow } from '@pgflow/dsl'; +import { ControlPlane } from '@pgflow/edge-worker'; + +// Test flows for e2e testing +const TestFlow1 = new Flow({ slug: 'test_flow_1' }).step( + { slug: 'step1' }, + () => ({ result: 'ok' }) +); + +const TestFlow2 = new Flow({ slug: 'test_flow_2' }) + .step({ slug: 'step1' }, () => ({ value: 1 })) + .step({ slug: 'step2', dependsOn: ['step1'] }, () => ({ value: 2 })); + +const TestFlow3 = new Flow({ slug: 'test_flow_3', maxAttempts: 5 }).step( + { slug: 'step1' }, + () => ({ done: true }) +); + +ControlPlane.serve([TestFlow1, TestFlow2, TestFlow3]); diff --git a/pkgs/edge-worker/tests/e2e/control-plane.test.ts b/pkgs/edge-worker/tests/e2e/control-plane.test.ts new file mode 100644 index 000000000..daf2c4460 --- /dev/null +++ b/pkgs/edge-worker/tests/e2e/control-plane.test.ts @@ -0,0 +1,90 @@ +import { assertEquals, assertExists } from '@std/assert'; +import { e2eConfig } from '../config.ts'; +import { log } from './_helpers.ts'; + +const API_URL = e2eConfig.apiUrl; +const BASE_URL = `${API_URL}/functions/v1/pgflow`; + +/** + * Helper to ensure the pgflow function is responsive + * Makes initial request and retries until server is ready + */ +async function ensureServerReady() { + log('Ensuring pgflow function is ready...'); + + const maxRetries = 15; + const retryDelayMs = 1000; + + for (let i = 0; i < maxRetries; i++) { + try { + // Try to hit the /flows/:slug endpoint to wake up the function + 
const response = await fetch(`${BASE_URL}/flows/test_flow_1`, { + signal: AbortSignal.timeout(5000), + }); + + // Any response (even 404) means the function is running + if (response.status > 0) { + // Consume the response body to avoid resource leaks + await response.body?.cancel(); + log('pgflow function is ready!'); + return; + } + } catch (error) { + if (i === maxRetries - 1) { + throw new Error(`Server not ready after ${maxRetries} retries: ${error}`); + } + log(`Retry ${i + 1}/${maxRetries}: Server not ready yet, waiting...`); + await new Promise((resolve) => setTimeout(resolve, retryDelayMs)); + } + } +} + +Deno.test('E2E ControlPlane - GET /flows/:slug returns compiled SQL', async () => { + await ensureServerReady(); + + const response = await fetch(`${BASE_URL}/flows/test_flow_1`); + const data = await response.json(); + + assertEquals(response.status, 200); + assertEquals(data.flowSlug, 'test_flow_1'); + assertExists(data.sql); + assertEquals(Array.isArray(data.sql), true); + assertEquals(data.sql.length > 0, true); + + log(`Successfully compiled flow test_flow_1 (${data.sql.length} SQL statements)`); +}); + +Deno.test('E2E ControlPlane - GET /flows/:slug returns 404 for unknown flow', async () => { + const response = await fetch(`${BASE_URL}/flows/unknown_flow`); + const data = await response.json(); + + assertEquals(response.status, 404); + assertEquals(data.error, 'Flow Not Found'); + assertExists(data.message); + + log('404 error correctly returned for unknown flow'); +}); + +Deno.test('E2E ControlPlane - GET /invalid/route returns 404', async () => { + const response = await fetch(`${BASE_URL}/invalid/route`); + const data = await response.json(); + + assertEquals(response.status, 404); + assertEquals(data.error, 'Not Found'); + assertExists(data.message); + + log('404 error correctly returned for invalid route'); +}); + +Deno.test('E2E ControlPlane - POST /flows/:slug returns 404 (wrong method)', async () => { + const response = await fetch(`${BASE_URL}/flows/test_flow_1`, { + method: 'POST', + }); + const data = await response.json(); + + assertEquals(response.status, 404); + assertEquals(data.error, 'Not Found'); + assertExists(data.message); + + log('404 error correctly returned for wrong HTTP method'); +}); diff --git a/pkgs/edge-worker/tests/unit/control-plane/server.test.ts b/pkgs/edge-worker/tests/unit/control-plane/server.test.ts new file mode 100644 index 000000000..c4daae35c --- /dev/null +++ b/pkgs/edge-worker/tests/unit/control-plane/server.test.ts @@ -0,0 +1,123 @@ +import { assertEquals, assertMatch } from '@std/assert'; +import { Flow, compileFlow } from '@pgflow/dsl'; +import { createControlPlaneHandler } from '../../../src/control-plane/server.ts'; + +// Test flows covering different DSL features +const FlowWithSingleStep = new Flow({ slug: 'flow_single_step' }) + .step({ slug: 'step1' }, () => ({ result: 'done' })); + +const FlowWithRuntimeOptions = new Flow({ + slug: 'flow_with_options', + maxAttempts: 5, + timeout: 120, + baseDelay: 2000, +}).step({ slug: 'step1' }, () => ({ result: 'ok' })); + +const FlowWithMultipleSteps = new Flow({ slug: 'flow_multiple_steps' }) + .step({ slug: 'step1' }, () => ({ value: 1 })) + .step({ slug: 'step2', dependsOn: ['step1'] }, () => ({ value: 2 })) + .step({ slug: 'step3', dependsOn: ['step2'] }, () => ({ value: 3 })); + +const FlowWithArrayStep = new Flow<{ items: string[] }>({ + slug: 'flow_with_array', +}) + .array({ slug: 'process_items' }, ({ run }) => + run.items.map((item) => ({ item, processed: true })) + ); + 
+const FlowWithMapStep = new Flow({ slug: 'flow_with_map' }) + .map({ slug: 'uppercase' }, (text) => text.toUpperCase()) + .step({ slug: 'join', dependsOn: ['uppercase'] }, (input) => ({ + result: input.uppercase.join(','), + })); + +const FlowWithStepOptions = new Flow({ slug: 'flow_step_options' }) + .step( + { slug: 'step1', maxAttempts: 10, timeout: 60, baseDelay: 500 }, + () => ({ result: 'done' }) + ); + +const FlowWithParallelSteps = new Flow({ slug: 'flow_parallel' }) + .step({ slug: 'step1' }, () => ({ a: 1 })) + .step({ slug: 'step2' }, () => ({ b: 2 })) + .step({ slug: 'step3', dependsOn: ['step1', 'step2'] }, () => ({ + c: 3, + })); + +// All test flows +const ALL_TEST_FLOWS = [ + FlowWithSingleStep, + FlowWithRuntimeOptions, + FlowWithMultipleSteps, + FlowWithArrayStep, + FlowWithMapStep, + FlowWithStepOptions, + FlowWithParallelSteps, +]; + +Deno.test('ControlPlane - should reject duplicate flow slugs', () => { + // createControlPlaneHandler validates flows, so we test that directly + let error: Error | null = null; + try { + createControlPlaneHandler([FlowWithSingleStep, FlowWithSingleStep]); + } catch (e) { + error = e as Error; + } + + assertEquals(error instanceof Error, true); + assertMatch(error!.message, /Duplicate flow slug detected: 'flow_single_step'/); +}); + +Deno.test('ControlPlane Handler - GET /flows/:slug returns 404 for unknown flow', async () => { + const handler = createControlPlaneHandler(ALL_TEST_FLOWS); + + const request = new Request('http://localhost/pgflow/flows/unknown_flow'); + const response = handler(request); + + assertEquals(response.status, 404); + const data = await response.json(); + assertEquals(data.error, 'Flow Not Found'); + assertMatch(data.message, /Flow 'unknown_flow' not found/); +}); + +Deno.test('ControlPlane Handler - returns 404 for invalid routes', async () => { + const handler = createControlPlaneHandler(ALL_TEST_FLOWS); + + const request = new Request('http://localhost/pgflow/invalid/route'); + const response = handler(request); + + assertEquals(response.status, 404); + const data = await response.json(); + assertEquals(data.error, 'Not Found'); + assertMatch(data.message, /Route GET \/pgflow\/invalid\/route not found/); +}); + +Deno.test('ControlPlane Handler - returns 404 for wrong HTTP method', async () => { + const handler = createControlPlaneHandler(ALL_TEST_FLOWS); + + const request = new Request('http://localhost/pgflow/flows/flow_single_step', { + method: 'POST', + }); + const response = handler(request); + + assertEquals(response.status, 404); + const data = await response.json(); + assertEquals(data.error, 'Not Found'); + assertMatch(data.message, /Route POST \/pgflow\/flows\/flow_single_step not found/); +}); + +// Dynamically generate tests for each flow variation +ALL_TEST_FLOWS.forEach((flow) => { + Deno.test(`ControlPlane Handler - compiles flow: ${flow.slug}`, async () => { + const handler = createControlPlaneHandler(ALL_TEST_FLOWS); + const expectedSql = compileFlow(flow); + + const request = new Request(`http://localhost/pgflow/flows/${flow.slug}`); + const response = handler(request); + + assertEquals(response.status, 200); + const data = await response.json(); + assertEquals(data.flowSlug, flow.slug); + assertEquals(data.sql, expectedSql); + }); +}); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 206a954b9..f575a697e 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -93,6 +93,9 @@ importers: prettier: specifier: ^2.6.2 version: 2.8.8 + supabase: + specifier: ^2.34.3 + version: 2.54.11 tslib: specifier: 
^2.3.0 version: 2.8.1 @@ -273,9 +276,6 @@ importers: postgres: specifier: ^3.4.5 version: 3.4.5 - supabase: - specifier: ^2.34.3 - version: 2.54.11 terser: specifier: ^5.43.0 version: 5.43.1 @@ -298,9 +298,6 @@ importers: '@types/node': specifier: ^22.14.1 version: 22.19.0 - supabase: - specifier: ^2.34.3 - version: 2.54.11 pkgs/dsl: devDependencies: @@ -338,9 +335,6 @@ importers: '@types/node': specifier: ~18.16.20 version: 18.16.20 - supabase: - specifier: ^2.34.3 - version: 2.54.11 pkgs/example-flows: dependencies: