diff --git a/.github/actions/cache-docker-images/README.md b/.github/actions/cache-docker-images/README.md new file mode 100644 index 0000000000..e4aff307f3 --- /dev/null +++ b/.github/actions/cache-docker-images/README.md @@ -0,0 +1,189 @@ +# Docker Image Caching Action + +This action provides intelligent Docker image caching to avoid rate limiting issues from Docker registries, particularly useful for ECR Public Registry and Docker Hub. + +## Features + +- πŸ—‚οΈ **Smart Caching**: Automatically generates cache keys based on image names +- πŸ”„ **Incremental Updates**: Merges new images with existing cache +- ⚑ **Fast Restoration**: Loads cached images before attempting pulls +- πŸ›‘οΈ **Rate Limit Protection**: Includes retry logic and specific ECR error handling +- πŸ“Š **Detailed Reporting**: Provides comprehensive cache hit/miss reporting +- πŸ” **AutoKitteh Updates**: Intelligent version checking and automatic updates for AutoKitteh images + +## Usage + +### Basic Usage + +```yaml +- name: Cache Docker Images + uses: ./.github/actions/cache-docker-images + with: + cache-key: "my-project-images" + images: "postgres:15-alpine,temporalio/auto-setup:1.24.2,public.ecr.aws/autokitteh/server:latest" +``` + +### Advanced Usage + +```yaml +- name: Cache Docker Images + id: docker-cache + uses: ./.github/actions/cache-docker-images + with: + cache-key: "test-env-images-v2" + images: "postgres:15-alpine,temporalio/auto-setup:${{ matrix.temporal-version }},public.ecr.aws/autokitteh/server:latest" + restore-only: "false" + +- name: Check cache results + run: | + echo "Cache hit: ${{ steps.docker-cache.outputs.cache-hit }}" + echo "Loaded images: ${{ steps.docker-cache.outputs.images-loaded }}" +``` + +### AutoKitteh Update Checking + +```yaml +- name: Cache Docker Images with AutoKitteh Updates + id: docker-cache + uses: ./.github/actions/cache-docker-images + with: + cache-key: "test-env-images-v2" + images: 
"postgres:15-alpine,public.ecr.aws/autokitteh/server:v1.2.3" + check-updates: "true" + +- name: Check what was updated + run: | + echo "Cache hit: ${{ steps.docker-cache.outputs.cache-hit }}" + echo "Loaded images: ${{ steps.docker-cache.outputs.images-loaded }}" + echo "Updated images: ${{ steps.docker-cache.outputs.updated-images }}" +``` + +## Inputs + +| Input | Description | Required | Default | +|-------|-------------|----------|---------| +| `cache-key` | Base cache key for the Docker images | No | `"docker-images"` | +| `images` | Comma-separated list of Docker images to cache | Yes | - | +| `restore-only` | Only restore cache, don't save new cache | No | `"false"` | +| `check-updates` | Check for AutoKitteh updates and pull newer versions | No | `"false"` | + +## Outputs + +| Output | Description | +|--------|-------------| +| `cache-hit` | Whether the cache was restored (`"true"` or `"false"`) | +| `images-loaded` | Comma-separated list of images loaded from cache | +| `updated-images` | Comma-separated list of AutoKitteh images that were updated | + +## How It Works + +1. **Cache Key Generation**: Creates a unique cache key based on image names and runner OS +2. **Cache Restoration**: Attempts to restore previously cached Docker images +3. **Image Loading**: Loads cached images using `docker load` +4. **Missing Image Detection**: Identifies which images need to be pulled +5. **Smart Pulling**: Only pulls missing images with retry logic +6. **AutoKitteh Update Checking**: (Optional) Checks GitHub releases for newer AutoKitteh versions +7. **Intelligent Updates**: Only pulls AutoKitteh images if a newer version is available +8. 
**Cache Saving**: Saves newly pulled images to cache for future runs + +## Integration with Existing Workflows + +### Replace existing Docker pulls + +**Before:** +```yaml +- name: Pull Docker images + run: | + docker pull postgres:15-alpine + docker pull temporalio/auto-setup:1.24.2 + docker pull public.ecr.aws/autokitteh/server:latest +``` + +**After:** +```yaml +- name: Cache and pull Docker images + uses: ./.github/actions/cache-docker-images + with: + cache-key: "my-workflow-images-v1" + images: "postgres:15-alpine,temporalio/auto-setup:1.24.2,public.ecr.aws/autokitteh/server:latest" +``` + +### Use in matrix builds + +```yaml +strategy: + matrix: + browser: [Chrome, Firefox, Safari] + +steps: + - name: Cache Docker images for ${{ matrix.browser }} + uses: ./.github/actions/cache-docker-images + with: + cache-key: "test-images-${{ matrix.browser }}-v1" + images: "postgres:15-alpine,temporalio/auto-setup:1.24.2,public.ecr.aws/autokitteh/server:latest" +``` + +## Benefits + +- **Reduces Rate Limiting**: Avoids repeated pulls from registries +- **Faster Builds**: Cached images load much faster than network pulls +- **Cost Savings**: Reduces bandwidth usage in CI/CD +- **Reliability**: Builds continue even if registries have temporary issues +- **Smart Updates**: Only pulls AutoKitteh images when there's actually a new version +- **Version Awareness**: Automatically tracks and compares semantic versions + +## Cache Behavior + +- **Cache Duration**: GitHub Actions cache retention (up to 7 days by default) +- **Cache Size**: Compressed Docker images (typically 50-80% smaller) +- **Cache Scope**: Per repository and branch +- **Cache Keys**: Include image names hash to ensure consistency + +## Error Handling + +The action includes specific error handling for: +- ECR Public Registry rate limits +- Docker Hub rate limits +- Network timeouts +- Corrupted cache files +- Missing images + +## Troubleshooting + +### Cache misses frequently +- Check if image tags are changing 
(use specific tags instead of `latest`) +- Verify cache key consistency across runs +- Consider cache size limits (10GB per repository) + +### Images fail to load from cache +- Cache may be corrupted - action will fall back to pulling +- Check Docker daemon status +- Verify sufficient disk space + +### Rate limiting still occurs +- Increase retry attempts in the action +- Consider using authenticated registry access +- Use alternative registries or mirrors + +## AutoKitteh Update Feature + +When `check-updates: "true"` is enabled, the action will: + +1. **Check GitHub Releases**: Queries the AutoKitteh repository for the latest release +2. **Version Comparison**: Compares current image version with the latest release +3. **Smart Updates**: Only pulls if a newer version is available +4. **Cache Updates**: Automatically updates the cache with new versions +5. **Detailed Reporting**: Reports which images were updated + +### Supported AutoKitteh Image Formats + +- `public.ecr.aws/autokitteh/server:v1.2.3` - Specific version +- `public.ecr.aws/autokitteh/server:latest` - Always checks for updates +- Any image containing "autokitteh" in the name + +## Version History + +- **v1.0**: Initial release with basic caching +- **v1.1**: Added retry logic and ECR-specific error handling +- **v1.2**: Improved cache merging and cleanup +- **v1.3**: Added intelligent AutoKitteh version checking and updates \ No newline at end of file diff --git a/.github/actions/cache-docker-images/action.yml b/.github/actions/cache-docker-images/action.yml new file mode 100644 index 0000000000..4cfe499422 --- /dev/null +++ b/.github/actions/cache-docker-images/action.yml @@ -0,0 +1,325 @@ +name: "Cache Docker Images" +description: "Intelligent Docker image caching to avoid rate limiting issues from Docker registries" + +inputs: + cache-key: + description: "Base cache key for the Docker images" + required: false + default: "docker-images" + images: + description: "Comma-separated list of Docker 
images to cache" + required: true + restore-only: + description: "Only restore cache, don't save new cache" + required: false + default: "false" + check-updates: + description: "Check for AutoKitteh updates and pull newer versions" + required: false + default: "false" + +outputs: + cache-hit: + description: "Whether the cache was restored (true or false)" + value: ${{ steps.cache-restore.outputs.cache-hit }} + images-loaded: + description: "Comma-separated list of images loaded from cache" + value: ${{ steps.process-images.outputs.images-loaded }} + updated-images: + description: "Comma-separated list of AutoKitteh images that were updated" + value: ${{ steps.check-autokitteh-version.outputs.updated-images }} + +runs: + using: "composite" + steps: + - name: πŸ”§ Generate cache key + id: cache-key + shell: bash + run: | + # Create a hash of the image list for cache key uniqueness + IMAGES_HASH=$(echo "${{ inputs.images }}" | sha256sum | cut -d' ' -f1 | head -c 8) + CACHE_KEY="${{ inputs.cache-key }}-${IMAGES_HASH}-${{ runner.os }}" + echo "cache-key=$CACHE_KEY" >> "$GITHUB_OUTPUT" + echo "Generated cache key: $CACHE_KEY" + + - name: πŸ”„ Restore Docker images from cache + id: cache-restore + uses: actions/cache/restore@v4 + with: + path: /tmp/docker-cache + key: ${{ steps.cache-key.outputs.cache-key }} + + - name: πŸ“¦ Process cached images + id: process-images + shell: bash + run: | + # Convert comma-separated images to array + IFS=',' read -ra IMAGES <<< "${{ inputs.images }}" + LOADED_IMAGES="" + MISSING_IMAGES="" + + echo "Processing images: ${{ inputs.images }}" + + if [ "${{ steps.cache-restore.outputs.cache-hit }}" == "true" ]; then + echo "βœ… Cache hit! Loading images from cache..." + + # Load cached images + for image in "${IMAGES[@]}"; do + image=$(echo "$image" | xargs) # trim whitespace + cache_file="/tmp/docker-cache/${image//[\/:]/_}.tar" + + if [ -f "$cache_file" ]; then + echo "Loading $image from cache..." 
+ if docker load < "$cache_file"; then + if [ -n "$LOADED_IMAGES" ]; then + LOADED_IMAGES="$LOADED_IMAGES,$image" + else + LOADED_IMAGES="$image" + fi + echo "βœ… Loaded $image from cache" + else + echo "❌ Failed to load $image from cache, will pull fresh" + if [ -n "$MISSING_IMAGES" ]; then + MISSING_IMAGES="$MISSING_IMAGES,$image" + else + MISSING_IMAGES="$image" + fi + fi + else + echo "⚠️ Cache file not found for $image, will pull fresh" + if [ -n "$MISSING_IMAGES" ]; then + MISSING_IMAGES="$MISSING_IMAGES,$image" + else + MISSING_IMAGES="$image" + fi + fi + done + else + echo "❌ Cache miss! All images need to be pulled" + MISSING_IMAGES="${{ inputs.images }}" + fi + + echo "images-loaded=$LOADED_IMAGES" >> "$GITHUB_OUTPUT" + echo "missing-images=$MISSING_IMAGES" >> "$GITHUB_OUTPUT" + + echo "Images loaded from cache: $LOADED_IMAGES" + echo "Images to pull: $MISSING_IMAGES" + + - name: 🐳 Pull missing Docker images + if: steps.process-images.outputs.missing-images != '' + shell: bash + run: | + IFS=',' read -ra MISSING <<< "${{ steps.process-images.outputs.missing-images }}" + + echo "Pulling missing images..." + for image in "${MISSING[@]}"; do + image=$(echo "$image" | xargs) # trim whitespace + if [ -n "$image" ]; then + echo "Pulling $image..." + + # Retry logic for rate limiting + for attempt in 1 2 3; do + if docker pull "$image"; then + echo "βœ… Successfully pulled $image" + break + else + echo "❌ Failed to pull $image (attempt $attempt/3)" + if [ $attempt -eq 3 ]; then + echo "❌ Failed to pull $image after 3 attempts" + exit 1 + fi + + # Wait with exponential backoff + wait_time=$((attempt * 30)) + echo "⏳ Waiting ${wait_time}s before retry..." + sleep $wait_time + fi + done + fi + done + + - name: πŸ’Ύ Save images to cache + if: inputs.restore-only != 'true' && steps.process-images.outputs.missing-images != '' + shell: bash + run: | + echo "Saving images to cache..." 
+ mkdir -p /tmp/docker-cache + + IFS=',' read -ra ALL_IMAGES <<< "${{ inputs.images }}" + + for image in "${ALL_IMAGES[@]}"; do + image=$(echo "$image" | xargs) # trim whitespace + if [ -n "$image" ]; then + cache_file="/tmp/docker-cache/${image//[\/:]/_}.tar" + + # Only save if not already cached or if we just pulled it + if [ ! -f "$cache_file" ] || [[ "${{ steps.process-images.outputs.missing-images }}" == *"$image"* ]]; then + echo "Saving $image to cache..." + if docker save "$image" > "$cache_file"; then + echo "βœ… Saved $image to cache" + else + echo "❌ Failed to save $image to cache" + rm -f "$cache_file" # Remove partial file + fi + else + echo "⏭️ $image already cached, skipping" + fi + fi + done + + - name: πŸ—‚οΈ Update cache + if: inputs.restore-only != 'true' + uses: actions/cache/save@v4 + with: + path: /tmp/docker-cache + key: ${{ steps.cache-key.outputs.cache-key }} + + - name: πŸ“Š Cache summary + shell: bash + run: | + echo "πŸ—‚οΈ Docker Image Cache Summary" + echo "==============================" + echo "Cache key: ${{ steps.cache-key.outputs.cache-key }}" + echo "Cache hit: ${{ steps.cache-restore.outputs.cache-hit }}" + echo "Images loaded from cache: ${{ steps.process-images.outputs.images-loaded }}" + echo "Images pulled fresh: ${{ steps.process-images.outputs.missing-images }}" + echo "Restore only mode: ${{ inputs.restore-only }}" + + # Show cache directory size if it exists + if [ -d "/tmp/docker-cache" ]; then + echo "Cache directory size: $(du -sh /tmp/docker-cache | cut -f1)" + echo "Cached files:" + ls -la /tmp/docker-cache/ || true + fi + + - name: πŸ” Check AutoKitteh version and update if needed + if: inputs.check-updates == 'true' + id: check-autokitteh-version + shell: bash + run: | + echo "πŸ” Checking for AutoKitteh updates..." 
+ + # Function to extract version from image tag + get_image_version() { + local image="$1" + if [[ "$image" == *"autokitteh"* ]]; then + # Extract version from image tag (e.g., "public.ecr.aws/autokitteh/server:v1.2.3" -> "v1.2.3") + echo "$image" | sed 's/.*://' | sed 's/^v//' + fi + } + + # Function to compare versions (returns 0 if v1 >= v2, 1 if v1 < v2) + version_compare() { + local v1="$1" + local v2="$2" + + # Remove 'v' prefix if present + v1=$(echo "$v1" | sed 's/^v//') + v2=$(echo "$v2" | sed 's/^v//') + + # Use sort -V for version comparison + if [ "$(printf '%s\n%s' "$v1" "$v2" | sort -V | head -n1)" = "$v2" ]; then + return 0 # v1 >= v2 + else + return 1 # v1 < v2 + fi + } + + # Check each image for AutoKitteh updates + IFS=',' read -ra ALL_IMAGES <<< "${{ inputs.images }}" + UPDATED_IMAGES="" + + for image in "${ALL_IMAGES[@]}"; do + image=$(echo "$image" | xargs) # trim whitespace + + if [[ "$image" == *"autokitteh"* ]]; then + echo "πŸ“¦ Checking AutoKitteh image: $image" + + # Get current image version + CURRENT_VERSION=$(get_image_version "$image") + + if [ -z "$CURRENT_VERSION" ] || [ "$CURRENT_VERSION" = "latest" ]; then + echo "⚠️ Image uses 'latest' tag or version not detectable, checking for updates anyway..." + SHOULD_UPDATE=true + else + echo "πŸ“‹ Current version: $CURRENT_VERSION" + + # Get latest release from GitHub API + echo "🌐 Fetching latest release from GitHub..." 
+ LATEST_RELEASE=$(curl -s "https://api.github.com/repos/autokitteh/autokitteh/releases/latest" | grep '"tag_name"' | cut -d'"' -f4) + + if [ -z "$LATEST_RELEASE" ]; then + echo "❌ Failed to fetch latest release information" + continue + fi + + echo "πŸ†• Latest release: $LATEST_RELEASE" + + # Compare versions + if version_compare "$CURRENT_VERSION" "$LATEST_RELEASE"; then + echo "βœ… Current version ($CURRENT_VERSION) is up to date" + SHOULD_UPDATE=false + else + echo "πŸ”„ New version available: $LATEST_RELEASE (current: $CURRENT_VERSION)" + SHOULD_UPDATE=true + fi + fi + + if [ "$SHOULD_UPDATE" = true ]; then + echo "⬇️ Pulling updated AutoKitteh image..." + + # Construct new image name with latest version + if [ -n "$LATEST_RELEASE" ] && [ "$LATEST_RELEASE" != "latest" ]; then + # Replace version in image name + NEW_IMAGE=$(echo "$image" | sed "s/:.*/:$LATEST_RELEASE/") + echo "🏷️ New image: $NEW_IMAGE" + else + NEW_IMAGE="$image" + fi + + # Pull the new image + for attempt in 1 2 3; do + if docker pull "$NEW_IMAGE"; then + echo "βœ… Successfully pulled updated image: $NEW_IMAGE" + + # Update cache with new image + cache_file="/tmp/docker-cache/${NEW_IMAGE//[\/:]/_}.tar" + mkdir -p /tmp/docker-cache + + if docker save "$NEW_IMAGE" > "$cache_file"; then + echo "πŸ’Ύ Updated cache with new image" + if [ -n "$UPDATED_IMAGES" ]; then + UPDATED_IMAGES="$UPDATED_IMAGES,$NEW_IMAGE" + else + UPDATED_IMAGES="$NEW_IMAGE" + fi + else + echo "⚠️ Failed to save updated image to cache" + fi + break + else + echo "❌ Failed to pull $NEW_IMAGE (attempt $attempt/3)" + if [ $attempt -eq 3 ]; then + echo "❌ Failed to pull updated image after 3 attempts" + else + wait_time=$((attempt * 30)) + echo "⏳ Waiting ${wait_time}s before retry..." 
+ sleep $wait_time + fi + fi + done + fi + else + echo "⏭️ Skipping non-AutoKitteh image: $image" + fi + done + + if [ -n "$UPDATED_IMAGES" ]; then + echo "" + echo "πŸŽ‰ Updated images: $UPDATED_IMAGES" + echo "updated-images=$UPDATED_IMAGES" >> "$GITHUB_OUTPUT" + else + echo "" + echo "ℹ️ No AutoKitteh images needed updating" + echo "updated-images=" >> "$GITHUB_OUTPUT" + fi \ No newline at end of file diff --git a/.github/actions/setup-test-env/action.yml b/.github/actions/setup-test-env/action.yml index 5817097b0e..9dd4a51f6f 100644 --- a/.github/actions/setup-test-env/action.yml +++ b/.github/actions/setup-test-env/action.yml @@ -1,56 +1,1070 @@ -name: "Setup Test Environment" -description: "Sets up the test environment including Docker container" +--- +# Setup Test Environment (Enterprise) +# Composite GitHub Action that spins up PostgreSQL, Temporal dev-server, and +# AutoKitteh-EE, waits for the services to become healthy, and prints useful +# diagnostics. Designed for Ubuntu-based runners. 
+# +# Prerequisites: +# - Docker installed on the runner +# - Ubuntu-based runner (for apt-get and Playwright deps) +# - Optional: jq for formatted network inspection +# +# USAGE EXAMPLE: +# - uses: ./.github/actions/setup-test-env +# id: test-env +# with: +# descope-project-id: ${{ secrets.DESCOPE_PROJECT_ID }} +# browser: firefox +# autokitteh-image: autokitteh/ee:latest +# temporal-version: 1.24.2 +# temporal-encryption-keys: | +# [{"keyId":1,"key":"bXkta2V5LTEyMw=="}] +# rsa-private-key: ${{ secrets.RSA_PRIVATE_KEY }} +# rsa-public-key: ${{ secrets.RSA_PUBLIC_KEY }} +# postgres-port: 5432 +# temporal-port: 7233 +# autokitteh-port: 9980 +# +# Downstream steps can access container IDs via: +# steps.test-env.outputs.postgres-id +# steps.test-env.outputs.temporal-id +# steps.test-env.outputs.autokitteh-id + +name: "Setup Test Environment (Enterprise)" +description: "Start PostgreSQL, Temporal, and AutoKitteh EE containers using Docker" inputs: - descope-project-id: - description: "Descope project ID" - required: true - browser: - description: "Browser to test" - required: true - autokitteh-image: - description: "AutoKitteh Docker image" - required: true + descope-project-id: + description: "Descope project ID" + required: true + browser: + description: "Browser under test (Chrome | Firefox | Safari | Edge)" + required: true + autokitteh-image: + description: "AutoKitteh EE Docker image tag (e.g., autokitteh/ee:latest)" + required: true + temporal-version: + description: "Temporal auto-setup image tag (e.g., 1.24.2)" + required: false + default: "1.24.2" + temporal-encryption-keys: + description: "JSON array of Temporal encryption keys (optional)" + required: false + default: "" + rsa-private-key: + description: "RSA private key (PEM) for JWT signing" + required: true + rsa-public-key: + description: "RSA public key (PEM) for JWT verification" + required: true + postgres-user: + description: "PostgreSQL username" + required: false + default: "postgres" + 
postgres-password: + description: "PostgreSQL password" + required: false + default: "postgres" + postgres-db: + description: "PostgreSQL database name" + required: false + default: "autokitteh" + postgres-port: + description: "PostgreSQL port" + required: false + default: "5432" + session-cookie-keys: + description: "Session cookie keys" + required: false + default: "78d85f1c9fd9df7d3d459a75f1db315ef634dc854ba90bc5add3e6cb6f135bd6,d9591b1ab2d0e5de1fef96a5a8a50b883430884211f16a206f84ad57897f99d5" + temporal-port: + description: "Temporal port" + required: false + default: "7233" + autokitteh-port: + description: "AutoKitteh port" + required: false + default: "9980" + port-offset: + description: "Port offset for parallel runs (will be added to base ports)" + required: false + default: "0" + postgres-timeout: + description: "Timeout (seconds) for PostgreSQL startup" + required: false + default: "120" + temporal-timeout: + description: "Timeout (seconds) for Temporal startup" + required: false + default: "240" + autokitteh-timeout: + description: "Timeout (seconds) for AutoKitteh startup" + required: false + default: "120" + log-tail-lines: + description: "Number of log lines to capture" + required: false + default: "100" + cleanup: + description: "Remove containers and network after completion" + required: false + default: "false" + +outputs: + postgres-id: + description: "Container ID for Postgres" + value: ${{ steps.postgres.outputs.postgres-id }} + temporal-id: + description: "Container ID for Temporal" + value: ${{ steps.temporal.outputs.temporal-id }} + autokitteh-id: + description: "Container ID for AutoKitteh" + value: ${{ steps.autokitteh.outputs.autokitteh-id }} runs: - using: "composite" - steps: - - name: Start AutoKitteh container - shell: bash - env: - DESCOPE_PROJECT_ID: ${{ inputs.descope-project-id }} - run: | - CONTAINER_ID=$(docker run -d \ - -p 9980:9980 \ - -e AK_AUTHHTTPMIDDLEWARE__USE_DEFAULT_USER="false" \ - -e 
AK_AUTHLOGINHTTPSVC__DESCOPE__ENABLED="true" \ - -e AK_HTTP__CORS__ALLOWED_ORIGINS="http://localhost:8000" \ - -e AK_AUTHSESSIONS__ALLOWED_CORS_COOKIE="true" \ - -e AK_AUTHLOGINHTTPSVC__DESCOPE__PROJECT_ID="$DESCOPE_PROJECT_ID" \ - -e AK_AUTHSESSIONS__COOKIE_KEYS="78d85f1c9fd9df7d3d459a75f1db315ef634dc854ba90bc5add3e6cb6f135bd6,d9591b1ab2d0e5de1fef96a5a8a50b883430884211f16a206f84ad57897f99d5" \ - -e AK_AUTHJWTTOKENS__HMAC__SIGN_KEY="183b1c8f4c64b3450907b3859be0b94044d3c92a3116f02213585a85dd0cb154" \ - -e AK_AUTHJWTTOKENS__ALGORITHM="hmac" \ - ${{ inputs.autokitteh-image }} up --mode=dev --config db.seed_commands="insert into orgs(created_by, created_at, org_id, display_name, updated_by, updated_at) values('7bcbb000-0000-0000-0000-000000000000', '2025-01-06 09:47:45.348066+00:00', '01943b03-6344-702a-9471-1b0098752177', 'Tests''s Personal Org', '00000000-0000-0000-0000-000000000000', '2025-01-06 11:47:45.348369+02:00');insert into users(user_id, email, display_name, created_by, created_at, default_org_id, updated_by, updated_at,status) values('01943b03-6345-7606-8e13-e319ae2f1929', 'test@autokitteh.com', 'Tests User', '7bcbb000-0000-0000-0000-000000000000', '2025-01-06 09:47:45.349399+00:00', '01943b03-6344-702a-9471-1b0098752177', '00000000-0000-0000-0000-000000000000', '2025-01-06 11:47:45.349424+02:00',1);insert into org_members(created_by, created_at, org_id, user_id, status) values('00000000-0000-0000-0000-000000000000', '2025-01-06 11:47:45.350038+02:00', '01943b03-6344-702a-9471-1b0098752177', '01943b03-6345-7606-8e13-e319ae2f1929',1);") - echo "CONTAINER_ID=$CONTAINER_ID" >> $GITHUB_ENV - - - name: Setup logging - shell: bash - run: | - docker logs -f ${{ env.CONTAINER_ID }} > docker-logs.txt & - echo "LOGS_PID=$!" >> $GITHUB_ENV - - - name: Wait for Backend - shell: bash - run: | - URL=http://localhost:9980 - echo "Waiting for backend to start..." - until $(curl --output /dev/null --silent --head --fail -X POST $URL); do - printf '.' 
- sleep 5 - done - echo "Backend is up!" - - - name: Install browser-specific dependencies - if: inputs.browser == 'Safari' - shell: bash - run: npx playwright install-deps webkit + using: "composite" + steps: + # ─────────────────────────────────────────────────────────────── + - name: πŸ•ΈοΈ Create dedicated Docker network + shell: bash + run: | + NETWORK_NAME="temporal-net-${GITHUB_RUN_ID}-${{ inputs.browser }}" + POSTGRES_CONTAINER_NAME="postgres-db-${{ inputs.browser }}" + TEMPORAL_CONTAINER_NAME="temporal-dev-${{ inputs.browser }}" + AUTOKITTEH_CONTAINER_NAME="autokitteh-ee-${{ inputs.browser }}" + echo "NETWORK_NAME=$NETWORK_NAME" >> "$GITHUB_ENV" + echo "POSTGRES_CONTAINER_NAME=$POSTGRES_CONTAINER_NAME" >> "$GITHUB_ENV" + echo "TEMPORAL_CONTAINER_NAME=$TEMPORAL_CONTAINER_NAME" >> "$GITHUB_ENV" + echo "AUTOKITTEH_CONTAINER_NAME=$AUTOKITTEH_CONTAINER_NAME" >> "$GITHUB_ENV" + docker network create "$NETWORK_NAME" 2>/dev/null || echo "Network $NETWORK_NAME already exists" + + # ─────────────────────────────────────────────────────────────── + - name: πŸ—‚οΈ Cache and pull Docker images + uses: ./.github/actions/cache-docker-images + id: cache-images + with: + cache-key: "test-env-images-v2" + images: "postgres:15-alpine,temporalio/auto-setup:${{ inputs.temporal-version }},${{ inputs.autokitteh-image }}" + restore-only: "true" + + # ─────────────────────────────────────────────────────────────── + - name: πŸ” Validate inputs + shell: bash + run: | + # Validate browser input + case "${{ inputs.browser }}" in + Chrome|Firefox|Safari|Edge) ;; + *) echo "❌ Invalid browser: ${{ inputs.browser }}. Must be Chrome, Firefox, Safari, or Edge"; exit 1 ;; + esac + # Validate temporal-encryption-keys JSON + if [ -n "${{ inputs.temporal-encryption-keys }}" ]; then + echo "${{ inputs.temporal-encryption-keys }}" | jq . 
>/dev/null 2>&1 || { + echo "❌ Invalid JSON format for temporal-encryption-keys" + exit 1 + } + fi + # Check port availability + for port in ${{ inputs.postgres-port }} ${{ inputs.temporal-port }} ${{ inputs.autokitteh-port }}; do + if command -v netstat >/dev/null 2>&1; then + if netstat -tln | grep -q ":${port} "; then + echo "❌ Port $port is already in use" + exit 1 + fi + elif command -v ss >/dev/null 2>&1; then + if ss -tln | grep -q ":${port} "; then + echo "❌ Port $port is already in use" + exit 1 + fi + else + echo "⚠️ Cannot check port availability (netstat/ss not available), proceeding..." + fi + done + + # ─────────────────────────────────────────────────────────────── + - name: πŸ”§ Calculate dynamic ports + shell: bash + run: | + # Calculate port offsets based on browser name to avoid conflicts + case "${{ inputs.browser }}" in + Chrome) + PORT_OFFSET=0 + ;; + Firefox) + PORT_OFFSET=10 + ;; + Safari) + PORT_OFFSET=20 + ;; + Edge) + PORT_OFFSET=30 + ;; + *) + PORT_OFFSET=${{ inputs.port-offset }} + ;; + esac + + # Calculate actual ports + POSTGRES_PORT=$((5432 + PORT_OFFSET)) + TEMPORAL_PORT=$((7233 + PORT_OFFSET)) + AUTOKITTEH_PORT=$((9980 + PORT_OFFSET)) + + echo "PORT_OFFSET=$PORT_OFFSET" >> "$GITHUB_ENV" + echo "POSTGRES_PORT=$POSTGRES_PORT" >> "$GITHUB_ENV" + echo "TEMPORAL_PORT=$TEMPORAL_PORT" >> "$GITHUB_ENV" + echo "AUTOKITTEH_PORT=$AUTOKITTEH_PORT" >> "$GITHUB_ENV" + + echo "Dynamic ports for ${{ inputs.browser }}:" + echo "- PostgreSQL: $POSTGRES_PORT (base: 5432 + $PORT_OFFSET)" + echo "- Temporal: $TEMPORAL_PORT (base: 7233 + $PORT_OFFSET)" + echo "- AutoKitteh: $AUTOKITTEH_PORT (base: 9980 + $PORT_OFFSET)" + + # ─────────────────────────────────────────────────────────────── + - name: 🐘 Start PostgreSQL + id: postgres + shell: bash + run: | + source "${{ github.action_path }}/scripts/progress-bar.sh" + echo "~~~~~~Start PostgreSQL~~~~~~" + echo "Creating PostgreSQL container with:" + echo "- Container name: $POSTGRES_CONTAINER_NAME" + echo 
"- Network: $NETWORK_NAME" + echo "- Port: $POSTGRES_PORT:5432" + echo "- User: ${{ inputs.postgres-user }}" + echo "- Database: ${{ inputs.postgres-db }}" + + POSTGRES_CONTAINER_ID=$(docker run -d --name "$POSTGRES_CONTAINER_NAME" \ + --network "$NETWORK_NAME" \ + -e POSTGRES_PASSWORD=${{ inputs.postgres-password }} \ + -e POSTGRES_USER=${{ inputs.postgres-user }} \ + -e POSTGRES_DB=${{ inputs.postgres-db }} \ + -p $POSTGRES_PORT:5432 \ + -v pgdata:/var/lib/postgresql/data \ + postgres:15-alpine) + + echo "βœ… PostgreSQL container created with ID: $POSTGRES_CONTAINER_ID" + echo "postgres-id=$POSTGRES_CONTAINER_ID" >> "$GITHUB_OUTPUT" + echo "POSTGRES_CONTAINER_ID=$POSTGRES_CONTAINER_ID" >> "$GITHUB_ENV" + + echo "Waiting for PostgreSQL to be ready..." + for ((i=1; i<=${{ inputs.postgres-timeout }} / 2; i++)); do + if docker exec "$POSTGRES_CONTAINER_NAME" pg_isready -U ${{ inputs.postgres-user }} >/dev/null 2>&1; then + printf "\rβœ… PostgreSQL ready in $((i * 2)) seconds! \n" + break + fi + if [[ -t 1 ]]; then + ProgressBar ${i} $(( ${{ inputs.postgres-timeout }} / 2 )) "PostgreSQL" + else + echo "Waiting for PostgreSQL ($((i * 2))s)..." + fi + [ $i -eq $(( ${{ inputs.postgres-timeout }} / 2 )) ] && { + printf "\n❌ PostgreSQL failed to start after ${{ inputs.postgres-timeout }} seconds\n" + echo "PostgreSQL container logs:" + docker logs "$POSTGRES_CONTAINER_NAME" --tail ${{ inputs.log-tail-lines }} + exit 1 + } + sleep 2 + done + + # Create Temporal & visibility DBs + echo "Creating Temporal databases..." 
+ for db in temporal temporal_visibility; do + echo "Checking/creating database: $db" + docker exec "$POSTGRES_CONTAINER_NAME" psql -U ${{ inputs.postgres-user }} -tc "SELECT 1 FROM pg_database WHERE datname='${db}'" | grep -q 1 || \ + docker exec "$POSTGRES_CONTAINER_NAME" psql -U ${{ inputs.postgres-user }} -c "CREATE DATABASE ${db};" + echo "βœ… Database $db ready" + done + + # ─────────────────────────────────────────────────────────────── + - name: πŸ•°οΈ Start Temporal dev server + id: temporal + shell: bash + run: | + source "${{ github.action_path }}/scripts/progress-bar.sh" + echo "~~~~~~Start Temporal dev server~~~~~~" + echo "Creating Temporal container with:" + echo "- Container name: $TEMPORAL_CONTAINER_NAME" + echo "- Network: $NETWORK_NAME" + echo "- Port: $TEMPORAL_PORT:7233" + echo "- PostgreSQL host: $POSTGRES_CONTAINER_NAME" + echo "- Database user: ${{ inputs.postgres-user }}" + + TEMPORAL_CONTAINER_ID=$(docker run -d --name "$TEMPORAL_CONTAINER_NAME" \ + --network "$NETWORK_NAME" \ + -e DB=postgres12 \ + -e POSTGRES_USER=${{ inputs.postgres-user }} \ + -e POSTGRES_PWD=${{ inputs.postgres-password }} \ + -e POSTGRES_SEEDS="$POSTGRES_CONTAINER_NAME" \ + -e DB_PORT=5432 \ + -p $TEMPORAL_PORT:7233 \ + temporalio/auto-setup:${{ inputs.temporal-version }}) + + echo "βœ… Temporal container created with ID: $TEMPORAL_CONTAINER_ID" + echo "temporal-id=$TEMPORAL_CONTAINER_ID" >> "$GITHUB_OUTPUT" + echo "TEMPORAL_CONTAINER_ID=$TEMPORAL_CONTAINER_ID" >> "$GITHUB_ENV" + + echo "Waiting for Temporal dev server port to be open..." 
+ for ((i=1; i<=${{ inputs.temporal-timeout }} / 2; i++)); do + # Try multiple methods to check if Temporal is ready + if command -v lsof >/dev/null 2>&1 && lsof -i :$TEMPORAL_PORT >/dev/null 2>&1; then + printf "\rβœ… Port $TEMPORAL_PORT is open after $((i * 2)) seconds!\n" + break + elif command -v curl >/dev/null 2>&1 && curl -f http://localhost:$TEMPORAL_PORT/health >/dev/null 2>&1; then + printf "\rβœ… Temporal health check passed after $((i * 2)) seconds!\n" + break + elif docker logs "$TEMPORAL_CONTAINER_NAME" 2>/dev/null | grep -q "Temporal server started"; then + printf "\rβœ… Temporal server started after $((i * 2)) seconds!\n" + break + fi + if [[ -t 1 ]]; then + ProgressBar ${i} $(( ${{ inputs.temporal-timeout }} / 2 )) "Temporal Port" + else + echo "Waiting for Temporal port ($((i * 2))s)..." + fi + [ $i -eq $(( ${{ inputs.temporal-timeout }} / 2 )) ] && { + printf "\n❌ Temporal port $TEMPORAL_PORT not ready after ${{ inputs.temporal-timeout }} seconds\n" + echo "Temporal container logs:" + docker logs "$TEMPORAL_CONTAINER_NAME" --tail ${{ inputs.log-tail-lines }} + exit 1 + } + sleep 2 + done + + echo "Waiting for Temporal dev server to be ready (namespace)..." + for ((i=1; i<=${{ inputs.temporal-timeout }} / 2; i++)); do + # Try multiple methods to check if Temporal namespace is ready + if docker exec -q "$TEMPORAL_CONTAINER_NAME" tctl --address localhost:7233 namespace describe --ns default >/dev/null 2>&1; then + printf "\rβœ… Temporal ready in $((i * 2)) seconds! \n" + break + elif docker logs "$TEMPORAL_CONTAINER_NAME" 2>/dev/null | grep -q "Search attributes have been added"; then + printf "\rβœ… Temporal ready (detected from logs) in $((i * 2)) seconds! \n" + break + elif docker logs "$TEMPORAL_CONTAINER_NAME" 2>/dev/null | grep -q "Default namespace default already registered"; then + printf "\rβœ… Temporal ready (namespace registered) in $((i * 2)) seconds! 
\n" + break + elif command -v curl >/dev/null 2>&1 && curl -f http://localhost:$TEMPORAL_PORT/api/v1/namespaces/default >/dev/null 2>&1; then + printf "\rβœ… Temporal ready (API check) in $((i * 2)) seconds! \n" + break + fi + # If the error is 'namespace not found', tolerate and keep waiting + if docker exec "$TEMPORAL_CONTAINER_NAME" tctl --address localhost:7233 namespace describe --ns default 2>&1 | grep -q 'Namespace default is not found'; then + echo "Namespace not found yet, waiting... ($((i * 2))s)" + else + echo "Waiting for Temporal ($((i * 2))s)..." + fi + [ $i -eq $(( ${{ inputs.temporal-timeout }} / 2 )) ] && { + printf "\n❌ Temporal failed to start after ${{ inputs.temporal-timeout }} seconds\n" + echo "Temporal container logs:" + docker logs "$TEMPORAL_CONTAINER_NAME" --tail ${{ inputs.log-tail-lines }} + exit 1 + } + sleep 2 + done + + # ─────────────────────────────────────────────────────────────── + - name: 🐳 Diagnostics before AutoKitteh + shell: bash + run: | + echo "~~~~~~Comprehensive Diagnostics~~~~~~" + echo "== Environment Variables ==" + echo "- NETWORK_NAME: $NETWORK_NAME" + echo "- POSTGRES_CONTAINER_NAME: $POSTGRES_CONTAINER_NAME" + echo "- TEMPORAL_CONTAINER_NAME: $TEMPORAL_CONTAINER_NAME" + echo "- AUTOKITTEH_CONTAINER_NAME: $AUTOKITTEH_CONTAINER_NAME" + echo "" + + echo "== Docker Version ==" + docker version || true + echo "" + + echo "== Docker System Info ==" + docker system info || true + echo "" + + echo "== All Docker Containers ==" + docker ps -a --format "table {{.ID}}\t{{.Image}}\t{{.Names}}\t{{.Status}}\t{{.Ports}}" + echo "" + + echo "== Containers on our network ==" + docker ps -a --filter network="$NETWORK_NAME" --format "table {{.ID}}\t{{.Image}}\t{{.Names}}\t{{.Status}}\t{{.Ports}}" + echo "" + + echo "== Network inspection ==" + if command -v jq >/dev/null 2>&1; then + docker network inspect "$NETWORK_NAME" | jq '.[0].Containers' + else + echo "jq not installed, showing raw network inspection:" + docker network 
inspect "$NETWORK_NAME" + fi + echo "" + + echo "== Port bindings check ==" + echo "Checking port availability..." + for port in $POSTGRES_PORT $TEMPORAL_PORT $AUTOKITTEH_PORT; do + if command -v netstat >/dev/null 2>&1; then + if netstat -tln | grep -q ":${port} "; then + echo "Port $port: IN USE" + netstat -tln | grep ":${port} " || true + else + echo "Port $port: AVAILABLE" + fi + elif command -v ss >/dev/null 2>&1; then + if ss -tln | grep -q ":${port} "; then + echo "Port $port: IN USE" + ss -tln | grep ":${port} " || true + else + echo "Port $port: AVAILABLE" + fi + else + echo "Port $port: Cannot check (no netstat/ss available)" + fi + done + echo "" + + echo "== Container Health Checks ==" + echo "PostgreSQL container status:" + docker ps --filter name="$POSTGRES_CONTAINER_NAME" --format "table {{.Names}}\t{{.Status}}" || true + echo "PostgreSQL readiness:" + docker exec "$POSTGRES_CONTAINER_NAME" pg_isready -U ${{ inputs.postgres-user }} 2>&1 || true + echo "" + + echo "Temporal container status:" + docker ps --filter name="$TEMPORAL_CONTAINER_NAME" --format "table {{.Names}}\t{{.Status}}" || true + echo "Temporal port check:" + curl -f http://localhost:$TEMPORAL_PORT/health 2>&1 || echo "Temporal health endpoint not accessible" + echo "" + + echo "== Recent Container Logs ==" + echo "PostgreSQL logs (last 20 lines):" + docker logs "$POSTGRES_CONTAINER_NAME" --tail 20 || true + echo "" + echo "Temporal logs (last 20 lines):" + docker logs "$TEMPORAL_CONTAINER_NAME" --tail 20 || true + echo "" + + # ─────────────────────────────────────────────────────────────── + - name: πŸ“‹ Print Autokitteh Environment Variables + shell: bash + env: + DESCOPE_PROJECT_ID: ${{ inputs.descope-project-id }} + RSA_PRIVATE_KEY: ${{ inputs.rsa-private-key }} + RSA_PUBLIC_KEY: ${{ inputs.rsa-public-key }} + AUTOKITTEH_IMAGE: ${{ inputs.autokitteh-image }} + SESSION_COOKIE_KEYS: ${{ inputs.session-cookie-keys }} + run: | + echo "~~~~~~Autokitteh Environment Variables~~~~~~" + + # 
Build the DSN
+ DSN="postgres://${{ inputs.postgres-user }}:${{ inputs.postgres-password }}@$POSTGRES_CONTAINER_NAME:5432/${{ inputs.postgres-db }}?sslmode=disable"
+ TEMPORAL_HOSTPORT="$TEMPORAL_CONTAINER_NAME:7233"
+
+ echo "== Container Configuration =="
+ echo "- Container name: $AUTOKITTEH_CONTAINER_NAME"
+ echo "- Network name: $NETWORK_NAME"
+ echo "- Image: $AUTOKITTEH_IMAGE"
+ echo "- Port: $AUTOKITTEH_PORT:9980"
+ echo "- Session cookie keys: [REDACTED]"
+ echo ""
+
+ echo "== Secrets Status =="
+ echo "DESCOPE_PROJECT_ID: $(if [ -n "$DESCOPE_PROJECT_ID" ]; then echo "Present"; else echo "MISSING"; fi)"
+ echo "SESSION_COOKIE_KEYS: $(if [ -n "$SESSION_COOKIE_KEYS" ]; then echo "Present"; else echo "MISSING"; fi)"
+ echo "RSA_PRIVATE_KEY: $(if [ -n "$RSA_PRIVATE_KEY" ]; then echo "Present"; else echo "MISSING"; fi)"
+ echo "RSA_PUBLIC_KEY: $(if [ -n "$RSA_PUBLIC_KEY" ]; then echo "Present"; else echo "MISSING"; fi)"
+ echo ""
+
+ echo "== Autokitteh Environment Variables (AK_*) =="
+ echo "AK_DB__TYPE=postgres"
+ # Redact the DB password: this step exists only to print configuration,
+ # so never echo the raw DSN (it embeds the postgres password).
+ echo "AK_DB__DSN=postgres://${{ inputs.postgres-user }}:[REDACTED]@$POSTGRES_CONTAINER_NAME:5432/${{ inputs.postgres-db }}?sslmode=disable"
+ echo "AK_TEMPORALCLIENT__HOSTPORT=$TEMPORAL_HOSTPORT"
+ echo "AK_TEMPORALCLIENT__NAMESPACE=default"
+ echo "AK_AUTHHTTPMIDDLEWARE__USE_DEFAULT_USER=false"
+ echo "AK_AUTHLOGINHTTPSVC__DESCOPE__ENABLED=true"
+ echo "AK_AUTHLOGINHTTPSVC__DESCOPE__PROJECT_ID=$DESCOPE_PROJECT_ID"
+ # Cookie keys are a secret: report the correct variable name, redacted.
+ # (Previously this echoed $SESSION_COOKIE_KEY — an undefined variable, the
+ # env var is SESSION_COOKIE_KEYS — under the wrong ALLOWED_CORS_COOKIE key.)
+ echo "AK_AUTHSESSIONS__COOKIE_KEYS=[REDACTED]"
+ echo "AK_HTTP__CORS__ALLOWED_ORIGINS=http://localhost:8000"
+ echo "AK_AUTHSESSIONS__ALLOWED_CORS_COOKIE=true"
+ echo "AK_AUTHJWTTOKENS__ALGORITHM=rsa"
+ echo "AK_AUTHJWTTOKENS__RSA__PRIVATE_KEY=[REDACTED]"
+ echo "AK_AUTHJWTTOKENS__RSA__PUBLIC_KEY=[REDACTED]"
+ echo "AK_TEMPORALCLIENT__DATA_CONVERTER__ENCRYPTION__KEYS=key1=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
+ echo "AK_SECRETS__PROVIDER=db"
+ echo ""
+
+ echo "== Additional Docker Configuration =="
+ echo "--platform=linux/amd64"
+ echo "--add-host=host.docker.internal:host-gateway"
+
+ # 
─────────────────────────────────────────────────────────────── + - name: πŸš€ Start AutoKitteh Enterprise + id: autokitteh + shell: bash + env: + DESCOPE_PROJECT_ID: ${{ inputs.descope-project-id }} + RSA_PRIVATE_KEY: ${{ inputs.rsa-private-key }} + RSA_PUBLIC_KEY: ${{ inputs.rsa-public-key }} + AUTOKITTEH_IMAGE: ${{ inputs.autokitteh-image }} + SESSION_COOKIE_KEYS: ${{ inputs.session-cookie-keys }} + run: | + source "${{ github.action_path }}/scripts/progress-bar.sh" + echo "~~~~~~Starting AutoKitteh Enterprise~~~~~~" + echo "Environment variables:" + echo "- DESCOPE_PROJECT_ID: $(if [ -n "$DESCOPE_PROJECT_ID" ]; then echo "Present"; else echo "MISSING"; fi)" + echo "- RSA keys: $(if [ -n "$RSA_PRIVATE_KEY" ] && [ -n "$RSA_PUBLIC_KEY" ]; then echo "Present"; else echo "MISSING"; fi)" + echo "- SESSION_COOKIE_KEYS: $(if [ -n "$SESSION_COOKIE_KEYS" ]; then echo "Present"; else echo "MISSING"; fi)" + echo "- AUTOKITTEH_IMAGE: $AUTOKITTEH_IMAGE" + echo "- Container name: $AUTOKITTEH_CONTAINER_NAME" + echo "- Network name: $NETWORK_NAME" + echo "" + + echo "== Comprehensive RSA Key Validation ==" + + # Validate RSA keys are not empty + echo "Checking RSA key presence..." + if [ -z "$RSA_PRIVATE_KEY" ]; then + echo "❌ RSA private key is missing or empty" + echo "Please ensure AK_RSA_PRIVATE_KEY environment variable is set" + exit 1 + fi + + if [ -z "$RSA_PUBLIC_KEY" ]; then + echo "❌ RSA public key is missing or empty" + echo "Please ensure AK_RSA_PUBLIC_KEY environment variable is set" + exit 1 + fi + + echo "βœ… Both RSA keys are present" + + # Validate RSA key format + echo "Validating RSA key format..." + if ! echo "$RSA_PRIVATE_KEY" | head -1 | grep -q "BEGIN.*PRIVATE KEY"; then + echo "❌ Private key does not start with proper PEM header" + echo "Expected: -----BEGIN RSA PRIVATE KEY----- or -----BEGIN PRIVATE KEY-----" + echo "Got: $(echo "$RSA_PRIVATE_KEY" | head -1)" + exit 1 + fi + + if ! 
echo "$RSA_PUBLIC_KEY" | head -1 | grep -q "BEGIN.*PUBLIC KEY"; then + echo "❌ Public key does not start with proper PEM header" + echo "Expected: -----BEGIN PUBLIC KEY-----" + echo "Got: $(echo "$RSA_PUBLIC_KEY" | head -1)" + exit 1 + fi + + if ! echo "$RSA_PRIVATE_KEY" | tail -1 | grep -q "END.*PRIVATE KEY"; then + echo "❌ Private key does not end with proper PEM footer" + echo "Expected: -----END RSA PRIVATE KEY----- or -----END PRIVATE KEY-----" + echo "Got: $(echo "$RSA_PRIVATE_KEY" | tail -1)" + exit 1 + fi + + if ! echo "$RSA_PUBLIC_KEY" | tail -1 | grep -q "END.*PUBLIC KEY"; then + echo "❌ Public key does not end with proper PEM footer" + echo "Expected: -----END PUBLIC KEY-----" + echo "Got: $(echo "$RSA_PUBLIC_KEY" | tail -1)" + exit 1 + fi + + echo "βœ… RSA key format validation passed" + + # Validate RSA keys can be parsed by OpenSSL + echo "Validating RSA keys with OpenSSL..." + + # Create temporary files for validation (safer than piping for complex keys) + TEMP_PRIVATE_KEY=$(mktemp) + TEMP_PUBLIC_KEY=$(mktemp) + + echo "$RSA_PRIVATE_KEY" > "$TEMP_PRIVATE_KEY" + echo "$RSA_PUBLIC_KEY" > "$TEMP_PUBLIC_KEY" + + # Validate private key + if ! openssl rsa -in "$TEMP_PRIVATE_KEY" -check -noout >/dev/null 2>&1; then + echo "❌ Private key failed OpenSSL validation" + echo "OpenSSL error:" + openssl rsa -in "$TEMP_PRIVATE_KEY" -check -noout 2>&1 | head -3 + rm -f "$TEMP_PRIVATE_KEY" "$TEMP_PUBLIC_KEY" + exit 1 + fi + + # Validate public key + if ! openssl pkey -pubin -in "$TEMP_PUBLIC_KEY" -noout >/dev/null 2>&1; then + echo "❌ Public key failed OpenSSL validation" + echo "OpenSSL error:" + openssl pkey -pubin -in "$TEMP_PUBLIC_KEY" -noout 2>&1 | head -3 + rm -f "$TEMP_PRIVATE_KEY" "$TEMP_PUBLIC_KEY" + exit 1 + fi + + echo "βœ… OpenSSL validation passed for both keys" + + # Validate key pair match + echo "Validating RSA key pair match..." 
+ + # Extract public key from private key and compare with provided public key + DERIVED_PUBLIC_KEY=$(openssl rsa -in "$TEMP_PRIVATE_KEY" -pubout 2>/dev/null) + PROVIDED_PUBLIC_KEY=$(cat "$TEMP_PUBLIC_KEY") + + if [ "$DERIVED_PUBLIC_KEY" != "$PROVIDED_PUBLIC_KEY" ]; then + echo "❌ RSA key pair mismatch" + echo "The public key does not match the private key" + echo "This could indicate:" + echo " - Wrong public key provided" + echo " - Keys from different pairs" + echo " - Corrupted key data" + rm -f "$TEMP_PRIVATE_KEY" "$TEMP_PUBLIC_KEY" + exit 1 + fi + + echo "βœ… RSA key pair validation passed - keys match!" + + # Validate key strength (optional but recommended) + echo "Checking RSA key strength..." + KEY_SIZE=$(openssl rsa -in "$TEMP_PRIVATE_KEY" -text -noout 2>/dev/null | grep "Private-Key:" | grep -o '[0-9]*') + + if [ -n "$KEY_SIZE" ]; then + echo "RSA key size: $KEY_SIZE bits" + if [ "$KEY_SIZE" -lt 2048 ]; then + echo "⚠️ Warning: RSA key size ($KEY_SIZE bits) is below recommended minimum (2048 bits)" + echo "Consider using a stronger key for production environments" + elif [ "$KEY_SIZE" -ge 4096 ]; then + echo "βœ… Excellent key strength ($KEY_SIZE bits)" + else + echo "βœ… Good key strength ($KEY_SIZE bits)" + fi + else + echo "ℹ️ Could not determine key size" + fi + + # Clean up temporary files + rm -f "$TEMP_PRIVATE_KEY" "$TEMP_PUBLIC_KEY" + + echo "βœ… All RSA key validations passed successfully!" + echo "" + + # Validate image exists + if ! docker image inspect "$AUTOKITTEH_IMAGE" >/dev/null 2>&1; then + echo "❌ AutoKitteh image '$AUTOKITTEH_IMAGE' not found locally, attempting to pull..." + if ! docker pull "$AUTOKITTEH_IMAGE"; then + echo "❌ Failed to pull AutoKitteh image '$AUTOKITTEH_IMAGE'" + exit 1 + fi + fi + + echo "Creating AutoKitteh container..." 
+
+ # Build the DSN
+ DSN="postgres://${{ inputs.postgres-user }}:${{ inputs.postgres-password }}@$POSTGRES_CONTAINER_NAME:5432/${{ inputs.postgres-db }}?sslmode=disable"
+ TEMPORAL_HOSTPORT="$TEMPORAL_CONTAINER_NAME:7233"
+
+ echo "Running docker command..."
+ echo "- Container: $AUTOKITTEH_CONTAINER_NAME"
+ echo "- Network: $NETWORK_NAME"
+ echo "- Port: $AUTOKITTEH_PORT:9980"
+ # Never print the raw DSN: it embeds the postgres password, and step logs
+ # are retained with the workflow run.
+ echo "- DSN: postgres://${{ inputs.postgres-user }}:[REDACTED]@$POSTGRES_CONTAINER_NAME:5432/${{ inputs.postgres-db }}?sslmode=disable"
+ echo "- Temporal: $TEMPORAL_HOSTPORT"
+ echo "- Descope Project ID: $DESCOPE_PROJECT_ID"
+ echo "- RSA keys are $(echo "$RSA_PRIVATE_KEY" | wc -l) lines (private), $(echo "$RSA_PUBLIC_KEY" | wc -l) lines (public)"
+
+ # Check if port is already in use
+ echo "Checking if port $AUTOKITTEH_PORT is available..."
+ if command -v lsof >/dev/null 2>&1; then
+ if lsof -i :$AUTOKITTEH_PORT >/dev/null 2>&1; then
+ echo "❌ Port $AUTOKITTEH_PORT is already in use:"
+ lsof -i :$AUTOKITTEH_PORT
+ echo "Please stop the process using this port before running the tests."
+ exit 1
+ else
+ echo "βœ… Port $AUTOKITTEH_PORT is available"
+ fi
+ elif command -v netstat >/dev/null 2>&1; then
+ if netstat -an | grep ":$AUTOKITTEH_PORT " >/dev/null 2>&1; then
+ echo "❌ Port $AUTOKITTEH_PORT is already in use:"
+ netstat -an | grep ":$AUTOKITTEH_PORT "
+ echo "Please stop the process using this port before running the tests."
+ exit 1
+ else
+ echo "βœ… Port $AUTOKITTEH_PORT is available"
+ fi
+ else
+ echo "⚠️ Cannot check port availability (no lsof or netstat available), proceeding..." 
+ fi
+
+ # Run docker command with proper error capture
+ set +e # Temporarily disable exit on error to capture the output
+ DOCKER_RUN_OUTPUT=$(docker run -d --name "$AUTOKITTEH_CONTAINER_NAME" \
+ --platform linux/amd64 \
+ --network "$NETWORK_NAME" \
+ -p $AUTOKITTEH_PORT:9980 \
+ --add-host=host.docker.internal:host-gateway \
+ -e AK_DB__TYPE=postgres \
+ -e AK_DB__DSN="$DSN" \
+ -e AK_TEMPORALCLIENT__HOSTPORT="$TEMPORAL_HOSTPORT" \
+ -e AK_TEMPORALCLIENT__NAMESPACE=default \
+ -e AK_AUTHHTTPMIDDLEWARE__USE_DEFAULT_USER=false \
+ -e AK_AUTHLOGINHTTPSVC__DESCOPE__ENABLED=true \
+ -e AK_AUTHLOGINHTTPSVC__DESCOPE__PROJECT_ID="$DESCOPE_PROJECT_ID" \
+ # Pass the session cookie keys under their own config key. Previously this
+ # was `-e AK_AUTHSESSIONS__ALLOWED_CORS_COOKIE="$SESSION_COOKIE_KEY"`:
+ # $SESSION_COOKIE_KEY is undefined (the env var is SESSION_COOKIE_KEYS) and
+ # the duplicated ALLOWED_CORS_COOKIE key meant docker kept only the later
+ # `=true` value, so the cookie keys were never passed to the container.
+ -e AK_AUTHSESSIONS__COOKIE_KEYS="$SESSION_COOKIE_KEYS" \
+ -e AK_HTTP__CORS__ALLOWED_ORIGINS=http://localhost:8000 \
+ -e AK_AUTHSESSIONS__ALLOWED_CORS_COOKIE=true \
+ -e AK_AUTHJWTTOKENS__ALGORITHM=rsa \
+ -e AK_AUTHJWTTOKENS__RSA__PRIVATE_KEY="$RSA_PRIVATE_KEY" \
+ -e AK_AUTHJWTTOKENS__RSA__PUBLIC_KEY="$RSA_PUBLIC_KEY" \
+ -e AK_TEMPORALCLIENT__DATA_CONVERTER__ENCRYPTION__KEYS="key1=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" \
+ -e AK_SECRETS__PROVIDER=db \
+ "$AUTOKITTEH_IMAGE" up 2>&1)
+ DOCKER_EXIT_CODE=$?
+ set -e # Re-enable exit on error
+
+ if [ $DOCKER_EXIT_CODE -ne 0 ]; then
+ echo "❌ Docker run failed with exit code $DOCKER_EXIT_CODE"
+ echo "Docker run output:"
+ echo "$DOCKER_RUN_OUTPUT"
+ echo ""
+ echo "Checking if container was created anyway..." 
+ docker ps -a --filter name="$AUTOKITTEH_CONTAINER_NAME" --format "table {{.Names}}\t{{.Status}}\t{{.Image}}" || true + echo "" + echo "Troubleshooting information:" + echo "- Image: $AUTOKITTEH_IMAGE" + echo "- Container name: $AUTOKITTEH_CONTAINER_NAME" + echo "- Network: $NETWORK_NAME" + echo "- Platform: linux/amd64 (specified to avoid platform mismatch)" + exit 1 + fi + + echo "βœ… Docker run succeeded" + + # Extract container ID from output (filter out warnings) + CONTAINER_ID=$(echo "$DOCKER_RUN_OUTPUT" | grep -v "WARNING:" | tail -1) + if [ -z "$CONTAINER_ID" ] || [ ${#CONTAINER_ID} -ne 64 ]; then + echo "❌ Failed to create AutoKitteh container - invalid container ID returned: '$CONTAINER_ID'" + echo "Full docker run output:" + echo "$DOCKER_RUN_OUTPUT" + docker ps -a | grep "$AUTOKITTEH_CONTAINER_NAME" || echo "No $AUTOKITTEH_CONTAINER_NAME container found" + exit 1 + fi + + echo "βœ… AutoKitteh container created with ID: $CONTAINER_ID" + echo "autokitteh-id=$CONTAINER_ID" >> "$GITHUB_OUTPUT" + echo "CONTAINER_ID=$CONTAINER_ID" >> "$GITHUB_ENV" + + # Give the container a moment to start up and then check for immediate failures + echo "Checking for immediate startup errors..." 
+ sleep 3 + + # Early error detection - check logs for critical errors before waiting + EARLY_LOGS=$(docker logs "$AUTOKITTEH_CONTAINER_NAME" 2>&1 || true) + if echo "$EARLY_LOGS" | grep -q "failed to parse PEM block containing private key\|invalid private key\|Error: new service.*failed to build"; then + printf "\n❌ AutoKitteh failed to start due to configuration errors (detected early)\n" + echo "Critical error detected in container logs:" + echo "$EARLY_LOGS" | grep -A5 -B5 "failed to parse PEM block containing private key\|invalid private key\|Error: new service.*failed to build" || true + echo "" + echo "Full container logs:" + docker logs "$AUTOKITTEH_CONTAINER_NAME" || true + echo "" + echo "Container status:" + docker ps -a --filter name="$AUTOKITTEH_CONTAINER_NAME" --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" || true + exit 1 + fi + + echo "Waiting for AutoKitteh Enterprise to be ready..." + for ((i=1; i<=${{ inputs.autokitteh-timeout }} / 2; i++)); do + # Check if container is still running + if ! 
docker ps --format '{{.Names}}' | grep -q "$AUTOKITTEH_CONTAINER_NAME"; then + printf "\n❌ $AUTOKITTEH_CONTAINER_NAME exited unexpectedly\n" + echo "Container status:" + docker ps -a --filter name="$AUTOKITTEH_CONTAINER_NAME" --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" || true + echo "Container logs:" + docker logs "$AUTOKITTEH_CONTAINER_NAME" --tail ${{ inputs.log-tail-lines }} || true + echo "Inspecting container:" + docker inspect "$AUTOKITTEH_CONTAINER_NAME" || true + exit 1 + fi + + # Check for critical application errors in logs + CONTAINER_LOGS=$(docker logs "$AUTOKITTEH_CONTAINER_NAME" --tail 50 2>&1 || true) + + # Check for fatal startup errors that would prevent the service from working + if echo "$CONTAINER_LOGS" | grep -q "failed to parse PEM block containing private key\|invalid private key\|Error: new service.*failed to build"; then + printf "\n❌ AutoKitteh failed to start due to configuration errors\n" + echo "Critical error detected in container logs:" + echo "$CONTAINER_LOGS" | grep -A5 -B5 "failed to parse PEM block containing private key\|invalid private key\|Error: new service.*failed to build" || true + echo "" + echo "Full container logs:" + docker logs "$AUTOKITTEH_CONTAINER_NAME" --tail ${{ inputs.log-tail-lines }} || true + echo "" + echo "Container status:" + docker ps -a --filter name="$AUTOKITTEH_CONTAINER_NAME" --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" || true + exit 1 + fi + + # Try health check + echo "Attempting health check ($((i * 2))s)..." + if [ "${ACT:-false}" = "true" ]; then + # In Act environment, just check if container is running (simplified health check) + if docker ps --format '{{.Names}}' | grep -q "$AUTOKITTEH_CONTAINER_NAME"; then + printf "\rβœ… AutoKitteh ready (Act mode) in $((i * 2)) seconds! \n" + break + fi + else + # Full health check for non-Act environments + if curl -fs http://localhost:$AUTOKITTEH_PORT/healthz >/dev/null 2>&1; then + printf "\rβœ… AutoKitteh ready in $((i * 2)) seconds! 
\n" + break + else + # Show more detailed curl output for debugging + echo "Health check failed, detailed curl output:" + curl -v http://localhost:$AUTOKITTEH_PORT/healthz 2>&1 || true + echo "Container logs (last 10 lines):" + docker logs "$AUTOKITTEH_CONTAINER_NAME" --tail 10 || true + fi + fi + + if [[ -t 1 ]]; then + ProgressBar ${i} $(( ${{ inputs.autokitteh-timeout }} / 2 )) "AutoKitteh" + else + echo "Waiting for AutoKitteh ($((i * 2))s)..." + fi + [ $i -eq $(( ${{ inputs.autokitteh-timeout }} / 2 )) ] && { + printf "\n❌ AutoKitteh health check failed after ${{ inputs.autokitteh-timeout }} seconds\n" + + # Check for specific error patterns in the final logs + FINAL_LOGS=$(docker logs "$AUTOKITTEH_CONTAINER_NAME" --tail 100 2>&1 || true) + if echo "$FINAL_LOGS" | grep -q "failed to parse PEM block containing private key\|invalid private key\|Error: new service.*failed to build"; then + echo "❌ CONFIGURATION ERROR DETECTED:" + echo "AutoKitteh failed due to configuration issues, likely invalid RSA keys or service configuration." + echo "" + echo "Critical errors found:" + echo "$FINAL_LOGS" | grep -A5 -B5 "failed to parse PEM block containing private key\|invalid private key\|Error: new service.*failed to build" || true + else + echo "❌ TIMEOUT ERROR:" + echo "AutoKitteh container is running but health check endpoint is not responding." + echo "This might indicate network issues or service startup problems." 
+ fi + + echo "" + echo "Final container status:" + docker ps -a --filter name="$AUTOKITTEH_CONTAINER_NAME" --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" || true + echo "Final container logs:" + docker logs "$AUTOKITTEH_CONTAINER_NAME" --tail ${{ inputs.log-tail-lines }} || true + echo "Container inspection:" + docker inspect "$AUTOKITTEH_CONTAINER_NAME" || true + echo "Network connectivity test:" + docker exec "$AUTOKITTEH_CONTAINER_NAME" ping -c 3 "$POSTGRES_CONTAINER_NAME" || true + docker exec "$AUTOKITTEH_CONTAINER_NAME" ping -c 3 "$TEMPORAL_CONTAINER_NAME" || true + exit 1 + } + sleep 2 + done + + # ─────────────────────────────────────────────────────────────── + - name: πŸ—„οΈ Create schema and insert test data into PostgreSQL + shell: bash + run: | + echo "~~~~~~Creating schema and inserting test data into PostgreSQL~~~~~~" + echo "Database connection details:" + echo "- Container: $POSTGRES_CONTAINER_NAME" + echo "- User: ${{ inputs.postgres-user }}" + echo "- Database: ${{ inputs.postgres-db }}" + echo "" + + echo "Testing database connectivity..." + if docker exec "$POSTGRES_CONTAINER_NAME" psql -U ${{ inputs.postgres-user }} -d ${{ inputs.postgres-db }} -c "SELECT version();" 2>&1; then + echo "βœ… Database connection successful" + else + echo "❌ Database connection failed" + echo "Container logs:" + docker logs "$POSTGRES_CONTAINER_NAME" --tail 20 || true + exit 1 + fi + echo "" + + echo "Dropping and recreating core tables..." 
+ docker exec "$POSTGRES_CONTAINER_NAME" psql -U ${{ inputs.postgres-user }} -d ${{ inputs.postgres-db }} -c " + -- Drop tables if they exist (in reverse dependency order) + DROP TABLE IF EXISTS org_members CASCADE; + DROP TABLE IF EXISTS users CASCADE; + DROP TABLE IF EXISTS orgs CASCADE; + + -- Create orgs table + CREATE TABLE orgs ( + created_by uuid, + created_at timestamp with time zone, + org_id uuid NOT NULL, + display_name text, + updated_by uuid, + updated_at timestamp with time zone, + name text, + deleted_at timestamp with time zone + ); + + -- Create users table + CREATE TABLE users ( + user_id uuid NOT NULL, + email text NOT NULL, + display_name text, + created_by uuid, + created_at timestamp with time zone, + default_org_id uuid, + updated_by uuid, + updated_at timestamp with time zone, + status integer + ); + + -- Create org_members table + CREATE TABLE org_members ( + created_by uuid, + created_at timestamp with time zone, + org_id uuid NOT NULL, + user_id uuid NOT NULL, + status bigint, + roles jsonb, + updated_by uuid, + updated_at timestamp with time zone + ); + " 2>&1 || { + echo "❌ Failed to recreate tables" + exit 1 + } + + echo "Adding primary keys and constraints..." + docker exec "$POSTGRES_CONTAINER_NAME" psql -U ${{ inputs.postgres-user }} -d ${{ inputs.postgres-db }} -c " + -- Add primary keys + ALTER TABLE orgs ADD CONSTRAINT orgs_pkey PRIMARY KEY (org_id); + ALTER TABLE users ADD CONSTRAINT users_pkey PRIMARY KEY (user_id); + ALTER TABLE org_members ADD CONSTRAINT org_members_pkey PRIMARY KEY (org_id, user_id); + + -- Add foreign key constraints + ALTER TABLE org_members ADD CONSTRAINT fk_org_members_org FOREIGN KEY (org_id) REFERENCES orgs(org_id); + ALTER TABLE org_members ADD CONSTRAINT fk_org_members_user FOREIGN KEY (user_id) REFERENCES users(user_id); + " 2>&1 || { + echo "❌ Failed to add constraints" + exit 1 + } + + echo "Creating indexes..." 
+ docker exec "$POSTGRES_CONTAINER_NAME" psql -U ${{ inputs.postgres-user }} -d ${{ inputs.postgres-db }} -c " + CREATE INDEX idx_org_members_status ON org_members USING btree (status); + CREATE INDEX idx_orgs_deleted_at ON orgs USING btree (deleted_at); + CREATE INDEX idx_orgs_name ON orgs USING btree (name); + CREATE INDEX idx_users_status ON users USING btree (status); + " 2>&1 || { + echo "❌ Failed to create indexes" + exit 1 + } + + echo "Inserting real data from local autokitteh database..." + docker exec "$POSTGRES_CONTAINER_NAME" psql -U ${{ inputs.postgres-user }} -d ${{ inputs.postgres-db }} -c " + -- Insert organizations + INSERT INTO orgs (created_by, created_at, org_id, display_name, updated_by, updated_at, name, deleted_at) + VALUES + ('7bcbb000-0000-0000-0000-000000000000', '2025-06-26 12:42:21.281346+03', '0197ab9d-e560-7484-a70f-ce91103e156a', 'Ronen Mars''s Personal Org', '00000000-0000-0000-0000-000000000000', '2025-06-26 12:42:21.285768+03', 'Sneaky_Monkey_Org', NULL), + ('00000000-0000-0000-0000-000000000000', '2025-06-29 19:51:36.923921+03', '60c40000-0000-0000-0000-000000000002', 'Default Org', '00000000-0000-0000-0000-000000000000', '2025-06-29 19:51:36.924044+03', NULL, NULL); + + -- Insert users + INSERT INTO users (user_id, email, display_name, created_by, created_at, default_org_id, updated_by, updated_at, status) + VALUES + ('0197ab9d-e569-763d-b789-60da8a616f9e', 'ronen@autokitteh.com', 'Ronen Mars', '7bcbb000-0000-0000-0000-000000000000', '2025-06-26 12:42:21.289413+03', '0197ab9d-e560-7484-a70f-ce91103e156a', '00000000-0000-0000-0000-000000000000', '2025-06-26 12:42:21.28975+03', 1), + ('7bcbb000-0000-0000-0000-000000000001', 'default@autokitteh.com', 'Default User', '00000000-0000-0000-0000-000000000000', '2025-06-29 19:51:36.920902+03', '60c40000-0000-0000-0000-000000000002', '00000000-0000-0000-0000-000000000000', '2025-06-29 19:51:36.921021+03', 1); + + -- Insert organization memberships + INSERT INTO org_members (created_by, 
created_at, org_id, user_id, status, roles, updated_by, updated_at) + VALUES + ('00000000-0000-0000-0000-000000000000', '2025-06-26 12:42:21.296447+03', '0197ab9d-e560-7484-a70f-ce91103e156a', '0197ab9d-e569-763d-b789-60da8a616f9e', 1, '[\"admin\"]'::jsonb, '00000000-0000-0000-0000-000000000000', '2025-06-26 12:42:21.296447+03'), + ('00000000-0000-0000-0000-000000000000', '2025-06-29 19:51:36.924957+03', '60c40000-0000-0000-0000-000000000002', '7bcbb000-0000-0000-0000-000000000001', 1, '[\"admin\"]'::jsonb, '00000000-0000-0000-0000-000000000000', '2025-06-29 19:51:36.924957+03'); + " 2>&1 || { + echo "❌ Failed to insert test data into PostgreSQL" + echo "Container logs:" + docker logs "$POSTGRES_CONTAINER_NAME" --tail ${{ inputs.log-tail-lines }} + exit 1 + } + + echo "βœ… Schema created and data inserted successfully" + echo "" + echo "Verifying inserted data..." + echo "Organizations:" + docker exec "$POSTGRES_CONTAINER_NAME" psql -U ${{ inputs.postgres-user }} -d ${{ inputs.postgres-db }} -c "SELECT org_id, display_name, name FROM orgs;" 2>&1 || true + echo "Users:" + docker exec "$POSTGRES_CONTAINER_NAME" psql -U ${{ inputs.postgres-user }} -d ${{ inputs.postgres-db }} -c "SELECT user_id, email, display_name FROM users;" 2>&1 || true + echo "Organization members:" + docker exec "$POSTGRES_CONTAINER_NAME" psql -U ${{ inputs.postgres-user }} -d ${{ inputs.postgres-db }} -c "SELECT org_id, user_id, status, roles FROM org_members;" 2>&1 || true + + # ─────────────────────────────────────────────────────────────── + - name: 🧩 Install Playwright dependencies + if: ${{ inputs.browser != '' }} + shell: bash + run: | + echo "~~~~~~Install Playwright deps for ${{ inputs.browser }}~~~~~~" + # Map browser names to Playwright install-deps targets + case "${{ inputs.browser }}" in + Chrome) + PLAYWRIGHT_BROWSER="chrome" + ;; + Firefox) + PLAYWRIGHT_BROWSER="firefox" + ;; + Safari) + PLAYWRIGHT_BROWSER="webkit" + ;; + Edge) + PLAYWRIGHT_BROWSER="msedge" + ;; + *) + echo "❌ 
Unsupported browser: ${{ inputs.browser }}" + exit 1 + ;; + esac + echo "Converting ${{ inputs.browser }} -> $PLAYWRIGHT_BROWSER" + npx playwright install-deps $PLAYWRIGHT_BROWSER + + # ─────────────────────────────────────────────────────────────── + - name: πŸ“‹ Show container summary & service health + shell: bash + run: | + echo "~~~~~~Container Summary~~~~~~" + docker ps --filter network="$NETWORK_NAME" \ + --format "table {{.ID}}\t{{.Image}}\t{{.Names}}\t{{.Status}}\t{{.Ports}}" + + echo "" + echo "~~~~~~Port Bindings Check~~~~~~" + docker ps -a --filter network="$NETWORK_NAME" \ + --format "table {{.Names}}\t{{.Ports}}" + + echo "" + echo "~~~~~~Service Health Check~~~~~~" + echo "PostgreSQL: $(docker exec "$POSTGRES_CONTAINER_NAME" pg_isready -U ${{ inputs.postgres-user }} >/dev/null 2>&1 && echo 'Ready' || echo 'Not ready')" + echo "Temporal: $(curl -fs http://localhost:$TEMPORAL_PORT/api/v1/namespaces/default >/dev/null 2>&1 && echo 'Ready' || echo 'Not ready')" + echo "AutoKitteh: $(curl -fs http://localhost:$AUTOKITTEH_PORT/healthz >/dev/null 2>&1 && echo 'Ready' || echo 'Not ready')" + + # ─────────────────────────────────────────────────────────────── + - name: πŸ“Š Show Docker stats and disk usage + shell: bash + run: | + echo "~~~~~~Docker Resource Usage (snapshot)~~~~~~" + docker stats --no-stream --format "table {{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}\t{{.BlockIO}}" + echo "~~~~~~Disk Usage (df -h)~~~~~~" + df -h + + # ─────────────────────────────────────────────────────────────── + - name: πŸ› οΈ Capture container logs on failure + if: failure() + shell: bash + run: | + echo "~~~~~~Capturing container logs due to failure~~~~~~" + docker version || true + docker info || true + echo "" + for c in "$POSTGRES_CONTAINER_NAME" "$TEMPORAL_CONTAINER_NAME" "$AUTOKITTEH_CONTAINER_NAME"; do + if docker ps -a --format '{{.Names}}' | grep -q "$c"; then + echo "Logs for $c:"; docker logs "$c" --tail ${{ inputs.log-tail-lines }} || true; echo "" + 
else
+ echo "$c container not found"; echo ""
+ fi
+ done
+ echo "Network inspection:"; docker network inspect "$NETWORK_NAME" 2>/dev/null || true
+ echo "Port usage:";
+ # The port patterns must be double-quoted so $POSTGRES_PORT etc. expand;
+ # the previous single-quoted ':( $VAR | ... )' pattern never matched.
+ if command -v netstat >/dev/null 2>&1; then
+ netstat -tlnp 2>/dev/null | grep -E ":($POSTGRES_PORT|$TEMPORAL_PORT|$AUTOKITTEH_PORT)\b" || echo "No relevant ports in use"
+ elif command -v ss >/dev/null 2>&1; then
+ ss -tlnp 2>/dev/null | grep -E ":($POSTGRES_PORT|$TEMPORAL_PORT|$AUTOKITTEH_PORT)\b" || echo "No relevant ports in use"
+ else
+ echo "No relevant ports in use"
+ fi
\ No newline at end of file
diff --git a/.github/actions/setup-test-env/scripts/progress-bar.sh b/.github/actions/setup-test-env/scripts/progress-bar.sh
new file mode 100644
index 0000000000..417139d980
--- /dev/null
+++ b/.github/actions/setup-test-env/scripts/progress-bar.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+# progress-bar.sh β€” reusable spinner + progress bar
+
+ProgressBar() {
+ local current=$1 total=$2 service=${3:-Service}
+ local elapsed=$((current * 2)) # loop sleeps 2 s
+ local pct=$((current * 100 / total))
+ local done=$((pct * 4 / 10)) left=$((40 - done))
+
+ printf -v fill '%*s' "$done"
+ printf -v empty '%*s' "$left"
+
+ local sp='⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏'
+ local ch=${sp:current%${#sp}:1}
+
+ printf '\r%s %s: [%s%s] %s%% (%ss)' \
+ "$ch" "$service" "${fill// /#}" "${empty// /-}" "$pct" "$elapsed"
+}
+export -f ProgressBar
diff --git a/.github/workflows/build_test_and_release.yml b/.github/workflows/build_test_and_release.yml
index edad51be8e..b0f97d87a1 100644
--- a/.github/workflows/build_test_and_release.yml
+++ b/.github/workflows/build_test_and_release.yml
@@ -8,8 +8,8 @@
 permissions:
 env:
 GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- NODE_VERSION: "21.2.0"
- SKIP_E2E_CHECK: ${{ !contains(github.event.head_commit.message, '[skip_e2e]') }}
+ NODE_VERSION: "22"
+ ACT: ${{ contains(github.actor, 'nektos/act') }}
 jobs:
 setup:
 steps:
 - uses: actions/checkout@v4
 with:
 - token: ${{ 
secrets.PAT_TOKEN }} + token: ${{ github.token }} submodules: true - uses: actions/setup-node@v4 @@ -30,6 +30,7 @@ jobs: cache: "npm" - name: Restore node_modules cache + if: env.ACT != 'true' id: restore-node-modules uses: actions/cache/restore@v4 with: @@ -37,17 +38,18 @@ jobs: key: node-modules-${{ hashFiles('package-lock.json') }} - name: Install dependencies - run: npm ci + run: npm ci --no-audit --no-progress - name: Save node_modules cache + if: env.ACT != 'true' && steps.restore-node-modules.outputs.cache-hit != 'true' id: save-node-modules uses: actions/cache/save@v4 - if: steps.restore-node-modules.outputs.cache-hit != 'true' with: path: node_modules key: node-modules-${{ hashFiles('package-lock.json') }} - name: Restore Playwright browsers cache + if: env.ACT != 'true' id: restore-playwright uses: actions/cache/restore@v4 with: @@ -55,23 +57,24 @@ jobs: key: playwright-${{ hashFiles('package-lock.json') }} - name: Install Playwright Browsers - if: steps.restore-playwright.outputs.cache-hit != 'true' + if: steps.restore-playwright.outputs.cache-hit != 'true' && env.ACT != 'true' run: npx playwright install --with-deps - name: Save Playwright browsers cache + if: env.ACT != 'true' && steps.restore-playwright.outputs.cache-hit != 'true' uses: actions/cache/save@v4 - if: steps.restore-playwright.outputs.cache-hit != 'true' with: path: ~/.cache/ms-playwright key: playwright-${{ hashFiles('package-lock.json') }} - name: Update kittehub.zip + if: env.ACT != 'true' uses: ./.github/actions/update-kittehub with: - pat-token: ${{ secrets.PAT_TOKEN }} + pat-token: ${{ secrets.PAT_TOKEN || github.token }} test: - name: πŸ§ͺ Testing ${{ matrix.browser }} + name: 🎯 Testing ${{ matrix.browser }} needs: setup runs-on: ubuntu-latest timeout-minutes: 20 @@ -83,7 +86,7 @@ jobs: steps: - uses: actions/checkout@v4 with: - token: ${{ secrets.PAT_TOKEN }} + token: ${{ secrets.PAT_TOKEN || github.token }} submodules: true - uses: actions/setup-node@v4 @@ -92,23 +95,91 @@ jobs: 
cache: "npm" - name: Restore cached node_modules + if: env.ACT != 'true' uses: actions/cache/restore@v4 with: path: node_modules key: node-modules-${{ hashFiles('package-lock.json') }} - name: Restore Playwright browsers + if: env.ACT != 'true' uses: actions/cache/restore@v4 with: path: ~/.cache/ms-playwright key: playwright-${{ hashFiles('package-lock.json') }} - - name: Setup test environment + - name: Install Playwright + if: env.ACT != 'true' + run: npx playwright install --with-deps + + - name: Install dependencies (Act fallback) + if: env.ACT == 'true' + run: npm ci --no-audit --no-progress + + - name: Install system dependencies for Playwright (Act only) + if: env.ACT == 'true' + run: | + echo "Installing system dependencies for Playwright in Act environment..." + sudo apt-get update + sudo apt-get install -y \ + libnss3 \ + libnspr4 \ + libatk1.0-0 \ + libatspi2.0-0 \ + libxcomposite1 \ + libxdamage1 \ + libxrandr2 \ + libgbm1 \ + libxkbcommon0 \ + libasound2 + + - name: Install Chromium browser for Act + if: env.ACT == 'true' + run: | + echo "Installing Chromium browser for Act environment..." 
+ npx playwright install chromium + + - name: Setup test environment (Temporal + Postgres + Autokitteh Enterprise) + id: setup-test-env uses: ./.github/actions/setup-test-env with: descope-project-id: ${{ secrets.VITE_DESCOPE_PROJECT_ID }} browser: ${{ matrix.browser }} autokitteh-image: ${{ vars.AUTOKITTEH_IMAGE }} + rsa-private-key: ${{ secrets.AK_RSA_PRIVATE_KEY }} + rsa-public-key: ${{ secrets.AK_RSA_PUBLIC_KEY }} + session-cookie-keys: ${{ secrets.AK_AUTHSESSIONS__COOKIE_KEYS }} + + - name: Save Docker images to cache (only if setup succeeded) + if: success() + uses: ./.github/actions/cache-docker-images + with: + cache-key: "test-env-images-v2" + images: "postgres:15-alpine,temporalio/auto-setup:1.24.2,${{ vars.AUTOKITTEH_IMAGE }}" + restore-only: "false" + - name: Debug Autokitteh status + shell: bash + run: | + echo "== All containers ==" + docker ps -a + echo "" + echo "== Network containers ==" + docker ps -a --filter network=temporal-net-${{ github.run_id }}-${{ matrix.browser }} + echo "" + echo "== AutoKitteh container check ==" + if docker ps -a --format '{{.Names}}' | grep -q "autokitteh-ee-${{ matrix.browser }}"; then + echo "AutoKitteh container found!" + docker logs autokitteh-ee-${{ matrix.browser }} || true + else + echo "❌ AutoKitteh container not found!" + echo "Looking for any autokitteh containers..." 
+ docker ps -a | grep autokitteh || echo "No autokitteh containers found" + fi + echo "" + echo "== Environment variables ==" + echo "AUTOKITTEH_IMAGE: ${{ vars.AUTOKITTEH_IMAGE }}" + echo "Run ID: ${{ github.run_id }}" + echo "Browser: ${{ matrix.browser }}" - name: Run Playwright tests env: @@ -116,12 +187,12 @@ jobs: run: npx playwright test --project=${{ matrix.browser }} - name: Upload test artifacts - if: always() + if: always() && env.ACT != 'true' uses: ./.github/actions/upload-test-artifacts with: browser: ${{ matrix.browser }} run-id: ${{ github.run_id }} - + build: needs: setup runs-on: ubuntu-latest @@ -200,4 +271,4 @@ jobs: HUSKY: 0 run: | npm ci - npx semantic-release + npx semantic-release \ No newline at end of file diff --git a/.gitignore b/.gitignore index b499313f97..fe197a1115 100644 --- a/.gitignore +++ b/.gitignore @@ -49,4 +49,8 @@ directory_contents.json .cursor # MCP -.vscode/mcp.json \ No newline at end of file +.vscode/mcp.json + +# Act Local Test +.vars +.secrets \ No newline at end of file diff --git a/package.json b/package.json index ae98e047b6..2d94de664d 100644 --- a/package.json +++ b/package.json @@ -4,9 +4,9 @@ "type": "module", "version": "2.207.2", "scripts": { - "build": "vite build", + "build": "node scripts/validateRuleConsistency.mjs && vite build", "build-storybook": "storybook build", - "build:prod": "node scripts/fetchTemplates && node scripts/verifyTourStepIdsUniqueness.mjs && NODE_ENV=production npm run build", + "build:prod": "node scripts/fetchTemplates && node scripts/verifyTourStepIdsUniqueness.mjs && node scripts/validateRuleConsistency.mjs && NODE_ENV=production npm run build", "fetch-templates": "node scripts/fetchTemplates", "generate-interfaces-manifest": "node scripts/generateInterfacesManifest", "dev": "vite", @@ -25,7 +25,8 @@ "test:e2e:report": "npx playwright show-report", "test:e2e:ui": "npx playwright test --ui", "tsc": "tsc", - "type-check": "tsc --pretty --noEmit" + "type-check": "tsc --pretty 
--noEmit", + "validate-rules": "node scripts/validateRuleConsistency.mjs" }, "dependencies": { "@bufbuild/protobuf": "^1.10.0", diff --git a/scripts/README-rule-validation.md b/scripts/README-rule-validation.md new file mode 100644 index 0000000000..8ebc95bbb8 --- /dev/null +++ b/scripts/README-rule-validation.md @@ -0,0 +1,112 @@ +# Rule Validation System + +This directory contains a build-time validation system that ensures consistency between backend and frontend rule messages. + +## Overview + +The validation system automatically extracts rule messages from: +- **Backend**: `src/autokitteh/internal/backend/projects/lint.go` (Go) +- **Frontend**: `src/constants/project.constants.ts` (TypeScript) + +And compares them to ensure they match exactly. + +## Files + +### `validateRuleConsistency.mjs` +The main validation script that: +1. Extracts rules from the Go backend file using regex parsing +2. Extracts rules from the TypeScript constants file +3. Compares all rule IDs and messages for exact matches +4. Fails the build if any inconsistencies are found + +## Usage + +### Automatic (Build-time) +The validation runs automatically during: +- `npm run build` - Development builds +- `npm run build:prod` - Production builds + +### Manual +Run validation independently: +```bash +npm run validate-rules +``` + +## How It Works + +1. **Extraction**: Uses regex patterns to extract rule maps from both files +2. **Comparison**: Compares rule IDs and messages for exact matches +3. **Validation**: Fails if any rules are missing, extra, or have different messages +4. 
**Build Prevention**: Stops the build process if validation fails + +## Error Handling + +When validation fails, the script: +- Displays detailed error messages showing exactly what doesn't match +- Provides file paths for both backend and frontend files +- Exits with code 1 to prevent the build from continuing + +## Example Output + +### Success (Colorized) +``` +Validating rule consistency between backend and frontend... +Extracting rules from Go backend... + Found 11 backend rules +Extracting rules from TypeScript frontend... + Found 11 frontend rules + +════════════════════════════════════════════════════════════════════════════════ +RULE VALIDATION PASSED - Frontend and Backend rules are synchronized +════════════════════════════════════════════════════════════════════════════════ +Successfully validated 11 rules + All rule messages match perfectly between backend and frontend +``` + +### Failure (Colorized) +``` +════════════════════════════════════════════════════════════════════════════════ +RULE VALIDATION FAILED - Frontend and Backend rule messages are out of sync! +════════════════════════════════════════════════════════════════════════════════ + +The following inconsistencies were found: + +1. Rule message mismatch for W1: + Backend: "Empty variable" + Frontend: "Empty variable MISMATCH" + +2. Extra rule in frontend: W3 - "Extra rule in frontend" + +──────────────────────────────────────────────────────────────────────────────── +Please update the rule messages in one of these files to match: +β€’ Backend: src/autokitteh/internal/backend/projects/lint.go +β€’ Frontend: src/constants/project.constants.ts +──────────────────────────────────────────────────────────────────────────────── + +Build has been stopped to prevent inconsistent behavior. 
+``` + +**Note**: The actual output includes color coding for better readability: +- Green: Success messages and matching content +- Red: Error messages and mismatched content +- Yellow: Warning messages and rule IDs +- Blue: Backend-related content +- Magenta: Frontend-related content +- Cyan: File paths and highlighted text + +## Maintenance + +When adding or modifying rules: + +1. **Update Backend**: Modify the `Rules` map in `lint.go` +2. **Update Frontend**: Modify the `lintViolationRules` object in `project.constants.ts` +3. **Test**: Run `npm run validate-rules` to ensure consistency +4. **Build**: The validation will run automatically during build + +## Benefits + +- **Early Detection**: Catches inconsistencies at build time, not runtime +- **Automatic Validation**: No manual checking required +- **Clear Error Messages**: Detailed feedback on what needs to be fixed +- **Build Safety**: Prevents shipping inconsistent rule messages +- **Zero Runtime Overhead**: Validation only runs during build process \ No newline at end of file diff --git a/scripts/cleanup-act-docker.sh b/scripts/cleanup-act-docker.sh new file mode 100644 index 0000000000..a30653b8c5 --- /dev/null +++ b/scripts/cleanup-act-docker.sh @@ -0,0 +1,96 @@ +#!/bin/bash + +# cleanup-act-docker.sh +# Script to clean up Docker containers and networks created by "act" tool +# when running GitHub Actions workflows locally + +set -e + +echo "🧹 Cleaning up Docker resources created by 'act'..." + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +# Function to print colored output +print_status() { + local color=$1 + local message=$2 + echo -e "${color}${message}${NC}" +} + +# Stop and remove containers with common act patterns +echo "πŸ›‘ Stopping and removing act containers..." 
+ +# Patterns for containers created by the setup-test-env action +CONTAINER_PATTERNS=( + "postgres-db-*" + "temporal-dev-*" + "autokitteh-ee-*" + "act-*" +) + +for pattern in "${CONTAINER_PATTERNS[@]}"; do + containers=$(docker ps -aq --filter "name=${pattern}" 2>/dev/null || true) + if [ -n "$containers" ]; then + print_status $YELLOW "Found containers matching pattern: ${pattern}" + echo "$containers" | xargs docker rm -f 2>/dev/null || true + print_status $GREEN "βœ… Removed containers matching: ${pattern}" + fi +done + +# Remove networks with temporal-net pattern (created by setup-test-env) +echo "🌐 Cleaning up networks..." +networks=$(docker network ls --filter "name=temporal-net-*" -q 2>/dev/null || true) +if [ -n "$networks" ]; then + print_status $YELLOW "Found temporal networks to remove" + echo "$networks" | xargs docker network rm 2>/dev/null || true + print_status $GREEN "βœ… Removed temporal networks" +fi + +# Remove act-specific networks +act_networks=$(docker network ls --filter "name=act_*" -q 2>/dev/null || true) +if [ -n "$act_networks" ]; then + print_status $YELLOW "Found act networks to remove" + echo "$act_networks" | xargs docker network rm 2>/dev/null || true + print_status $GREEN "βœ… Removed act networks" +fi + +# Clean up volumes (be careful with this - only remove act-specific ones) +echo "πŸ“¦ Cleaning up volumes..." +volumes=$(docker volume ls --filter "name=act_*" -q 2>/dev/null || true) +if [ -n "$volumes" ]; then + print_status $YELLOW "Found act volumes to remove" + echo "$volumes" | xargs docker volume rm 2>/dev/null || true + print_status $GREEN "βœ… Removed act volumes" +fi + +# Remove dangling images created during act runs (optional - be careful) +echo "πŸ–ΌοΈ Cleaning up dangling images..." 
+dangling_images=$(docker images -f "dangling=true" -q 2>/dev/null || true) +if [ -n "$dangling_images" ]; then + print_status $YELLOW "Found dangling images to remove" + echo "$dangling_images" | xargs docker rmi 2>/dev/null || true + print_status $GREEN "βœ… Removed dangling images" +fi + +# Prune unused Docker resources (no --volumes: only act_* volumes were removed above) +echo "πŸ”„ Running Docker system prune..." +docker system prune -f 2>/dev/null || true + +# Show current Docker resource usage +echo "" +print_status $GREEN "πŸ“Š Current Docker resource status:" +echo "Containers:" +docker ps -a --format "table {{.Names}}\t{{.Image}}\t{{.Status}}" 2>/dev/null || true +echo "" +echo "Networks:" +docker network ls --format "table {{.Name}}\t{{.Driver}}\t{{.Scope}}" 2>/dev/null || true +echo "" +echo "Volumes:" +docker volume ls --format "table {{.Name}}\t{{.Driver}}" 2>/dev/null || true + +print_status $GREEN "βœ… Docker cleanup completed!" +print_status $YELLOW "πŸ’‘ Tip: Run 'docker system df' to see disk usage" \ No newline at end of file diff --git a/scripts/docker-cleanup.sh b/scripts/docker-cleanup.sh new file mode 100755 index 0000000000..19e36f09d1 --- /dev/null +++ b/scripts/docker-cleanup.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +echo "Stopping all containers..." +docker stop $(docker ps -aq) 2>/dev/null + +echo "Removing all containers..." +docker rm -f $(docker ps -aq) 2>/dev/null + +echo "Removing all images..." +docker rmi -f $(docker images -q) 2>/dev/null + +echo "Removing all volumes..." +docker volume rm -f $(docker volume ls -q) 2>/dev/null + +echo "Removing all user-defined networks..." +docker network rm $(docker network ls | grep -v "bridge\|host\|none" | awk '{print $1}') 2>/dev/null + +echo "Docker system prune (just in case)..." +docker system prune -a --volumes -f + +echo "All done!" 
diff --git a/scripts/validateRuleConsistency.mjs b/scripts/validateRuleConsistency.mjs new file mode 100644 index 0000000000..3280d9251e --- /dev/null +++ b/scripts/validateRuleConsistency.mjs @@ -0,0 +1,371 @@ +#!/usr/bin/env node +/* eslint-disable */ +import fs from "fs"; +import path from "path"; +import { fileURLToPath } from "url"; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); + +/** + * Extract rules from Go lint.go file + * @param {string} goFilePath - Path to the Go lint file + * @returns {Object} - Extracted rules object + */ +function extractRulesFromGoFile(goFilePath) { + try { + const content = fs.readFileSync(goFilePath, "utf8"); + + // Find the Rules map using regex + const rulesMatch = content.match( + /var Rules = map\[string\]string\s*{\s*\/\/ ID -> Description\s*([\s\S]*?)\s*}/ + ); + + if (!rulesMatch) { + throw new Error("Could not find Rules map in Go file"); + } + + const rulesContent = rulesMatch[1]; + const rules = {}; + + // Extract each rule line using regex + const ruleLines = rulesContent.match(/"[^"]+"\s*:\s*"[^"]+"/g); + + if (!ruleLines) { + throw new Error("Could not parse rules from Go file"); + } + + ruleLines.forEach((line) => { + const match = line.match(/"([^"]+)"\s*:\s*"([^"]+)"/); + if (match) { + const [, ruleId, description] = match; + rules[ruleId] = description; + } + }); + + return rules; + } catch (error) { + console.error(`❌ Error reading Go file: ${error.message}`); + process.exit(1); + } +} + +/** + * Extract rules from TypeScript constants file + * @param {string} tsFilePath - Path to the TypeScript constants file + * @returns {Object} - Extracted rules object + */ +function extractRulesFromTSFile(tsFilePath) { + try { + const content = fs.readFileSync(tsFilePath, "utf8"); + + const rulesMatch = content.match(/export const lintViolationRules = \{([\s\S]*?)\};/); + + if (!rulesMatch) { + throw new Error("Could not find lintViolationRules in TypeScript file"); + } + 
+ const rulesContent = rulesMatch[1]; + const rules = {}; + + const ruleLines = rulesContent.match(/[A-Z]\d+:\s*"[^"]+"/g); + + if (!ruleLines) { + throw new Error("Could not parse rules from TypeScript file"); + } + + ruleLines.forEach((line) => { + const match = line.match(/([A-Z]\d+):\s*"([^"]+)"/); + if (match) { + const [, ruleId, description] = match; + rules[ruleId] = description; + } + }); + + return rules; + } catch (error) { + console.error(`❌ Error reading TypeScript file: ${error.message}`); + process.exit(1); + } +} + +/** + * ANSI color codes for terminal output + */ +const colors = { + reset: "\x1b[0m", + bright: "\x1b[1m", + red: "\x1b[31m", + green: "\x1b[32m", + yellow: "\x1b[33m", + blue: "\x1b[34m", + magenta: "\x1b[35m", + cyan: "\x1b[36m", + white: "\x1b[37m", + bgRed: "\x1b[41m", + bgGreen: "\x1b[42m", + bgYellow: "\x1b[43m", +}; + +/** + * Helper function to colorize text + * @param {string} text - Text to colorize + * @param {string} color - Color code + * @returns {string} - Colorized text + */ +function colorize(text, color) { + return `${color}${text}${colors.reset}`; +} + +/** + * Create a proper diff using dynamic programming to find the optimal alignment + * @param {string} str1 - First string (backend) + * @param {string} str2 - Second string (frontend) + * @returns {Object} - Object with highlighted versions of both strings + */ +function createCharDiff(str1, str2) { + // Use Myers' diff algorithm (simplified version) + const diff = computeDiff(str1, str2); + + let highlighted1 = ""; + let highlighted2 = ""; + + for (const operation of diff) { + switch (operation.type) { + case "equal": + // Characters match - show normally + if (operation.char === " ") { + highlighted1 += "Β·"; // Show spaces as dots for clarity + highlighted2 += "Β·"; + } else { + highlighted1 += operation.char; + highlighted2 += operation.char; + } + break; + + case "delete": + // Character exists in backend but not frontend + if (operation.char === " ") { + 
highlighted1 += colorize("Β·", colors.green); + } else { + highlighted1 += colorize(operation.char, colors.green); + } + highlighted2 += colorize("βˆ…", colors.yellow); + break; + + case "insert": + // Character exists in frontend but not backend + highlighted1 += colorize("βˆ…", colors.yellow); + if (operation.char === " ") { + highlighted2 += colorize("Β·", colors.red); + } else { + highlighted2 += colorize(operation.char, colors.red); + } + break; + + case "replace": + // Characters are different + if (operation.char1 === " ") { + highlighted1 += colorize("Β·", colors.green); + } else { + highlighted1 += colorize(operation.char1, colors.green); + } + if (operation.char2 === " ") { + highlighted2 += colorize("Β·", colors.red); + } else { + highlighted2 += colorize(operation.char2, colors.red); + } + break; + } + } + + return { + backend: highlighted1, + frontend: highlighted2, + }; +} + +/** + * Compute diff operations using a simplified Myers algorithm + * @param {string} str1 - First string + * @param {string} str2 - Second string + * @returns {Array} - Array of diff operations + */ +function computeDiff(str1, str2) { + const len1 = str1.length; + const len2 = str2.length; + + // Create a matrix for dynamic programming + const matrix = Array(len1 + 1) + .fill(null) + .map(() => Array(len2 + 1).fill(0)); + + // Fill the matrix + for (let i = 0; i <= len1; i++) { + for (let j = 0; j <= len2; j++) { + if (i === 0) { + matrix[i][j] = j; + } else if (j === 0) { + matrix[i][j] = i; + } else if (str1[i - 1] === str2[j - 1]) { + matrix[i][j] = matrix[i - 1][j - 1]; + } else { + matrix[i][j] = + 1 + + Math.min( + matrix[i - 1][j], // deletion + matrix[i][j - 1], // insertion + matrix[i - 1][j - 1] // substitution + ); + } + } + } + + // Backtrack to find the operations + const operations = []; + let i = len1, + j = len2; + + while (i > 0 || j > 0) { + if (i > 0 && j > 0 && str1[i - 1] === str2[j - 1]) { + // Characters match + operations.unshift({ type: "equal", char: 
str1[i - 1] }); + i--; + j--; + } else if (i > 0 && j > 0 && matrix[i][j] === matrix[i - 1][j - 1] + 1) { + // Substitution + operations.unshift({ type: "replace", char1: str1[i - 1], char2: str2[j - 1] }); + i--; + j--; + } else if (i > 0 && matrix[i][j] === matrix[i - 1][j] + 1) { + // Deletion + operations.unshift({ type: "delete", char: str1[i - 1] }); + i--; + } else if (j > 0 && matrix[i][j] === matrix[i][j - 1] + 1) { + // Insertion + operations.unshift({ type: "insert", char: str2[j - 1] }); + j--; + } + } + + return operations; +} + +/** + * Compare two rule objects and return differences + * @param {Object} backendRules - Rules from Go backend + * @param {Object} frontendRules - Rules from TypeScript frontend + * @returns {Array} - Array of error messages + */ +function compareRules(backendRules, frontendRules) { + const errors = []; + + // Check for missing rules in frontend + for (const [ruleId, backendMessage] of Object.entries(backendRules)) { + if (!(ruleId in frontendRules)) { + errors.push( + `${colorize("Missing rule in frontend:", colors.red)} ` + + `${colorize(ruleId, colors.yellow)} - "${colorize(backendMessage, colors.cyan)}"` + ); + continue; + } + + const frontendMessage = frontendRules[ruleId]; + if (frontendMessage !== backendMessage) { + const diff = createCharDiff(backendMessage, frontendMessage); + errors.push( + `${colorize("Rule message mismatch for", colors.red)} ${colorize(ruleId, colors.yellow)}:\n` + + ` ${colorize("Backend: ", colors.blue)} "${diff.backend}"\n` + + ` ${colorize("Frontend:", colors.magenta)} "${diff.frontend}"` + ); + } + } + + // Check for extra rules in frontend + for (const [ruleId, frontendMessage] of Object.entries(frontendRules)) { + if (!(ruleId in backendRules)) { + errors.push( + `${colorize("Extra rule in frontend:", colors.red)} ` + + `${colorize(ruleId, colors.yellow)} - "${colorize(frontendMessage, colors.cyan)}"` + ); + } + } + + return errors; +} + +/** + * Main validation function + */ +function 
validateRuleConsistency() { + console.log(colorize("πŸ” Validating rule consistency between backend and frontend...", colors.cyan)); + + // Define file paths + const goFilePath = path.resolve(__dirname, "../src/autokitteh/internal/backend/projects/lint.go"); + const tsFilePath = path.resolve(__dirname, "../src/constants/project.constants.ts"); + + // Check if files exist + if (!fs.existsSync(goFilePath)) { + console.error(colorize(`❌ Go lint file not found: ${goFilePath}`, colors.red)); + process.exit(1); + } + + if (!fs.existsSync(tsFilePath)) { + console.error(colorize(`❌ TypeScript constants file not found: ${tsFilePath}`, colors.red)); + process.exit(1); + } + + // Extract rules from both files + console.log(colorize("πŸ“– Extracting rules from Go backend...", colors.blue)); + const backendRules = extractRulesFromGoFile(goFilePath); + console.log(colorize(` Found ${Object.keys(backendRules).length} backend rules`, colors.green)); + + console.log(colorize("πŸ“– Extracting rules from TypeScript frontend...", colors.magenta)); + const frontendRules = extractRulesFromTSFile(tsFilePath); + console.log(colorize(` Found ${Object.keys(frontendRules).length} frontend rules`, colors.green)); + + // Compare rules + const errors = compareRules(backendRules, frontendRules); + + if (errors.length > 0) { + console.error("\n" + colorize("═".repeat(80), colors.red)); + console.error( + colorize("❌ RULE VALIDATION FAILED", colors.red + colors.bright) + + colorize(" - Frontend and Backend rule messages are out of sync!", colors.red) + ); + console.error(colorize("═".repeat(80), colors.red)); + console.error(colorize("\nThe following inconsistencies were found:", colors.yellow + colors.bright)); + console.error(""); + errors.forEach((error, index) => { + console.error(`${colorize(`${index + 1}.`, colors.red + colors.bright)} ${error}`); + if (index < errors.length - 1) console.error(""); // Add spacing between errors + }); + console.error("\n" + colorize("─".repeat(80), 
colors.yellow)); + console.error( + colorize("Please update the rule messages in one of these files to match:", colors.yellow + colors.bright) + ); + console.error(`${colorize("β€’ Backend: ", colors.blue + colors.bright)} ${colorize(goFilePath, colors.cyan)}`); + console.error( + `${colorize("β€’ Frontend:", colors.magenta + colors.bright)} ${colorize(tsFilePath, colors.cyan)}` + ); + console.error(colorize("─".repeat(80), colors.yellow)); + console.error( + colorize("\n🚫 Build has been stopped to prevent inconsistent behavior.\n", colors.red + colors.bright) + ); + process.exit(1); + } + + console.log("\n" + colorize("═".repeat(80), colors.green)); + console.log( + colorize("βœ… RULE VALIDATION PASSED", colors.green + colors.bright) + + colorize(" - Frontend and Backend rules are synchronized", colors.green) + ); + console.log(colorize("═".repeat(80), colors.green)); + console.log( + colorize(`πŸŽ‰ Successfully validated ${Object.keys(backendRules).length} rules`, colors.green + colors.bright) + ); + console.log(colorize(" All rule messages match perfectly between backend and frontend\n", colors.green)); +} + +// Run validation +validateRuleConsistency();