[KEY_PREFIX]"
+ exit 1
+fi
+
+# Check if required environment variables are set
+if [ -z "$RHCS_TOKEN" ]; then
+ echo "Error: The environment variable RHCS_TOKEN is not set."
+ exit 1
+fi
+
+if [ -z "$AWS_REGION" ]; then
+ echo "Error: The environment variable AWS_REGION is not set."
+ exit 1
+fi
+
+# Variables
+BUCKET=$1
+MODULES_DIR=$2
+TEMP_DIR_PREFIX=$3
+MIN_AGE_IN_HOURS=$4
+ID_OR_ALL=$5
+KEY_PREFIX=${6:-""} # Key prefix is optional
+FAILED=0
+CURRENT_DIR=$(pwd)
+AWS_S3_REGION=${AWS_S3_REGION:-$AWS_REGION}
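+# Example invocation (illustrative values; the real script name, bucket, paths, and prefix depend on your setup):
+#   ./cleanup-clusters.sh my-tf-state-bucket ./modules/rosa-hcp /tmp/rosa- 20 all my-prefix/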
+
+
+# Detect operating system and set the appropriate date command
+if [[ "$(uname)" == "Darwin" ]]; then
+ date_command="gdate"
+else
+ date_command="date"
+fi
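+# Note: BSD date on macOS does not support `-d` for parsing timestamps, so GNU date
+# (`gdate`, provided by the coreutils package) is needed there to convert the
+# S3 LastModified value to epoch seconds with `-d "$last_modified" +%s`.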
+
+# Function to perform terraform destroy
+destroy_cluster() {
+ local cluster_id=$1
+ local cluster_folder="$KEY_PREFIX$2"
+ # we must add two levels to replicate the "source = ../../modules" relative path used in the module
+ local temp_dir="${TEMP_DIR_PREFIX}${cluster_id}/1/2"
+ local temp_generic_modules_dir="${TEMP_DIR_PREFIX}${cluster_id}/modules/"
+ local source_generic_modules="$MODULES_DIR/../../modules/"
+
+ echo "Copying generic modules $source_generic_modules in $temp_generic_modules_dir"
+
+ mkdir -p "$temp_generic_modules_dir" || return 1
+ cp -a "$source_generic_modules." "$temp_generic_modules_dir" || return 1
+
+ tree "$source_generic_modules" "$temp_generic_modules_dir" || return 1
+
+ echo "Copying $MODULES_DIR in $temp_dir"
+
+ mkdir -p "$temp_dir" || return 1
+ cp -a "$MODULES_DIR." "$temp_dir" || return 1
+
+ tree "$MODULES_DIR" "$temp_dir" || return 1
+
+ cd "$temp_dir" || return 1
+
+ tree "." || return 1
+
+ echo "tf state: bucket=$BUCKET key=${cluster_folder}/${cluster_id}.tfstate region=$AWS_S3_REGION"
+
+ if ! terraform init -backend-config="bucket=$BUCKET" -backend-config="key=${cluster_folder}/${cluster_id}.tfstate" -backend-config="region=$AWS_S3_REGION"; then return 1; fi
+
+ # Edit the name of the cluster
+ sed -i -e "s/\(rosa_cluster_name\s*=\s*\"\)[^\"]*\(\"\)/\1${cluster_id}\2/" cluster.tf
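+ # Illustrative: the sed above turns a line like `rosa_cluster_name = "some-name"` in
+ # cluster.tf into `rosa_cluster_name = "<cluster_id>"` so destroy targets this cluster.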
+
+ if ! terraform destroy -auto-approve; then return 1; fi
+
+ # Cleanup S3
+ echo "Deleting s3://$BUCKET/$cluster_folder"
+ if ! aws s3 rm "s3://$BUCKET/$cluster_folder" --recursive; then return 1; fi
+ if ! aws s3api delete-object --bucket "$BUCKET" --key "$cluster_folder/"; then return 1; fi
+
+ cd - || return 1
+ rm -rf "$temp_dir" || return 1
+}
+
+# List objects in the S3 bucket and parse the cluster IDs
+all_objects=$(aws s3 ls "s3://$BUCKET/$KEY_PREFIX")
+aws_exit_code=$?
+
+# Tolerate an absent prefix/folder: in that case aws s3 ls exits non-zero with an empty listing
+if [ $aws_exit_code -ne 0 ] && [ "$all_objects" != "" ]; then
+ echo "Error executing the aws s3 ls command (Exit Code: $aws_exit_code):" >&2
+ exit 1
+fi
+
+if [ "$ID_OR_ALL" == "all" ]; then
+ clusters=$(echo "$all_objects" | awk '{print $2}' | sed -n 's#^tfstate-\(.*\)/$#\1#p')
+else
+ clusters=$(echo "$all_objects" | awk '{print $2}' | grep "tfstate-$ID_OR_ALL/" | sed -n 's#^tfstate-\(.*\)/$#\1#p')
+fi
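+# Illustrative: a listing entry such as `PRE tfstate-abc123/` yields the cluster id `abc123`.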
+
+if [ -z "$clusters" ]; then
+ echo "No objects found in the S3 bucket. Exiting script." >&2
+ exit 0
+fi
+
+current_timestamp=$($date_command +%s)
+
+for cluster_id in $clusters; do
+ cd "$CURRENT_DIR" || return 1
+
+
+ cluster_folder="tfstate-$cluster_id"
+ echo "Checking cluster $cluster_id in $cluster_folder"
+
+ last_modified=$(aws s3api head-object --bucket "$BUCKET" --key "$KEY_PREFIX$cluster_folder/${cluster_id}.tfstate" --output json | grep LastModified | awk -F '"' '{print $4}')
+ if [ -z "$last_modified" ]; then
+ echo "Error: Failed to retrieve last modified timestamp for cluster $cluster_id"
+ exit 1
+ fi
+
+ last_modified_timestamp=$($date_command -d "$last_modified" +%s)
+ if [ -z "$last_modified_timestamp" ]; then
+ echo "Error: Failed to convert last modified timestamp to seconds since epoch for cluster $cluster_id"
+ exit 1
+ fi
+ echo "Cluster $cluster_id last modification: $last_modified ($last_modified_timestamp)"
+
+ file_age_hours=$(( (current_timestamp - last_modified_timestamp) / 3600 ))
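+ # e.g. (1714103600 - 1714060000) / 3600 = 12 hours (integer division rounds down)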
+ if [ -z "$file_age_hours" ]; then
+ echo "Error: Failed to calculate file age in hours for cluster $cluster_id"
+ exit 1
+ fi
+ echo "Cluster $cluster_id is $file_age_hours hours old"
+
+ if [ $file_age_hours -ge "$MIN_AGE_IN_HOURS" ]; then
+ echo "Destroying cluster $cluster_id in $cluster_folder"
+
+ if ! destroy_cluster "$cluster_id" "$cluster_folder"; then
+ echo "Error destroying cluster $cluster_id"
+ FAILED=1
+ fi
+ else
+ echo "Skipping cluster $cluster_id as it does not meet the minimum age requirement of $MIN_AGE_IN_HOURS hours"
+ fi
+done
+
+# Exit with the appropriate status
+if [ $FAILED -ne 0 ]; then
+ echo "One or more operations failed."
+ exit 1
+else
+ echo "All operations completed successfully."
+ exit 0
+fi
diff --git a/.github/actions/aws-openshift-rosa-hcp-single-region-create/README.md b/.github/actions/aws-openshift-rosa-hcp-single-region-create/README.md
new file mode 100644
index 000000000..611759950
--- /dev/null
+++ b/.github/actions/aws-openshift-rosa-hcp-single-region-create/README.md
@@ -0,0 +1,138 @@
+# Deploy AWS ROSA HCP Single Region Cluster
+
+## Description
+
+This GitHub Action automates the deployment of the aws/openshift/rosa-hcp-single-region reference architecture cluster using Terraform.
+This action also installs the oc, awscli, and rosa CLIs.
+The kube context is set to the created cluster.
+
+
+## Inputs
+
+| name | description | required | default |
+| --- | --- | --- | --- |
+| `rh-token` | Red Hat Hybrid Cloud Console Token | `true` | `""` |
+| `cluster-name` | Name of the ROSA cluster to deploy | `true` | `""` |
+| `admin-password` | Admin password for the ROSA cluster | `true` | `""` |
+| `admin-username` | Admin username for the ROSA cluster | `true` | `kube-admin` |
+| `aws-region` | AWS region where the ROSA cluster will be deployed | `true` | `""` |
+| `availability-zones` | Comma-separated list of availability zones (letters only, e.g., a,b,c) | `true` | `a,b,c` |
+| `rosa-cli-version` | Version of the ROSA CLI to use | `true` | `latest` |
+| `openshift-version` | Version of OpenShift to install | `true` | `4.17.16` |
+| `replicas` | Number of replicas for the ROSA cluster (empty falls back to the module's default value) | `false` | `""` |
+| `s3-backend-bucket` | Name of the S3 bucket to store Terraform state | `true` | `""` |
+| `s3-bucket-region` | Region of the bucket containing the resources states; falls back to aws-region if not set | `false` | `""` |
+| `s3-bucket-key-prefix` | Key prefix of the bucket containing the resources states. It must end with a '/', e.g. 'my-prefix/'. | `false` | `""` |
+| `tf-modules-revision` | Git revision of the tf modules to use | `true` | `main` |
+| `tf-modules-path` | Path where the tf rosa modules will be cloned | `true` | `./.action-tf-modules/aws-openshift-rosa-hcp-single-region-create/` |
+| `login` | Authenticate the current kube context against the created cluster | `true` | `true` |
+
+
+## Outputs
+
+| name | description |
+| --- | --- |
+| `openshift-server-api` | The server API URL of the deployed ROSA cluster |
+| `openshift-cluster-id` | The ID of the deployed ROSA cluster |
+| `terraform-state-url` | URL of the Terraform state file in the S3 bucket |
+
+
+## Runs
+
+This action is a `composite` action.
+
+## Usage
+
+```yaml
+- uses: camunda/camunda-deployment-references/.github/actions/aws-openshift-rosa-hcp-single-region-create@main
+ with:
+ rh-token:
+ # Red Hat Hybrid Cloud Console Token
+ #
+ # Required: true
+ # Default: ""
+
+ cluster-name:
+ # Name of the ROSA cluster to deploy
+ #
+ # Required: true
+ # Default: ""
+
+ admin-password:
+ # Admin password for the ROSA cluster
+ #
+ # Required: true
+ # Default: ""
+
+ admin-username:
+ # Admin username for the ROSA cluster
+ #
+ # Required: true
+ # Default: kube-admin
+
+ aws-region:
+ # AWS region where the ROSA cluster will be deployed
+ #
+ # Required: true
+ # Default: ""
+
+ availability-zones:
+ # Comma-separated list of availability zones (letters only, e.g., a,b,c)
+ #
+ # Required: true
+ # Default: a,b,c
+
+ rosa-cli-version:
+ # Version of the ROSA CLI to use
+ #
+ # Required: true
+ # Default: latest
+
+ openshift-version:
+ # Version of OpenShift to install
+ #
+ # Required: true
+ # Default: 4.17.16
+
+ replicas:
+ # Number of replicas for the ROSA cluster (empty falls back to the module's default value)
+ #
+ # Required: false
+ # Default: ""
+
+ s3-backend-bucket:
+ # Name of the S3 bucket to store Terraform state
+ #
+ # Required: true
+ # Default: ""
+
+ s3-bucket-region:
+ # Region of the bucket containing the resources states; falls back to aws-region if not set
+ #
+ # Required: false
+ # Default: ""
+
+ s3-bucket-key-prefix:
+ # Key prefix of the bucket containing the resources states. It must end with a '/', e.g. 'my-prefix/'.
+ #
+ # Required: false
+ # Default: ""
+
+ tf-modules-revision:
+ # Git revision of the tf modules to use
+ #
+ # Required: true
+ # Default: main
+
+ tf-modules-path:
+ # Path where the tf rosa modules will be cloned
+ #
+ # Required: true
+ # Default: ./.action-tf-modules/aws-openshift-rosa-hcp-single-region-create/
+
+ login:
+ # Authenticate the current kube context against the created cluster
+ #
+ # Required: true
+ # Default: true
+```
diff --git a/.github/actions/aws-openshift-rosa-hcp-single-region-create/action.yml b/.github/actions/aws-openshift-rosa-hcp-single-region-create/action.yml
new file mode 100644
index 000000000..3e7ac6ffa
--- /dev/null
+++ b/.github/actions/aws-openshift-rosa-hcp-single-region-create/action.yml
@@ -0,0 +1,276 @@
+---
+name: Deploy AWS ROSA HCP Single Region Cluster
+
+description: |
+ This GitHub Action automates the deployment of the aws/openshift/rosa-hcp-single-region reference architecture cluster using Terraform.
+ This action also installs the oc, awscli, and rosa CLIs.
+ The kube context is set to the created cluster.
+
+inputs:
+ rh-token:
+ description: Red Hat Hybrid Cloud Console Token
+ required: true
+ cluster-name:
+ description: Name of the ROSA cluster to deploy
+ required: true
+ admin-password:
+ description: Admin password for the ROSA cluster
+ required: true
+ admin-username:
+ description: Admin username for the ROSA cluster
+ default: kube-admin
+ required: true
+ aws-region:
+ description: AWS region where the ROSA cluster will be deployed
+ required: true
+ availability-zones:
+ description: Comma-separated list of availability zones (letters only, e.g., a,b,c)
+ required: true
+ default: a,b,c
+ rosa-cli-version:
+ description: Version of the ROSA CLI to use
+ required: true
+ default: latest
+ openshift-version:
+ description: Version of OpenShift to install
+ required: true
+ # renovate: datasource=custom.rosa-camunda depName=red-hat-openshift versioning=semver
+ default: 4.17.16
+ replicas:
+ description: Number of replicas for the ROSA cluster (empty falls back to the module's default value)
+ default: ''
+ s3-backend-bucket:
+ description: Name of the S3 bucket to store Terraform state
+ required: true
+ s3-bucket-region:
+ description: Region of the bucket containing the resources states; falls back to aws-region if not set
+ s3-bucket-key-prefix:
+ description: Key prefix of the bucket containing the resources states. It must end with a '/', e.g. 'my-prefix/'.
+ default: ''
+ tf-modules-revision:
+ description: Git revision of the tf modules to use
+ default: main
+ required: true
+ tf-modules-path:
+ description: Path where the tf rosa modules will be cloned
+ default: ./.action-tf-modules/aws-openshift-rosa-hcp-single-region-create/
+ required: true
+ login:
+ description: Authenticate the current kube context against the created cluster
+ default: 'true'
+ required: true
+
+outputs:
+ openshift-server-api:
+ description: The server API URL of the deployed ROSA cluster
+ value: ${{ steps.cluster_info.outputs.cluster_api }}
+
+ openshift-cluster-id:
+ description: The ID of the deployed ROSA cluster
+ value: ${{ steps.apply.outputs.cluster_id }}
+
+ terraform-state-url:
+ description: URL of the Terraform state file in the S3 bucket
+ value: ${{ steps.set-terraform-variables.outputs.terraform-state-url }}
+
+runs:
+ using: composite
+ steps:
+ - name: Checkout Repository
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+ with:
+ repository: camunda/camunda-deployment-references
+ ref: ${{ inputs.tf-modules-revision }}
+ path: ${{ inputs.tf-modules-path }}
+ fetch-depth: 0
+
+ - name: Install asdf tools with cache for the project
+ uses: camunda/infraex-common-config/./.github/actions/asdf-install-tooling@6158b7c7534f6dbeb3fa7c3c836f6e6aa6881b2e # 1.3.0
+
+ # TODO: when available on asdf, migrate this to it
+ - name: Install ROSA CLI
+ shell: bash
+ run: |
+ curl -LO "https://mirror.openshift.com/pub/openshift-v4/clients/rosa/${{ inputs.rosa-cli-version }}/rosa-linux.tar.gz"
+ tar -xvf rosa-linux.tar.gz
+ sudo mv rosa /usr/local/bin/rosa
+ chmod +x /usr/local/bin/rosa
+ rm -f rosa-linux.tar.gz
+ rosa version
+
+ - name: Install CLI tools from OpenShift Mirror
+ uses: redhat-actions/openshift-tools-installer@144527c7d98999f2652264c048c7a9bd103f8a82 # v1
+ with:
+ oc: ${{ inputs.openshift-version }}
+
+ - name: Set AWS Region and associated AZs
+ id: construct_azs
+ shell: bash
+ run: |
+ echo "AWS_REGION=${{ inputs.aws-region }}" | tee -a "$GITHUB_ENV"
+
+ IFS=',' read -r -a az_letters <<< "${{ inputs.availability-zones }}"
+ FULL_AZS=()
+ for az in "${az_letters[@]}"; do
+ FULL_AZS+=("\"${AWS_REGION}${az}\"") # Add double quotes around each AZ
+ done
+
+ # Join the AZs with commas
+ FULL_AZS_STRING=$(IFS=,; echo "${FULL_AZS[*]}")
+ echo "FULL_AZS=$FULL_AZS_STRING" | tee -a "$GITHUB_ENV"
+
+
+ - name: Login to Red Hat Hybrid Cloud Console
+ shell: bash
+ run: |
+ rosa login --token="${{ inputs.rh-token }}"
+ rosa whoami
+
+ - name: Verify and enable HCP ROSA on AWS Marketplace
+ shell: bash
+ run: |
+ rosa verify quota
+ rosa verify permissions
+ rosa create account-roles --mode auto
+
+ - name: Set Terraform variables
+ id: set-terraform-variables
+ shell: bash
+ run: |
+ export TFSTATE_BUCKET="${{ inputs.s3-backend-bucket }}"
+ export TFSTATE_KEY="${{ inputs.s3-bucket-key-prefix }}tfstate-${{ inputs.cluster-name }}/${{ inputs.cluster-name }}.tfstate"
+
+ if [ -z "${{ inputs.s3-bucket-region }}" ]; then
+ export TFSTATE_REGION="$AWS_REGION"
+ else
+ export TFSTATE_REGION="${{ inputs.s3-bucket-region }}"
+ fi
+
+ echo "TFSTATE_BUCKET=${TFSTATE_BUCKET}" >> "$GITHUB_OUTPUT"
+ echo "TFSTATE_REGION=${TFSTATE_REGION}" >> "$GITHUB_OUTPUT"
+ echo "TFSTATE_KEY=${TFSTATE_KEY}" >> "$GITHUB_OUTPUT"
+
+ terraform_state_url="s3://${TFSTATE_BUCKET}/${TFSTATE_KEY}"
+ echo "terraform-state-url=${terraform_state_url}" >> "$GITHUB_OUTPUT"
+
+ - name: Check if S3 bucket exists
+ id: create-s3-bucket
+ shell: bash
+ run: |
+ if aws s3api head-bucket --bucket ${{ inputs.s3-backend-bucket }} --region ${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} 2>/dev/null; then
+ echo "Bucket already exists"
+ else
+ echo "Bucket does not exist, creating..."
+ aws s3api create-bucket --bucket ${{ inputs.s3-backend-bucket }} \
+ --region ${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} \
+ --create-bucket-configuration LocationConstraint=${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }}
+ fi
+
+ aws s3api put-public-access-block --bucket ${{ inputs.s3-backend-bucket }} \
+ --region ${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} \
+ --public-access-block-configuration "BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true"
+
+ - name: Terraform Init
+ id: init
+ working-directory: ${{ inputs.tf-modules-path }}/aws/openshift/rosa-hcp-single-region/
+ env:
+ RHCS_TOKEN: ${{ inputs.rh-token }}
+ shell: bash
+ run: |
+ set -euxo pipefail
+
+ terraform version
+
+ terraform init \
+ -backend-config="bucket=${{ steps.set-terraform-variables.outputs.TFSTATE_BUCKET }}" \
+ -backend-config="key=${{ steps.set-terraform-variables.outputs.TFSTATE_KEY }}" \
+ -backend-config="region=${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }}"
+
+ terraform validate -no-color
+
+ - name: Terraform Plan
+ id: plan
+ working-directory: ${{ inputs.tf-modules-path }}/aws/openshift/rosa-hcp-single-region/
+ env:
+ RHCS_TOKEN: ${{ inputs.rh-token }}
+ shell: bash
+ run: |
+ echo "Adapting the files with input values"
+ pwd
+ ls
+
+ # We use sed instead of -var because the module presented to the user
+ # uses locals for simplicity. Locals cannot be overwritten with the CLI.
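+ # Illustrative: a local such as `rosa_cluster_name = "my-cluster"` in cluster.tf
+ # is rewritten in place to carry the cluster-name input instead.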
+ sed -i -e 's/\(rosa_cluster_name\s*=\s*"\)[^"]*\("\)/\1${{ inputs.cluster-name }}\2/' \
+ -e 's/\(rosa_admin_password\s*=\s*"\)[^"]*\("\)/\1${{ inputs.admin-password }}\2/' \
+ -e 's/\(rosa_admin_username\s*=\s*"\)[^"]*\("\)/\1${{ inputs.admin-username }}\2/' \
+ -e 's/\(openshift_version\s*=\s*"\)[^"]*\("\)/\1${{ inputs.openshift-version }}\2/' \
+ -e "s/\(rosa_cluster_zones\s*=\s*\)[^]]*\]/\1[$FULL_AZS]/" \
+ cluster.tf
+
+ if [ -n "${{ inputs.replicas }}" ]; then
+ sed -i -e 's/\(replicas\s*=\s*\)[0-9]\+/\1${{ inputs.replicas }}/' cluster.tf
+ else
+ echo "No replicas value provided, skipping replica modification."
+ fi
+
+ echo "Displaying templated cluster.tf file:"
+ cat cluster.tf
+
+ terraform plan -no-color -out rosa.plan
+
+ - name: Terraform Apply
+ id: apply
+ working-directory: ${{ inputs.tf-modules-path }}/aws/openshift/rosa-hcp-single-region/
+ env:
+ RHCS_TOKEN: ${{ inputs.rh-token }}
+ shell: bash
+ run: |
+ terraform apply -no-color rosa.plan
+
+ export cluster_id="$(terraform output -raw cluster_id)"
+ echo "cluster_id=$cluster_id" >> "$GITHUB_OUTPUT"
+
+ - name: Retrieve cluster information
+ id: cluster_info
+ shell: bash
+ run: |
+ rosa describe cluster --output=json -c "${{ steps.apply.outputs.cluster_id }}"
+ export cluster_api=$(rosa describe cluster --output=json -c "${{ steps.apply.outputs.cluster_id }}" | jq -r '.api.url')
+ echo "cluster_api=$cluster_api"
+ echo "cluster_api=$cluster_api" >> "$GITHUB_OUTPUT"
+
+ - name: Login and generate kubeconfig
+ # we need to retry because the cluster has just been created and the OIDC provider may not be available yet
+ uses: nick-fields/retry@7152eba30c6575329ac0576536151aca5a72780e # v3
+ id: kube_config
+ if: inputs.login == 'true'
+ with:
+ timeout_minutes: 10
+ max_attempts: 40
+ shell: bash
+ retry_wait_seconds: 15
+ command: |
+ : # see https://github.com/nick-fields/retry/issues/133
+ set -o errexit
+ set -o pipefail
+
+ oc login --username "${{ inputs.admin-username }}" --password "${{ inputs.admin-password }}" "${{ steps.cluster_info.outputs.cluster_api }}"
+
+ # Check if the user is already a cluster-admin
+ if ! rosa list users --cluster="${{ inputs.cluster-name }}" | grep -q "${{ inputs.admin-username }}"; then
+ rosa grant user cluster-admin --cluster="${{ inputs.cluster-name }}" --user="${{ inputs.admin-username }}"
+ else
+ echo "✅ User '${{ inputs.admin-username }}' is already a cluster-admin on '${{ inputs.cluster-name }}'."
+ fi
+
+ oc whoami
+
+ kubectl config rename-context "$(oc config current-context)" "${{ inputs.cluster-name }}"
+ kubectl config use "${{ inputs.cluster-name }}"
+
+ - name: Clean up cloned modules
+ if: always()
+ shell: bash
+ run: |
+ rm -rf "${{ inputs.tf-modules-path }}"
diff --git a/.github/renovate.json5 b/.github/renovate.json5
index a697afad5..258f0fd70 100644
--- a/.github/renovate.json5
+++ b/.github/renovate.json5
@@ -1,11 +1,4 @@
{
$schema: "https://docs.renovatebot.com/renovate-schema.json",
extends: ["github>camunda/infraex-common-config:default.json5"],
- packageRules: [
- {
- // The versioning is a bit strange, so we need to help a bit with parsing it correctly
- matchPackageNames: ["amazon/cloudwatch-agent"],
- versioning: "regex:^(?\\d)(\\.(?\\d+))(\\.(?\\db\\d+))$",
- }
- ]
}
diff --git a/.github/workflows-config/aws-openshift-rosa-hcp-single-region/test_matrix.yml b/.github/workflows-config/aws-openshift-rosa-hcp-single-region/test_matrix.yml
new file mode 100644
index 000000000..a28a67640
--- /dev/null
+++ b/.github/workflows-config/aws-openshift-rosa-hcp-single-region/test_matrix.yml
@@ -0,0 +1,46 @@
+---
+matrix:
+ distro:
+ # /!\ BEFORE ADDING/REMOVING A VERSION:
+ # /!\ Please keep this matrix synced with the official documentation:
+ # https://github.com/camunda/camunda-docs/blob/main/docs/self-managed/setup/deploy/openshift/redhat-openshift.md?plain=1#L2
+ # According to https://access.redhat.com/support/policy/updates/openshift, this matrix should reference the last 4 (may change) supported versions of OpenShift
+ - name: OpenShift 4.18
+ type: openshift
+ schedule_only: false
+ # renovate: datasource=custom.rosa-camunda depName=red-hat-openshift versioning=regex:^4(\.(?<minor>\d+))?(\.(?<patch>\d+))?$
+ version: 4.18.1
+ # /!\ Warning: When a new minor version of OpenShift is released,
+ # you must update all N-1, N-2, and N-3 versions in this matrix.
+ # rationale: Red Hat supports the last four minor versions of OpenShift.
+ # Therefore, to ensure compatibility and support, we must test against these versions.
+ # For more details, refer to the official support policy at https://endoflife.date/red-hat-openshift.
+ platform: rosa
+
+ - name: OpenShift 4.17
+ schedule_only: true # Old versions are only checked during schedule workflows
+ type: openshift
+ # renovate: datasource=custom.rosa-camunda depName=red-hat-openshift versioning=regex:^4.17(\.(?<patch>\d+))?$
+ version: 4.17.16
+ platform: rosa
+
+ - name: OpenShift 4.16
+ schedule_only: true
+ type: openshift
+ # renovate: datasource=custom.rosa-camunda depName=red-hat-openshift versioning=regex:^4.16(\.(?<patch>\d+))?$
+ version: 4.16.8
+ platform: rosa
+
+ - name: OpenShift 4.15
+ schedule_only: true
+ type: openshift
+ # renovate: datasource=custom.rosa-camunda depName=red-hat-openshift versioning=regex:^4.15(\.(?<patch>\d+))?$
+ version: 4.15.21
+ platform: rosa
+
+ scenario:
+ - name: Chart Setup
+ desc: Setup chart in production-like setup with Ingress and TLS.
+ flow: install
diff --git a/.github/workflows/aws_ec2_golden.yml b/.github/workflows/aws_ec2_golden.yml
index 8658dc662..88a46ee2d 100644
--- a/.github/workflows/aws_ec2_golden.yml
+++ b/.github/workflows/aws_ec2_golden.yml
@@ -1,5 +1,6 @@
---
-name: AWS EC2 Golden Files
+name: Tests - Golden - AWS EC2
+
on:
workflow_dispatch:
@@ -20,6 +21,8 @@ concurrency:
cancel-in-progress: true
env:
+ IS_SCHEDULE: ${{ contains(github.ref, 'refs/heads/schedule/') || github.event_name == 'schedule' && 'true' || 'false' }}
+
AWS_PROFILE: infex
AWS_REGION: eu-west-2
TF_PATH: ${{ github.workspace }}/aws/ec2/terraform
@@ -36,11 +39,11 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Install asdf tools with cache
- uses: camunda/infraex-common-config/./.github/actions/asdf-install-tooling@6158b7c7534f6dbeb3fa7c3c836f6e6aa6881b2e # 1.3.0
+ uses: camunda/infraex-common-config/./.github/actions/asdf-install-tooling@6dc218bf7ee3812a4b6b13c305bce60d5d1d46e5 # 1.3.1
- name: Import Secrets
id: secrets
- uses: hashicorp/vault-action@v3
+ uses: hashicorp/vault-action@7709c609789c5e27b757a85817483caadbb5939a # v3
with:
url: ${{ secrets.VAULT_ADDR }}
method: approle
diff --git a/.github/workflows/aws_ec2_tests.yml b/.github/workflows/aws_ec2_tests.yml
index 0526b9d4f..874653816 100644
--- a/.github/workflows/aws_ec2_tests.yml
+++ b/.github/workflows/aws_ec2_tests.yml
@@ -1,5 +1,5 @@
---
-name: AWS EC2 Tests
+name: Tests - Integration - AWS EC2
on:
# Disabling for the time being due to focus shift with 8.7
@@ -18,9 +18,11 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
# in case of renovate we don't cancel the previous run, so it can finish it
# otherwise weekly renovate PRs with tf docs updates result in broken clusters
- cancel-in-progress: ${{ github.actor == 'renovate[bot]' && false || true }}
+ cancel-in-progress: ${{ !contains('renovate[bot]', github.actor) }}
env:
+ IS_SCHEDULE: ${{ contains(github.ref, 'refs/heads/schedule/') || github.event_name == 'schedule' && 'true' || 'false' }}
+
AWS_PROFILE: infex
AWS_REGION: eu-west-2
S3_BACKEND_BUCKET: tf-state-multi-reg
@@ -54,11 +56,11 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Install asdf tools with cache
- uses: camunda/infraex-common-config/./.github/actions/asdf-install-tooling@6158b7c7534f6dbeb3fa7c3c836f6e6aa6881b2e # 1.3.0
+ uses: camunda/infraex-common-config/./.github/actions/asdf-install-tooling@6dc218bf7ee3812a4b6b13c305bce60d5d1d46e5 # 1.3.1
- name: Import Secrets
id: secrets
- uses: hashicorp/vault-action@v3
+ uses: hashicorp/vault-action@7709c609789c5e27b757a85817483caadbb5939a # v3
with:
url: ${{ secrets.VAULT_ADDR }}
method: approle
@@ -198,13 +200,14 @@ jobs:
notify-on-failure:
runs-on: ubuntu-latest
# if only snapshot error was detected, don't report
- if: github.event_name == 'schedule' && failure() && needs.test-report.outputs.SNAPSHOT_ERROR == 'false'
+ if: failure() && needs.test-report.outputs.SNAPSHOT_ERROR == 'false'
needs:
- test
- test-report
steps:
- name: Notify in Slack in case of failure
id: slack-notification
+ if: ${{ env.IS_SCHEDULE == 'true' }}
uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@6158b7c7534f6dbeb3fa7c3c836f6e6aa6881b2e # 1.3.0
with:
vault_addr: ${{ secrets.VAULT_ADDR }}
diff --git a/.github/workflows/aws_openshift_rosa_hcp_single_region_daily_cleanup.yml b/.github/workflows/aws_openshift_rosa_hcp_single_region_daily_cleanup.yml
new file mode 100644
index 000000000..76f14edb3
--- /dev/null
+++ b/.github/workflows/aws_openshift_rosa_hcp_single_region_daily_cleanup.yml
@@ -0,0 +1,118 @@
+---
+name: Tests - Daily Cleanup - AWS OpenShift ROSA HCP Single Region
+
+on:
+ workflow_dispatch:
+ inputs:
+ max_age_hours_cluster:
+ description: Maximum age of clusters in hours
+ required: true
+ default: '20'
+ pull_request:
+ paths:
+ - .github/workflows/aws_openshift_rosa_hcp_single_region_daily_cleanup.yml
+ - .tool-versions
+ - aws/openshift/rosa-hcp-single-region/**
+ - '!aws/openshift/rosa-hcp-single-region/test/golden/**'
+ - .github/actions/aws-openshift-rosa-hcp-single-region-cleanup/**
+
+ schedule:
+ - cron: 0 1 * * * # At 01:00 every day.
+
+# limit to a single execution per actor of this workflow
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ # in case of renovate we don't cancel the previous run, so it can finish it
+ # otherwise weekly renovate PRs with tf docs updates result in broken clusters
+ cancel-in-progress: ${{ !contains('renovate[bot]', github.actor) }}
+
+env:
+ IS_SCHEDULE: ${{ contains(github.ref, 'refs/heads/schedule/') || github.event_name == 'schedule' && 'true' || 'false' }}
+
+ MAX_AGE_HOURS_CLUSTER: ${{ github.event.inputs.max_age_hours_cluster || '0' }}
+
+ # please keep those variables synced with aws_rosa_hcp_tests.yml
+ AWS_PROFILE: infex
+ S3_BACKEND_BUCKET: tests-ra-aws-rosa-hcp-tf-state-eu-central-1
+ S3_BUCKET_REGION: eu-central-1
+ AWS_REGION: eu-west-2
+
+
+jobs:
+
+ cleanup-clusters:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+ with:
+ ref: ${{ github.ref }}
+ fetch-depth: 0
+
+ - name: Install asdf tools with cache
+ uses: camunda/infraex-common-config/./.github/actions/asdf-install-tooling@6dc218bf7ee3812a4b6b13c305bce60d5d1d46e5 # 1.3.1
+
+ - name: Use repo .tool-version as global version
+ run: cp .tool-versions ~/.tool-versions
+
+ - name: Set current Camunda version
+ id: camunda-version
+ run: |
+ CAMUNDA_VERSION=$(cat .camunda-version)
+ echo "CAMUNDA_VERSION=$CAMUNDA_VERSION" | tee -a "$GITHUB_OUTPUT"
+
+ - name: Import Secrets
+ id: secrets
+ uses: hashicorp/vault-action@a1b77a09293a4366e48a5067a86692ac6e94fdc0 # v3
+ with:
+ url: ${{ secrets.VAULT_ADDR }}
+ method: approle
+ roleId: ${{ secrets.VAULT_ROLE_ID }}
+ secretId: ${{ secrets.VAULT_SECRET_ID }}
+ exportEnv: false
+ secrets: |
+ secret/data/products/infrastructure-experience/ci/common AWS_ACCESS_KEY;
+ secret/data/products/infrastructure-experience/ci/common AWS_SECRET_KEY;
+ secret/data/products/infrastructure-experience/ci/common RH_OPENSHIFT_TOKEN;
+
+ # Official action does not support profiles
+ - name: Add profile credentials to ~/.aws/credentials
+ run: |
+ aws configure set aws_access_key_id ${{ steps.secrets.outputs.AWS_ACCESS_KEY }} --profile ${{ env.AWS_PROFILE }}
+ aws configure set aws_secret_access_key ${{ steps.secrets.outputs.AWS_SECRET_KEY }} --profile ${{ env.AWS_PROFILE }}
+ aws configure set region ${{ env.AWS_REGION }} --profile ${{ env.AWS_PROFILE }}
+
+ - name: Delete clusters
+ id: delete_clusters
+ timeout-minutes: 125
+ uses: ./.github/actions/aws-openshift-rosa-hcp-single-region-cleanup
+ env:
+ RHCS_TOKEN: ${{ steps.secrets.outputs.RH_OPENSHIFT_TOKEN }}
+ with:
+ tf-bucket: ${{ env.S3_BACKEND_BUCKET }}
+ tf-bucket-region: ${{ env.S3_BUCKET_REGION }}
+ max-age-hours-cluster: ${{ env.MAX_AGE_HOURS_CLUSTER }}
+ tf-bucket-key-prefix: ${{ steps.camunda-version.outputs.CAMUNDA_VERSION }}/
+
+ # There are cases where the deletion of resources fails due to dependencies.
+ - name: Retry delete clusters
+ id: retry_delete_clusters
+ if: failure() && steps.delete_clusters.outcome == 'failure'
+ timeout-minutes: 125
+ uses: ./.github/actions/aws-openshift-rosa-hcp-single-region-cleanup
+ env:
+ RHCS_TOKEN: ${{ steps.secrets.outputs.RH_OPENSHIFT_TOKEN }}
+ with:
+ tf-bucket: ${{ env.S3_BACKEND_BUCKET }}
+ tf-bucket-region: ${{ env.S3_BUCKET_REGION }}
+ max-age-hours-cluster: 0 # the previous attempt modified the state files, resetting their age, so target age 0
+ tf-bucket-key-prefix: ${{ steps.camunda-version.outputs.CAMUNDA_VERSION }}/
+
+ - name: Notify in Slack in case of failure
+ id: slack-notification
+ if: ${{ failure() && env.IS_SCHEDULE == 'true' && steps.retry_delete_clusters.outcome == 'failure' }}
+ uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@e9a9f33ab193348a82a79bd9250fdf12f708390a # 1.2.19
+ with:
+ vault_addr: ${{ secrets.VAULT_ADDR }}
+ vault_role_id: ${{ secrets.VAULT_ROLE_ID }}
+ vault_secret_id: ${{ secrets.VAULT_SECRET_ID }}
diff --git a/.github/workflows/aws_openshift_rosa_hcp_single_region_golden.yml b/.github/workflows/aws_openshift_rosa_hcp_single_region_golden.yml
new file mode 100644
index 000000000..c5fd85a00
--- /dev/null
+++ b/.github/workflows/aws_openshift_rosa_hcp_single_region_golden.yml
@@ -0,0 +1,98 @@
+---
+name: Tests - Golden - AWS OpenShift ROSA HCP Single Region
+
+on:
+ workflow_dispatch:
+ pull_request:
+ paths:
+ - .github/workflows/aws_openshift_rosa_hcp_single_region_golden.yml
+ - .tool-versions
+ - aws/modules/rosa-hcp/**
+ - aws/openshift/rosa-hcp-single-region/**
+
+# limit to a single execution per actor of this workflow
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+env:
+ IS_SCHEDULE: ${{ contains(github.ref, 'refs/heads/schedule/') || github.event_name == 'schedule' && 'true' || 'false' }}
+
+ # keep this synced with other workflows
+ AWS_PROFILE: infex
+ AWS_REGION: eu-west-2
+ S3_BACKEND_BUCKET: tests-ra-aws-rosa-hcp-tf-state-eu-central-1
+ S3_BUCKET_REGION: eu-central-1
+ S3_BUCKET_KEY: golden.tfstate
+
+ MODULE_DIR: ./aws/openshift/rosa-hcp-single-region/
+
+jobs:
+ compare:
+ runs-on: ubuntu-latest
+ permissions:
+ contents: write
+ pull-requests: write
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+
+ - name: Install asdf tools with cache
+ uses: camunda/infraex-common-config/./.github/actions/asdf-install-tooling@6dc218bf7ee3812a4b6b13c305bce60d5d1d46e5 # 1.3.1
+
+ - name: Import Secrets
+ id: secrets
+ uses: hashicorp/vault-action@7709c609789c5e27b757a85817483caadbb5939a # v3
+ with:
+ url: ${{ secrets.VAULT_ADDR }}
+ method: approle
+ roleId: ${{ secrets.VAULT_ROLE_ID }}
+ secretId: ${{ secrets.VAULT_SECRET_ID }}
+ exportEnv: false
+ secrets: |
+ secret/data/products/infrastructure-experience/ci/common AWS_ACCESS_KEY;
+ secret/data/products/infrastructure-experience/ci/common AWS_SECRET_KEY;
+
+ - name: Add profile credentials to ~/.aws/credentials
+ shell: bash
+ run: |
+ aws configure set aws_access_key_id ${{ steps.secrets.outputs.AWS_ACCESS_KEY }} --profile ${{ env.AWS_PROFILE }}
+ aws configure set aws_secret_access_key ${{ steps.secrets.outputs.AWS_SECRET_KEY }} --profile ${{ env.AWS_PROFILE }}
+ aws configure set region ${{ env.AWS_REGION }} --profile ${{ env.AWS_PROFILE }}
+
+ - name: Compute golden
+ run: |
+ set -euxo pipefail
+ just regenerate-golden-file "${{ env.MODULE_DIR }}" "${{ env.S3_BUCKET_REGION }}" "${{ env.S3_BACKEND_BUCKET }}" "${{ env.S3_BUCKET_KEY }}" "./compare/"
+
+ - name: Upload pipeline golden result
+ uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4
+ with:
+ name: plan-artifact-json
+ path: ${{ env.MODULE_DIR }}compare/tfplan-golden.json
+ retention-days: 1
+
+ - name: Compare Terraform plan with golden file
+ run: |
+ delta "${{ env.MODULE_DIR }}test/golden/tfplan-golden.json" "${{ env.MODULE_DIR }}compare/tfplan-golden.json"
+ exit $?
+
+ - name: Post diff on PR
+ if: failure() && github.event_name == 'pull_request'
+ uses: int128/diff-action@db6cce01542cb26e181798736eea1e71f5d36706 # v1
+ with:
+ base: ${{ env.MODULE_DIR }}test/golden/tfplan-golden.json
+ head: ${{ env.MODULE_DIR }}compare/tfplan-golden.json
+ comment-header: |
+ ## Module ${{ env.MODULE_DIR }} - Terraform Golden Plan Diff
+
+ comment-footer: |
+ 🖲 **Check the delta diff**
+ in the [workflow run](${{github.server_url}}/${{github.repository}}/actions/runs/${{github.run_id}}) for a detailed comparison.
+
+ If the changes are expected, you can use the uploaded artifact on the workflow to update the golden file on your branch.
+
+ 📟 **Alternatively, run the following command locally** to update the golden file:
+ ```sh
+ just regenerate-golden-file "${{ env.MODULE_DIR }}" "${{ env.S3_BUCKET_REGION }}" "${{ env.S3_BACKEND_BUCKET }}" "${{ env.S3_BUCKET_KEY }}"
+ ```
diff --git a/.github/workflows/aws_openshift_rosa_hcp_single_region_tests.yml b/.github/workflows/aws_openshift_rosa_hcp_single_region_tests.yml
new file mode 100644
index 000000000..31c573b0b
--- /dev/null
+++ b/.github/workflows/aws_openshift_rosa_hcp_single_region_tests.yml
@@ -0,0 +1,658 @@
+---
+name: Tests - Integration - AWS OpenShift ROSA HCP Single Region
+
+# description: This workflow performs integration tests against the ROSA HCP platform
+
+on:
+ schedule:
+ - cron: 0 3 * * 1 # Runs at 3 AM on Monday
+ pull_request:
+ paths:
+ - .github/workflows/aws_openshift_rosa_hcp_single_region_tests.yml
+ - .github/workflows-config/aws-openshift-rosa-hcp-single-region/test_matrix.yml
+ - .tool-versions
+ - generic/kubernetes/single-region/**
+ - generic/openshift/single-region/**
+ - aws/openshift/rosa-hcp-single-region/**
+ - '!aws/openshift/rosa-hcp-single-region/test/golden/**'
+ - .github/actions/aws-openshift-rosa-hcp-single-region-create/**
+ - .github/actions/aws-openshift-rosa-hcp-single-region-cleanup/**
+
+ workflow_dispatch:
+ inputs:
+ cluster_name:
+ description: Cluster name.
+ required: false
+ type: string
+ delete_clusters:
+ description: Whether to delete the clusters.
+ type: boolean
+ default: true
+ enable_tests:
+ description: Whether to enable the tests.
+ type: boolean
+ default: true
+
+# limit to a single execution per actor of this workflow
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ # in case of renovate we don't cancel the previous run, so it can finish it
+ # otherwise weekly renovate PRs with tf docs updates result in broken clusters
+ cancel-in-progress: ${{ !contains('renovate[bot]', github.actor) }}
+
+env:
+ IS_SCHEDULE: ${{ contains(github.ref, 'refs/heads/schedule/') || github.event_name == 'schedule' && 'true' || 'false' }}
+
+ AWS_PROFILE: infex
+ AWS_REGION: eu-west-2
+ S3_BACKEND_BUCKET: tests-ra-aws-rosa-hcp-tf-state-eu-central-1
+ S3_BUCKET_REGION: eu-central-1
+
+ CLEANUP_CLUSTERS: ${{ github.event.inputs.delete_clusters || 'true' }}
+
+ # TEST VARIABLES
+
+ # Vars with "CI_" prefix are used in the CI workflow only.
+ CI_MATRIX_FILE: .github/workflows-config/aws-openshift-rosa-hcp-single-region/test_matrix.yml
+
+ # Docker Hub auth to avoid image pull rate limit.
+ # Vars with "TEST_" prefix are used in the test runner tool (Task).
+ TESTS_ENABLED: ${{ github.event.inputs.enable_tests || 'true' }}
+ TESTS_CAMUNDA_HELM_CHART_REPO_REF: main # git reference used to clone the camunda/camunda-platform-helm repository to perform the tests
+ TESTS_CAMUNDA_HELM_CHART_REPO_PATH: ./.camunda_helm_repo # where to clone it
+
+ # Components that are not enabled by default in the doc, but enabled in our tests to have a better coverage
+ WEBMODELER_ENABLED: 'true'
+ CONSOLE_ENABLED: 'true'
+
+ ROSA_CLI_VERSION: latest
+
+jobs:
+ clusters-info:
+ name: Define Matrix
+ runs-on: ubuntu-latest
+ outputs:
+ platform-matrix: ${{ steps.matrix.outputs.platform-matrix }}
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+ with:
+ fetch-depth: 0
+
+ - name: Install asdf tools with cache
+ uses: camunda/infraex-common-config/./.github/actions/asdf-install-tooling@6dc218bf7ee3812a4b6b13c305bce60d5d1d46e5 # 1.3.1
+
+ - id: matrix
+ # we define a global matrix in an external file due to https://github.com/orgs/community/discussions/26284
+ run: |
+ set -euxo pipefail # tolerate nothing
+
+ # Generate cluster name.
+ # shellcheck disable=SC2086
+ distro_indexes="$(yq '.matrix.distro | to_entries | .[] | .key' ${CI_MATRIX_FILE})"
+
+ # Loop over clusters.
+ # Vars are exported to pass them to yq instead of local inline syntax.
+ # shellcheck disable=SC2086
+ for distro_index in ${distro_indexes}; do
+ cluster_name_input="${{ inputs.cluster_name }}"
+ cluster_name_fallback="hci-$(uuidgen | head -c 8)"
+ export cluster_name="${cluster_name_input:-${cluster_name_fallback}}"
+ export distro_index="${distro_index}"
+ yq -i '.matrix.distro[env(distro_index)].clusterName = env(cluster_name)' "${CI_MATRIX_FILE}"
+ done
+
+ echo "Filtering the matrix with strategy IS_SCHEDULE=$IS_SCHEDULE"
+ if [[ "$IS_SCHEDULE" == "true" ]]; then
+ # shellcheck disable=SC2086
+ platform_matrix="$(yq '.matrix |= (.distro |= map(select(.schedule_only == true)))' \
+ --indent=0 --output-format json ${CI_MATRIX_FILE})"
+ else
+ # shellcheck disable=SC2086
+ platform_matrix="$(yq '.matrix |= (.distro |= map(select(.schedule_only == null or .schedule_only == false)))' \
+ --indent=0 --output-format json ${CI_MATRIX_FILE})"
+ fi
+
+ platform_matrix="$(echo "$platform_matrix" | yq '.matrix' --indent=0 --output-format json)"
+ echo "${platform_matrix}" | jq
+ echo "platform-matrix=${platform_matrix}" > "$GITHUB_OUTPUT"
+
+ prepare-clusters:
+ name: Prepare clusters
+ needs:
+ - clusters-info
+ strategy:
+ fail-fast: false
+ matrix:
+ distro: ${{ fromJson(needs.clusters-info.outputs.platform-matrix).distro }}
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+ with:
+ ref: ${{ github.ref }}
+ fetch-depth: 0
+
+ - name: Install asdf tools with cache
+ uses: camunda/infraex-common-config/./.github/actions/asdf-install-tooling@6dc218bf7ee3812a4b6b13c305bce60d5d1d46e5 # 1.3.1
+
+ - name: Import Secrets
+ id: secrets
+ uses: hashicorp/vault-action@7709c609789c5e27b757a85817483caadbb5939a # v3
+ with:
+ url: ${{ secrets.VAULT_ADDR }}
+ method: approle
+ roleId: ${{ secrets.VAULT_ROLE_ID }}
+ secretId: ${{ secrets.VAULT_SECRET_ID }}
+ exportEnv: false
+ secrets: |
+ secret/data/products/infrastructure-experience/ci/common AWS_ACCESS_KEY;
+ secret/data/products/infrastructure-experience/ci/common AWS_SECRET_KEY;
+ secret/data/products/infrastructure-experience/ci/common RH_OPENSHIFT_TOKEN;
+ secret/data/products/infrastructure-experience/ci/common CI_OPENSHIFT_MAIN_PASSWORD;
+ secret/data/products/infrastructure-experience/ci/common CI_OPENSHIFT_MAIN_USERNAME;
+
+ - name: Add profile credentials to ~/.aws/credentials
+ shell: bash
+ run: |
+ aws configure set aws_access_key_id ${{ steps.secrets.outputs.AWS_ACCESS_KEY }} --profile ${{ env.AWS_PROFILE }}
+ aws configure set aws_secret_access_key ${{ steps.secrets.outputs.AWS_SECRET_KEY }} --profile ${{ env.AWS_PROFILE }}
+ aws configure set region ${{ env.AWS_REGION }} --profile ${{ env.AWS_PROFILE }}
+
+ - name: Set current Camunda version
+ id: camunda-version
+ run: |
+ CAMUNDA_VERSION=$(cat .camunda-version)
+ echo "CAMUNDA_VERSION=$CAMUNDA_VERSION" | tee -a "$GITHUB_OUTPUT"
+
+ - name: Create ROSA cluster and login
+ uses: ./.github/actions/aws-openshift-rosa-hcp-single-region-create
+ id: create_cluster
+ # Do not interrupt tests; otherwise, the Terraform state may become inconsistent.
+ if: always() && success()
+ with:
+ rh-token: ${{ steps.secrets.outputs.RH_OPENSHIFT_TOKEN }}
+ cluster-name: ${{ matrix.distro.clusterName }}
+ admin-username: ${{ steps.secrets.outputs.CI_OPENSHIFT_MAIN_USERNAME }}
+ admin-password: ${{ steps.secrets.outputs.CI_OPENSHIFT_MAIN_PASSWORD }}
+ aws-region: ${{ env.AWS_REGION }}
+ s3-backend-bucket: ${{ env.S3_BACKEND_BUCKET }}
+ s3-bucket-region: ${{ env.S3_BUCKET_REGION }}
+ s3-bucket-key-prefix: ${{ steps.camunda-version.outputs.CAMUNDA_VERSION }}/
+ openshift-version: ${{ matrix.distro.version }}
+ tf-modules-revision: ${{ github.ref }}
+
+ - name: Export kubeconfig and encrypt it # this is required to pass matrix outputs securely using artifacts
+ id: export_kube_config
+ run: |
+ # shellcheck disable=SC2005
+ echo "$(kubectl config view --raw)" > kubeconfig.yaml 2>/dev/null
+ openssl enc -aes-256-cbc -salt -in kubeconfig.yaml -out encrypted_kubeconfig.enc -pass pass:"${GITHUB_TOKEN}" -pbkdf2
+ encrypted_kubeconfig_base64=$(base64 -w 0 encrypted_kubeconfig.enc)
+ echo "kubeconfig_raw=${encrypted_kubeconfig_base64}" >> "$GITHUB_OUTPUT"
+
+ ## Write for matrix outputs workaround
+ - uses: cloudposse/github-action-matrix-outputs-write@ed06cf3a6bf23b8dce36d1cf0d63123885bb8375 # v1
+ id: out
+ with:
+ matrix-step-name: ${{ github.job }}
+ matrix-key: ${{ matrix.distro.name }}
+ outputs: |-
+ kubeconfig_raw: ${{ steps.export_kube_config.outputs.kubeconfig_raw }}
+
+ access-info:
+ name: Read kube configs from matrix
+ runs-on: ubuntu-latest
+ needs: prepare-clusters
+ outputs:
+ kubeconfig: ${{ steps.read-workflow.outputs.result }}
+ steps:
+ - uses: cloudposse/github-action-matrix-outputs-read@33cac12fa9282a7230a418d859b93fdbc4f27b5a # v1
+ id: read-workflow
+ with:
+ matrix-step-name: prepare-clusters
+
+ integration-tests:
+ name: Run integration tests - ${{ matrix.distro.name }}
+ runs-on: ubuntu-latest
+ needs:
+ - clusters-info
+ - access-info
+ strategy:
+ fail-fast: false
+ matrix:
+ distro: ${{ fromJson(needs.clusters-info.outputs.platform-matrix).distro }}
+ scenario: ${{ fromJson(needs.clusters-info.outputs.platform-matrix).scenario }}
+ env:
+ TEST_NAMESPACE: camunda # This namespace is hard-coded in the documentation
+ # https://github.com/camunda/camunda-platform-helm/blob/test/integration/scenarios/chart-full-setup/Taskfile.yaml#L12C15-L12C32
+ TEST_CLUSTER_TYPE: openshift
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+
+ - name: Install asdf tools with cache for the project
+ uses: camunda/infraex-common-config/./.github/actions/asdf-install-tooling@6158b7c7534f6dbeb3fa7c3c836f6e6aa6881b2e # 1.3.0
+
+ - name: Install CLI tools from OpenShift Mirror
+ uses: redhat-actions/openshift-tools-installer@144527c7d98999f2652264c048c7a9bd103f8a82 # v1
+ with:
+ oc: ${{ matrix.distro.version }}
+
+ - name: Import Secrets
+ id: secrets
+ uses: hashicorp/vault-action@7709c609789c5e27b757a85817483caadbb5939a # v3
+ with:
+ url: ${{ secrets.VAULT_ADDR }}
+ method: approle
+ roleId: ${{ secrets.VAULT_ROLE_ID }}
+ secretId: ${{ secrets.VAULT_SECRET_ID }}
+ exportEnv: false
+ secrets: |
+ secret/data/products/infrastructure-experience/ci/common DOCKERHUB_USER;
+ secret/data/products/infrastructure-experience/ci/common DOCKERHUB_PASSWORD;
+ secret/data/products/infrastructure-experience/ci/common CI_CAMUNDA_USER_TEST_CLIENT_ID;
+ secret/data/products/infrastructure-experience/ci/common CI_CAMUNDA_USER_TEST_CLIENT_SECRET;
+
+ - name: 🔐 Login into the cluster
+ run: |
+ mkdir -p "$HOME/.kube"
+ echo "${{ fromJson(needs.access-info.outputs.kubeconfig).kubeconfig_raw[matrix.distro.name] }}" | base64 --decode > encrypted_kubeconfig.enc
+ openssl enc -aes-256-cbc -d -in encrypted_kubeconfig.enc -out "$HOME/.kube/config" -pass pass:"${GITHUB_TOKEN}" -pbkdf2
+ rm encrypted_kubeconfig.enc
+ chmod 600 "$HOME/.kube/config"
+
+ - name: 📁 Get a copy of the reference architecture
+ run: |
+ # run it as specified in the doc
+ set -euxo pipefail # tolerate nothing
+
+ ./aws/openshift/rosa-hcp-single-region/procedure/get-your-copy.sh
+ tree
+
+ - name: 🏗️ Prepare a fresh namespace for the tests
+ # we need to retry because the cluster has just been created and the OIDC provider may not be available yet
+ uses: nick-fields/retry@7152eba30c6575329ac0576536151aca5a72780e # v3
+ with:
+ timeout_minutes: 10
+ max_attempts: 40
+ shell: bash
+ retry_wait_seconds: 15
+ command: |
+ set -o errexit # this is required https://github.com/nick-fields/retry/issues/133
+ set -o pipefail
+
+ source ./aws/openshift/rosa-hcp-single-region/procedure/gather-cluster-login-id.sh
+ echo "CLUSTER_NAME=$CLUSTER_NAME"
+ echo "CLUSTER_API_URL=$CLUSTER_API_URL"
+ echo "CLUSTER_ADMIN_USERNAME=$CLUSTER_ADMIN_USERNAME"
+
+ # Delete the namespace to ensure a fresh start
+ if kubectl get namespace "$TEST_NAMESPACE" &>/dev/null; then
+ kubectl delete namespace "$TEST_NAMESPACE" --wait
+ while kubectl get namespace "$TEST_NAMESPACE" &>/dev/null; do
+ echo "Namespace $TEST_NAMESPACE still being deleted, waiting..."
+ sleep 5
+ done
+ fi
+
+ kubectl create namespace "$TEST_NAMESPACE"
+
+ - name: 🛠️ Assemble deployment values of generic/openshift/single-region
+ # we need to retry because the cluster has just been created and the OIDC provider may not be available yet
+ uses: nick-fields/retry@7152eba30c6575329ac0576536151aca5a72780e # v3
+ with:
+ timeout_minutes: 10
+ max_attempts: 40
+ shell: bash
+ retry_wait_seconds: 15
+ command: |
+ set -o errexit
+ set -euxo pipefail # tolerate nothing
+
+ # As this action can be retried due to OpenShift API errors, it must
+ # be stateless (all commands can be rerun without issue)
+
+ echo "Construct the values.yml file"
+
+ cp -f generic/openshift/single-region/helm-values/base.yml ./values.yml
+
+ source ./generic/openshift/single-region/procedure/setup-application-domain.sh
+ echo "CAMUNDA_DOMAIN=$DOMAIN_NAME" | tee -a "$GITHUB_ENV"
+
+ source ./generic/openshift/single-region/procedure/get-ingress-http2-status.sh
+
+ ./generic/openshift/single-region/procedure/enable-ingress-http2.sh
+
+ # Enable Routes
+ for file in core-route.yml connectors-route.yml domain.yml; do
+ yq ". *d load(\"generic/openshift/single-region/helm-values/$file\")" values.yml > values-result.yml
+ cat values-result.yml && mv values-result.yml values.yml
+ done
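+ # Assumption: yq's `*d` multiply flag deep-merges the loaded file into values.yml,
+ # recursively merging maps and merging arrays by index instead of replacing them.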
+
+ # Enable SCC
+ yq '. *d load("generic/openshift/single-region/helm-values/scc.yml")' values.yml > values-result.yml
+ cat values-result.yml && mv values-result.yml values.yml
+
+ if [ "$WEBMODELER_ENABLED" == "true" ]; then
+ echo "Enabling WebModeler"
+ yq -i '.webModeler.enabled = true' values.yml
+ yq -i '.webModelerPostgresql.enabled = true' values.yml
+ fi
+
+ if [ "$CONSOLE_ENABLED" == "true" ]; then
+ echo "Enabling Console"
+ yq -i '.console.enabled = true' values.yml
+ fi
+
+ # Add integration tests values
+ if [ "$TESTS_ENABLED" == "true" ]; then
+ for file in registry.yml identity.yml; do
+ yq ". *d load(\"generic/kubernetes/single-region/tests/helm-values/$file\")" values.yml > values-result.yml
+ cat values-result.yml && mv values-result.yml values.yml
+ done
+ fi
+
+ ./generic/openshift/single-region/procedure/assemble-envsubst-values.sh
+
+
+ - name: 🏁 Install Camunda 8 using the generic/openshift helm chart procedure
+ run: |
+ set -euxo pipefail # tolerate nothing
+
+ source generic/openshift/single-region/procedure/chart-env.sh
+ source generic/openshift/single-region/procedure/generate-passwords.sh
+
+ ./generic/openshift/single-region/procedure/create-identity-secret.sh
+
+ # Generate tests objects
+ if [ "$TESTS_ENABLED" == "true" ]; then
+ # Create the pull secrets described in generic/kubernetes/single-region/tests/helm-values/registry.yml
+ kubectl create secret docker-registry index-docker-io \
+ --docker-server=index.docker.io \
+ --docker-username="${{ steps.secrets.outputs.DOCKERHUB_USER }}" \
+ --docker-password="${{ steps.secrets.outputs.DOCKERHUB_PASSWORD }}" \
+ --namespace="$TEST_NAMESPACE"
+
+ kubectl create secret generic identity-secret-for-components-integration \
+ --from-literal=identity-admin-client-id="${{ steps.secrets.outputs.CI_CAMUNDA_USER_TEST_CLIENT_ID }}" \
+ --from-literal=identity-admin-client-secret="${{ steps.secrets.outputs.CI_CAMUNDA_USER_TEST_CLIENT_SECRET }}" \
+ --namespace="$TEST_NAMESPACE"
+ fi
+
+ ./generic/openshift/single-region/procedure/install-chart.sh
+
+ - name: 👀⏳ Wait for the deployment to be healthy using generic/kubernetes/single-region
+ timeout-minutes: 10
+ run: |
+ ./generic/kubernetes/single-region/procedure/check-deployment-ready.sh
+
+ - name: 🧙♂️ Clone camunda/camunda-platform-helm
+ if: env.TESTS_ENABLED == 'true'
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+ with:
+ repository: camunda/camunda-platform-helm
+ ref: ${{ env.TESTS_CAMUNDA_HELM_CHART_REPO_REF }}
+ path: ${{ env.TESTS_CAMUNDA_HELM_CHART_REPO_PATH }}
+ fetch-depth: 0
+
+ - name: 🧪 TESTS - Set variables
+ if: env.TESTS_ENABLED == 'true'
+ run: |
+ set -euxo pipefail # tolerate nothing
+
+ CAMUNDA_VERSION=$(cat .camunda-version)
+
+ export TEST_CHART_DIR_STATIC="$TESTS_CAMUNDA_HELM_CHART_REPO_PATH/charts/camunda-platform-$CAMUNDA_VERSION"
+ echo "TEST_CHART_DIR_STATIC=$TEST_CHART_DIR_STATIC" | tee -a "$GITHUB_ENV"
+
+ TEST_INGRESS_HOST="$CAMUNDA_DOMAIN"
+ echo "TEST_INGRESS_HOST=$TEST_INGRESS_HOST" | tee -a "$GITHUB_ENV"
+
+ # shellcheck disable=SC2002
+ TEST_CHART_VERSION=$(cat "$TEST_CHART_DIR_STATIC/Chart.yaml" | yq '.version')
+ echo "TEST_CHART_VERSION=$TEST_CHART_VERSION" | tee -a "$GITHUB_ENV"
+
+ # setup docker registry secret for tests
+ echo "TEST_CREATE_DOCKER_LOGIN_SECRET=true" | tee -a "$GITHUB_ENV"
+ echo "TEST_DOCKER_USERNAME_CAMUNDA_CLOUD=${{ steps.secrets.outputs.DOCKERHUB_USERNAME }}" | tee -a "$GITHUB_ENV"
+ echo "TEST_DOCKER_PASSWORD_CAMUNDA_CLOUD=${{ steps.secrets.outputs.DOCKERHUB_PASSWORD }}" | tee -a "$GITHUB_ENV"
+
+ CI_TASKS_BASE_DIR="$TESTS_CAMUNDA_HELM_CHART_REPO_PATH/test/integration/scenarios/"
+ echo "CI_TASKS_BASE_DIR=$CI_TASKS_BASE_DIR" | tee -a "$GITHUB_ENV"
+ export TEST_CHART_DIR="../../../../charts/camunda-platform-$CAMUNDA_VERSION"
+ echo "TEST_CHART_DIR=$TEST_CHART_DIR" | tee -a "$GITHUB_ENV"
+ export TEST_VALUES_BASE_DIR="$TESTS_CAMUNDA_HELM_CHART_REPO_PATH/test/integration/scenarios"
+ echo "TEST_VALUES_BASE_DIR=$TEST_VALUES_BASE_DIR" | tee -a "$GITHUB_ENV"
+
+ # replace "integration" with the camunda release name, part of the adaptations required to run the tests in our environment
+ find "$TEST_CHART_DIR_STATIC/test/integration/testsuites/vars/files/" -type f -print0 | xargs -0 sed -i 's/integration/camunda/g'
+
+ echo "Configure Venom tests"
+ # (adapted from https://github.com/camunda/camunda-platform-helm/blob/test/integration/scenarios/chart-full-setup/Taskfile.yaml#L56)
+ export VARIABLES_ENV_FILE="$TEST_CHART_DIR_STATIC/test/integration/testsuites/vars/files/variables.env"
+ # Enable the ingress tests
+ # shellcheck disable=SC2129
+ echo "VENOM_VAR_TEST_INGRESS_HOST=$TEST_INGRESS_HOST" >> "$VARIABLES_ENV_FILE"
+ echo "VENOM_VAR_TEST_CLIENT_ID=${{ steps.secrets.outputs.CI_CAMUNDA_USER_TEST_CLIENT_ID }}" >> "$VARIABLES_ENV_FILE"
+ echo "VENOM_VAR_TEST_CLIENT_SECRET=${{ steps.secrets.outputs.CI_CAMUNDA_USER_TEST_CLIENT_SECRET }}" >> "$VARIABLES_ENV_FILE"
+ echo "VENOM_EXTRA_ARGS=--var-from-file=./vars/variables-ingress-combined.yaml" >> "$VARIABLES_ENV_FILE"
+ ZEEBE_VERSION=$(yq '.core.image.tag' "$TEST_CHART_DIR_STATIC/values.yaml")
+ # shellcheck disable=SC2129
+ echo "ZEEBE_VERSION=$ZEEBE_VERSION" >> "$VARIABLES_ENV_FILE"
+ # In case the Zeebe version has not been released officially yet.
+ echo "ZEEBE_VERSION_FALLBACK=8.5.6" >> "$VARIABLES_ENV_FILE"
+
+ # Some variables are not working correctly, patching them with yq directly
+ echo "VENOM_VAR_SKIP_TEST_INGRESS=true" >> "$VARIABLES_ENV_FILE"
+
+ echo "Patch the test files..."
+
+ # TODO: [BUG] remove the patches when https://github.com/camunda/camunda-platform-helm/issues/3081 is fixed
+ echo "Patch expression ShouldBeFalse"
+ sed -i "s/ ShouldBeFalse/ ShouldEqual 'false'/g" \
+ "$TEST_CHART_DIR_STATIC/test/integration/testsuites/vars/files/testsuite-core.yaml" \
+ "$TEST_CHART_DIR_STATIC/test/integration/testsuites/vars/files/testsuite-preflight.yaml"
+
+ echo "Skip test ingress is also broken, fixing it"
+ yq eval '(.testcases[].steps[].skip |= map(select(test("skiptestingress", "i") | not)))' \
+ -i "$TEST_CHART_DIR_STATIC/test/integration/testsuites/vars/files/testsuite-core.yaml"
+ yq eval 'del(.. | select(has("skip") and .skip | length == 0).skip)' \
+ -i "$TEST_CHART_DIR_STATIC/test/integration/testsuites/vars/files/testsuite-core.yaml"
+
+ if [ "$WEBMODELER_ENABLED" != "true" ]; then
+ echo "Disable Webmodeler in the core tests as it's not enabled"
+
+ echo "VENOM_VAR_SKIP_TEST_WEBMODELER=false" >> "$VARIABLES_ENV_FILE"
+ yq eval 'del(.. | select(has("component") and .component == "WebModeler"))' \
+ -i "$TEST_CHART_DIR_STATIC/test/integration/testsuites/vars/files/testsuite-core.yaml"
+ fi
+
+ if [ "$CONSOLE_ENABLED" != "true" ]; then
+ echo "Disable Console in the core tests as it's not enabled"
+
+ yq eval 'del(.. | select(has("component") and .component == "Console"))' \
+ -i "$TEST_CHART_DIR_STATIC/test/integration/testsuites/vars/files/testsuite-core.yaml"
+ fi
+
+ # remove venom var client secret as we define it in the file
+ yq e 'del(.spec.template.spec.containers[].env[] | select(.name == "VENOM_VAR_TEST_CLIENT_SECRET"))' \
+ -i "$TEST_CHART_DIR_STATIC/test/integration/testsuites/core/patches/job.yaml"
+
+ echo "Ensure asdf tool is available in the test suite by using our global one"
+ cp .tool-versions "$TEST_VALUES_BASE_DIR"
+
+ - name: 🧪 TESTS - Run Preflight TestSuite
+ if: env.TESTS_ENABLED == 'true'
+ timeout-minutes: 10
+ run: |
+ task -d "${CI_TASKS_BASE_DIR}/chart-full-setup" test.preflight
+
+ - name: 🧪 TESTS - Run Core TestSuite
+ if: env.TESTS_ENABLED == 'true'
+ timeout-minutes: 20
+ run: |
+ task -d "${CI_TASKS_BASE_DIR}/chart-full-setup" test.core
+
+ - name: 🧪 TESTS - Run additional tests
+ if: env.TESTS_ENABLED == 'true'
+ timeout-minutes: 20
+ run: |
+ set -euxo pipefail # tolerate nothing
+
+ echo "Show zeebe cluster topology using generic/kubernetes/single-region/procedure/check-zeebe-cluster-topology.sh:"
+ export ZEEBE_CLIENT_ID="${{ steps.secrets.outputs.CI_CAMUNDA_USER_TEST_CLIENT_ID }}"
+ export ZEEBE_CLIENT_SECRET="${{ steps.secrets.outputs.CI_CAMUNDA_USER_TEST_CLIENT_SECRET }}"
+ export DOMAIN_NAME="$CAMUNDA_DOMAIN"
+
+ # Execute the script and capture the output in a variable
+ check_zeebe_topology_output=$(./generic/kubernetes/single-region/procedure/check-zeebe-cluster-topology.sh)
+
+ echo "$check_zeebe_topology_output" | jq
+
+ # Checks
+ error_found=false
+ check_zeebe_topology_all_healthy=$(echo "$check_zeebe_topology_output" | jq '[.brokers[].partitions[].health == "healthy"] | all')
+ check_zeebe_topology_cluster_size=$(echo "$check_zeebe_topology_output" | jq '.clusterSize')
+ check_zeebe_topology_partitions_count=$(echo "$check_zeebe_topology_output" | jq '.partitionsCount')
+
+ if [ "$check_zeebe_topology_all_healthy" = "true" ]; then
+ echo "✅ All partitions are healthy."
+ else
+ echo "❌ Not all partitions are healthy"
+ error_found=true
+ fi
+
+ if [ "$check_zeebe_topology_cluster_size" -eq 3 ]; then
+ echo "✅ Cluster size is 3."
+ else
+ echo "❌ Cluster size is not 3."
+ error_found=true
+ fi
+
+ if [ "$check_zeebe_topology_partitions_count" -eq 3 ]; then
+ echo "✅ Partitions count is 3."
+ else
+ echo "❌ Partitions count is not 3."
+ error_found=true
+ fi
+
+ echo "Comparing golden file of the zeebe topology output..."
+
+ reference_file="./generic/kubernetes/single-region/procedure/check-zeebe-cluster-topology-output.json"
+ # Save the output to a temporary file
+ temp_output=$(mktemp)
+ echo "$check_zeebe_topology_output" > "$temp_output"
+
+ # Replace patch version
+ yq e '.brokers[].version |= sub("[.][0-9]+$", ".z") | .gatewayVersion |= sub("[.][0-9]+$", ".z")' -i "$temp_output"
+ yq e '.brokers[].version |= sub("[.][0-9]+$", ".z") | .gatewayVersion |= sub("[.][0-9]+$", ".z")' -i "$reference_file"
+
+ # Sort both files and redact fields that are not predictable (e.g. partition roles)
+ yq e '.brokers |= sort_by(.host) | .brokers[] |= (.partitions |= sort_by(.partitionId) | .partitions[].role = "NOT_PREDICTABLE")' -i "$temp_output"
+ yq e '.brokers |= sort_by(.host) | .brokers[] |= (.partitions |= sort_by(.partitionId) | .partitions[].role = "NOT_PREDICTABLE")' -i "$reference_file"
+
+ # Compare the two files on sorted JSON using delta ("|| true" because it exits non-zero when the files differ)
+ diff_output=$(delta <(jq -S . "$temp_output") <(jq -S . "$reference_file") || true)
+
+ if [[ -n "$diff_output" ]]; then
+ # If differences are found, print the error and the diff
+ echo "❌ Error: The golden files of zeebe topology files do not match."
+ echo "Differences found:"
+ echo "$diff_output"
+
+ # Display the new generated version
+ echo "New version:"
+ cat "$temp_output"
+
+ error_found=true
+ fi
+
+ if [ "$error_found" = true ]; then
+ echo "❌ Some tests failed."
+ exit 1
+ fi
+ echo "✅ The cluster meets all the expected criteria."
+
+ - name: 🔬🚨 Get failed Pods info
+ if: failure()
+ uses: camunda/camunda-platform-helm/./.github/actions/failed-pods-info@52f7c04dc9817a2f8a5b7b1c5450a80a8a6996ae # main
+
+ cleanup-clusters:
+ name: Cleanup ROSA clusters
+ if: always()
+ runs-on: ubuntu-latest
+ needs:
+ - clusters-info
+ - integration-tests
+ strategy:
+ fail-fast: false
+ matrix:
+ distro: ${{ fromJson(needs.clusters-info.outputs.platform-matrix).distro }}
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+ if: env.CLEANUP_CLUSTERS == 'true'
+ with:
+ fetch-depth: 0
+
+ - name: Install asdf tools with cache
+ if: env.CLEANUP_CLUSTERS == 'true'
+ uses: camunda/infraex-common-config/./.github/actions/asdf-install-tooling@6dc218bf7ee3812a4b6b13c305bce60d5d1d46e5 # 1.3.1
+
+ - name: Import Secrets
+ id: secrets
+ uses: hashicorp/vault-action@7709c609789c5e27b757a85817483caadbb5939a # v3
+ if: env.CLEANUP_CLUSTERS == 'true'
+ with:
+ url: ${{ secrets.VAULT_ADDR }}
+ method: approle
+ roleId: ${{ secrets.VAULT_ROLE_ID }}
+ secretId: ${{ secrets.VAULT_SECRET_ID }}
+ exportEnv: false
+ secrets: |
+ secret/data/products/infrastructure-experience/ci/common AWS_ACCESS_KEY;
+ secret/data/products/infrastructure-experience/ci/common AWS_SECRET_KEY;
+ secret/data/products/infrastructure-experience/ci/common RH_OPENSHIFT_TOKEN;
+
+ - name: Add profile credentials to ~/.aws/credentials
+ shell: bash
+ if: env.CLEANUP_CLUSTERS == 'true'
+ run: |
+ aws configure set aws_access_key_id ${{ steps.secrets.outputs.AWS_ACCESS_KEY }} --profile ${{ env.AWS_PROFILE }}
+ aws configure set aws_secret_access_key ${{ steps.secrets.outputs.AWS_SECRET_KEY }} --profile ${{ env.AWS_PROFILE }}
+ aws configure set region ${{ env.AWS_REGION }} --profile ${{ env.AWS_PROFILE }}
+
+ - name: Set current Camunda version
+ id: camunda-version
+ if: env.CLEANUP_CLUSTERS == 'true'
+ run: |
+ CAMUNDA_VERSION=$(cat .camunda-version)
+ echo "CAMUNDA_VERSION=$CAMUNDA_VERSION" | tee -a "$GITHUB_OUTPUT"
+
+ - name: Delete on-demand ROSA HCP Cluster
+ uses: ./.github/actions/aws-openshift-rosa-hcp-single-region-cleanup
+ if: always() && env.CLEANUP_CLUSTERS == 'true'
+ timeout-minutes: 125
+ env:
+ RHCS_TOKEN: ${{ steps.secrets.outputs.RH_OPENSHIFT_TOKEN }}
+ with:
+ tf-bucket: ${{ env.S3_BACKEND_BUCKET }}
+ tf-bucket-region: ${{ env.S3_BUCKET_REGION }}
+ max-age-hours-cluster: 0
+ target: ${{ matrix.distro.clusterName }}
+ tf-bucket-key-prefix: ${{ steps.camunda-version.outputs.CAMUNDA_VERSION }}/
+
+ report:
+ name: Report failures
+ if: failure()
+ runs-on: ubuntu-latest
+ needs:
+ - integration-tests
+ - cleanup-clusters
+ steps:
+ - name: Notify in Slack in case of failure
+ id: slack-notification
+ if: ${{ env.IS_SCHEDULE == 'true' }}
+ uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@e9a9f33ab193348a82a79bd9250fdf12f708390a # 1.2.19
+ with:
+ vault_addr: ${{ secrets.VAULT_ADDR }}
+ vault_role_id: ${{ secrets.VAULT_ROLE_ID }}
+ vault_secret_id: ${{ secrets.VAULT_SECRET_ID }}
diff --git a/.github/workflows/internal_global_links.yml b/.github/workflows/internal_global_links.yml
new file mode 100644
index 000000000..76827276a
--- /dev/null
+++ b/.github/workflows/internal_global_links.yml
@@ -0,0 +1,59 @@
+---
+name: Internal - Global - Check external links
+
+on:
+ push:
+ workflow_dispatch:
+ schedule:
+ - cron: 0 3 1 * *
+ pull_request:
+ paths:
+ - .github/workflows/internal_global_links.yml
+
+jobs:
+ lint:
+ name: links-check
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+
+ - name: Get Current Date
+ id: dateofday
+ run: echo "DATEOFDAY=$(date +%Y-%m-%d)" >> "$GITHUB_ENV"
+
+ - name: Restore lychee cache
+ uses: actions/cache/restore@d4323d4df104b026a6aa633fdb11d772146be0bf # v4
+ with:
+ path: .lycheecache
+ key: cache-lychee-${{ env.DATEOFDAY }}
+
+ - name: Link Checker
+ uses: lycheeverse/lychee-action@f613c4a64e50d792e0b31ec34bbcbba12263c6a6 # v2.3.0
+ with:
+ fail: true
+ args: -c ./lychee-links.toml --base . --cache --max-cache-age 1d . --verbose --no-progress '*.md' './**/*.md'
+ token: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Cache links
+ uses: actions/cache/save@d4323d4df104b026a6aa633fdb11d772146be0bf # v4
+ with:
+ path: .lycheecache
+ key: cache-lychee-${{ env.DATEOFDAY }}
+
+ - name: Create Issue From File
+ if: failure() && github.event_name == 'schedule'
+ uses: peter-evans/create-issue-from-file@e8ef132d6df98ed982188e460ebb3b5d4ef3a9cd # v5
+ with:
+ title: Link Checker Report
+ content-filepath: ./lychee/out.md
+ labels: report, automated issue
+ assignees: '@camunda/infraex-medic'
+
+ - name: Notify in Slack in case of failure
+ id: slack-notification
+ if: failure() && github.event_name == 'schedule'
+ uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@e9a9f33ab193348a82a79bd9250fdf12f708390a # 1.2.19
+ with:
+ vault_addr: ${{ secrets.VAULT_ADDR }}
+ vault_role_id: ${{ secrets.VAULT_ROLE_ID }}
+ vault_secret_id: ${{ secrets.VAULT_SECRET_ID }}
diff --git a/.github/workflows/lint.yml b/.github/workflows/internal_global_lint.yml
similarity index 89%
rename from .github/workflows/lint.yml
rename to .github/workflows/internal_global_lint.yml
index 0f0b758d0..332b3ff41 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/internal_global_lint.yml
@@ -1,5 +1,5 @@
---
-name: lint
+name: Internal - Global - Lint
on:
workflow_dispatch:
diff --git a/.github/workflows/renovate-automerge.yml b/.github/workflows/internal_global_renovate_automerge.yml
similarity index 88%
rename from .github/workflows/renovate-automerge.yml
rename to .github/workflows/internal_global_renovate_automerge.yml
index cfc68126a..08912765e 100644
--- a/.github/workflows/renovate-automerge.yml
+++ b/.github/workflows/internal_global_renovate_automerge.yml
@@ -1,5 +1,5 @@
---
-name: Renovate Auto Merge
+name: Internal - Global - Renovate Auto Merge
on:
pull_request:
diff --git a/.github/workflows/internal_openshift_artifact_rosa_versions.yml b/.github/workflows/internal_openshift_artifact_rosa_versions.yml
new file mode 100644
index 000000000..27ee6e73a
--- /dev/null
+++ b/.github/workflows/internal_openshift_artifact_rosa_versions.yml
@@ -0,0 +1,68 @@
+---
+# This workflow updates an artifact containing the available ROSA versions.
+# It is used by Renovate and published at https://camunda.github.io/camunda-deployment-references/rosa_versions.txt
+name: Internal - OpenShift - Save ROSA Versions as an artifact
+
+on:
+ schedule:
+ - cron: 0 0 * * *
+ workflow_dispatch:
+ pull_request:
+ paths:
+ - .github/workflows/internal_openshift_artifact_rosa_versions.yml
+
+jobs:
+ save-rosa-versions:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+ with:
+ ref: gh-pages
+
+ - name: Import Secrets
+ id: secrets
+ uses: hashicorp/vault-action@a1b77a09293a4366e48a5067a86692ac6e94fdc0 # v3
+ with:
+ url: ${{ secrets.VAULT_ADDR }}
+ method: approle
+ roleId: ${{ secrets.VAULT_ROLE_ID }}
+ secretId: ${{ secrets.VAULT_SECRET_ID }}
+ exportEnv: false
+ secrets: |
+ secret/data/products/infrastructure-experience/ci/common RH_OPENSHIFT_TOKEN;
+
+ - name: Install ROSA CLI and output rosa versions
+ shell: bash
+ run: |
+ curl -LO "https://mirror.openshift.com/pub/openshift-v4/clients/rosa/latest/rosa-linux.tar.gz"
+ tar -xvf rosa-linux.tar.gz
+ sudo mv rosa /usr/local/bin/rosa
+ chmod +x /usr/local/bin/rosa
+ rm -f rosa-linux.tar.gz
+ rosa version
+ rosa login --token=${{ steps.secrets.outputs.RH_OPENSHIFT_TOKEN }}
+ mkdir -p docs
+ rosa list versions --output json | jq '.[].raw_id' --raw-output > docs/rosa_versions.txt
+
+ - name: Commit and push ROSA versions file to gh-pages
+ shell: bash
+ run: |
+ git diff --exit-code docs/rosa_versions.txt || {
+ git config --local user.name "github-actions[bot]"
+ git config --local user.email "github-actions[bot]@users.noreply.github.com"
+ git add docs/rosa_versions.txt
+ git commit -m "Update ROSA versions"
+ git push origin gh-pages
+ }
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Notify in Slack in case of failure
+ id: slack-notification
+ if: failure() && github.event_name == 'schedule'
+ uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@e9a9f33ab193348a82a79bd9250fdf12f708390a # 1.2.19
+ with:
+ vault_addr: ${{ secrets.VAULT_ADDR }}
+ vault_role_id: ${{ secrets.VAULT_ROLE_ID }}
+ vault_secret_id: ${{ secrets.VAULT_SECRET_ID }}
diff --git a/.gitignore b/.gitignore
index 4cb3b8d86..79f7cd0fc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -58,6 +58,8 @@ Temporary Items
*.tfstate.*
.terraform.lock.hcl
*.plan
+tfplan.json
+tfplan-redacted.json
# Crash log files
crash.log
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 25fc296a0..ab7e4b0e8 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -64,7 +64,7 @@ repos:
rev: v0.9.0.6
hooks:
- id: shellcheck
- args: [--external-sources, --source-path, .github/workflows/scripts, '--exclude=SC2154,SC2034,SC1091']
+ args: [--external-sources]
- repo: https://github.com/adrienverge/yamllint
rev: v1.35.1
diff --git a/.tool-versions b/.tool-versions
index 28ec43f8c..1c80e4724 100644
--- a/.tool-versions
+++ b/.tool-versions
@@ -28,14 +28,22 @@ jq 1.7.1
just 1.39.0
+helm 3.17.1
+
kubectl 1.32.2
+kustomize 5.6.0 # used by the tests
+
pre-commit 4.1.0
python 3.13.2
shellcheck 0.10.0
+task 3.30.1 # used by the tests
+
terraform 1.11.1
terraform-docs 0.19.0
@@ -45,5 +53,3 @@ tflint 0.55.1
trivy 0.60.0
yq 4.45.1
-
-zbctl 8.5.8
diff --git a/DEVELOPER.md b/DEVELOPER.md
new file mode 100644
index 000000000..773f87843
--- /dev/null
+++ b/DEVELOPER.md
@@ -0,0 +1,43 @@
+# Developer Documentation
+
+## Workflow Naming Convention
+
+Our workflows follow a standardized naming convention to ensure clarity and consistency across internal and external processes.
+
+### Internal Workflows
+All internal workflows are prefixed with `internal_` followed by:
+1. **Scope**: Either `global`, `openshift`, or any related component name.
+2. **Workflow Purpose**: A description of the workflow's function.
+
+#### Examples:
+- `internal_global_lint.yml`: Linting workflow for global scope.
+- `internal_openshift_lint.yml`: Linting workflow for OpenShift scope.
+- `internal_openshift_artifact_rosa_versions.yml`: Workflow for managing ROSA artifact versions in OpenShift.
+
+### Test Workflows
+For architecture reference tests, the naming follows the folder structure where the tests reside.
+
+#### Example:
+For a test located in `aws/openshift/rosa-hcp-single-region`, the corresponding workflow file is named:
+```
+aws_openshift_rosa_hcp_single_region_tests.yml
+```
+
+## Standardized Workflow Naming
+Inside each workflow file, the `name` field is also standardized to maintain uniformity.
+
+#### Examples:
+- **Linting Workflow:**
+ ```yaml
+ name: Internal - Global - Lint
+ ```
+- **Integration Test Workflow:**
+ ```yaml
+ name: Tests - Integration - AWS OpenShift ROSA HCP Single Region
+ ```
+- **Daily Cleanup Workflow:**
+ ```yaml
+ name: Tests - Daily Cleanup - AWS OpenShift ROSA HCP Single Region
+ ```
+
+By following these conventions, we ensure a clear and structured approach to workflow management, making it easier to understand, maintain, and scale our CI/CD pipelines.
diff --git a/MAINTENANCE.md b/MAINTENANCE.md
new file mode 100644
index 000000000..21aeef046
--- /dev/null
+++ b/MAINTENANCE.md
@@ -0,0 +1,42 @@
+# Maintenance of this repository
+
+TODO: write the complete maintenance guide (https://github.com/camunda/camunda-deployment-references/issues/110)
+
+## Branching Strategy for camunda-deployment-references
+
+The repository [https://github.com/camunda/camunda-deployment-references](https://github.com/camunda/camunda-deployment-references) follows the logic of maintaining only the [next unreleased version of Camunda](https://docs.camunda.io/docs/8.7/reference/release-notes/) on the `main` branch.
+
+=> Most of the time, we work on the next unreleased version, so merge requests should target `main`.
+
+For example, consider the following branches:
+
+- `main/`
+- `stable/8.6`
+- `stable/8.5`
+
+Where `8.6` is the latest stable version and `8.7` is the next one. The branch to target for merge requests should be `main` since it represents the upcoming version.
+
+When `8.7` becomes the new stable version, we create the `stable/8.7` branch from `main`, and `main` is then used for the next unreleased version (`8.8`).
+
+## Release duty
+
+When a new version is ready for release, we need to cut the `main` branch to create a new stable branch (`stable/8.x`). Follow these steps:
+
+1. Update the value of the release in `.camunda-version`
+2. Add all the schedules for the version in `.github/workflows-config/workflow-scheduler.yml`
+3. **Create the stable branch**
+ - From `main`, create a new branch `stable/8.x`.
+ - Example: If the current stable version is `8.6` and we are preparing to release `8.7`, run:
+ ```sh
+ git checkout main
+ git checkout -b stable/8.7
+ git push origin stable/8.7
+ ```
+
+4. **Ensure all release tasks are completed**
+ - Resolve all `TODO [release-duty]` items in the codebase.
+ - Verify that documentation, configurations, and dependencies are up to date.
+
+5. **Prepare `main` for the next version**
+ - The `main` branch now represents the next unreleased version (`8.8`).
+ - Update version references in relevant files to reflect the new development cycle.
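+
+As an illustrative sketch of steps 1 and 5 (the version numbers here are hypothetical, substitute the real release):
+
+```sh
+# Step 1: record the version being released before cutting the stable branch
+echo "8.7" > .camunda-version
+
+# Step 5: after cutting stable/8.7, point main at the next development cycle
+echo "8.8" > .camunda-version
+git commit -am "Prepare main for 8.8"
+git push origin main
+```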
diff --git a/README.md b/README.md
index eb1766f8a..3aea521d8 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,75 @@ For more details, refer to the official [Camunda Reference Architecture document
## Structure
-The repository is structured into cloud providers (`aws`, `azure`, `general`) and internal-only reusable modules (`modules`).
+The repository is organized into different cloud providers (`aws`, `azure`, `generic`) and internal reusable modules (`modules`) associated with each cloud provider.
+
+### Naming Convention
+
+The directory structure follows a standardized naming pattern:
+
+```
+- {cloud_provider}
+ - modules
+ - {category}
+ - {solution}-{declination}-{feature}
+```
+
+Where:
+- `{cloud_provider}`: The cloud provider (`aws`, `azure`, `generic`).
+- `{category}`: The type of service or technology (e.g., `kubernetes`, `compute`).
+- `{solution}`: The specific solution, such as `eks` (Amazon EKS), `gke` (Google Kubernetes Engine), or `ec2` (Amazon EC2).
+- `{declination}`: A variation of the solution, such as:
+ - `spot-instances` (for EC2 cost optimization).
+ - `on-demand` (for standard EC2 instances).
+- `{feature}`: A specific feature or deployment model, particularly in relation to **Camunda 8**, such as:
+ - `single-region` (deployment in a single region).
+ - `dual-region` (high availability across two regions).
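+
+For example, `aws/kubernetes/eks-single-region` decomposes into `aws` (cloud provider), `kubernetes` (category), `eks` (solution), and `single-region` (feature); the `{declination}` part is omitted when there is no variation.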
+
+### Modules
+
+The `modules` directory is tied to specific cloud providers. Each cloud provider may include reusable modules that can be utilized across multiple solutions within that cloud environment.
+
+### Example Structure
+
+For AWS Kubernetes and EC2 solutions:
+
+```
+- aws
+ - kubernetes
+ - eks-single-region
+ - eks-single-region-spot-instances
+ - eks-dual-region
+ - eks-dual-region-karpenter
+ - compute
+ - ec2-single-region
+ - ec2-single-region-spot-instances
+ - modules
+ - networking
+ - monitoring
+```
+
+## Requirements
+
+To manage the specific versions of this project, we use the following tools:
+
+- **[asdf](https://asdf-vm.com/)** version manager (see the [installation guide](https://asdf-vm.com/guide/getting-started.html)).
+- **[just](https://github.com/casey/just)** as a command runner
+ You can install it using asdf with the following commands:
+ ```bash
+ asdf plugin add just
+ asdf install just
+ ```
+
+### Installing Tooling
+
+Once these tools are installed, you can set up the necessary tooling listed in the `.tool-versions` file located at the root of the project by running the following:
+
+```bash
+just install-tooling
+
+# To list all available recipes:
+just --list
+```
## Support
diff --git a/aws/ec2/scripts/.shellcheckrc b/aws/ec2/scripts/.shellcheckrc
new file mode 100644
index 000000000..5806607be
--- /dev/null
+++ b/aws/ec2/scripts/.shellcheckrc
@@ -0,0 +1 @@
+disable=SC2154,SC1091,SC2034
diff --git a/aws/modules/rosa-hcp/README.md b/aws/modules/rosa-hcp/README.md
new file mode 100644
index 000000000..7a90ca995
--- /dev/null
+++ b/aws/modules/rosa-hcp/README.md
@@ -0,0 +1,124 @@
+# rosa-hcp
+
+This module automates the creation of a ROSA HCP cluster with an opinionated configuration targeting Camunda 8 on AWS using Terraform.
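+
+A minimal usage sketch (the values are illustrative; see the Inputs table below for all variables):
+
+```hcl
+module "rosa_cluster" {
+  source = "../../modules/rosa-hcp"
+
+  cluster_name      = "my-ocp-cluster"      # pick a unique name
+  htpasswd_password = var.htpasswd_password # required and sensitive, source it from a secret store
+}
+```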
+
+## Requirements
+
+Requirements not installed by asdf:
+
+* ROSA CLI ([installation guide](https://docs.openshift.com/rosa/rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-installing-rosa.html))
+* OpenShift CLI ([installation guide](https://docs.openshift.com/container-platform/latest/cli_reference/openshift_cli/getting-started-cli.html))
+
+
+### Enable ROSA in AWS Marketplace
+
+1. Log in to AWS
+2. Check whether the ELB service-linked role exists
+```bash
+# To check if the role exists for your account, run this command in your terminal:
+aws iam get-role --role-name "AWSServiceRoleForElasticLoadBalancing"
+
+# If the role doesn't exist, create it by running the following command:
+aws iam create-service-linked-role --aws-service-name "elasticloadbalancing.amazonaws.com"
+
+```
+3. Log in to the [Red Hat Hybrid Cloud Console](https://console.redhat.com/openshift/token)
+4. Generate an offline token by clicking "Load Token", then use it to log in:
+```bash
+export RHCS_TOKEN=yourToken
+rosa login --token="$RHCS_TOKEN"
+
+rosa whoami
+
+rosa verify quota --region="$AWS_REGION"
+
+# this may fail due to org policy
+rosa verify permissions --region="$AWS_REGION"
+
+rosa create account-roles --mode auto
+```
+5. Enable ROSA HCP in the [AWS Marketplace](https://docs.openshift.com/rosa/cloud_experts_tutorials/cloud-experts-rosa-hcp-activation-and-account-linking-tutorial.html)
+ * Navigate to the ROSA console: https://console.aws.amazon.com/rosa
+ * Choose "Get started".
+ * On the "Verify ROSA prerequisites" page, select "I agree to share my contact information with Red Hat".
+ * Choose "Enable ROSA".
+
+Please note that **only a single AWS account (the one used for service billing) can be associated with a Red Hat account.**
+
+Base tutorial: https://aws.amazon.com/blogs/containers/build-rosa-clusters-with-terraform/
+
+## Retrieve cluster information
+
+1. In the Terraform output, you will find the created cluster ID:
+```bash
+cluster_id = "2b3sq2r4geb7b6htaibb4uqk9qc9c3fa"
+```
+2. Describe the cluster
+```bash
+export CLUSTER_ID="2b3sq2r4geb7b6htaibb4uqk9qc9c3fa"
+
+rosa describe cluster --output=json -c "$CLUSTER_ID"
+```
+3. Generate the kubeconfig:
+```bash
+export NAMESPACE="myNs"
+export SERVER_API=$(rosa describe cluster --output=json -c "$CLUSTER_ID" | jq -r '.api.url')
+# ADMIN_USER and ADMIN_PASS are the module's htpasswd_username and htpasswd_password values
+oc login --username "$ADMIN_USER" --password "$ADMIN_PASS" --server="$SERVER_API"
+
+kubectl config rename-context "$(oc config current-context)" "$CLUSTER_NAME"
+kubectl config use-context "$CLUSTER_NAME"
+
+# create a new project
+oc new-project "$NAMESPACE"
+```
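+
+Optionally, you can sanity-check the login (not part of the original procedure, just a quick verification):
+
+```bash
+# the reported API server should match SERVER_API, and the cluster nodes should be listed
+oc whoami --show-server
+kubectl get nodes
+```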
+
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [htpasswd\_idp](#module\_htpasswd\_idp) | terraform-redhat/rosa-hcp/rhcs//modules/idp | 1.6.5 |
+| [rosa\_hcp](#module\_rosa\_hcp) | terraform-redhat/rosa-hcp/rhcs | 1.6.5 |
+| [vpc](#module\_vpc) | terraform-redhat/rosa-hcp/rhcs//modules/vpc | 1.6.5 |
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
+| [aws_eips.current_usage](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eips) | data source |
+| [aws_servicequotas_service_quota.elastic_ip_quota](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/servicequotas_service_quota) | data source |
+| [aws_vpcs.current_vpcs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/vpcs) | data source |
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [availability\_zones](#input\_availability\_zones) | A list of availability zone names in the region. By default, this is set to `null` and is not used; instead, `availability_zones_count` manages the number of availability zones. This value should not be updated directly. To make changes, please create a new resource. | `list(string)` | `null` | no |
+| [availability\_zones\_count](#input\_availability\_zones\_count) | The count of availability zones (minimum 2) to utilize within the specified AWS Region, where pairs of public and private subnets will be generated. Valid only when availability\_zones variable is not provided. This value should not be updated, please create a new resource instead. | `number` | `2` | no |
+| [aws\_availability\_zones](#input\_aws\_availability\_zones) | The AWS availability zones where instances of the default worker machine pool are deployed. Leave empty for the installer to pick availability zones from the VPC `availability_zones` or `availability_zones_count` | `list(string)` | `[]` | no |
+| [cluster\_name](#input\_cluster\_name) | The name of the ROSA cluster to create | `string` | `"my-ocp-cluster"` | no |
+| [compute\_node\_instance\_type](#input\_compute\_node\_instance\_type) | The EC2 instance type to use for compute nodes | `string` | `"m7i.xlarge"` | no |
+| [host\_prefix](#input\_host\_prefix) | The subnet mask to assign to each compute node in the cluster | `string` | `"23"` | no |
+| [htpasswd\_password](#input\_htpasswd\_password) | htpasswd password | `string` | n/a | yes |
+| [htpasswd\_username](#input\_htpasswd\_username) | htpasswd username | `string` | `"kubeadmin"` | no |
+| [machine\_cidr\_block](#input\_machine\_cidr\_block) | value of the CIDR block to use for the machine | `string` | `"10.0.0.0/18"` | no |
+| [openshift\_version](#input\_openshift\_version) | The version of ROSA to be deployed | `string` | `"4.18.1"` | no |
+| [pod\_cidr\_block](#input\_pod\_cidr\_block) | value of the CIDR block to use for the pods | `string` | `"10.0.64.0/18"` | no |
+| [private](#input\_private) | Restrict master API endpoint and application routes to direct, private connectivity. | `bool` | `false` | no |
+| [replicas](#input\_replicas) | The number of compute nodes to create. Must be a minimum of 2 for a single-AZ cluster, 3 for multi-AZ. | `string` | `"2"` | no |
+| [service\_cidr\_block](#input\_service\_cidr\_block) | value of the CIDR block to use for the services | `string` | `"10.0.128.0/18"` | no |
+| [vpc\_cidr\_block](#input\_vpc\_cidr\_block) | value of the CIDR block to use for the VPC | `string` | `"10.0.0.0/16"` | no |
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [all\_subnets](#output\_all\_subnets) | A comma-separated list of all subnet IDs (both public and private) in the VPC. This list can be used with the '--subnet-ids' parameter in ROSA commands for configuring cluster networking. |
+| [aws\_caller\_identity\_account\_id](#output\_aws\_caller\_identity\_account\_id) | The AWS account ID of the caller. This is the account under which the Terraform code is being executed. |
+| [cluster\_console\_url](#output\_cluster\_console\_url) | The URL endpoint for accessing the OpenShift web console. This endpoint provides a web-based user interface for managing the OpenShift cluster. |
+| [cluster\_id](#output\_cluster\_id) | The unique identifier of the OpenShift cluster created on Red Hat OpenShift Service on AWS (ROSA). This ID is used to reference the cluster in subsequent operations. |
+| [oidc\_provider\_id](#output\_oidc\_provider\_id) | OIDC provider for the OpenShift ROSA cluster. Allows adding additional IRSA mappings. |
+| [openshift\_api\_url](#output\_openshift\_api\_url) | The URL endpoint for accessing the OpenShift API. This endpoint is used to interact with the OpenShift cluster's API server. |
+| [private\_subnet\_ids](#output\_private\_subnet\_ids) | A comma-separated list of private subnet IDs in the VPC. These subnets are typically used for internal resources that do not require direct internet access. |
+| [public\_subnet\_ids](#output\_public\_subnet\_ids) | A comma-separated list of public subnet IDs in the VPC. These subnets are typically used for resources that require internet access. |
+| [vpc\_availability\_zones](#output\_vpc\_availability\_zones) | The availability zones in which the VPC is located. This provides information about the distribution of resources across different physical locations within the AWS region. |
+| [vpc\_id](#output\_vpc\_id) | The ID of the Virtual Private Cloud (VPC) where the OpenShift cluster and related resources are deployed. |
+
diff --git a/aws/modules/rosa-hcp/outputs.tf b/aws/modules/rosa-hcp/outputs.tf
new file mode 100644
index 000000000..fadbe4c82
--- /dev/null
+++ b/aws/modules/rosa-hcp/outputs.tf
@@ -0,0 +1,49 @@
+output "public_subnet_ids" {
+ value = join(",", module.vpc.public_subnets)
+ description = "A comma-separated list of public subnet IDs in the VPC. These subnets are typically used for resources that require internet access."
+}
+
+output "private_subnet_ids" {
+ value = join(",", module.vpc.private_subnets)
+ description = "A comma-separated list of private subnet IDs in the VPC. These subnets are typically used for internal resources that do not require direct internet access."
+}
+
+output "all_subnets" {
+ value = join(",", concat(module.vpc.private_subnets, module.vpc.public_subnets))
+ description = "A comma-separated list of all subnet IDs (both public and private) in the VPC. This list can be used with the '--subnet-ids' parameter in ROSA commands for configuring cluster networking."
+}
+
+output "cluster_id" {
+ value = module.rosa_hcp.cluster_id
+ description = "The unique identifier of the OpenShift cluster created on Red Hat OpenShift Service on AWS (ROSA). This ID is used to reference the cluster in subsequent operations."
+}
+
+output "openshift_api_url" {
+ value = module.rosa_hcp.cluster_api_url
+ description = "The URL endpoint for accessing the OpenShift API. This endpoint is used to interact with the OpenShift cluster's API server."
+}
+
+output "cluster_console_url" {
+ value = module.rosa_hcp.cluster_console_url
+ description = "The URL endpoint for accessing the OpenShift web console. This endpoint provides a web-based user interface for managing the OpenShift cluster."
+}
+
+output "vpc_id" {
+ value = module.vpc.vpc_id
+ description = "The ID of the Virtual Private Cloud (VPC) where the OpenShift cluster and related resources are deployed."
+}
+
+output "vpc_availability_zones" {
+ value = module.vpc.availability_zones
+ description = "The availability zones in which the VPC is located. This provides information about the distribution of resources across different physical locations within the AWS region."
+}
+
+output "aws_caller_identity_account_id" {
+ value = data.aws_caller_identity.current.account_id
+ description = "The AWS account ID of the caller. This is the account under which the Terraform code is being executed."
+}
+
+output "oidc_provider_id" {
+ value = module.rosa_hcp.oidc_config_id
+ description = "OIDC provider for the OpenShift ROSA cluster. Allows to add additional IRSA mappings."
+}
diff --git a/aws/rosa-hcp/camunda-versions/8.7/config.tf b/aws/modules/rosa-hcp/providers.tf
similarity index 52%
rename from aws/rosa-hcp/camunda-versions/8.7/config.tf
rename to aws/modules/rosa-hcp/providers.tf
index 78ac718f0..b57501ebe 100644
--- a/aws/rosa-hcp/camunda-versions/8.7/config.tf
+++ b/aws/modules/rosa-hcp/providers.tf
@@ -1,3 +1,5 @@
+data "aws_caller_identity" "current" {}
+
terraform {
required_version = ">= 1.0"
@@ -7,15 +9,8 @@ terraform {
version = ">= 5.35.0"
}
rhcs = {
- version = "1.6.8"
+ version = ">= 1.6.0"
source = "terraform-redhat/rhcs"
}
}
-
- backend "s3" {
- encrypt = true
- }
}
-
-# ensure RHCS_TOKEN env variable is set with a value from https://console.redhat.com/openshift/token/rosa
-provider "rhcs" {}
diff --git a/aws/modules/rosa-hcp/rosa.tf b/aws/modules/rosa-hcp/rosa.tf
new file mode 100644
index 000000000..0b5b5d11f
--- /dev/null
+++ b/aws/modules/rosa-hcp/rosa.tf
@@ -0,0 +1,97 @@
+locals {
+ account_role_prefix = "${var.cluster_name}-account"
+ operator_role_prefix = "${var.cluster_name}-operator"
+
+ tags = {
+ "owner" = data.aws_caller_identity.current.arn
+ }
+
+ # Prefer the explicit availability_zones list when provided and non-empty; otherwise fall back to availability_zones_count.
+ availability_zones_count_computed = var.availability_zones == null ? var.availability_zones_count : (length(var.availability_zones) > 0 ? length(var.availability_zones) : var.availability_zones_count)
+}
+
+data "aws_servicequotas_service_quota" "elastic_ip_quota" {
+ service_code = "ec2"
+ quota_code = "L-0263D0A3" # Quota code for Elastic IP addresses per region
+}
+
+
+data "aws_eips" "current_usage" {}
+
+# Data source to check if the VPC exists
+data "aws_vpcs" "current_vpcs" {
+ tags = {
+ Name = "${var.cluster_name}-vpc"
+ }
+}
+
+check "elastic_ip_quota_check" {
+
+ # Only enforce the quota condition when the VPC does not already exist.
+ assert {
+ condition = length(data.aws_vpcs.current_vpcs.ids) > 0 || (data.aws_servicequotas_service_quota.elastic_ip_quota.value - length(data.aws_eips.current_usage.public_ips)) >= local.availability_zones_count_computed
+ error_message = "Not enough available Elastic IPs to cover all local availability zones (need: ${local.availability_zones_count_computed}, have: ${(data.aws_servicequotas_service_quota.elastic_ip_quota.value - length(data.aws_eips.current_usage.public_ips))})."
+ }
+}
+
+module "rosa_hcp" {
+ source = "terraform-redhat/rosa-hcp/rhcs"
+ version = "1.6.5"
+
+ openshift_version = var.openshift_version
+ cluster_name = var.cluster_name
+ private = var.private
+
+ compute_machine_type = var.compute_node_instance_type
+ tags = local.tags
+
+ machine_cidr = var.machine_cidr_block
+ service_cidr = var.service_cidr_block
+ pod_cidr = var.pod_cidr_block
+ properties = { rosa_creator_arn = data.aws_caller_identity.current.arn }
+
+
+ replicas = var.replicas
+ aws_availability_zones = length(var.aws_availability_zones) > 0 ? var.aws_availability_zones : module.vpc.availability_zones
+
+ aws_subnet_ids = concat(
+ module.vpc.public_subnets, module.vpc.private_subnets,
+ )
+
+ host_prefix = var.host_prefix
+
+ // STS configuration
+ create_account_roles = true
+ account_role_prefix = local.account_role_prefix
+ create_oidc = true
+ create_operator_roles = true
+ operator_role_prefix = local.operator_role_prefix
+
+ wait_for_create_complete = true
+ wait_for_std_compute_nodes_complete = true
+
+ depends_on = [
+ module.vpc,
+ ]
+}
+
+module "htpasswd_idp" {
+ source = "terraform-redhat/rosa-hcp/rhcs//modules/idp"
+ version = "1.6.5"
+
+ cluster_id = module.rosa_hcp.cluster_id
+ name = "htpasswd-idp"
+ idp_type = "htpasswd"
+ htpasswd_idp_users = [{ username = var.htpasswd_username, password = var.htpasswd_password }]
+}
+
+module "vpc" {
+ source = "terraform-redhat/rosa-hcp/rhcs//modules/vpc"
+ version = "1.6.5"
+
+ name_prefix = var.cluster_name
+
+ availability_zones_count = var.availability_zones != null ? null : var.availability_zones_count
+ availability_zones = var.availability_zones
+
+ vpc_cidr = var.vpc_cidr_block
+}
diff --git a/aws/modules/rosa-hcp/vars.tf b/aws/modules/rosa-hcp/vars.tf
new file mode 100644
index 000000000..d1ed6804f
--- /dev/null
+++ b/aws/modules/rosa-hcp/vars.tf
@@ -0,0 +1,95 @@
+
+variable "cluster_name" {
+ type = string
+ description = "The name of the ROSA cluster to create"
+ default = "my-ocp-cluster"
+}
+
+variable "openshift_version" {
+ type = string
+ description = "The version of ROSA to be deployed"
+ # renovate: datasource=custom.rosa-camunda depName=red-hat-openshift versioning=semver
+ default = "4.18.1"
+ validation {
+ condition = can(regex("^[0-9]+\\.[0-9]+\\.[0-9]+$", var.openshift_version))
+ error_message = "openshift_version must follow the structure major.minor.patch (for example 4.13.6)."
+ }
+}
+
+variable "replicas" {
+ type = string
+ description = "The number of computer nodes to create. Must be a minimum of 2 for a single-AZ cluster, 3 for multi-AZ."
+ default = "2"
+}
+
+variable "private" {
+ type = bool
+ description = "Restrict master API endpoint and application routes to direct, private connectivity."
+ default = false
+}
+
+variable "compute_node_instance_type" {
+ type = string
+ description = "The EC2 instance type to use for compute nodes"
+ default = "m7i.xlarge"
+}
+
+variable "host_prefix" {
+ type = string
+ description = "The subnet mask to assign to each compute node in the cluster"
+ default = "23"
+}
+
+variable "availability_zones_count" {
+ type = number
+ description = "The count of availability (minimum 2) zones to utilize within the specified AWS Region, where pairs of public and private subnets will be generated. Valid only when availability_zones variable is not provided. This value should not be updated, please create a new resource instead."
+ default = 2
+}
+
+variable "availability_zones" {
+ type = list(string)
+ description = "A list of availability zone names in the region. By default, this is set to `null` and is not used; instead, `availability_zones_count` manages the number of availability zones. This value should not be updated directly. To make changes, please create a new resource."
+ default = null
+}
+
+
+variable "aws_availability_zones" {
+ type = list(string)
+ description = "The AWS availability zones where instances of the default worker machine pool are deployed. Leave empty for the installer to pick availability zones from the VPC `availability_zones` or `availability_zones_count`"
+ default = []
+}
+
+variable "vpc_cidr_block" {
+ type = string
+ description = "value of the CIDR block to use for the VPC"
+ default = "10.0.0.0/16"
+}
+
+variable "machine_cidr_block" {
+ type = string
+ description = "value of the CIDR block to use for the machine"
+ default = "10.0.0.0/18"
+}
+
+variable "service_cidr_block" {
+ type = string
+ description = "value of the CIDR block to use for the services"
+ default = "10.0.128.0/18"
+}
+variable "pod_cidr_block" {
+ type = string
+ description = "value of the CIDR block to use for the pods"
+ default = "10.0.64.0/18"
+}
+
+variable "htpasswd_username" {
+ type = string
+ description = "htpasswd username"
+ default = "kubeadmin"
+}
+
+variable "htpasswd_password" {
+ type = string
+ description = "htpasswd password"
+ sensitive = true
+}
diff --git a/aws/rosa-hcp-dual-region/README.md b/aws/openshift/rosa-hcp-dual-region/README.md
similarity index 64%
rename from aws/rosa-hcp-dual-region/README.md
rename to aws/openshift/rosa-hcp-dual-region/README.md
index 3dcabe341..7956b1c8a 100644
--- a/aws/rosa-hcp-dual-region/README.md
+++ b/aws/openshift/rosa-hcp-dual-region/README.md
@@ -1,4 +1,4 @@
# Camunda on AWS ROSA dual-region
This folder describes the IaC of Camunda on AWS ROSA in a dual-region setup.
-Instructions can be found on the official documentation: https://docs.camunda.io/docs/self-managed/setup/deploy/amazon/openshift/terraform-setup-dual-region/
+Instructions can be found in the official documentation: https://docs.camunda.io/docs/8.7/self-managed/setup/deploy/amazon/openshift/terraform-setup-dual-region/
diff --git a/aws/rosa-hcp-dual-region/procedure/acm/auto-import-cluster-secret.yml.tpl b/aws/openshift/rosa-hcp-dual-region/procedure/acm/auto-import-cluster-secret.yml.tpl
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/acm/auto-import-cluster-secret.yml.tpl
rename to aws/openshift/rosa-hcp-dual-region/procedure/acm/auto-import-cluster-secret.yml.tpl
diff --git a/aws/rosa-hcp-dual-region/procedure/acm/initiate_cluster_set.sh b/aws/openshift/rosa-hcp-dual-region/procedure/acm/initiate_cluster_set.sh
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/acm/initiate_cluster_set.sh
rename to aws/openshift/rosa-hcp-dual-region/procedure/acm/initiate_cluster_set.sh
diff --git a/aws/rosa-hcp-dual-region/procedure/acm/install-manifest.yml b/aws/openshift/rosa-hcp-dual-region/procedure/acm/install-manifest.yml
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/acm/install-manifest.yml
rename to aws/openshift/rosa-hcp-dual-region/procedure/acm/install-manifest.yml
diff --git a/aws/rosa-hcp-dual-region/procedure/acm/klusterlet-config.yml.tpl b/aws/openshift/rosa-hcp-dual-region/procedure/acm/klusterlet-config.yml.tpl
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/acm/klusterlet-config.yml.tpl
rename to aws/openshift/rosa-hcp-dual-region/procedure/acm/klusterlet-config.yml.tpl
diff --git a/aws/rosa-hcp-dual-region/procedure/acm/managed-cluster-set.yml b/aws/openshift/rosa-hcp-dual-region/procedure/acm/managed-cluster-set.yml
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/acm/managed-cluster-set.yml
rename to aws/openshift/rosa-hcp-dual-region/procedure/acm/managed-cluster-set.yml
diff --git a/aws/rosa-hcp-dual-region/procedure/acm/managed-cluster.yml.tpl b/aws/openshift/rosa-hcp-dual-region/procedure/acm/managed-cluster.yml.tpl
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/acm/managed-cluster.yml.tpl
rename to aws/openshift/rosa-hcp-dual-region/procedure/acm/managed-cluster.yml.tpl
diff --git a/aws/rosa-hcp-dual-region/procedure/acm/multi-cluster-hub.yml b/aws/openshift/rosa-hcp-dual-region/procedure/acm/multi-cluster-hub.yml
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/acm/multi-cluster-hub.yml
rename to aws/openshift/rosa-hcp-dual-region/procedure/acm/multi-cluster-hub.yml
diff --git a/aws/rosa-hcp-dual-region/procedure/camunda/8.7/export_environment_prerequisites.sh b/aws/openshift/rosa-hcp-dual-region/procedure/export_environment_prerequisites.sh
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/camunda/8.7/export_environment_prerequisites.sh
rename to aws/openshift/rosa-hcp-dual-region/procedure/export_environment_prerequisites.sh
diff --git a/aws/rosa-hcp-dual-region/procedure/camunda/8.7/export_services_submariner.sh b/aws/openshift/rosa-hcp-dual-region/procedure/export_services_submariner.sh
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/camunda/8.7/export_services_submariner.sh
rename to aws/openshift/rosa-hcp-dual-region/procedure/export_services_submariner.sh
diff --git a/aws/rosa-hcp-dual-region/procedure/camunda/8.7/generate_helm_values.sh b/aws/openshift/rosa-hcp-dual-region/procedure/generate_helm_values.sh
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/camunda/8.7/generate_helm_values.sh
rename to aws/openshift/rosa-hcp-dual-region/procedure/generate_helm_values.sh
diff --git a/aws/rosa-hcp-dual-region/procedure/camunda/8.7/generate_zeebe_helm_values.sh b/aws/openshift/rosa-hcp-dual-region/procedure/generate_zeebe_helm_values.sh
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/camunda/8.7/generate_zeebe_helm_values.sh
rename to aws/openshift/rosa-hcp-dual-region/procedure/generate_zeebe_helm_values.sh
diff --git a/aws/rosa-hcp-dual-region/procedure/camunda/8.7/helm-values/values-base.yml b/aws/openshift/rosa-hcp-dual-region/procedure/helm-values/values-base.yml
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/camunda/8.7/helm-values/values-base.yml
rename to aws/openshift/rosa-hcp-dual-region/procedure/helm-values/values-base.yml
diff --git a/aws/rosa-hcp-dual-region/procedure/camunda/8.7/helm-values/values-region-1.yml b/aws/openshift/rosa-hcp-dual-region/procedure/helm-values/values-region-1.yml
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/camunda/8.7/helm-values/values-region-1.yml
rename to aws/openshift/rosa-hcp-dual-region/procedure/helm-values/values-region-1.yml
diff --git a/aws/rosa-hcp-dual-region/procedure/camunda/8.7/helm-values/values-region-2.yml b/aws/openshift/rosa-hcp-dual-region/procedure/helm-values/values-region-2.yml
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/camunda/8.7/helm-values/values-region-2.yml
rename to aws/openshift/rosa-hcp-dual-region/procedure/helm-values/values-region-2.yml
diff --git a/aws/rosa-hcp-dual-region/procedure/camunda/8.7/install_chart.sh b/aws/openshift/rosa-hcp-dual-region/procedure/install_chart.sh
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/camunda/8.7/install_chart.sh
rename to aws/openshift/rosa-hcp-dual-region/procedure/install_chart.sh
diff --git a/aws/rosa-hcp-dual-region/procedure/camunda/8.7/setup_ns_secrets.sh b/aws/openshift/rosa-hcp-dual-region/procedure/setup_ns_secrets.sh
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/camunda/8.7/setup_ns_secrets.sh
rename to aws/openshift/rosa-hcp-dual-region/procedure/setup_ns_secrets.sh
diff --git a/aws/rosa-hcp-dual-region/procedure/submariner/debug-utils-submariner.yml b/aws/openshift/rosa-hcp-dual-region/procedure/submariner/debug-utils-submariner.yml
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/submariner/debug-utils-submariner.yml
rename to aws/openshift/rosa-hcp-dual-region/procedure/submariner/debug-utils-submariner.yml
diff --git a/aws/rosa-hcp-dual-region/procedure/submariner/label_nodes_brokers.sh b/aws/openshift/rosa-hcp-dual-region/procedure/submariner/label_nodes_brokers.sh
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/submariner/label_nodes_brokers.sh
rename to aws/openshift/rosa-hcp-dual-region/procedure/submariner/label_nodes_brokers.sh
diff --git a/aws/rosa-hcp-dual-region/procedure/submariner/output.txt b/aws/openshift/rosa-hcp-dual-region/procedure/submariner/output.txt
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/submariner/output.txt
rename to aws/openshift/rosa-hcp-dual-region/procedure/submariner/output.txt
diff --git a/aws/rosa-hcp-dual-region/procedure/submariner/submariner.yml.tpl b/aws/openshift/rosa-hcp-dual-region/procedure/submariner/submariner.yml.tpl
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/submariner/submariner.yml.tpl
rename to aws/openshift/rosa-hcp-dual-region/procedure/submariner/submariner.yml.tpl
diff --git a/aws/rosa-hcp-dual-region/procedure/camunda/8.7/verify_exported_services.sh b/aws/openshift/rosa-hcp-dual-region/procedure/verify_exported_services.sh
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/camunda/8.7/verify_exported_services.sh
rename to aws/openshift/rosa-hcp-dual-region/procedure/verify_exported_services.sh
diff --git a/aws/rosa-hcp-dual-region/procedure/camunda/8.7/verify_installation_completed.sh b/aws/openshift/rosa-hcp-dual-region/procedure/verify_installation_completed.sh
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/camunda/8.7/verify_installation_completed.sh
rename to aws/openshift/rosa-hcp-dual-region/procedure/verify_installation_completed.sh
diff --git a/aws/rosa-hcp-dual-region/procedure/camunda/8.7/zbctl-output.txt b/aws/openshift/rosa-hcp-dual-region/procedure/zbctl-output.txt
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/camunda/8.7/zbctl-output.txt
rename to aws/openshift/rosa-hcp-dual-region/procedure/zbctl-output.txt
diff --git a/aws/rosa-hcp-dual-region/procedure/camunda/8.7/zeebe-http-output.txt b/aws/openshift/rosa-hcp-dual-region/procedure/zeebe-http-output.txt
similarity index 100%
rename from aws/rosa-hcp-dual-region/procedure/camunda/8.7/zeebe-http-output.txt
rename to aws/openshift/rosa-hcp-dual-region/procedure/zeebe-http-output.txt
diff --git a/aws/rosa-hcp-dual-region/terraform/backup_bucket/README.md b/aws/openshift/rosa-hcp-dual-region/terraform/backup_bucket/README.md
similarity index 100%
rename from aws/rosa-hcp-dual-region/terraform/backup_bucket/README.md
rename to aws/openshift/rosa-hcp-dual-region/terraform/backup_bucket/README.md
diff --git a/aws/rosa-hcp-dual-region/terraform/backup_bucket/backup_bucket.tf b/aws/openshift/rosa-hcp-dual-region/terraform/backup_bucket/backup_bucket.tf
similarity index 100%
rename from aws/rosa-hcp-dual-region/terraform/backup_bucket/backup_bucket.tf
rename to aws/openshift/rosa-hcp-dual-region/terraform/backup_bucket/backup_bucket.tf
diff --git a/aws/rosa-hcp-dual-region/terraform/backup_bucket/config.tf b/aws/openshift/rosa-hcp-dual-region/terraform/backup_bucket/config.tf
similarity index 100%
rename from aws/rosa-hcp-dual-region/terraform/backup_bucket/config.tf
rename to aws/openshift/rosa-hcp-dual-region/terraform/backup_bucket/config.tf
diff --git a/aws/rosa-hcp-dual-region/terraform/clusters/README.md b/aws/openshift/rosa-hcp-dual-region/terraform/clusters/README.md
similarity index 100%
rename from aws/rosa-hcp-dual-region/terraform/clusters/README.md
rename to aws/openshift/rosa-hcp-dual-region/terraform/clusters/README.md
diff --git a/aws/rosa-hcp-dual-region/terraform/clusters/cluster_region_1.tf b/aws/openshift/rosa-hcp-dual-region/terraform/clusters/cluster_region_1.tf
similarity index 100%
rename from aws/rosa-hcp-dual-region/terraform/clusters/cluster_region_1.tf
rename to aws/openshift/rosa-hcp-dual-region/terraform/clusters/cluster_region_1.tf
diff --git a/aws/rosa-hcp-dual-region/terraform/clusters/cluster_region_2.tf b/aws/openshift/rosa-hcp-dual-region/terraform/clusters/cluster_region_2.tf
similarity index 100%
rename from aws/rosa-hcp-dual-region/terraform/clusters/cluster_region_2.tf
rename to aws/openshift/rosa-hcp-dual-region/terraform/clusters/cluster_region_2.tf
diff --git a/aws/rosa-hcp-dual-region/terraform/clusters/config.tf b/aws/openshift/rosa-hcp-dual-region/terraform/clusters/config.tf
similarity index 100%
rename from aws/rosa-hcp-dual-region/terraform/clusters/config.tf
rename to aws/openshift/rosa-hcp-dual-region/terraform/clusters/config.tf
diff --git a/aws/rosa-hcp-dual-region/terraform/peering/README.md b/aws/openshift/rosa-hcp-dual-region/terraform/peering/README.md
similarity index 100%
rename from aws/rosa-hcp-dual-region/terraform/peering/README.md
rename to aws/openshift/rosa-hcp-dual-region/terraform/peering/README.md
diff --git a/aws/rosa-hcp-dual-region/terraform/peering/config.tf b/aws/openshift/rosa-hcp-dual-region/terraform/peering/config.tf
similarity index 100%
rename from aws/rosa-hcp-dual-region/terraform/peering/config.tf
rename to aws/openshift/rosa-hcp-dual-region/terraform/peering/config.tf
diff --git a/aws/rosa-hcp-dual-region/terraform/peering/peering.tf b/aws/openshift/rosa-hcp-dual-region/terraform/peering/peering.tf
similarity index 100%
rename from aws/rosa-hcp-dual-region/terraform/peering/peering.tf
rename to aws/openshift/rosa-hcp-dual-region/terraform/peering/peering.tf
diff --git a/aws/rosa-hcp/camunda-versions/8.6/README.md b/aws/openshift/rosa-hcp-single-region/README.md
similarity index 86%
rename from aws/rosa-hcp/camunda-versions/8.6/README.md
rename to aws/openshift/rosa-hcp-single-region/README.md
index 2d7ee2ac8..63678888f 100644
--- a/aws/rosa-hcp/camunda-versions/8.6/README.md
+++ b/aws/openshift/rosa-hcp-single-region/README.md
@@ -1,11 +1,14 @@
-# 8.6
+# Camunda on AWS ROSA single-region
+
+This folder describes the IaC of Camunda on AWS ROSA in a single-region setup.
+Instructions can be found in the official documentation: https://docs.camunda.io/docs/8.7/self-managed/setup/deploy/amazon/openshift/terraform-setup/
## Modules
| Name | Source | Version |
|------|--------|---------|
-| [rosa\_cluster](#module\_rosa\_cluster) | git::https://github.com/camunda/camunda-tf-rosa//modules/rosa-hcp | v2.2.0 |
+| [rosa\_cluster](#module\_rosa\_cluster) | ../../modules/rosa-hcp | n/a |
## Resources
No resources.
diff --git a/aws/rosa-hcp/camunda-versions/8.6/cluster.tf b/aws/openshift/rosa-hcp-single-region/cluster.tf
similarity index 94%
rename from aws/rosa-hcp/camunda-versions/8.6/cluster.tf
rename to aws/openshift/rosa-hcp-single-region/cluster.tf
index b857d8dbd..e977150d2 100644
--- a/aws/rosa-hcp/camunda-versions/8.6/cluster.tf
+++ b/aws/openshift/rosa-hcp-single-region/cluster.tf
@@ -8,7 +8,7 @@ locals {
}
module "rosa_cluster" {
- source = "git::https://github.com/camunda/camunda-tf-rosa//modules/rosa-hcp?ref=v2.2.0"
+ source = "../../modules/rosa-hcp"
cluster_name = local.rosa_cluster_name
@@ -27,6 +27,9 @@ module "rosa_cluster" {
# Default node type for the OpenShift cluster
compute_node_instance_type = "m7i.xlarge"
replicas = 6
+
+ # renovate: datasource=custom.rosa-camunda depName=red-hat-openshift versioning=semver
+ openshift_version = "4.17.16"
}
# Outputs of the parent module
diff --git a/aws/rosa-hcp/camunda-versions/8.6/config.tf b/aws/openshift/rosa-hcp-single-region/config.tf
similarity index 100%
rename from aws/rosa-hcp/camunda-versions/8.6/config.tf
rename to aws/openshift/rosa-hcp-single-region/config.tf
diff --git a/aws/openshift/rosa-hcp-single-region/procedure/.shellcheckrc b/aws/openshift/rosa-hcp-single-region/procedure/.shellcheckrc
new file mode 100644
index 000000000..40e02e71c
--- /dev/null
+++ b/aws/openshift/rosa-hcp-single-region/procedure/.shellcheckrc
@@ -0,0 +1 @@
+disable=SC2155
diff --git a/aws/openshift/rosa-hcp-single-region/procedure/gather-cluster-login-id.sh b/aws/openshift/rosa-hcp-single-region/procedure/gather-cluster-login-id.sh
new file mode 100755
index 000000000..0424cc69f
--- /dev/null
+++ b/aws/openshift/rosa-hcp-single-region/procedure/gather-cluster-login-id.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+export CLUSTER_NAME="$(terraform console <<<'local.rosa_cluster_name' | jq -r .)"
-# renovate: datasource=helm depName=camunda-platform versioning=regex:^(?<major>\d+)(\.(?<minor>\d+))?(\.(?<patch>\d+))?$ registryUrl=https://helm.camunda.io
-export CAMUNDA_HELM_CHART_VERSION="11.0.4"
diff --git a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/create-identity-secret.sh b/aws/rosa-hcp/camunda-versions/8.6/procedure/install/create-identity-secret.sh
deleted file mode 100644
index a5b074a26..000000000
--- a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/create-identity-secret.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-kubectl create secret generic identity-secret-for-components \
- --namespace camunda \
- --from-literal=connectors-secret="$CONNECTORS_SECRET" \
- --from-literal=console-secret="$CONSOLE_SECRET" \
- --from-literal=operate-secret="$OPERATE_SECRET" \
- --from-literal=optimize-secret="$OPTIMIZE_SECRET" \
- --from-literal=tasklist-secret="$TASKLIST_SECRET" \
- --from-literal=zeebe-secret="$ZEEBE_SECRET" \
- --from-literal=admin-password="$ADMIN_PASSWORD" \
- --from-literal=smtp-password=""
diff --git a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/generate-passwords.sh b/aws/rosa-hcp/camunda-versions/8.6/procedure/install/generate-passwords.sh
deleted file mode 100644
index 098ed07b6..000000000
--- a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/generate-passwords.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-export CONNECTORS_SECRET="$(openssl rand -hex 16)"
-export CONSOLE_SECRET="$(openssl rand -hex 16)"
-export OPERATE_SECRET="$(openssl rand -hex 16)"
-export OPTIMIZE_SECRET="$(openssl rand -hex 16)"
-export TASKLIST_SECRET="$(openssl rand -hex 16)"
-export ZEEBE_SECRET="$(openssl rand -hex 16)"
-export ADMIN_PASSWORD="$(openssl rand -hex 16)"
diff --git a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/base.yml b/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/base.yml
deleted file mode 100644
index 361b19c17..000000000
--- a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/base.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-global:
- elasticsearch:
- enabled: true # use the embbeded elasticsearch
-
-identityKeycloak:
- postgresql:
- enabled: true # use the embbeded database
- auth:
- existingSecret: identity-secret-for-components
-
-console:
- enabled: false # by default, console is not enabled
-
-webModeler:
- enabled: false # by default, webModeler is not enabled
-
- restapi:
- mail:
- existingSecret: identity-secret-for-components # reference the smtp password
- fromAddress: changeme@example.com # change this required value
diff --git a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/connectors-route.yml b/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/connectors-route.yml
deleted file mode 100644
index a3b8454d0..000000000
--- a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/connectors-route.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-connectors:
- inbound:
- mode: oauth
- env:
- - name: ZEEBE_CLIENT_BROKER_GATEWAY-ADDRESS
- value: camunda-zeebe-gateway.camunda.svc.cluster.local:26500
- - name: ZEEBE_CLIENT_SECURITY_PLAINTEXT
- value: 'false'
- - name: CAMUNDA_CLIENT_ZEEBE_CACERTIFICATEPATH
- value: /usr/local/certificates/tls.crt
- extraVolumeMounts:
- - name: certificate
- mountPath: /usr/local/certificates/tls.crt
- subPath: tls.crt
- extraVolumes:
- - name: certificate
- secret:
- secretName: camunda-platform-internal-service-certificate
- items:
- - key: tls.crt
- path: tls.crt
- defaultMode: 420
diff --git a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/domain.yml b/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/domain.yml
deleted file mode 100644
index 05c0535c8..000000000
--- a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/domain.yml
+++ /dev/null
@@ -1,71 +0,0 @@
----
-global:
-
- # the ingress is manages directly by the Route manifests ../manifest/routes.yml
- ingress:
- enabled: true
- className: openshift-default
- host: ${DOMAIN_NAME}
- tls:
- enabled: true
- # explicitely no secret as we rely on the IngressOperator to handle the TLS secret, if you use a custom certificate, you might want to fill this value
- secretName: ''
- annotations:
- route.openshift.io/termination: edge
- # enforce HSTS
- haproxy.router.openshift.io/hsts_header: max-age=31536000; includeSubDomains;preload
-
- identity:
-
- auth:
- publicIssuerUrl: https://${DOMAIN_NAME}/auth/realms/camunda-platform # replace this with a port of your choice when you will do port forwarding
-
- zeebe:
- existingSecret:
- name: identity-secret-for-components
- connectors:
- existingSecret:
- name: identity-secret-for-components
- operate:
- redirectUrl: https://${DOMAIN_NAME}/operate # replace this with a port of your choice when you will do port forwarding
- existingSecret:
- name: identity-secret-for-components
- tasklist:
- redirectUrl: https://${DOMAIN_NAME}/tasklist # replace this with a port of your choice when you will do port forwarding
- existingSecret:
- name: identity-secret-for-components
- optimize:
- redirectUrl: https://${DOMAIN_NAME}/optimize # replace this with a port of your choice when you will do port forwarding
- existingSecret:
- name: identity-secret-for-components
- webModeler:
- redirectUrl: https://${DOMAIN_NAME}/modeler
- console:
- redirectUrl: https://${DOMAIN_NAME}/console
- existingSecret:
- name: identity-secret-for-components
-
-identity:
- contextPath: /identity
- fullURL: https://${DOMAIN_NAME}/identity
-
-operate:
- contextPath: /operate
-
-tasklist:
- contextPath: /tasklist
-
-webModeler:
- contextPath: /modeler
-
-optimize:
- contextPath: /optimize
-
-zeebeGateway:
- contextPath: /zeebe
-
-console:
- contextPath: /console
-
-connectors:
- contextPath: /connectors
diff --git a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/no-domain.yml b/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/no-domain.yml
deleted file mode 100644
index d619e1c8d..000000000
--- a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/no-domain.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-global:
- identity:
-
- auth:
- publicIssuerUrl: http://localhost:18080/auth/realms/camunda-platform # replace this with a port of your choice when you will do port forwarding
-
- zeebe:
- existingSecret:
- name: identity-secret-for-components
- connectors:
- existingSecret:
- name: identity-secret-for-components
- operate:
- redirectUrl: http://localhost:8081 # replace this with a port of your choice when you do port forwarding
- existingSecret:
- name: identity-secret-for-components
- tasklist:
- redirectUrl: http://localhost:8082 # replace this with a port of your choice when you do port forwarding
- existingSecret:
- name: identity-secret-for-components
- optimize:
- redirectUrl: http://localhost:8083 # replace this with a port of your choice when you do port forwarding
- existingSecret:
- name: identity-secret-for-components
- webModeler:
- redirectUrl: http://localhost:8084
- console:
- redirectUrl: http://localhost:8085
- existingSecret:
- name: identity-secret-for-components
-
-identity:
- fullURL: http://localhost:8080 # replace this with a port of your choice when you do port forwarding
diff --git a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/operate-route.yml b/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/operate-route.yml
deleted file mode 100644
index 97b8cd532..000000000
--- a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/operate-route.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-operate:
- env:
- - name: CAMUNDA_OPERATE_ZEEBE_SECURE
- value: 'true'
- - name: CAMUNDA_OPERATE_ZEEBE_CERTIFICATEPATH
- value: /usr/local/operate/config/tls.crt
- - name: CAMUNDA_OPERATE_ZEEBE_GATEWAYADDRESS
- # camunda-zeebe-gateway.<namespace>.svc.cluster.local
- value: camunda-zeebe-gateway.camunda.svc.cluster.local:26500
- extraVolumeMounts:
- - name: certificate
- mountPath: /usr/local/operate/config/tls.crt
- subPath: tls.crt
- extraVolumes:
- - name: certificate
- secret:
- secretName: camunda-platform-internal-service-certificate
- items:
- - key: tls.crt
- path: tls.crt
- defaultMode: 420
diff --git a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/tasklist-route.yml b/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/tasklist-route.yml
deleted file mode 100644
index d21d85a9c..000000000
--- a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/tasklist-route.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-tasklist:
- env:
- - name: CAMUNDA_TASKLIST_ZEEBE_SECURE
- value: 'true'
- - name: CAMUNDA_TASKLIST_ZEEBE_CERTIFICATEPATH
- value: /usr/local/tasklist/config/tls.crt
- - name: CAMUNDA_TASKLIST_ZEEBE_GATEWAYADDRESS
- value: camunda-zeebe-gateway.camunda.svc.cluster.local:26500
- extraVolumeMounts:
- - name: certificate
- mountPath: /usr/local/tasklist/config/tls.crt
- subPath: tls.crt
- extraVolumes:
- - name: certificate
- secret:
- secretName: camunda-platform-internal-service-certificate
- items:
- - key: tls.crt
- path: tls.crt
- defaultMode: 420
diff --git a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/zeebe-gateway-route.yml b/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/zeebe-gateway-route.yml
deleted file mode 100644
index e8f72b3f3..000000000
--- a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/zeebe-gateway-route.yml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-zeebeGateway:
-
- service:
- annotations:
- # Generate a TLS certificate for the Zeebe Gateway Service
- service.beta.openshift.io/serving-cert-secret-name: camunda-platform-internal-service-certificate
-
- ingress:
- enabled: true
- grpc:
- annotations:
- haproxy.router.openshift.io/timeout: 300s
- route.openshift.io/termination: reencrypt
- # reference the re-encrypt secret
- route.openshift.io/destination-ca-certificate-secret: camunda-platform-internal-service-certificate
- className: openshift-default
- tls:
- enabled: true
- # explicitly no secret, as we rely on the IngressOperator to handle the TLS secret. If you use a custom certificate, you may want to set this value
- secretName: ''
- host: zeebe-${DOMAIN_NAME}
-
- # mount the Service certificate in the pod
- env:
- - name: ZEEBE_GATEWAY_SECURITY_ENABLED
- value: 'true'
- - name: ZEEBE_GATEWAY_SECURITY_CERTIFICATECHAINPATH
- value: /usr/local/zeebe/config/tls.crt
- - name: ZEEBE_GATEWAY_SECURITY_PRIVATEKEYPATH
- value: /usr/local/zeebe/config/tls.key
-
- - name: ZEEBE_GATEWAY_CLUSTER_SECURITY_ENABLED
- value: 'true'
- - name: ZEEBE_GATEWAY_CLUSTER_SECURITY_CERTIFICATECHAINPATH
- value: /usr/local/zeebe/config/tls.crt
- - name: ZEEBE_GATEWAY_CLUSTER_SECURITY_PRIVATEKEYPATH
- value: /usr/local/zeebe/config/tls.key
- extraVolumeMounts:
- - name: certificate
- mountPath: /usr/local/zeebe/config/tls.crt
- subPath: tls.crt
- - name: key
- mountPath: /usr/local/zeebe/config/tls.key
- subPath: tls.key
- extraVolumes:
- - name: certificate
- secret:
- secretName: camunda-platform-internal-service-certificate
- items:
- - key: tls.crt
- path: tls.crt
- defaultMode: 420
- - name: key
- secret:
- secretName: camunda-platform-internal-service-certificate
- items:
- - key: tls.key
- path: tls.key
- defaultMode: 420
diff --git a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/install-chart.sh b/aws/rosa-hcp/camunda-versions/8.6/procedure/install/install-chart.sh
deleted file mode 100644
index 90055512a..000000000
--- a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/install-chart.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-helm upgrade --install \
- camunda camunda-platform \
- --repo https://helm.camunda.io \
- --version "$CAMUNDA_HELM_CHART_VERSION" \
- --namespace camunda \
- -f generated-values.yml
diff --git a/aws/rosa-hcp/camunda-versions/8.7/.tool-versions b/aws/rosa-hcp/camunda-versions/8.7/.tool-versions
deleted file mode 100644
index a947d071f..000000000
--- a/aws/rosa-hcp/camunda-versions/8.7/.tool-versions
+++ /dev/null
@@ -1 +0,0 @@
-# TODO: must be filled for integration tests
diff --git a/aws/rosa-hcp/camunda-versions/8.7/README.md b/aws/rosa-hcp/camunda-versions/8.7/README.md
deleted file mode 100644
index 78672bf7c..000000000
--- a/aws/rosa-hcp/camunda-versions/8.7/README.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# 8.7
-
-
-## Modules
-
-| Name | Source | Version |
-|------|--------|---------|
-| [rosa\_cluster](#module\_rosa\_cluster) | git::https://github.com/camunda/camunda-tf-rosa//modules/rosa-hcp | v2.2.0 |
-## Resources
-
-No resources.
-## Inputs
-
-No inputs.
-## Outputs
-
-| Name | Description |
-|------|-------------|
-| [aws\_caller\_identity\_account\_id](#output\_aws\_caller\_identity\_account\_id) | The AWS account ID of the caller. This is the account under which the Terraform code is being executed. |
-| [cluster\_console\_url](#output\_cluster\_console\_url) | The URL endpoint for accessing the OpenShift web console. This endpoint provides a web-based user interface for managing the OpenShift cluster. |
-| [cluster\_id](#output\_cluster\_id) | The unique identifier of the OpenShift cluster created on Red Hat OpenShift Service on AWS (ROSA). This ID is used to reference the cluster in subsequent operations. |
-| [oidc\_provider\_id](#output\_oidc\_provider\_id) | OIDC provider for the ROSA cluster. Allows adding additional IAM Role for Service Accounts (IRSA) mappings. |
-| [openshift\_api\_url](#output\_openshift\_api\_url) | The endpoint URL for accessing the OpenShift API. This endpoint is used to interact with the OpenShift cluster's API server. |
-| [private\_subnet\_ids](#output\_private\_subnet\_ids) | A comma-separated list of private subnet IDs in the VPC. These subnets are typically used for internal resources that do not require direct internet access. |
-| [public\_subnet\_ids](#output\_public\_subnet\_ids) | A comma-separated list of public subnet IDs in the VPC. These subnets are typically used for resources that require internet access. |
-
diff --git a/aws/rosa-hcp/camunda-versions/8.7/cluster.tf b/aws/rosa-hcp/camunda-versions/8.7/cluster.tf
deleted file mode 100644
index b857d8dbd..000000000
--- a/aws/rosa-hcp/camunda-versions/8.7/cluster.tf
+++ /dev/null
@@ -1,67 +0,0 @@
-locals {
- rosa_cluster_name = "my-rosa" # Change this to a name of your choice
-
- rosa_cluster_zones = ["eu-north-1a", "eu-north-1b", "eu-north-1c"] # Adjust to your needs and align with your value of AWS_REGION
-
- rosa_admin_username = "kubeadmin"
- rosa_admin_password = "CHANGEME1234r!" # Change this to your own admin password
-}
-
-module "rosa_cluster" {
- source = "git::https://github.com/camunda/camunda-tf-rosa//modules/rosa-hcp?ref=v2.2.0"
-
- cluster_name = local.rosa_cluster_name
-
- availability_zones = local.rosa_cluster_zones
-
- # Set CIDR ranges or use the defaults
- vpc_cidr_block = "10.0.0.0/16"
- machine_cidr_block = "10.0.0.0/18"
- service_cidr_block = "10.0.128.0/18"
- pod_cidr_block = "10.0.64.0/18"
-
- # admin access
- htpasswd_username = local.rosa_admin_username
- htpasswd_password = local.rosa_admin_password
-
- # Default node type for the OpenShift cluster
- compute_node_instance_type = "m7i.xlarge"
- replicas = 6
-}
-
-# Outputs of the parent module
-
-output "public_subnet_ids" {
- value = module.rosa_cluster.public_subnet_ids
- description = "A comma-separated list of public subnet IDs in the VPC. These subnets are typically used for resources that require internet access."
-}
-
-output "private_subnet_ids" {
- value = module.rosa_cluster.private_subnet_ids
- description = "A comma-separated list of private subnet IDs in the VPC. These subnets are typically used for internal resources that do not require direct internet access."
-}
-
-output "cluster_id" {
- value = module.rosa_cluster.cluster_id
- description = "The unique identifier of the OpenShift cluster created on Red Hat OpenShift Service on AWS (ROSA). This ID is used to reference the cluster in subsequent operations."
-}
-
-output "oidc_provider_id" {
- value = module.rosa_cluster.oidc_provider_id
- description = "OIDC provider for the ROSA cluster. Allows adding additional IAM Role for Service Accounts (IRSA) mappings."
-}
-
-output "aws_caller_identity_account_id" {
- value = module.rosa_cluster.aws_caller_identity_account_id
- description = "The AWS account ID of the caller. This is the account under which the Terraform code is being executed."
-}
-
-output "openshift_api_url" {
- value = module.rosa_cluster.openshift_api_url
- description = "The endpoint URL for accessing the OpenShift API. This endpoint is used to interact with the OpenShift cluster's API server."
-}
-
-output "cluster_console_url" {
- value = module.rosa_cluster.cluster_console_url
- description = "The URL endpoint for accessing the OpenShift web console. This endpoint provides a web-based user interface for managing the OpenShift cluster."
-}
diff --git a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/.shellcheckrc b/aws/rosa-hcp/camunda-versions/8.7/procedure/install/.shellcheckrc
deleted file mode 100644
index 5f4b7a68f..000000000
--- a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/.shellcheckrc
+++ /dev/null
@@ -1 +0,0 @@
-disable=SC2148,SC2155
diff --git a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/no-scc.yml b/aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/no-scc.yml
deleted file mode 100644
index 9750dc3a7..000000000
--- a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/no-scc.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-global:
- compatibility:
- openshift:
- adaptSecurityContext: disabled # do not enforce SCCs, default
diff --git a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/scc.yml b/aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/scc.yml
deleted file mode 100644
index 0f9942fec..000000000
--- a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/scc.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-global:
- compatibility:
- openshift:
- adaptSecurityContext: force # enforce compatibility with SCCs
diff --git a/generic/kubernetes/single-region/procedure/assemble-envsubst-values.sh b/generic/kubernetes/single-region/procedure/assemble-envsubst-values.sh
new file mode 100755
index 000000000..b25f42d9c
--- /dev/null
+++ b/generic/kubernetes/single-region/procedure/assemble-envsubst-values.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+# Generate the final values
+envsubst < values.yml > generated-values.yml
+
+echo "Final generated-values.yml result"
+cat generated-values.yml
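Note: assemble-envsubst-values.sh only substitutes variables that are exported in the calling environment; anything unset is replaced with an empty string. A minimal usage sketch (the DOMAIN_NAME value is purely illustrative):

# export every variable referenced in values.yml before running the helper
export DOMAIN_NAME="camunda.example.com"   # illustrative placeholder
./assemble-envsubst-values.sh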
diff --git a/generic/kubernetes/single-region/procedure/check-deployment-ready.sh b/generic/kubernetes/single-region/procedure/check-deployment-ready.sh
new file mode 100755
index 000000000..803991f42
--- /dev/null
+++ b/generic/kubernetes/single-region/procedure/check-deployment-ready.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+while true; do
+ kubectl get pods -n camunda --output=wide
+
+ if [ "$(kubectl get pods -n camunda --field-selector=status.phase!=Running -o name | wc -l)" -eq 0 ] &&
+ [ "$(kubectl get pods -n camunda -o json | jq -r '.items[] | select(.status.containerStatuses[]?.ready == false)' | wc -l)" -eq 0 ]; then
+ echo "All pods are Running and Healthy - Installation completed!"
+ exit 0
+ else
+ echo "Some pods are not Running or Healthy, please wait..."
+ sleep 5
+ fi
+done
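The readiness loop above polls indefinitely; in CI it can be worth bounding the wait. A sketch using the coreutils timeout command (the 10-minute limit is an arbitrary choice):

# abort after 10 minutes instead of polling forever
timeout 600 ./check-deployment-ready.sh || { echo "Pods did not become ready in time"; exit 1; }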
diff --git a/generic/kubernetes/single-region/procedure/check-zeebe-cluster-topology-output.json b/generic/kubernetes/single-region/procedure/check-zeebe-cluster-topology-output.json
new file mode 100644
index 000000000..dd1fd85cf
--- /dev/null
+++ b/generic/kubernetes/single-region/procedure/check-zeebe-cluster-topology-output.json
@@ -0,0 +1,77 @@
+{
+ "brokers": [
+ {
+ "nodeId": 0,
+ "host": "camunda-zeebe-0.camunda-zeebe",
+ "port": 26501,
+ "partitions": [
+ {
+ "partitionId": 1,
+ "role": "leader",
+ "health": "healthy"
+ },
+ {
+ "partitionId": 2,
+ "role": "follower",
+ "health": "healthy"
+ },
+ {
+ "partitionId": 3,
+ "role": "follower",
+ "health": "healthy"
+ }
+ ],
+ "version": "8.6.z"
+ },
+ {
+ "nodeId": 1,
+ "host": "camunda-zeebe-1.camunda-zeebe",
+ "port": 26501,
+ "partitions": [
+ {
+ "partitionId": 1,
+ "role": "follower",
+ "health": "healthy"
+ },
+ {
+ "partitionId": 2,
+ "role": "leader",
+ "health": "healthy"
+ },
+ {
+ "partitionId": 3,
+ "role": "follower",
+ "health": "healthy"
+ }
+ ],
+ "version": "8.6.z"
+ },
+ {
+ "nodeId": 2,
+ "host": "camunda-zeebe-2.camunda-zeebe",
+ "port": 26501,
+ "partitions": [
+ {
+ "partitionId": 1,
+ "role": "follower",
+ "health": "healthy"
+ },
+ {
+ "partitionId": 2,
+ "role": "follower",
+ "health": "healthy"
+ },
+ {
+ "partitionId": 3,
+ "role": "leader",
+ "health": "healthy"
+ }
+ ],
+ "version": "8.6.z"
+ }
+ ],
+ "clusterSize": 3,
+ "partitionsCount": 3,
+ "replicationFactor": 3,
+ "gatewayVersion": "8.6.z"
+}
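Given a response shaped like the sample above, overall health can be asserted mechanically rather than eyeballed. A jq sketch (topology.json is a hypothetical file holding the captured output):

# exit non-zero unless every partition reports "healthy"
jq -e '[.brokers[].partitions[].health] | all(. == "healthy")' topology.json \
  && echo "all partitions healthy"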
diff --git a/generic/kubernetes/single-region/procedure/check-zeebe-cluster-topology.sh b/generic/kubernetes/single-region/procedure/check-zeebe-cluster-topology.sh
new file mode 100755
index 000000000..cdc30b6f5
--- /dev/null
+++ b/generic/kubernetes/single-region/procedure/check-zeebe-cluster-topology.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+ZEEBE_ADDRESS_REST="https://$DOMAIN_NAME/core"
+ZEEBE_AUTHORIZATION_SERVER_URL="https://$DOMAIN_NAME/auth/realms/camunda-platform/protocol/openid-connect/token"
+
+# Generate a temporary token from the authorization server (keycloak)
+TOKEN=$(curl --location --request POST "${ZEEBE_AUTHORIZATION_SERVER_URL}" \
+--header "Content-Type: application/x-www-form-urlencoded" \
+--data-urlencode "client_id=${ZEEBE_CLIENT_ID}" \
+--data-urlencode "client_secret=${ZEEBE_CLIENT_SECRET}" \
+--data-urlencode "grant_type=client_credentials" | jq '.access_token' -r)
+
+# Show the zeebe cluster topology
+curl --header "Authorization: Bearer ${TOKEN}" "${ZEEBE_ADDRESS_REST}/v2/topology"
diff --git a/generic/kubernetes/single-region/tests/helm-values/identity.yml b/generic/kubernetes/single-region/tests/helm-values/identity.yml
new file mode 100644
index 000000000..210f0d265
--- /dev/null
+++ b/generic/kubernetes/single-region/tests/helm-values/identity.yml
@@ -0,0 +1,79 @@
+---
+# TODO: [release-duty] when releasing, update the link to 8.7
+# keep it synced with https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform-alpha/test/integration/scenarios/common/values-integration-test.yaml
+# it generates the CI user used to connect to the platform
+
+identity:
+ # Keycloak client seed which is used to query Camunda APIs.
+ env:
+ - name: KEYCLOAK_CLIENTS_2_ID
+ valueFrom:
+ secretKeyRef:
+ name: identity-secret-for-components-integration
+ key: identity-admin-client-id
+ - name: KEYCLOAK_CLIENTS_2_NAME
+ valueFrom:
+ secretKeyRef:
+ name: identity-secret-for-components-integration
+ key: identity-admin-client-id
+ - name: KEYCLOAK_CLIENTS_2_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: identity-secret-for-components-integration
+ key: identity-admin-client-secret
+ - name: KEYCLOAK_CLIENTS_2_REDIRECT_URIS_0
+ value: /dummy
+ - name: KEYCLOAK_CLIENTS_2_ROOT_URL
+ value: http://dummy
+ - name: KEYCLOAK_CLIENTS_2_TYPE
+ value: CONFIDENTIAL
+ # Identity access.
+ - name: KEYCLOAK_CLIENTS_2_PERMISSIONS_0_RESOURCE_SERVER_ID
+ value: camunda-identity-resource-server
+ - name: KEYCLOAK_CLIENTS_2_PERMISSIONS_0_DEFINITION
+ value: read
+ - name: KEYCLOAK_CLIENTS_2_PERMISSIONS_1_RESOURCE_SERVER_ID
+ value: camunda-identity-resource-server
+ - name: KEYCLOAK_CLIENTS_2_PERMISSIONS_1_DEFINITION
+ value: write
+ # Operate access.
+ - name: KEYCLOAK_CLIENTS_2_PERMISSIONS_2_RESOURCE_SERVER_ID
+ value: operate-api
+ - name: KEYCLOAK_CLIENTS_2_PERMISSIONS_2_DEFINITION
+ value: read:*
+ - name: KEYCLOAK_CLIENTS_2_PERMISSIONS_3_RESOURCE_SERVER_ID
+ value: operate-api
+ - name: KEYCLOAK_CLIENTS_2_PERMISSIONS_3_DEFINITION
+ value: write:*
+ # Tasklist access.
+ - name: KEYCLOAK_CLIENTS_2_PERMISSIONS_4_RESOURCE_SERVER_ID
+ value: tasklist-api
+ - name: KEYCLOAK_CLIENTS_2_PERMISSIONS_4_DEFINITION
+ value: read:*
+ - name: KEYCLOAK_CLIENTS_2_PERMISSIONS_5_RESOURCE_SERVER_ID
+ value: tasklist-api
+ - name: KEYCLOAK_CLIENTS_2_PERMISSIONS_5_DEFINITION
+ value: write:*
+ # Optimize access.
+ - name: KEYCLOAK_CLIENTS_2_PERMISSIONS_6_RESOURCE_SERVER_ID
+ value: optimize-api
+ - name: KEYCLOAK_CLIENTS_2_PERMISSIONS_6_DEFINITION
+ value: write:*
+ # Zeebe access.
+ - name: KEYCLOAK_CLIENTS_2_PERMISSIONS_7_RESOURCE_SERVER_ID
+ value: zeebe-api
+ - name: KEYCLOAK_CLIENTS_2_PERMISSIONS_7_DEFINITION
+ value: write:*
+ # WebModeler access.
+ # NOTE: This should only be set in the chart-with-web-modeler scenarios,
+ # but since Helm doesn't support merging lists, it's added here.
+ # It can be removed once the env vars can be configured via a ConfigMap.
+ - name: KEYCLOAK_CLIENTS_2_PERMISSIONS_8_RESOURCE_SERVER_ID
+ value: web-modeler-api
+ - name: KEYCLOAK_CLIENTS_2_PERMISSIONS_8_DEFINITION
+ value: write:*
+ # Console access.
+ - name: KEYCLOAK_CLIENTS_2_PERMISSIONS_9_RESOURCE_SERVER_ID
+ value: console-api
+ - name: KEYCLOAK_CLIENTS_2_PERMISSIONS_9_DEFINITION
+ value: write:*
diff --git a/generic/kubernetes/single-region/tests/helm-values/registry.yml b/generic/kubernetes/single-region/tests/helm-values/registry.yml
new file mode 100644
index 000000000..77635dce9
--- /dev/null
+++ b/generic/kubernetes/single-region/tests/helm-values/registry.yml
@@ -0,0 +1,20 @@
+---
+# This file contains specific values used during the integration tests
+
+
+# Auth to avoid Docker download rate limit.
+# https://docs.docker.com/docker-hub/download-rate-limit/
+identityKeycloak:
+ image:
+ pullSecrets:
+ - name: index-docker-io
+
+global:
+ image:
+ pullSecrets:
+ - name: index-docker-io
+
+elasticsearch:
+ global:
+ imagePullSecrets:
+ - name: index-docker-io
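These values assume an index-docker-io image pull secret already exists in the namespace. One way to create it, assuming Docker Hub credentials are available in the environment (the variable names are illustrative):

kubectl create secret docker-registry index-docker-io \
  --namespace camunda \
  --docker-server=registry-1.docker.io \
  --docker-username="$DOCKER_USERNAME" \
  --docker-password="$DOCKER_PASSWORD"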
diff --git a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/base.yml b/generic/openshift/single-region/helm-values/base.yml
similarity index 57%
rename from aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/base.yml
rename to generic/openshift/single-region/helm-values/base.yml
index 4953c48bd..45d45db35 100644
--- a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/base.yml
+++ b/generic/openshift/single-region/helm-values/base.yml
@@ -26,4 +26,15 @@ webModeler:
restapi:
mail:
+ existingSecret: identity-secret-for-components # references the SMTP password
fromAddress: changeme@example.com # change this required value
+
+webModelerPostgresql:
+ # Deploys a PostgreSQL database for Web Modeler.
+ # If you enable Web Modeler, you must either set this to true or use an external database.
+ enabled: false
+ auth:
+ existingSecret: identity-secret-for-components
+ secretKeys:
+ adminPasswordKey: identity-webmodeler-postgres-admin-password
+ userPasswordKey: identity-webmodeler-postgres-user-password
diff --git a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/connectors-route.yml b/generic/openshift/single-region/helm-values/connectors-route.yml
similarity index 100%
rename from aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/connectors-route.yml
rename to generic/openshift/single-region/helm-values/connectors-route.yml
diff --git a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/core-route.yml b/generic/openshift/single-region/helm-values/core-route.yml
similarity index 100%
rename from aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/core-route.yml
rename to generic/openshift/single-region/helm-values/core-route.yml
diff --git a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/domain.yml b/generic/openshift/single-region/helm-values/domain.yml
similarity index 70%
rename from aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/domain.yml
rename to generic/openshift/single-region/helm-values/domain.yml
index 93e54f979..eeb83aa67 100644
--- a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/domain.yml
+++ b/generic/openshift/single-region/helm-values/domain.yml
@@ -1,6 +1,5 @@
---
global:
-
 # the ingress is managed directly by the Route manifests ../manifest/routes.yml
ingress:
enabled: true
@@ -15,31 +14,27 @@ global:
# enforce HSTS
haproxy.router.openshift.io/hsts_header: max-age=31536000; includeSubDomains;preload
-
identity:
+
auth:
 publicIssuerUrl: https://${DOMAIN_NAME}/auth/realms/camunda-platform # replace this with a port of your choice when you do port forwarding
 optimize:
 redirectUrl: https://${DOMAIN_NAME}/optimize # replace this with a port of your choice when you do port forwarding
- existingSecret:
- name: identity-secret-for-components
+ existingSecret: identity-secret-for-components
webModeler:
redirectUrl: https://${DOMAIN_NAME}/modeler
console:
- redirectUrl: https://${DOMAIN_NAME}/console
- existingSecret:
- name: identity-secret-for-components
+ redirectUrl: https://${DOMAIN_NAME}/
+ existingSecret: identity-secret-for-components
core:
- redirectUrl: http://${DOMAIN_NAME}
- existingSecret:
- name: identity-secret-for-components
+ redirectUrl: https://${DOMAIN_NAME}/core
+ existingSecret: identity-secret-for-components
connectors:
- existingSecret:
- name: identity-secret-for-components
+ existingSecret: identity-secret-for-components
admin:
- existingSecret:
- name: identity-secret-for-components
+ enabled: true
+ existingSecret: identity-secret-for-components
identity:
contextPath: /identity
@@ -51,11 +46,11 @@ webModeler:
optimize:
contextPath: /optimize
-core:
+console:
contextPath: /
-console:
- contextPath: /console
+core:
+ contextPath: /core
connectors:
contextPath: /connectors
diff --git a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/no-domain.yml b/generic/openshift/single-region/helm-values/no-domain.yml
similarity index 62%
rename from aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/no-domain.yml
rename to generic/openshift/single-region/helm-values/no-domain.yml
index cee21da60..2e493725b 100644
--- a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/no-domain.yml
+++ b/generic/openshift/single-region/helm-values/no-domain.yml
@@ -7,24 +7,20 @@ global:
optimize:
 redirectUrl: http://localhost:8083 # replace this with a port of your choice when you do port forwarding
- existingSecret:
- name: identity-secret-for-components
+ existingSecret: identity-secret-for-components
webModeler:
redirectUrl: http://localhost:8084
console:
redirectUrl: http://localhost:8085
- existingSecret:
- name: identity-secret-for-components
+ existingSecret: identity-secret-for-components
core:
redirectUrl: http://localhost:8082
- existingSecret:
- name: identity-secret-for-components
+ existingSecret: identity-secret-for-components
connectors:
- existingSecret:
- name: identity-secret-for-components
+ existingSecret: identity-secret-for-components
admin:
- existingSecret:
- name: identity-secret-for-components
+ enabled: true
+ existingSecret: identity-secret-for-components
identity:
 fullURL: http://localhost:8080 # replace this with a port of your choice when you do port forwarding
diff --git a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/no-scc.yml b/generic/openshift/single-region/helm-values/no-scc.yml
similarity index 100%
rename from aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/no-scc.yml
rename to generic/openshift/single-region/helm-values/no-scc.yml
diff --git a/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/scc.yml b/generic/openshift/single-region/helm-values/scc.yml
similarity index 100%
rename from aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/scc.yml
rename to generic/openshift/single-region/helm-values/scc.yml
diff --git a/generic/openshift/single-region/procedure/.shellcheckrc b/generic/openshift/single-region/procedure/.shellcheckrc
new file mode 100644
index 000000000..40e02e71c
--- /dev/null
+++ b/generic/openshift/single-region/procedure/.shellcheckrc
@@ -0,0 +1 @@
+disable=SC2155
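SC2155 warns about the pattern export VAR="$(cmd)" because the export masks the command's exit status; the procedure scripts below use exactly that pattern, hence the directive. For reference, the shape ShellCheck would prefer instead:

# declare and assign separately so a failing command substitution is not masked
CONNECTORS_SECRET="$(openssl rand -hex 16)"
export CONNECTORS_SECRET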
diff --git a/generic/openshift/single-region/procedure/assemble-envsubst-values.sh b/generic/openshift/single-region/procedure/assemble-envsubst-values.sh
new file mode 100755
index 000000000..b25f42d9c
--- /dev/null
+++ b/generic/openshift/single-region/procedure/assemble-envsubst-values.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+# Generate the final values
+envsubst < values.yml > generated-values.yml
+
+echo "Final generated-values.yml result"
+cat generated-values.yml
diff --git a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/chart-env.sh b/generic/openshift/single-region/procedure/chart-env.sh
old mode 100644
new mode 100755
similarity index 58%
rename from aws/rosa-hcp/camunda-versions/8.7/procedure/install/chart-env.sh
rename to generic/openshift/single-region/procedure/chart-env.sh
index c2ec1d239..4922e133a
--- a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/chart-env.sh
+++ b/generic/openshift/single-region/procedure/chart-env.sh
@@ -1,3 +1,6 @@
+#!/bin/bash
+
# The Camunda 8 Helm Chart version
 # renovate: datasource=helm depName=camunda-platform versioning=regex:^12(\.(?<minor>\d+))?(\.(?<patch>\d+))?$ registryUrl=https://helm.camunda.io
-export CAMUNDA_HELM_CHART_VERSION="0.0.0-snapshot-alpha"
+export CAMUNDA_HELM_CHART_VERSION="0.0.0-snapshot-alpha-8.8"
+# TODO: [release-duty] before the release, update this!
diff --git a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/create-identity-secret.sh b/generic/openshift/single-region/procedure/create-identity-secret.sh
old mode 100644
new mode 100755
similarity index 71%
rename from aws/rosa-hcp/camunda-versions/8.7/procedure/install/create-identity-secret.sh
rename to generic/openshift/single-region/procedure/create-identity-secret.sh
index 2ae8ab49b..6e1f09e5f
--- a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/create-identity-secret.sh
+++ b/generic/openshift/single-region/procedure/create-identity-secret.sh
@@ -1,4 +1,6 @@
-kubectl create secret generic identity-secret-for-components \
+#!/bin/bash
+
+oc create secret generic identity-secret-for-components \
--namespace camunda \
--from-literal=identity-connectors-client-token="$CONNECTORS_SECRET" \
--from-literal=identity-console-client-token="$CONSOLE_SECRET" \
@@ -8,4 +10,6 @@ kubectl create secret generic identity-secret-for-components \
--from-literal=identity-firstuser-password="$FIRST_USER_PASSWORD" \
--from-literal=identity-keycloak-postgresql-user-password="$KEYCLOAK_PG_USER_PASSWORD" \
--from-literal=identity-keycloak-postgresql-admin-password="$KEYCLOAK_PG_ADMIN_PASSWORD" \
+ --from-literal=identity-webmodeler-postgres-admin-password="$WEBMODELER_PG_ADMIN_PASSWORD" \
+ --from-literal=identity-webmodeler-postgres-user-password="$WEBMODELER_PG_USER_PASSWORD" \
--from-literal=smtp-password=""
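Once the secret is created, its keys can be verified without printing the values. A quick sketch:

# list only the key names stored in the secret
oc get secret identity-secret-for-components --namespace camunda \
  -o jsonpath='{.data}' | jq 'keys'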
diff --git a/generic/openshift/single-region/procedure/enable-ingress-http2.sh b/generic/openshift/single-region/procedure/enable-ingress-http2.sh
new file mode 100755
index 000000000..44567af6a
--- /dev/null
+++ b/generic/openshift/single-region/procedure/enable-ingress-http2.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+oc -n openshift-ingress-operator annotate "ingresscontrollers/$OC_INGRESS_CONTROLLER_NAME" ingress.operator.openshift.io/default-enable-http2=true
diff --git a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/generate-passwords.sh b/generic/openshift/single-region/procedure/generate-passwords.sh
old mode 100644
new mode 100755
similarity index 75%
rename from aws/rosa-hcp/camunda-versions/8.7/procedure/install/generate-passwords.sh
rename to generic/openshift/single-region/procedure/generate-passwords.sh
index 263c18fa3..66e0534ad
--- a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/generate-passwords.sh
+++ b/generic/openshift/single-region/procedure/generate-passwords.sh
@@ -1,3 +1,5 @@
+#!/bin/bash
+
export CONNECTORS_SECRET="$(openssl rand -hex 16)"
export CONSOLE_SECRET="$(openssl rand -hex 16)"
export OPTIMIZE_SECRET="$(openssl rand -hex 16)"
@@ -6,3 +8,5 @@ export ADMIN_PASSWORD="$(openssl rand -hex 16)"
export FIRST_USER_PASSWORD="$(openssl rand -hex 16)"
export KEYCLOAK_PG_USER_PASSWORD="$(openssl rand -hex 16)"
export KEYCLOAK_PG_ADMIN_PASSWORD="$(openssl rand -hex 16)"
+export WEBMODELER_PG_ADMIN_PASSWORD="$(openssl rand -hex 16)"
+export WEBMODELER_PG_USER_PASSWORD="$(openssl rand -hex 16)"
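Because generate-passwords.sh only exports variables, executing it in a subshell would discard them; it needs to be sourced so the exports persist in the calling shell. A usage sketch:

# source rather than execute so the exports survive in this shell
source ./generate-passwords.sh
echo "ADMIN_PASSWORD is set: ${ADMIN_PASSWORD:+yes}"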
diff --git a/generic/openshift/single-region/procedure/get-ingress-http2-status.sh b/generic/openshift/single-region/procedure/get-ingress-http2-status.sh
new file mode 100755
index 000000000..f7358133d
--- /dev/null
+++ b/generic/openshift/single-region/procedure/get-ingress-http2-status.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+# List your IngressControllers
+oc -n openshift-ingress-operator get ingresscontrollers
+
+# Set OC_INGRESS_CONTROLLER_NAME to your IngressController name from the previous command
+export OC_INGRESS_CONTROLLER_NAME=default
+oc -n openshift-ingress-operator get "ingresscontrollers/$OC_INGRESS_CONTROLLER_NAME" -o json | jq '.metadata.annotations."ingress.operator.openshift.io/default-enable-http2"'
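After enable-ingress-http2.sh (shown earlier) has annotated the IngressController, re-running this status script should print "true". A combined sketch:

export OC_INGRESS_CONTROLLER_NAME=default   # adjust to your controller name
./enable-ingress-http2.sh
./get-ingress-http2-status.sh               # expected final output: "true"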
diff --git a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/install-chart.sh b/generic/openshift/single-region/procedure/install-chart.sh
old mode 100644
new mode 100755
similarity index 84%
rename from aws/rosa-hcp/camunda-versions/8.7/procedure/install/install-chart.sh
rename to generic/openshift/single-region/procedure/install-chart.sh
index f4c272088..9914184a6
--- a/aws/rosa-hcp/camunda-versions/8.7/procedure/install/install-chart.sh
+++ b/generic/openshift/single-region/procedure/install-chart.sh
@@ -1,13 +1,15 @@
-# helm upgrade --install \
-# camunda camunda-platform \
-# --repo https://helm.camunda.io \
-# --version "$CAMUNDA_HELM_CHART_VERSION" \
-# --namespace camunda \
-# -f generated-values.yml
+#!/bin/bash
-# TODO: before the release, update this!
+# TODO: [release-duty] before the release, update this!
helm upgrade --install \
camunda oci://ghcr.io/camunda/helm/camunda-platform \
--version "$CAMUNDA_HELM_CHART_VERSION" --namespace camunda \
-f generated-values.yml
+
+# helm upgrade --install \
+# camunda camunda-platform \
+# --repo https://helm.camunda.io \
+# --version "$CAMUNDA_HELM_CHART_VERSION" \
+# --namespace camunda \
+# -f generated-values.yml
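Once the chart is installed, the release can be confirmed before moving on. A quick sketch:

# confirm the release deployed and inspect its status
helm status camunda --namespace camunda
helm list --namespace camunda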
diff --git a/generic/openshift/single-region/procedure/setup-application-domain.sh b/generic/openshift/single-region/procedure/setup-application-domain.sh
new file mode 100755
index 000000000..8b78ff57e
--- /dev/null
+++ b/generic/openshift/single-region/procedure/setup-application-domain.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+OPENSHIFT_APPS_DOMAIN="$(oc get ingresses.config.openshift.io cluster -o jsonpath='{.spec.domain}')"
+export DOMAIN_NAME="camunda.$OPENSHIFT_APPS_DOMAIN"
+
+echo "Camunda 8 will be reachable from $DOMAIN_NAME"
diff --git a/justfile b/justfile
index 4918430fb..66328ff19 100644
--- a/justfile
+++ b/justfile
@@ -1,6 +1,8 @@
regenerate-aws-ec2-golden-file:
#!/bin/bash
+ set -euxo pipefail
+
cd {{justfile_directory()}}/aws/ec2/terraform
cp {{justfile_directory()}}/aws/ec2/test/fixtures/provider_override.tf .
export AWS_REGION="eu-west-2"
@@ -11,15 +13,76 @@ regenerate-aws-ec2-golden-file:
rm -rf tfplan tfplan.json
rm -rf provider_override.tf
+regenerate-golden-file module_dir backend_bucket_region backend_bucket_name backend_bucket_key relative_output_path="./test/golden/":
+ #!/bin/bash
+ set -euxo pipefail
+
+ cd {{ justfile_directory() }}/{{ module_dir }}
+ terraform init \
+ -backend-config="bucket={{ backend_bucket_name }}" \
+ -backend-config="key={{ backend_bucket_key }}" \
+ -backend-config="region={{ backend_bucket_region }}"
+
+ # we always use the same region and a fake RHCS token to produce a deterministic output
+ RHCS_TOKEN="" AWS_REGION="eu-west-2" terraform plan -out=tfplan
+ terraform show -json tfplan | jq > tfplan.json
+ rm -f tfplan
+ mkdir -p {{ relative_output_path }}
+
+ # redact sensitive/user-specific values
+ sed 's/"arn:[^\"]*\"/"ARN_REDACTED"/g' tfplan.json > tfplan-redacted.json
+ rm -f tfplan.json
+ sed -E 's/"arn:([^"\\]|\\.)*"/"ARN_REDACTED"/g; s/'\''arn:([^'\''\\]|\\.)*'\''/'\''ARN_REDACTED'\''/g' tfplan-redacted.json > tfplan.json
+ rm -f tfplan-redacted.json
+
+ # sort keys for a stable ordering
+ jq --sort-keys '.planned_values.root_module' tfplan.json > tfplan-redacted.json
+ rm -f tfplan.json
+
+ # transform the tf output so resources are keyed by address, keeping the ordering simple
+ jq 'def transform:
+ if type == "array" then
+ . as $arr |
+ if $arr | length > 0 and (.[0] | type == "object" and has("address")) then
+ # Transform array elements into an object with address as the key
+ map({ (.address): with_entries(select(.key != "address")) | map_values(transform) }) | add
+ else
+ .
+ end
+ elif type == "object" then
+ if has("address") and .address != null then
+ { (.address): with_entries(select(.key != "address")) | map_values(transform) }
+ elif has("resources") then
+ { "resources": map(transform) | add }
+ elif has("child_modules") then
+ { "child_modules": map(transform) | add }
+ else
+ with_entries(.value |= transform)
+ end
+ else
+ .
+ end;
+ transform' tfplan-redacted.json > tfplan.json
+ rm -f tfplan-redacted.json
+
+ # final sort
+ jq --sort-keys '.' tfplan.json > {{ relative_output_path }}tfplan-golden.json
+ rm -f tfplan.json
+
+ if grep -E -q '\b@camunda\.[A-Za-z]{2,}\b' {{ relative_output_path }}tfplan-golden.json; then
+ echo "ERROR: The golden file {{ relative_output_path }}tfplan-golden.json file contains user-specific information."
+ exit 1
+ fi
+
# Install all the tooling
install-tooling: asdf-install
# Install asdf plugins
-asdf-plugins:
+asdf-plugins tool_versions_dir="./":
#!/bin/sh
echo "Installing asdf plugins"
- for plugin in $(awk '{print $1}' .tool-versions); do \
+ for plugin in $(awk '{print $1}' {{tool_versions_dir}}.tool-versions); do \
asdf plugin add ${plugin} 2>&1 | (grep "already added" && exit 0); \
done
@@ -29,3 +92,13 @@ asdf-plugins:
# Install tools using asdf
asdf-install: asdf-plugins
asdf install
+
+# Install tooling of the current dir (https://just.systems/man/en/working-directory.html)
+[no-cd]
+install-tooling-current-dir: asdf-install-current-dir
+
+[no-cd]
+asdf-install-current-dir:
+ #!/bin/sh
+ just asdf-plugins "$(pwd)/"
+ asdf install
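An invocation sketch for the new regenerate-golden-file recipe; the module path, bucket, and key below are placeholders:

just regenerate-golden-file \
  aws/rosa-hcp/camunda-versions/8.7 \
  eu-west-2 \
  my-tf-state-bucket \
  golden/rosa-hcp.tfstate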
diff --git a/lychee-links.toml b/lychee-links.toml
new file mode 100644
index 000000000..8fe7ed815
--- /dev/null
+++ b/lychee-links.toml
@@ -0,0 +1,19 @@
+# Cache the results of Lychee when run locally in order to minimise the chance of rate limiting
+cache = true
+# Ignore all private links (such as localhost) to avoid errors
+exclude_all_private = true
+# HTTP status 429 (Too Many Requests) is also treated as valid, since it only means Lychee got rate limited
+accept = ["200", "403", "429"]
+# retry
+max_retries = 6
+retry_wait_time = 10
+max_concurrency = 3
+
+# Exclude all unsupported versioned_docs
+exclude_path = [
+]
+
+# Explicitly exclude some URLs
+exclude = [
+ "^file:",
+]