From d58030ee69250ae0d1710fcf208ee6843cf0c448 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Mon, 23 Jun 2025 14:32:28 +0100 Subject: [PATCH 01/60] v4 helm chart --- hosting/k8s/helm/.gitignore | 4 + hosting/k8s/helm/Chart.yaml | 18 + hosting/k8s/helm/README.md | 554 ++++++++++++++++++ hosting/k8s/helm/templates/NOTES.txt | 82 +++ hosting/k8s/helm/templates/_helpers.tpl | 212 +++++++ hosting/k8s/helm/templates/clickhouse.yaml | 138 +++++ hosting/k8s/helm/templates/configmap.yaml | 15 + hosting/k8s/helm/templates/electric.yaml | 80 +++ hosting/k8s/helm/templates/ingress.yaml | 60 ++ hosting/k8s/helm/templates/minio.yaml | 123 ++++ hosting/k8s/helm/templates/postgresql.yaml | 120 ++++ hosting/k8s/helm/templates/redis.yaml | 109 ++++ hosting/k8s/helm/templates/registry.yaml | 127 ++++ hosting/k8s/helm/templates/secrets.yaml | 46 ++ .../k8s/helm/templates/servicemonitor.yaml | 55 ++ hosting/k8s/helm/templates/supervisor.yaml | 254 ++++++++ .../helm/templates/tests/test-clickhouse.yaml | 21 + .../helm/templates/tests/test-electric.yaml | 21 + .../k8s/helm/templates/tests/test-minio.yaml | 21 + .../helm/templates/tests/test-postgresql.yaml | 21 + .../k8s/helm/templates/tests/test-redis.yaml | 21 + .../helm/templates/tests/test-supervisor.yaml | 21 + .../k8s/helm/templates/tests/test-webapp.yaml | 19 + hosting/k8s/helm/templates/webapp.yaml | 219 +++++++ .../k8s/helm/values-production-example.yaml | 141 +++++ hosting/k8s/helm/values.yaml | 451 ++++++++++++++ hosting/k8s/setup-kind.sh | 75 +++ 27 files changed, 3028 insertions(+) create mode 100644 hosting/k8s/helm/.gitignore create mode 100644 hosting/k8s/helm/Chart.yaml create mode 100644 hosting/k8s/helm/README.md create mode 100644 hosting/k8s/helm/templates/NOTES.txt create mode 100644 hosting/k8s/helm/templates/_helpers.tpl create mode 100644 hosting/k8s/helm/templates/clickhouse.yaml create mode 100644 hosting/k8s/helm/templates/configmap.yaml create mode 100644 
hosting/k8s/helm/templates/electric.yaml create mode 100644 hosting/k8s/helm/templates/ingress.yaml create mode 100644 hosting/k8s/helm/templates/minio.yaml create mode 100644 hosting/k8s/helm/templates/postgresql.yaml create mode 100644 hosting/k8s/helm/templates/redis.yaml create mode 100644 hosting/k8s/helm/templates/registry.yaml create mode 100644 hosting/k8s/helm/templates/secrets.yaml create mode 100644 hosting/k8s/helm/templates/servicemonitor.yaml create mode 100644 hosting/k8s/helm/templates/supervisor.yaml create mode 100644 hosting/k8s/helm/templates/tests/test-clickhouse.yaml create mode 100644 hosting/k8s/helm/templates/tests/test-electric.yaml create mode 100644 hosting/k8s/helm/templates/tests/test-minio.yaml create mode 100644 hosting/k8s/helm/templates/tests/test-postgresql.yaml create mode 100644 hosting/k8s/helm/templates/tests/test-redis.yaml create mode 100644 hosting/k8s/helm/templates/tests/test-supervisor.yaml create mode 100644 hosting/k8s/helm/templates/tests/test-webapp.yaml create mode 100644 hosting/k8s/helm/templates/webapp.yaml create mode 100644 hosting/k8s/helm/values-production-example.yaml create mode 100644 hosting/k8s/helm/values.yaml create mode 100755 hosting/k8s/setup-kind.sh diff --git a/hosting/k8s/helm/.gitignore b/hosting/k8s/helm/.gitignore new file mode 100644 index 0000000000..bf09f97207 --- /dev/null +++ b/hosting/k8s/helm/.gitignore @@ -0,0 +1,4 @@ +values-test.yaml +values-*.yaml +!values.yaml +!values-production-example.yaml \ No newline at end of file diff --git a/hosting/k8s/helm/Chart.yaml b/hosting/k8s/helm/Chart.yaml new file mode 100644 index 0000000000..978f4ad363 --- /dev/null +++ b/hosting/k8s/helm/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v2 +name: trigger +description: The official Trigger.dev Helm chart +type: application +version: 4.0.0-beta.1 +appVersion: v4.0.0-v4-beta.21 +home: https://trigger.dev +sources: + - https://github.com/triggerdotdev/trigger.dev +keywords: + - trigger + - workflow + - 
background-jobs + - job-scheduler + - task-queue + - automation +annotations: + category: Development diff --git a/hosting/k8s/helm/README.md b/hosting/k8s/helm/README.md new file mode 100644 index 0000000000..e2acc740e8 --- /dev/null +++ b/hosting/k8s/helm/README.md @@ -0,0 +1,554 @@ +# Trigger.dev v4 Helm Chart + +This Helm chart deploys Trigger.dev v4 self-hosting stack to Kubernetes. + +## Quick Start + +### Installation + +```bash +# Deploy with default values (testing/development only) +helm install trigger . + +# Deploy to specific namespace +helm install trigger . -n trigger --create-namespace + +# Deploy with custom values for production +helm install trigger . -f values-production.yaml -n trigger --create-namespace +``` + +### Upgrading + +```bash +# Upgrade existing release +helm upgrade trigger . + +# Upgrade with new values +helm upgrade trigger . -f values-production.yaml +``` + +### Access the dashboard + +```bash +kubectl port-forward svc/trigger-webapp 3040:3030 --address 0.0.0.0 +``` + +Dashboard: http://localhost:3040/ + +### Deploying your tasks + +```bash +# The --push arg is required when testing locally +npx trigger.dev@v4-beta deploy --push +``` + +## ⚠️ Security Requirements + +### Secrets Configuration + +**IMPORTANT**: The default secrets are for **TESTING ONLY** and must be changed for production. 
+ +#### Required Secrets + +All secrets must be exactly **32 hexadecimal characters** (16 bytes): + +- `sessionSecret` - User authentication sessions +- `magicLinkSecret` - Passwordless login tokens +- `encryptionKey` - Sensitive data encryption +- `managedWorkerSecret` - Worker authentication + +#### Generate Production Secrets + +```bash +for i in {1..4}; do openssl rand -hex 16; done +``` + +#### Configure Production Secrets + +```yaml +# values-production.yaml +secrets: + sessionSecret: "your-generated-secret-1" + magicLinkSecret: "your-generated-secret-2" + encryptionKey: "your-generated-secret-3" + managedWorkerSecret: "your-generated-secret-4" + objectStore: + accessKeyId: "your-s3-access-key" + secretAccessKey: "your-s3-secret-key" +``` + +## Architecture + +This chart deploys the following components: + +### Core Services +- **Webapp** - Main Trigger.dev application (port 3030) +- **PostgreSQL** - Primary database with logical replication +- **Redis** - Cache and job queue +- **Electric** - Real-time sync service (ElectricSQL) + +### Worker Services +- **Supervisor** - Kubernetes worker orchestrator for executing runs + +### Supporting Services +- **ClickHouse** - Analytics database +- **MinIO** - S3-compatible object storage +- **Registry** - Private Docker registry for deployed code (EXPERIMENTAL - disabled by default) + +## Configuration + +### Basic Configuration + +```yaml +# Application URLs +config: + appOrigin: "https://trigger.example.com" + loginOrigin: "https://trigger.example.com" + apiOrigin: "https://trigger.example.com" + +# Bootstrap mode (auto-creates worker group) +config: + bootstrap: + enabled: true # Enable for combined setups + workerGroupName: "bootstrap" +``` + +### External Services + +Use external managed services instead of bundled components: + +```yaml +# External PostgreSQL +postgresql: + enabled: false + external: true + externalConnection: + host: "your-postgres.rds.amazonaws.com" + port: 5432 + database: "trigger" + 
username: "trigger_user" + password: "your-password" + +# External Redis +redis: + enabled: false + external: true + externalConnection: + host: "your-redis.cache.amazonaws.com" + port: 6379 + password: "your-password" + +# External Docker Registry (e.g., Kind local registry) +registry: + enabled: true + external: true + externalConnection: + host: "localhost" + port: 5001 + username: "" + password: "" +``` + +### Ingress Configuration + +```yaml +ingress: + enabled: true + className: "nginx" + annotations: + cert-manager.io/cluster-issuer: "letsencrypt-prod" + hosts: + - host: trigger.example.com + paths: + - path: / + pathType: Prefix + tls: + - secretName: trigger-tls + hosts: + - trigger.example.com +``` + +### Resource Configuration + +```yaml +resources: + webapp: + limits: + cpu: 2000m + memory: 4Gi + requests: + cpu: 1000m + memory: 2Gi + +postgresql: + primary: + resources: + limits: + cpu: 1000m + memory: 2Gi +``` + +## Deployment Modes + +### Testing/Development +- Use default values +- Single replica +- Lower resource limits +- Bootstrap mode enabled + +### Production +- Custom secrets (required) +- Multiple replicas with anti-affinity +- Production resource limits +- External services recommended +- Ingress with TLS +- Persistent storage + +## Persistence + +All services support persistent storage and allow you to control the storage class globally or per service: + +```yaml +global: + storageClass: "fast-ssd" # Default for all services + +postgresql: + primary: + persistence: + enabled: true + size: 10Gi + storageClass: "postgres-nvme" # Optional: override for PostgreSQL + +redis: + master: + persistence: + enabled: true + size: 5Gi + storageClass: "redis-ssd" # Optional: override for Redis + +clickhouse: + persistence: + enabled: true + size: 10Gi + storageClass: "analytics-hdd" # Optional: override for ClickHouse + +minio: + persistence: + enabled: true + size: 10Gi + storageClass: "objectstore-ssd" # Optional: override for MinIO + +registry: + 
persistence: + enabled: true + size: 10Gi + storageClass: "registry-ssd" # Optional: override for Registry + +# Shared persistent volume for worker token file +persistence: + shared: + enabled: true + size: 5Mi +``` + +- If a per-service `storageClass` is set, it overrides the global value for that service only. +- If neither is set, the cluster's default StorageClass is used. + +## Monitoring + +### Health Checks + +Health checks are configured for all services: +- HTTP endpoints for web services +- Database connection tests +- Readiness and liveness probes + +### Prometheus Integration + +ServiceMonitors are available for webapp and supervisor services: + +```yaml +webapp: + serviceMonitor: + enabled: true + interval: "30s" + path: "/metrics" + labels: + release: prometheus-stack + +supervisor: + serviceMonitor: + enabled: true + interval: "30s" + path: "/metrics" + labels: + release: prometheus-stack +``` + +## Operations + +### Force Pod Restart + +When you need to force all pods to restart (e.g., to pick up updated secrets or config): + +```bash +# Force restart using timestamp annotation (Helm-native approach) +helm upgrade <release-name> . --set-string podAnnotations.restartedAt="$(date +%s)" + +# Example +helm upgrade trigger . --set-string podAnnotations.restartedAt="$(date +%s)" +``` + +This approach: +- ✅ Uses Helm's built-in annotation mechanism +- ✅ Safe - doesn't recreate immutable resources like PVCs +- ✅ Targeted - only restarts pods that need updates +- ✅ Trackable - increments Helm revision number + +### Configuration Updates + +After changing secrets or ConfigMaps in your values file: + +```bash +# 1. Upgrade with new values +helm upgrade trigger . -f values-production.yaml + +# 2. Force pod restart to pick up changes +helm upgrade trigger . 
-f values-production.yaml \ + --set-string podAnnotations.restartedAt="$(date +%s)" +``` + +## Troubleshooting + +### Check Pod Status +```bash +kubectl get pods -l app.kubernetes.io/name=trigger +``` + +### View Logs +```bash +# Webapp logs +kubectl logs -l app.kubernetes.io/component=webapp + +# Database logs +kubectl logs -l app.kubernetes.io/component=postgresql +``` + +### Run Tests +```bash +helm test trigger +``` + +## Testing + +### Validate Deployment + +```bash +# Check Helm template syntax +helm template trigger . --dry-run > /dev/null && echo "Template validation successful" + +# Test webapp health endpoint (requires port forwarding) +curl -s -o /dev/null -w "%{http_code}" http://localhost:3040/healthcheck || echo "Connection failed" + +# Port forward to access webapp locally +kubectl port-forward svc/trigger-webapp 3040:3030 --address 0.0.0.0 +``` + +### Common Issues + +1. **Secrets errors**: Ensure all secrets are exactly 32 hex characters +2. **Database connection**: Check PostgreSQL is ready before webapp starts +3. **Resource limits**: Increase limits for ClickHouse in constrained environments +4. **Config not applying**: Use the pod restart technique above to force config reload +5. **Image pull errors**: When testing locally, deploy with `npx trigger.dev@v4-beta deploy --push` + +## Examples + +See `values-production-example.yaml` for a complete production configuration example. + +## Version Management + +### Understanding Versions + +The Helm chart uses three types of versions: + +1. **Chart Version** (`Chart.yaml:version`) - Helm chart packaging version +2. **App Version** (`Chart.yaml:appVersion`) - Trigger.dev application version +3. **Component Versions** (`values.yaml`) - Individual service versions (Electric, ClickHouse, etc.) + +### Release Process + +#### For Chart Maintainers + +1. 
**Update Chart Version** for chart changes: + ```bash + # Edit Chart.yaml + version: 4.1.0 # Increment for chart changes (semver) + ``` + +2. **Update App Version** when Trigger.dev releases new version: + ```bash + # Edit Chart.yaml + appVersion: "v4.1.0" # Match Trigger.dev release (v-prefixed image tag) + ``` + +3. **Release via GitHub**: + ```bash + # Tag and push + git tag helm-v4.1.0 + git push origin helm-v4.1.0 + + # GitHub Actions will automatically build and publish to GHCR + ``` + +#### For Users + +```bash +# Install specific chart version +helm upgrade --install trigger \ + oci://ghcr.io/triggerdotdev/charts/trigger.dev \ + --version 4.1.0 + +# Install latest chart version +helm upgrade --install trigger \ + oci://ghcr.io/triggerdotdev/charts/trigger.dev + +# Override app version (advanced) +helm upgrade --install trigger . \ + --set webapp.image.tag=v4.0.1 +``` + +## Production Readiness Checklist + +### 🔒 Security (REQUIRED) + +- [ ] **Generate unique secrets** (never use defaults): + ```bash + # Generate 4 secrets + for i in {1..4}; do openssl rand -hex 16; done + ``` + +- [ ] **Configure security contexts**: + ```yaml + webapp: + podSecurityContext: + fsGroup: 1000 + securityContext: + runAsNonRoot: true + runAsUser: 1000 + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: [ALL] + ``` + +- [ ] **Enable network policies** (if supported by cluster) +- [ ] **Configure proper RBAC** for supervisor +- [ ] **Use TLS ingress** with cert-manager + +### 📊 Resource Management (REQUIRED) + +- [ ] **Set resource limits and requests** - for example: + ```yaml + webapp: + resources: + limits: + cpu: 2000m + memory: 4Gi + requests: + cpu: 1000m + memory: 2Gi + + postgresql: + primary: + resources: + limits: + cpu: 1000m + memory: 2Gi + requests: + cpu: 500m + memory: 1Gi + + redis: + master: + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 250m + memory: 512Mi + + clickhouse: + resources: + limits: + cpu: 
1000m + memory: 2Gi + requests: + cpu: 500m + memory: 1Gi + + supervisor: + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 250m + memory: 512Mi + ``` + +- [ ] **Configure persistent storage for all services** - for example: + ```yaml + global: + storageClass: "fast-nvme" # Default for all services + + postgresql: + primary: + persistence: + size: 500Gi + + redis: + master: + persistence: + size: 20Gi + + clickhouse: + persistence: + size: 100Gi + + minio: + persistence: + size: 200Gi + + registry: + persistence: + size: 100Gi + ``` + +### 🏗️ High Availability (RECOMMENDED) + +- [ ] **Multiple replicas** with pod anti-affinity +- [ ] **Pod disruption budgets** +- [ ] **External managed services** (RDS, ElastiCache, etc.) +- [ ] **Multi-AZ storage classes** +- [ ] **Backup strategies** for databases + +### 📈 Monitoring (RECOMMENDED) + +- [ ] **Enable ServiceMonitors** for Prometheus +- [ ] **Configure alerting** for critical services +- [ ] **Set up log aggregation** +- [ ] **Monitor resource usage** and adjust limits + +### 🚀 Performance (OPTIONAL) + +- [ ] **Horizontal Pod Autoscaler** for webapp +- [ ] **Vertical Pod Autoscaler** for data services +- [ ] **Node affinity** for data services +- [ ] **Separate storage classes** for different workloads + +## Support + +- Documentation: https://trigger.dev/docs/self-hosting +- GitHub Issues: https://github.com/triggerdotdev/trigger.dev/issues \ No newline at end of file diff --git a/hosting/k8s/helm/templates/NOTES.txt b/hosting/k8s/helm/templates/NOTES.txt new file mode 100644 index 0000000000..9637a43b29 --- /dev/null +++ b/hosting/k8s/helm/templates/NOTES.txt @@ -0,0 +1,82 @@ +Thank you for installing {{ .Chart.Name }}. + +Your release is named {{ .Release.Name }}. 
+ +🔐 SECURITY WARNING: +{{- if or (eq .Values.secrets.sessionSecret "2818143646516f6fffd707b36f334bbb") (eq .Values.secrets.magicLinkSecret "44da78b7bbb0dfe709cf38931d25dcdd") (eq .Values.secrets.encryptionKey "f686147ab967943ebbe9ed3b496e465a") (eq .Values.secrets.managedWorkerSecret "447c29678f9eaf289e9c4b70d3dd8a7f") }} + You are using DEFAULT SECRETS which are NOT SECURE for production! + + For production deployments, generate new secrets: + 1. Run: openssl rand -hex 16 (repeat for each secret) + 2. Override in your values.yaml: + secrets: + sessionSecret: "your-new-32-char-hex-secret" + magicLinkSecret: "your-new-32-char-hex-secret" + encryptionKey: "your-new-32-char-hex-secret" + managedWorkerSecret: "your-new-32-char-hex-secret" +{{- else }} + Custom secrets detected - good for production deployment! +{{- end }} + +To get started: + +1. Wait for all pods to be ready: + kubectl get pods --namespace {{ .Release.Namespace }} -w + +2. Access the webapp: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.webapp.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "trigger-v4.fullname" . }}-webapp) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.webapp.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "trigger-v4.fullname" . }}-webapp' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "trigger-v4.fullname" . 
}}-webapp --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.webapp.service.port }} +{{- else if contains "ClusterIP" .Values.webapp.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "{{ include "trigger-v4.selectorLabels" . }},app.kubernetes.io/component=webapp" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8030:$CONTAINER_PORT + + The application will be available at http://localhost:8030 +{{- end }} + +For more information about the deployment, run: + kubectl --namespace {{ .Release.Namespace }} get all -l "{{ include "trigger-v4.selectorLabels" . }}" + +{{- if .Values.webapp.bootstrap.enabled }} + +Bootstrap Mode is enabled: +- Worker group "{{ .Values.webapp.bootstrap.workerGroupName }}" will be automatically created +- Worker token will be available at {{ .Values.webapp.bootstrap.workerTokenPath }} +{{- end }} + +Configuration: +{{- if .Values.postgresql.external }} +- Using external PostgreSQL at {{ .Values.postgresql.externalConnection.host }}:{{ .Values.postgresql.externalConnection.port }} +{{- else }} +- Using internal PostgreSQL +{{- end }} +{{- if .Values.redis.external }} +- Using external Redis at {{ .Values.redis.externalConnection.host }}:{{ .Values.redis.externalConnection.port }} +{{- else }} +- Using internal Redis +{{- end }} +{{- if .Values.electric.enabled }} +- Electric sync service enabled +{{- end }} +{{- if .Values.clickhouse.enabled }} +- ClickHouse analytics database enabled +{{- end }} +{{- if .Values.minio.enabled }} +- MinIO object storage enabled +{{- end }} +{{- if .Values.registry.enabled }} +- Docker registry enabled +{{- end }} \ No newline at end of file diff --git a/hosting/k8s/helm/templates/_helpers.tpl 
b/hosting/k8s/helm/templates/_helpers.tpl new file mode 100644 index 0000000000..074782cc1a --- /dev/null +++ b/hosting/k8s/helm/templates/_helpers.tpl @@ -0,0 +1,212 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "trigger-v4.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "trigger-v4.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "trigger-v4.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "trigger-v4.labels" -}} +helm.sh/chart: {{ include "trigger-v4.chart" . }} +{{ include "trigger-v4.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "trigger-v4.selectorLabels" -}} +app.kubernetes.io/name: {{ include "trigger-v4.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Component labels +*/}} +{{- define "trigger-v4.componentLabels" -}} +{{ include "trigger-v4.labels" . 
}} +app.kubernetes.io/component: {{ .component }} +{{- end }} + +{{/* +Component selector labels +*/}} +{{- define "trigger-v4.componentSelectorLabels" -}} +{{ include "trigger-v4.selectorLabels" . }} +app.kubernetes.io/component: {{ .component }} +{{- end }} + + +{{/* +Get the full image name for webapp +*/}} +{{- define "trigger-v4.image" -}} +{{- $registry := .Values.global.imageRegistry | default .Values.webapp.image.registry -}} +{{- $repository := .Values.webapp.image.repository -}} +{{- $tag := .Values.webapp.image.tag | default .Chart.AppVersion -}} +{{- if $registry }} +{{- printf "%s/%s:%s" $registry $repository $tag }} +{{- else }} +{{- printf "%s:%s" $repository $tag }} +{{- end }} +{{- end }} + +{{/* +Get the full image name for supervisor +*/}} +{{- define "trigger-v4.supervisor.image" -}} +{{- $registry := .Values.global.imageRegistry | default .Values.supervisor.image.registry -}} +{{- $repository := .Values.supervisor.image.repository -}} +{{- $tag := .Values.supervisor.image.tag | default .Chart.AppVersion -}} +{{- if $registry }} +{{- printf "%s/%s:%s" $registry $repository $tag }} +{{- else }} +{{- printf "%s:%s" $repository $tag }} +{{- end }} +{{- end }} + +{{/* +PostgreSQL connection string for internal PostgreSQL +*/}} +{{- define "trigger-v4.postgresql.connectionString" -}} +{{- if .Values.postgresql.external -}} +postgresql://{{ .Values.postgresql.externalConnection.username }}:{{ .Values.postgresql.externalConnection.password }}@{{ .Values.postgresql.externalConnection.host }}:{{ .Values.postgresql.externalConnection.port }}/{{ .Values.postgresql.externalConnection.database }}?schema=public&sslmode=disable +{{- else -}} +postgresql://{{ .Values.postgresql.auth.username }}:{{ .Values.postgresql.auth.password }}@{{ include "trigger-v4.fullname" . 
}}-postgresql:{{ .Values.postgresql.primary.service.ports.postgresql }}/{{ .Values.postgresql.auth.database }}?schema=public&sslmode=disable +{{- end -}} +{{- end }} + +{{/* +Redis connection details +*/}} +{{- define "trigger-v4.redis.host" -}} +{{- if .Values.redis.external -}} +{{ .Values.redis.externalConnection.host }} +{{- else -}} +{{ include "trigger-v4.fullname" . }}-redis-master +{{- end -}} +{{- end }} + +{{- define "trigger-v4.redis.port" -}} +{{- if .Values.redis.external -}} +{{ .Values.redis.externalConnection.port }} +{{- else -}} +{{ .Values.redis.master.service.ports.redis }} +{{- end -}} +{{- end }} + +{{/* +Electric service URL +*/}} +{{- define "trigger-v4.electric.url" -}} +{{- if .Values.electric.enabled -}} +http://{{ include "trigger-v4.fullname" . }}-electric:{{ .Values.electric.service.port }} +{{- else -}} +{{ .Values.config.electricOrigin }} +{{- end -}} +{{- end }} + +{{/* +MinIO connection details +*/}} +{{- define "trigger-v4.minio.url" -}} +{{- if .Values.minio.enabled -}} +http://{{ include "trigger-v4.fullname" . }}-minio:{{ .Values.minio.service.ports.api }} +{{- else -}} +"" +{{- end -}} +{{- end }} + +{{/* +Registry connection details +*/}} +{{- define "trigger-v4.registry.host" -}} +{{- if .Values.registry.external -}} +{{ .Values.registry.externalConnection.host }}:{{ .Values.registry.externalConnection.port }} +{{- else if .Values.registry.enabled -}} +{{ include "trigger-v4.fullname" . }}-registry:{{ .Values.registry.service.port }} +{{- else -}} +localhost:5000 +{{- end -}} +{{- end }} + +{{/* +Supervisor connection details +*/}} +{{- define "trigger-v4.supervisor.url" -}} +{{- if .Values.supervisor.enabled -}} +http://{{ include "trigger-v4.fullname" . 
}}-supervisor:{{ .Values.supervisor.service.ports.workload }} +{{- else -}} +"" +{{- end -}} +{{- end }} + +{{/* +Create the name of the supervisor service account to use +*/}} +{{- define "trigger-v4.supervisorServiceAccountName" -}} +{{- if .Values.supervisor.serviceAccount.create }} +{{- default (printf "%s-supervisor" (include "trigger-v4.fullname" .)) .Values.supervisor.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.supervisor.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Create the name of the supervisor cluster role to use +*/}} +{{- define "trigger-v4.supervisorClusterRoleName" -}} +{{- default (printf "%s-supervisor" (include "trigger-v4.fullname" .)) .Values.supervisor.rbac.clusterRole.name }} +{{- end }} + +{{/* +Generate docker config for image pull secret +*/}} +{{- define "trigger-v4.imagePullSecret" }} +{{- if and .Values.registry.enabled .Values.registry.auth.enabled }} +{{- $registryHost := include "trigger-v4.registry.host" . }} +{{- $username := .Values.registry.auth.username }} +{{- $password := .Values.registry.auth.password }} +{{- $auth := printf "%s:%s" $username $password | b64enc }} +{{- $config := dict "auths" (dict $registryHost (dict "username" $username "password" $password "auth" $auth)) }} +{{- $config | toJson }} +{{- else if and .Values.registry.external .Values.registry.externalConnection.auth.enabled }} +{{- $registryHost := .Values.registry.externalConnection.host }} +{{- $username := .Values.registry.externalConnection.auth.username }} +{{- $password := .Values.registry.externalConnection.auth.password }} +{{- $auth := printf "%s:%s" $username $password | b64enc }} +{{- $config := dict "auths" (dict $registryHost (dict "username" $username "password" $password "auth" $auth)) }} +{{- $config | toJson }} +{{- end }} +{{- end }} + diff --git a/hosting/k8s/helm/templates/clickhouse.yaml b/hosting/k8s/helm/templates/clickhouse.yaml new file mode 100644 index 0000000000..15282d389d --- /dev/null +++ 
b/hosting/k8s/helm/templates/clickhouse.yaml @@ -0,0 +1,138 @@ +{{- if .Values.clickhouse.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "trigger-v4.fullname" . }}-clickhouse + labels: + {{- $component := "clickhouse" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 6 }} + template: + metadata: + labels: + {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 8 }} + spec: + {{- with .Values.clickhouse.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: clickhouse + {{- with .Values.clickhouse.securityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + image: "{{ .Values.clickhouse.image.registry }}/{{ .Values.clickhouse.image.repository }}:{{ .Values.clickhouse.image.tag }}" + imagePullPolicy: {{ .Values.clickhouse.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.clickhouse.service.targetPort }} + protocol: TCP + env: + - name: CLICKHOUSE_ADMIN_USER + value: {{ .Values.clickhouse.auth.adminUser | quote }} + - name: CLICKHOUSE_ADMIN_PASSWORD + value: {{ .Values.clickhouse.auth.adminPassword | quote }} + {{- with .Values.clickhouse.extraEnv }} + {{- toYaml . 
| nindent 12 }} + {{- end }} + livenessProbe: + exec: + command: + - clickhouse-client + - --host + - localhost + - --port + - "9000" + - --user + - {{ .Values.clickhouse.auth.adminUser }} + - --password + - {{ .Values.clickhouse.auth.adminPassword }} + - --query + - "SELECT 1" + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 15 + failureThreshold: 5 + readinessProbe: + exec: + command: + - clickhouse-client + - --host + - localhost + - --port + - "9000" + - --user + - {{ .Values.clickhouse.auth.adminUser }} + - --password + - {{ .Values.clickhouse.auth.adminPassword }} + - --query + - "SELECT 1" + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 15 + failureThreshold: 5 + resources: + {{- toYaml .Values.clickhouse.resources | nindent 12 }} + volumeMounts: + - name: clickhouse-data + mountPath: /bitnami/clickhouse + - name: clickhouse-config + mountPath: /bitnami/clickhouse/etc/config.d/override.xml + subPath: override.xml + readOnly: true + volumes: + - name: clickhouse-data + {{- if .Values.clickhouse.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ include "trigger-v4.fullname" . }}-clickhouse + {{- else }} + emptyDir: {} + {{- end }} + - name: clickhouse-config + configMap: + name: {{ include "trigger-v4.fullname" . }}-clickhouse-config +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "trigger-v4.fullname" . 
}}-clickhouse + labels: + {{- $component := "clickhouse" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +spec: + type: {{ .Values.clickhouse.service.type }} + ports: + - name: http + port: {{ .Values.clickhouse.service.port }} + targetPort: http + protocol: TCP + selector: + {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +--- +{{- if .Values.clickhouse.persistence.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "trigger-v4.fullname" . }}-clickhouse + labels: + {{- $component := "clickhouse" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.clickhouse.persistence.size }} + {{- $storageClass := .Values.clickhouse.persistence.storageClass | default .Values.global.storageClass }} + {{- if $storageClass }} + storageClassName: {{ $storageClass | quote }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/hosting/k8s/helm/templates/configmap.yaml b/hosting/k8s/helm/templates/configmap.yaml new file mode 100644 index 0000000000..133fef6deb --- /dev/null +++ b/hosting/k8s/helm/templates/configmap.yaml @@ -0,0 +1,15 @@ +{{- if .Values.clickhouse.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "trigger-v4.fullname" . }}-clickhouse-config + labels: + {{- include "trigger-v4.labels" . 
| nindent 4 }}
+data:
+  # Logger override for the Bitnami ClickHouse image; mounted at
+  # /bitnami/clickhouse/etc/config.d/override.xml by the deployment.
+  override.xml: |
+    <clickhouse>
+      <logger>
+        <level>warning</level>
+      </logger>
+    </clickhouse>
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/electric.yaml b/hosting/k8s/helm/templates/electric.yaml
new file mode 100644
index 0000000000..e6a0ceee40
--- /dev/null
+++ b/hosting/k8s/helm/templates/electric.yaml
@@ -0,0 +1,80 @@
+{{- if .Values.electric.enabled }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "trigger-v4.fullname" . }}-electric
+  labels:
+    {{- $component := "electric" }}
+    {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 6 }}
+  template:
+    metadata:
+      labels:
+        {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 8 }}
+    spec:
+      {{- with .Values.electric.podSecurityContext }}
+      securityContext:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      containers:
+        - name: electric
+          {{- with .Values.electric.securityContext }}
+          securityContext:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+          image: "{{ .Values.electric.image.registry }}/{{ .Values.electric.image.repository }}:{{ .Values.electric.image.tag }}"
+          imagePullPolicy: {{ .Values.electric.image.pullPolicy }}
+          ports:
+            - name: http
+              containerPort: {{ .Values.electric.service.targetPort }}
+              protocol: TCP
+          env:
+            - name: DATABASE_URL
+              value: {{ include "trigger-v4.postgresql.connectionString" . | quote }}
+            - name: ELECTRIC_INSECURE
+              value: {{ .Values.electric.config.insecure | quote }}
+            - name: ELECTRIC_USAGE_REPORTING
+              value: {{ .Values.electric.config.usageReporting | quote }}
+          {{- with .Values.electric.extraEnv }}
+          {{- toYaml .
| nindent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /v1/health + port: http + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /v1/health + port: http + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + resources: + {{- toYaml .Values.electric.resources | nindent 12 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "trigger-v4.fullname" . }}-electric + labels: + {{- $component := "electric" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +spec: + type: {{ .Values.electric.service.type }} + ports: + - port: {{ .Values.electric.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +{{- end }} \ No newline at end of file diff --git a/hosting/k8s/helm/templates/ingress.yaml b/hosting/k8s/helm/templates/ingress.yaml new file mode 100644 index 0000000000..8227f15ad4 --- /dev/null +++ b/hosting/k8s/helm/templates/ingress.yaml @@ -0,0 +1,60 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "trigger-v4.fullname" . -}} +{{- $svcPort := .Values.webapp.service.port -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "trigger-v4.labels" . | nindent 4 }} + annotations: + {{- with .Values.ingress.annotations }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + {{- if .Values.ingress.certManager.enabled }} + cert-manager.io/cluster-issuer: {{ .Values.ingress.certManager.clusterIssuer | quote }} + {{- end }} + {{- if .Values.ingress.externalDns.enabled }} + external-dns.alpha.kubernetes.io/hostname: {{ .Values.ingress.externalDns.hostname | quote }} + external-dns.alpha.kubernetes.io/ttl: {{ .Values.ingress.externalDns.ttl | quote }} + {{- end }} +spec: + {{- if .Values.ingress.className }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- if .paths }} + {{- range .paths }} + - path: {{ .path }} + pathType: {{ .pathType | default "Prefix" }} + backend: + service: + name: {{ $fullName }}-webapp + port: + number: {{ $svcPort }} + {{- end }} + {{- else }} + - path: / + pathType: Prefix + backend: + service: + name: {{ $fullName }}-webapp + port: + number: {{ $svcPort }} + {{- end }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/hosting/k8s/helm/templates/minio.yaml b/hosting/k8s/helm/templates/minio.yaml new file mode 100644 index 0000000000..2940a3bc8c --- /dev/null +++ b/hosting/k8s/helm/templates/minio.yaml @@ -0,0 +1,123 @@ +{{- if .Values.minio.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "trigger-v4.fullname" . 
}}-minio + labels: + {{- $component := "minio" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 6 }} + template: + metadata: + labels: + {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 8 }} + spec: + {{- with .Values.minio.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: minio + {{- with .Values.minio.securityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + image: "{{ .Values.minio.image.registry }}/{{ .Values.minio.image.repository }}:{{ .Values.minio.image.tag }}" + imagePullPolicy: {{ .Values.minio.image.pullPolicy }} + args: + - server + - --console-address + - ":9001" + - /data + ports: + - name: api + containerPort: {{ .Values.minio.service.ports.api }} + protocol: TCP + - name: console + containerPort: {{ .Values.minio.service.ports.console }} + protocol: TCP + env: + - name: MINIO_ROOT_USER + value: {{ .Values.minio.auth.rootUser | quote }} + - name: MINIO_ROOT_PASSWORD + value: {{ .Values.minio.auth.rootPassword | quote }} + {{- with .Values.minio.extraEnv }} + {{- toYaml . 
| nindent 12 }}
+          {{- end }}
+          livenessProbe:
+            httpGet:
+              path: /minio/health/live
+              port: api
+            initialDelaySeconds: 10
+            periodSeconds: 5
+            timeoutSeconds: 10
+            failureThreshold: 5
+          readinessProbe:
+            httpGet:
+              path: /minio/health/ready
+              port: api
+            initialDelaySeconds: 10
+            periodSeconds: 5
+            timeoutSeconds: 10
+            failureThreshold: 5
+          resources:
+            {{- toYaml .Values.minio.resources | nindent 12 }}
+          volumeMounts:
+            - name: minio-data
+              mountPath: /data
+      volumes:
+        - name: minio-data
+          {{- if .Values.minio.persistence.enabled }}
+          persistentVolumeClaim:
+            claimName: {{ include "trigger-v4.fullname" . }}-minio
+          {{- else }}
+          emptyDir: {}
+          {{- end }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "trigger-v4.fullname" . }}-minio
+  labels:
+    {{- $component := "minio" }}
+    {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+spec:
+  type: {{ .Values.minio.service.type }}
+  ports:
+    - name: api
+      port: {{ .Values.minio.service.ports.api }}
+      targetPort: api
+      protocol: TCP
+    - name: console
+      port: {{ .Values.minio.service.ports.console }}
+      targetPort: console
+      protocol: TCP
+  selector:
+    {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }}
+---
+{{- if .Values.minio.persistence.enabled }}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: {{ include "trigger-v4.fullname" .
}}-minio + labels: + {{- $component := "minio" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.minio.persistence.size }} + {{- $storageClass := .Values.minio.persistence.storageClass | default .Values.global.storageClass }} + {{- if $storageClass }} + storageClassName: {{ $storageClass | quote }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/hosting/k8s/helm/templates/postgresql.yaml b/hosting/k8s/helm/templates/postgresql.yaml new file mode 100644 index 0000000000..2fdd55f102 --- /dev/null +++ b/hosting/k8s/helm/templates/postgresql.yaml @@ -0,0 +1,120 @@ +{{- if and .Values.postgresql.enabled (not .Values.postgresql.external) }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "trigger-v4.fullname" . }}-postgresql + labels: + {{- $component := "postgresql" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 6 }} + template: + metadata: + labels: + {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 8 }} + spec: + {{- with .Values.postgresql.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: postgresql + {{- with .Values.postgresql.securityContext }} + securityContext: + {{- toYaml . 
| nindent 12 }} + {{- end }} + image: "{{ .Values.postgresql.image.registry }}/{{ .Values.postgresql.image.repository }}:{{ .Values.postgresql.image.tag }}" + imagePullPolicy: {{ .Values.postgresql.image.pullPolicy }} + ports: + - name: postgresql + containerPort: {{ .Values.postgresql.primary.service.ports.postgresql }} + protocol: TCP + env: + - name: POSTGRES_USER + value: {{ .Values.postgresql.auth.username | quote }} + - name: POSTGRES_PASSWORD + value: {{ .Values.postgresql.auth.password | quote }} + - name: POSTGRES_DB + value: {{ .Values.postgresql.auth.database | quote }} + {{- with .Values.postgresql.extraEnv }} + {{- toYaml . | nindent 12 }} + {{- end }} + args: + - "-c" + - "wal_level=logical" + livenessProbe: + exec: + command: + - pg_isready + - -U + - {{ .Values.postgresql.auth.username }} + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + readinessProbe: + exec: + command: + - pg_isready + - -U + - {{ .Values.postgresql.auth.username }} + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + resources: + {{- toYaml .Values.postgresql.primary.resources | nindent 12 }} + volumeMounts: + - name: postgresql-data + mountPath: /var/lib/postgresql/data + volumes: + - name: postgresql-data + {{- if .Values.postgresql.primary.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ include "trigger-v4.fullname" . }}-postgresql + {{- else }} + emptyDir: {} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "trigger-v4.fullname" . 
}}-postgresql + labels: + {{- $component := "postgresql" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +spec: + type: ClusterIP + ports: + - name: postgresql + port: {{ .Values.postgresql.primary.service.ports.postgresql }} + targetPort: postgresql + protocol: TCP + selector: + {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +--- +{{- if .Values.postgresql.primary.persistence.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "trigger-v4.fullname" . }}-postgresql + labels: + {{- $component := "postgresql" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.postgresql.primary.persistence.size }} + {{- $storageClass := .Values.postgresql.primary.persistence.storageClass | default .Values.global.storageClass }} + {{- if $storageClass }} + storageClassName: {{ $storageClass | quote }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/hosting/k8s/helm/templates/redis.yaml b/hosting/k8s/helm/templates/redis.yaml new file mode 100644 index 0000000000..041ae03497 --- /dev/null +++ b/hosting/k8s/helm/templates/redis.yaml @@ -0,0 +1,109 @@ +{{- if and .Values.redis.enabled (not .Values.redis.external) }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "trigger-v4.fullname" . 
}}-redis-master + labels: + {{- $component := "redis" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 6 }} + template: + metadata: + labels: + {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 8 }} + spec: + {{- with .Values.redis.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: redis + {{- with .Values.redis.securityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + image: "{{ .Values.redis.image.registry }}/{{ .Values.redis.image.repository }}:{{ .Values.redis.image.tag }}" + imagePullPolicy: {{ .Values.redis.image.pullPolicy }} + ports: + - name: redis + containerPort: {{ .Values.redis.master.service.ports.redis }} + protocol: TCP + {{- with .Values.redis.extraEnv }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + livenessProbe: + exec: + command: + - redis-cli + - ping + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + readinessProbe: + exec: + command: + - redis-cli + - ping + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + resources: + {{- toYaml .Values.redis.master.resources | nindent 12 }} + volumeMounts: + - name: redis-data + mountPath: /data + volumes: + - name: redis-data + {{- if .Values.redis.master.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ include "trigger-v4.fullname" . }}-redis + {{- else }} + emptyDir: {} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "trigger-v4.fullname" . 
}}-redis-master + labels: + {{- $component := "redis" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +spec: + type: ClusterIP + ports: + - name: redis + port: {{ .Values.redis.master.service.ports.redis }} + targetPort: redis + protocol: TCP + selector: + {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +--- +{{- if .Values.redis.master.persistence.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "trigger-v4.fullname" . }}-redis + labels: + {{- $component := "redis" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.redis.master.persistence.size }} + {{- $storageClass := .Values.redis.master.persistence.storageClass | default .Values.global.storageClass }} + {{- if $storageClass }} + storageClassName: {{ $storageClass | quote }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/hosting/k8s/helm/templates/registry.yaml b/hosting/k8s/helm/templates/registry.yaml new file mode 100644 index 0000000000..c587daaf60 --- /dev/null +++ b/hosting/k8s/helm/templates/registry.yaml @@ -0,0 +1,127 @@ +{{- if and .Values.registry.enabled (not .Values.registry.external) }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "trigger-v4.fullname" . 
}}-registry + labels: + {{- $component := "registry" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 6 }} + template: + metadata: + labels: + {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 8 }} + spec: + {{- with .Values.registry.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: registry + {{- with .Values.registry.securityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + image: "{{ .Values.registry.image.registry }}/{{ .Values.registry.image.repository }}:{{ .Values.registry.image.tag }}" + imagePullPolicy: {{ .Values.registry.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.registry.service.targetPort }} + protocol: TCP + {{- if or .Values.registry.auth.enabled .Values.registry.extraEnv }} + env: + {{- if .Values.registry.auth.enabled }} + - name: REGISTRY_AUTH + value: "htpasswd" + - name: REGISTRY_AUTH_HTPASSWD_REALM + value: "Registry Realm" + - name: REGISTRY_AUTH_HTPASSWD_PATH + value: "/auth/htpasswd" + {{- end }} + {{- with .Values.registry.extraEnv }} + {{- toYaml . 
| nindent 12 }} + {{- end }} + {{- end }} + livenessProbe: + httpGet: + path: / + port: http + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + readinessProbe: + httpGet: + path: / + port: http + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + resources: + {{- toYaml .Values.registry.resources | nindent 12 }} + volumeMounts: + - name: registry-data + mountPath: /var/lib/registry + {{- if .Values.registry.auth.enabled }} + - name: registry-auth + mountPath: /auth + readOnly: true + {{- end }} + volumes: + - name: registry-data + {{- if .Values.registry.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ include "trigger-v4.fullname" . }}-registry + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.registry.auth.enabled }} + - name: registry-auth + secret: + secretName: {{ include "trigger-v4.fullname" . }}-registry-auth + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "trigger-v4.fullname" . }}-registry + labels: + {{- $component := "registry" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +spec: + type: {{ .Values.registry.service.type }} + ports: + - name: http + port: {{ .Values.registry.service.port }} + targetPort: http + protocol: TCP + selector: + {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +--- +{{- if .Values.registry.persistence.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "trigger-v4.fullname" . 
}}-registry + labels: + {{- $component := "registry" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.registry.persistence.size }} + {{- $storageClass := .Values.registry.persistence.storageClass | default .Values.global.storageClass }} + {{- if $storageClass }} + storageClassName: {{ $storageClass | quote }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/hosting/k8s/helm/templates/secrets.yaml b/hosting/k8s/helm/templates/secrets.yaml new file mode 100644 index 0000000000..49607116f4 --- /dev/null +++ b/hosting/k8s/helm/templates/secrets.yaml @@ -0,0 +1,46 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "trigger-v4.fullname" . }}-secrets + labels: + {{- include "trigger-v4.labels" . | nindent 4 }} +type: Opaque +data: + session-secret: {{ .Values.secrets.sessionSecret | b64enc | quote }} + magic-link-secret: {{ .Values.secrets.magicLinkSecret | b64enc | quote }} + encryption-key: {{ .Values.secrets.encryptionKey | b64enc | quote }} + managed-worker-secret: {{ .Values.secrets.managedWorkerSecret | b64enc | quote }} + object-store-access-key-id: {{ .Values.secrets.objectStore.accessKeyId | b64enc | quote }} + object-store-secret-access-key: {{ .Values.secrets.objectStore.secretAccessKey | b64enc | quote }} +--- +{{- if and .Values.registry.enabled .Values.registry.auth.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "trigger-v4.fullname" . }}-registry-auth + labels: + {{- include "trigger-v4.labels" . | nindent 4 }} +type: Opaque +data: + htpasswd: {{ htpasswd .Values.registry.auth.username .Values.registry.auth.password | b64enc | quote }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "trigger-v4.fullname" . }}-registry-secret + labels: + {{- include "trigger-v4.labels" . 
| nindent 4 }} +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ include "trigger-v4.imagePullSecret" . | b64enc }} +{{- else if and .Values.registry.external .Values.registry.externalConnection.auth.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "trigger-v4.fullname" . }}-registry-secret + labels: + {{- include "trigger-v4.labels" . | nindent 4 }} +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ include "trigger-v4.imagePullSecret" . | b64enc }} +{{- end }} \ No newline at end of file diff --git a/hosting/k8s/helm/templates/servicemonitor.yaml b/hosting/k8s/helm/templates/servicemonitor.yaml new file mode 100644 index 0000000000..15ca65f1cf --- /dev/null +++ b/hosting/k8s/helm/templates/servicemonitor.yaml @@ -0,0 +1,55 @@ +{{- if .Values.webapp.serviceMonitor.enabled }} +--- +# Webapp ServiceMonitor +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "trigger-v4.fullname" . }}-webapp + labels: + {{- include "trigger-v4.labels" . | nindent 4 }} + app.kubernetes.io/component: webapp + {{- with .Values.webapp.serviceMonitor.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "trigger-v4.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: webapp + endpoints: + - port: http + path: {{ .Values.webapp.serviceMonitor.path }} + interval: {{ .Values.webapp.serviceMonitor.interval }} + {{- with .Values.webapp.serviceMonitor.basicAuth }} + basicAuth: + {{- toYaml . | nindent 6 }} + {{- end }} +{{- end }} + +{{- if .Values.supervisor.serviceMonitor.enabled }} +--- +# Supervisor ServiceMonitor +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "trigger-v4.fullname" . }}-supervisor + labels: + {{- include "trigger-v4.labels" . | nindent 4 }} + app.kubernetes.io/component: supervisor + {{- with .Values.supervisor.serviceMonitor.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "trigger-v4.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: supervisor + endpoints: + - port: metrics + path: {{ .Values.supervisor.serviceMonitor.path }} + interval: {{ .Values.supervisor.serviceMonitor.interval }} + {{- with .Values.supervisor.serviceMonitor.basicAuth }} + basicAuth: + {{- toYaml . | nindent 6 }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/hosting/k8s/helm/templates/supervisor.yaml b/hosting/k8s/helm/templates/supervisor.yaml new file mode 100644 index 0000000000..4f78ab7cfe --- /dev/null +++ b/hosting/k8s/helm/templates/supervisor.yaml @@ -0,0 +1,254 @@ +{{- if .Values.supervisor.enabled }} +{{- if .Values.supervisor.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "trigger-v4.supervisorServiceAccountName" . }} + labels: + {{- $component := "supervisor" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} + {{- with .Values.supervisor.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +--- +{{- if .Values.supervisor.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "trigger-v4.supervisorClusterRoleName" . }} + labels: + {{- $component := "supervisor" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["create", "delete", "deletecollection", "get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "trigger-v4.supervisorClusterRoleName" . 
}}-binding + labels: + {{- $component := "supervisor" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ include "trigger-v4.supervisorServiceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ include "trigger-v4.supervisorClusterRoleName" . }} + apiGroup: rbac.authorization.k8s.io +{{- end }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "trigger-v4.fullname" . }}-supervisor + labels: + {{- $component := "supervisor" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +spec: + replicas: 1 + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 0 + maxSurge: 1 + selector: + matchLabels: + {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 6 }} + template: + metadata: + labels: + {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 8 }} + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.supervisor.service.ports.metrics }}" + prometheus.io/path: "/metrics" + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "trigger-v4.supervisorServiceAccountName" . }} + {{- with .Values.supervisor.podSecurityContext }} + securityContext: + {{- toYaml . 
| nindent 8 }} + {{- end }} + initContainers: + - name: init-shared + image: busybox:1.35 + command: ['sh', '-c', 'mkdir -p /home/node/shared && chown 1000:1000 /home/node/shared'] + securityContext: + runAsUser: 0 + volumeMounts: + - name: shared + mountPath: /home/node/shared + containers: + - name: supervisor + image: {{ include "trigger-v4.supervisor.image" . }} + imagePullPolicy: {{ .Values.supervisor.image.pullPolicy }} + ports: + - name: workload + containerPort: {{ .Values.supervisor.service.ports.workload }} + protocol: TCP + - name: metrics + containerPort: {{ .Values.supervisor.service.ports.metrics }} + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: workload + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /health + port: workload + initialDelaySeconds: 15 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + resources: + {{- toYaml .Values.supervisor.resources | nindent 12 }} + env: + # Core configuration + - name: TRIGGER_API_URL + value: "http://{{ include "trigger-v4.fullname" . }}-webapp:{{ .Values.webapp.service.port }}" + - name: TRIGGER_WORKER_TOKEN + value: "file:///home/node/shared/worker_token" + - name: MANAGED_WORKER_SECRET + valueFrom: + secretKeyRef: + name: {{ include "trigger-v4.fullname" . 
}}-secrets + key: managed-worker-secret + # Worker instance configuration + - name: TRIGGER_WORKER_INSTANCE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Kubernetes configuration + - name: KUBERNETES_NAMESPACE + value: {{ default .Release.Namespace .Values.supervisor.config.kubernetesNamespace | quote }} + - name: KUBERNETES_FORCE_ENABLED + value: {{ .Values.supervisor.config.kubernetesForceEnabled | quote }} + - name: KUBERNETES_WORKER_NODETYPE_LABEL + value: {{ .Values.supervisor.config.kubernetesWorkerNodetypeLabel | quote }} + {{- if or (and .Values.registry.enabled .Values.registry.auth.enabled) (and .Values.registry.external .Values.registry.externalConnection.auth.enabled) }} + - name: KUBERNETES_IMAGE_PULL_SECRETS + value: "{{ include "trigger-v4.fullname" . }}-registry-secret" + {{- end }} + - name: KUBERNETES_EPHEMERAL_STORAGE_SIZE_LIMIT + value: {{ default "10Gi" .Values.supervisor.config.kubernetesEphemeralStorageSizeLimit | quote }} + - name: KUBERNETES_EPHEMERAL_STORAGE_SIZE_REQUEST + value: {{ default "2Gi" .Values.supervisor.config.kubernetesEphemeralStorageSizeRequest | quote }} + # Pod cleaner configuration + - name: POD_CLEANER_ENABLED + value: {{ .Values.supervisor.config.podCleanerEnabled | quote }} + - name: POD_CLEANER_BATCH_SIZE + value: {{ .Values.supervisor.config.podCleanerBatchSize | quote }} + - name: POD_CLEANER_INTERVAL_MS + value: {{ .Values.supervisor.config.podCleanerIntervalMs | quote }} + # Failed pod handler + - name: FAILED_POD_HANDLER_ENABLED + value: {{ .Values.supervisor.config.failedPodHandlerEnabled | quote }} + - name: FAILED_POD_HANDLER_RECONNECT_INTERVAL_MS + value: {{ .Values.supervisor.config.failedPodHandlerReconnectIntervalMs | quote }} + # Workload API configuration + - name: TRIGGER_WORKLOAD_API_PROTOCOL + value: {{ .Values.supervisor.config.workloadApiProtocol | quote }} + - name: TRIGGER_WORKLOAD_API_DOMAIN + value: "{{ include "trigger-v4.fullname" . 
}}-supervisor.{{ .Release.Namespace }}.svc.cluster.local" + - name: TRIGGER_WORKLOAD_API_PORT_EXTERNAL + value: {{ .Values.supervisor.config.workloadApiPortExternal | quote }} + - name: TRIGGER_WORKLOAD_API_PORT_INTERNAL + value: {{ .Values.supervisor.config.workloadApiPortInternal | quote }} + - name: TRIGGER_WORKLOAD_API_HOST_INTERNAL + value: {{ .Values.supervisor.config.workloadApiHostInternal | quote }} + - name: TRIGGER_WORKLOAD_API_ENABLED + value: {{ .Values.supervisor.config.workloadApiEnabled | quote }} + # Dequeue configuration + - name: TRIGGER_DEQUEUE_ENABLED + value: {{ .Values.supervisor.config.dequeueEnabled | quote }} + - name: TRIGGER_DEQUEUE_INTERVAL_MS + value: {{ .Values.supervisor.config.dequeueIntervalMs | quote }} + - name: TRIGGER_DEQUEUE_MAX_RUN_COUNT + value: {{ .Values.supervisor.config.dequeueMaxRunCount | quote }} + - name: TRIGGER_DEQUEUE_IDLE_INTERVAL_MS + value: {{ .Values.supervisor.config.dequeueIdleIntervalMs | quote }} + # Heartbeat configuration + - name: RUNNER_HEARTBEAT_INTERVAL_SECONDS + value: {{ .Values.supervisor.config.runnerHeartbeatIntervalSeconds | quote }} + - name: RUNNER_SNAPSHOT_POLL_INTERVAL_SECONDS + value: {{ .Values.supervisor.config.runnerSnapshotPollIntervalSeconds | quote }} + # Metrics configuration + - name: METRICS_ENABLED + value: {{ .Values.supervisor.config.metricsEnabled | quote }} + - name: METRICS_COLLECT_DEFAULTS + value: {{ .Values.supervisor.config.metricsCollectDefaults | quote }} + - name: METRICS_HOST + value: {{ .Values.supervisor.config.metricsHost | quote }} + - name: METRICS_PORT + value: {{ .Values.supervisor.config.metricsPort | quote }} + # Debug + - name: DEBUG + value: {{ .Values.supervisor.config.debug | quote }} + # OTEL + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: "http://{{ include "trigger-v4.fullname" . }}-webapp:{{ .Values.webapp.service.port }}/otel" + {{- with .Values.supervisor.extraEnv }} + {{- toYaml . 
| nindent 12 }} + {{- end }} + volumeMounts: + - name: shared + mountPath: /home/node/shared + {{- with .Values.supervisor.securityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + volumes: + - name: shared + {{- if .Values.persistence.shared.enabled }} + persistentVolumeClaim: + claimName: {{ include "trigger-v4.fullname" . }}-shared + {{- else }} + emptyDir: {} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "trigger-v4.fullname" . }}-supervisor + labels: + {{- $component := "supervisor" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +spec: + type: {{ .Values.supervisor.service.type }} + ports: + - port: {{ .Values.supervisor.service.ports.workload }} + targetPort: workload + protocol: TCP + name: workload + - port: {{ .Values.supervisor.service.ports.metrics }} + targetPort: metrics + protocol: TCP + name: metrics + selector: + {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +{{- end }} \ No newline at end of file diff --git a/hosting/k8s/helm/templates/tests/test-clickhouse.yaml b/hosting/k8s/helm/templates/tests/test-clickhouse.yaml new file mode 100644 index 0000000000..8f44f4ff4d --- /dev/null +++ b/hosting/k8s/helm/templates/tests/test-clickhouse.yaml @@ -0,0 +1,21 @@ +{{- if .Values.clickhouse.enabled }} +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "trigger-v4.fullname" . }}-test-clickhouse" + labels: + {{- include "trigger-v4.labels" . 
| nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + restartPolicy: Never + containers: + - name: test-clickhouse + image: curlimages/curl:latest + command: ['sh', '-c'] + args: + - | + echo "Testing ClickHouse HTTP interface..." + curl -f "http://{{ .Values.clickhouse.auth.adminUser }}:{{ .Values.clickhouse.auth.adminPassword }}@{{ include "trigger-v4.fullname" . }}-clickhouse:{{ .Values.clickhouse.service.port }}/ping" + echo "ClickHouse test completed successfully" +{{- end }} \ No newline at end of file diff --git a/hosting/k8s/helm/templates/tests/test-electric.yaml b/hosting/k8s/helm/templates/tests/test-electric.yaml new file mode 100644 index 0000000000..8cdbcdb8d9 --- /dev/null +++ b/hosting/k8s/helm/templates/tests/test-electric.yaml @@ -0,0 +1,21 @@ +{{- if .Values.electric.enabled }} +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "trigger-v4.fullname" . }}-test-electric" + labels: + {{- include "trigger-v4.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + restartPolicy: Never + containers: + - name: test-electric + image: curlimages/curl:latest + command: ['sh', '-c'] + args: + - | + echo "Testing Electric health endpoint..." + curl -f http://{{ include "trigger-v4.fullname" . }}-electric:{{ .Values.electric.service.port }}/api/status + echo "Electric test completed successfully" +{{- end }} \ No newline at end of file diff --git a/hosting/k8s/helm/templates/tests/test-minio.yaml b/hosting/k8s/helm/templates/tests/test-minio.yaml new file mode 100644 index 0000000000..b97d5649b3 --- /dev/null +++ b/hosting/k8s/helm/templates/tests/test-minio.yaml @@ -0,0 +1,21 @@ +{{- if .Values.minio.enabled }} +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "trigger-v4.fullname" . }}-test-minio" + labels: + {{- include "trigger-v4.labels" . 
| nindent 4 }}
+  annotations:
+    "helm.sh/hook": test
+spec:
+  restartPolicy: Never
+  containers:
+    - name: test-minio
+      image: curlimages/curl:latest
+      command: ['sh', '-c']
+      args:
+        - |
+          echo "Testing MinIO health endpoint..."
+          curl -f http://{{ include "trigger-v4.fullname" . }}-minio:{{ .Values.minio.service.ports.api }}/minio/health/live
+          echo "MinIO test completed successfully"
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/tests/test-postgresql.yaml b/hosting/k8s/helm/templates/tests/test-postgresql.yaml
new file mode 100644
index 0000000000..4518ba4122
--- /dev/null
+++ b/hosting/k8s/helm/templates/tests/test-postgresql.yaml
@@ -0,0 +1,21 @@
+{{- if and .Values.postgresql.enabled (not .Values.postgresql.external) }}
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "trigger-v4.fullname" . }}-test-postgresql"
+  labels:
+    {{- include "trigger-v4.labels" . | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test
+spec:
+  restartPolicy: Never
+  containers:
+    - name: test-postgresql
+      # Honor the chart's image value structure (registry/repository/tag) so
+      # mirrored/air-gapped registries work, instead of hard-coding "postgres".
+      image: {{ .Values.postgresql.image.registry }}/{{ .Values.postgresql.image.repository }}:{{ .Values.postgresql.image.tag }}
+      command: ['sh', '-c']
+      args:
+        - |
+          echo "Testing PostgreSQL connection..."
+          pg_isready -h {{ include "trigger-v4.fullname" . }}-postgresql -p {{ .Values.postgresql.primary.service.ports.postgresql }} -U {{ .Values.postgresql.auth.username }}
+          echo "PostgreSQL test completed successfully"
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/tests/test-redis.yaml b/hosting/k8s/helm/templates/tests/test-redis.yaml
new file mode 100644
index 0000000000..4ba2c46c34
--- /dev/null
+++ b/hosting/k8s/helm/templates/tests/test-redis.yaml
@@ -0,0 +1,21 @@
+{{- if and .Values.redis.enabled (not .Values.redis.external) }}
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "trigger-v4.fullname" . }}-test-redis"
+  labels:
+    {{- include "trigger-v4.labels" . 
| nindent 4 }}
+  annotations:
+    "helm.sh/hook": test
+spec:
+  restartPolicy: Never
+  containers:
+    - name: test-redis
+      # Honor the chart's image value structure (registry/repository/tag) so
+      # mirrored/air-gapped registries work, instead of hard-coding "redis".
+      image: {{ .Values.redis.image.registry }}/{{ .Values.redis.image.repository }}:{{ .Values.redis.image.tag }}
+      command: ['sh', '-c']
+      args:
+        - |
+          echo "Testing Redis connection..."
+          redis-cli -h {{ include "trigger-v4.fullname" . }}-redis-master -p {{ .Values.redis.master.service.ports.redis }} ping
+          echo "Redis test completed successfully"
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/tests/test-supervisor.yaml b/hosting/k8s/helm/templates/tests/test-supervisor.yaml
new file mode 100644
index 0000000000..9a51dfaebe
--- /dev/null
+++ b/hosting/k8s/helm/templates/tests/test-supervisor.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.supervisor.enabled }}
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "trigger-v4.fullname" . }}-test-supervisor"
+  labels:
+    {{- include "trigger-v4.labels" . | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test
+spec:
+  restartPolicy: Never
+  containers:
+    - name: test-supervisor
+      image: curlimages/curl:latest
+      command: ['sh', '-c']
+      args:
+        - |
+          echo "Testing Supervisor metrics endpoint..."
+          curl -f http://{{ include "trigger-v4.fullname" . }}-supervisor:{{ .Values.supervisor.service.ports.metrics }}/metrics
+          echo "Supervisor test completed successfully"
+{{- end }}
\ No newline at end of file
diff --git a/hosting/k8s/helm/templates/tests/test-webapp.yaml b/hosting/k8s/helm/templates/tests/test-webapp.yaml
new file mode 100644
index 0000000000..9455479897
--- /dev/null
+++ b/hosting/k8s/helm/templates/tests/test-webapp.yaml
@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "trigger-v4.fullname" . }}-test-webapp"
+  labels:
+    {{- include "trigger-v4.labels" . | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test
+spec:
+  restartPolicy: Never
+  containers:
+    - name: test-webapp
+      image: curlimages/curl:latest
+      command: ['sh', '-c']
+      args:
+        - |
+          echo "Testing webapp health endpoint..." 
+ curl -f http://{{ include "trigger-v4.fullname" . }}-webapp:{{ .Values.webapp.service.port }}/healthcheck + echo "Webapp test completed successfully" \ No newline at end of file diff --git a/hosting/k8s/helm/templates/webapp.yaml b/hosting/k8s/helm/templates/webapp.yaml new file mode 100644 index 0000000000..71520b1977 --- /dev/null +++ b/hosting/k8s/helm/templates/webapp.yaml @@ -0,0 +1,219 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "trigger-v4.fullname" . }}-webapp + labels: + {{- $component := "webapp" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +spec: + replicas: {{ .Values.webapp.replicaCount }} + selector: + matchLabels: + {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 6 }} + template: + metadata: + {{- with .Values.webapp.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 8 }} + spec: + {{- with .Values.global.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + securityContext: + {{- toYaml .Values.webapp.podSecurityContext | nindent 8 }} + initContainers: + - name: init-shared + image: busybox:1.35 + command: ['sh', '-c', 'mkdir -p /home/node/shared && chown 1000:1000 /home/node/shared'] + securityContext: + runAsUser: 0 + volumeMounts: + - name: shared + mountPath: /home/node/shared + containers: + - name: webapp + securityContext: + {{- toYaml .Values.webapp.securityContext | nindent 12 }} + image: {{ include "trigger-v4.image" . 
}} + imagePullPolicy: {{ .Values.webapp.image.pullPolicy }} + command: + - ./scripts/entrypoint.sh + ports: + - name: http + containerPort: {{ .Values.webapp.service.targetPort }} + protocol: TCP + livenessProbe: + httpGet: + path: /healthcheck + port: http + initialDelaySeconds: 10 + periodSeconds: 30 + timeoutSeconds: 10 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /healthcheck + port: http + initialDelaySeconds: 10 + periodSeconds: 30 + timeoutSeconds: 10 + failureThreshold: 5 + resources: + {{- toYaml .Values.webapp.resources | nindent 12 }} + env: + - name: APP_ORIGIN + value: {{ .Values.config.appOrigin | quote }} + - name: LOGIN_ORIGIN + value: {{ .Values.config.loginOrigin | quote }} + - name: API_ORIGIN + value: {{ .Values.config.apiOrigin | quote }} + - name: ELECTRIC_ORIGIN + value: {{ include "trigger-v4.electric.url" . | quote }} + - name: DATABASE_URL + value: {{ include "trigger-v4.postgresql.connectionString" . | quote }} + - name: DIRECT_URL + value: {{ include "trigger-v4.postgresql.connectionString" . | quote }} + - name: REDIS_HOST + value: {{ include "trigger-v4.redis.host" . | quote }} + - name: REDIS_PORT + value: {{ include "trigger-v4.redis.port" . | quote }} + - name: REDIS_TLS_DISABLED + value: "true" + - name: APP_LOG_LEVEL + value: {{ .Values.webapp.logLevel | quote }} + - name: DEV_OTEL_EXPORTER_OTLP_ENDPOINT + value: "{{ .Values.config.appOrigin }}/otel" + - name: DEPLOY_REGISTRY_HOST + value: {{ include "trigger-v4.registry.host" . | quote }} + - name: OBJECT_STORE_BASE_URL + value: {{ include "trigger-v4.minio.url" . 
| quote }} + - name: GRACEFUL_SHUTDOWN_TIMEOUT + value: {{ .Values.webapp.gracefulShutdownTimeout | quote }} + {{- if .Values.webapp.bootstrap.enabled }} + - name: TRIGGER_BOOTSTRAP_ENABLED + value: "1" + - name: TRIGGER_BOOTSTRAP_WORKER_GROUP_NAME + value: {{ .Values.webapp.bootstrap.workerGroupName | quote }} + - name: TRIGGER_BOOTSTRAP_WORKER_TOKEN_PATH + value: {{ .Values.webapp.bootstrap.workerTokenPath | quote }} + {{- end }} + {{- if .Values.webapp.limits.taskPayloadOffloadThreshold }} + - name: TASK_PAYLOAD_OFFLOAD_THRESHOLD + value: {{ .Values.webapp.limits.taskPayloadOffloadThreshold | quote }} + {{- end }} + {{- if .Values.webapp.limits.taskPayloadMaximumSize }} + - name: TASK_PAYLOAD_MAXIMUM_SIZE + value: {{ .Values.webapp.limits.taskPayloadMaximumSize | quote }} + {{- end }} + {{- if .Values.webapp.limits.batchTaskPayloadMaximumSize }} + - name: BATCH_TASK_PAYLOAD_MAXIMUM_SIZE + value: {{ .Values.webapp.limits.batchTaskPayloadMaximumSize | quote }} + {{- end }} + {{- if .Values.webapp.limits.taskRunMetadataMaximumSize }} + - name: TASK_RUN_METADATA_MAXIMUM_SIZE + value: {{ .Values.webapp.limits.taskRunMetadataMaximumSize | quote }} + {{- end }} + {{- if .Values.webapp.limits.defaultEnvExecutionConcurrencyLimit }} + - name: DEFAULT_ENV_EXECUTION_CONCURRENCY_LIMIT + value: {{ .Values.webapp.limits.defaultEnvExecutionConcurrencyLimit | quote }} + {{- end }} + {{- if .Values.webapp.limits.defaultOrgExecutionConcurrencyLimit }} + - name: DEFAULT_ORG_EXECUTION_CONCURRENCY_LIMIT + value: {{ .Values.webapp.limits.defaultOrgExecutionConcurrencyLimit | quote }} + {{- end }} + - name: SESSION_SECRET + valueFrom: + secretKeyRef: + name: {{ include "trigger-v4.fullname" . }}-secrets + key: session-secret + - name: MAGIC_LINK_SECRET + valueFrom: + secretKeyRef: + name: {{ include "trigger-v4.fullname" . }}-secrets + key: magic-link-secret + - name: ENCRYPTION_KEY + valueFrom: + secretKeyRef: + name: {{ include "trigger-v4.fullname" . 
}}-secrets + key: encryption-key + - name: MANAGED_WORKER_SECRET + valueFrom: + secretKeyRef: + name: {{ include "trigger-v4.fullname" . }}-secrets + key: managed-worker-secret + - name: OBJECT_STORE_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: {{ include "trigger-v4.fullname" . }}-secrets + key: object-store-access-key-id + - name: OBJECT_STORE_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ include "trigger-v4.fullname" . }}-secrets + key: object-store-secret-access-key + {{- with .Values.webapp.extraEnv }} + {{- toYaml . | nindent 12 }} + {{- end }} + volumeMounts: + - name: shared + mountPath: /home/node/shared + volumes: + - name: shared + {{- if .Values.persistence.shared.enabled }} + persistentVolumeClaim: + claimName: {{ include "trigger-v4.fullname" . }}-shared + {{- else }} + emptyDir: {} + {{- end }} + {{- with .Values.webapp.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.webapp.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.webapp.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "trigger-v4.fullname" . }}-webapp + labels: + {{- $component := "webapp" }} + {{- include "trigger-v4.componentLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +spec: + type: {{ .Values.webapp.service.type }} + ports: + - port: {{ .Values.webapp.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "trigger-v4.componentSelectorLabels" (dict "Chart" .Chart "Release" .Release "Values" .Values "component" $component) | nindent 4 }} +--- +{{- if .Values.persistence.shared.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "trigger-v4.fullname" . }}-shared + labels: + {{- include "trigger-v4.labels" . 
| nindent 4 }} +spec: + accessModes: + - {{ .Values.persistence.shared.accessMode }} + resources: + requests: + storage: {{ .Values.persistence.shared.size }} + {{- $storageClass := .Values.persistence.shared.storageClass | default .Values.global.storageClass }} + {{- if $storageClass }} + storageClassName: {{ $storageClass | quote }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/hosting/k8s/helm/values-production-example.yaml b/hosting/k8s/helm/values-production-example.yaml new file mode 100644 index 0000000000..5b652537c5 --- /dev/null +++ b/hosting/k8s/helm/values-production-example.yaml @@ -0,0 +1,141 @@ +# Production values example for Trigger.dev v4 Helm chart +# Copy this file and customize for your production deployment + +# REQUIRED: Generate your own secrets using: openssl rand -hex 16 +secrets: + sessionSecret: "YOUR_32_CHAR_HEX_SECRET_HERE_001" + magicLinkSecret: "YOUR_32_CHAR_HEX_SECRET_HERE_002" + encryptionKey: "YOUR_32_CHAR_HEX_SECRET_HERE_003" + managedWorkerSecret: "YOUR_32_CHAR_HEX_SECRET_HERE_004" + # Object store credentials (customize for your setup) + objectStore: + accessKeyId: "your-access-key" + secretAccessKey: "your-secret-key" + +# Production configuration +config: + appOrigin: "https://trigger.example.com" + loginOrigin: "https://trigger.example.com" + apiOrigin: "https://trigger.example.com" + +# Production ingress +ingress: + enabled: true + className: "nginx" + annotations: + cert-manager.io/cluster-issuer: "letsencrypt-prod" + nginx.ingress.kubernetes.io/ssl-redirect: "true" + hosts: + - host: trigger.example.com + paths: + - path: / + pathType: Prefix + tls: + - secretName: trigger-tls + hosts: + - trigger.example.com + +# Production webapp configuration +webapp: + bootstrap: + enabled: false # Usually disabled in production + resources: + limits: + cpu: 2000m + memory: 4Gi + requests: + cpu: 1000m + memory: 2Gi + +# Production PostgreSQL (or use external) +postgresql: + primary: + persistence: + enabled: true 
+ size: 100Gi + storageClass: "fast-ssd" + resources: + limits: + cpu: 1000m + memory: 2Gi + requests: + cpu: 500m + memory: 1Gi + +# Production Redis (or use external) +redis: + master: + persistence: + enabled: true + size: 20Gi + storageClass: "fast-ssd" + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 250m + memory: 512Mi + +# Production ClickHouse +clickhouse: + persistence: + enabled: true + size: 100Gi + storageClass: "fast-ssd" + resources: + limits: + cpu: 1000m + memory: 2Gi + requests: + cpu: 500m + memory: 1Gi + +# Production MinIO (or use external S3) +minio: + auth: + rootUser: "admin" + rootPassword: "your-strong-minio-password" + persistence: + enabled: true + size: 500Gi + storageClass: "standard" + +# Production Registry +registry: + auth: + username: "registry-user" + password: "your-strong-registry-password" + persistence: + enabled: true + size: 100Gi + storageClass: "standard" + +# Production Supervisor (Kubernetes worker orchestrator) +supervisor: + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 250m + memory: 512Mi +# Example: Use external PostgreSQL instead +# postgresql: +# enabled: false +# external: true +# externalConnection: +# host: "your-postgres-host.rds.amazonaws.com" +# port: 5432 +# database: "trigger" +# username: "trigger_user" +# password: "your-db-password" + +# Example: Use external Redis instead +# redis: +# enabled: false +# external: true +# externalConnection: +# host: "your-redis-cluster.cache.amazonaws.com" +# port: 6379 +# password: "your-redis-password" diff --git a/hosting/k8s/helm/values.yaml b/hosting/k8s/helm/values.yaml new file mode 100644 index 0000000000..7ad14bc654 --- /dev/null +++ b/hosting/k8s/helm/values.yaml @@ -0,0 +1,451 @@ +global: + imageRegistry: "" + imagePullSecrets: [] + storageClass: "" + +nameOverride: "" +fullnameOverride: "" + +ingress: + enabled: false + className: "traefik" + annotations: {} + certManager: + enabled: false + clusterIssuer: 
"letsencrypt-prod" + externalDns: + enabled: false + hostname: "" + ttl: "300" + hosts: + - host: trigger.local + paths: + - path: / + pathType: Prefix + tls: + [] + # - secretName: trigger-tls + # hosts: + # - trigger.local + +# Webapp configuration +webapp: + image: + registry: ghcr.io + repository: triggerdotdev/trigger.dev + tag: "" # Defaults to Chart.appVersion when empty + pullPolicy: IfNotPresent + + replicaCount: 1 + + service: + type: ClusterIP + port: 3030 + targetPort: 3000 + + podAnnotations: {} + + # podSecurityContext: + # fsGroup: 1000 + + # securityContext: + # runAsNonRoot: true + # runAsUser: 1000 + + nodeSelector: {} + tolerations: [] + affinity: {} + + logLevel: "info" + gracefulShutdownTimeout: 1000 + + # Bootstrap configuration + bootstrap: + enabled: true + workerGroupName: "bootstrap" + workerTokenPath: "/home/node/shared/worker_token" + + # Limits + limits: + taskPayloadOffloadThreshold: 524288 # 512KB + taskPayloadMaximumSize: 3145728 # 3MB + batchTaskPayloadMaximumSize: 1000000 # 1MB + taskRunMetadataMaximumSize: 262144 # 256KB + defaultEnvExecutionConcurrencyLimit: 100 + defaultOrgExecutionConcurrencyLimit: 300 + + # Resources + resources: + {} + # Example resource configuration: + # limits: + # cpu: 1000m + # memory: 2Gi + # requests: + # cpu: 500m + # memory: 1Gi + + # Extra environment variables for webapp + extraEnv: + [] + # - name: CUSTOM_VAR + # value: "custom-value" + # - name: SECRET_VAR + # valueFrom: + # secretKeyRef: + # name: my-secret + # key: secret-key + + # ServiceMonitor for Prometheus monitoring + serviceMonitor: + enabled: false + interval: "30s" + path: "/metrics" + labels: {} + basicAuth: {} + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +# Shared application configuration (used by multiple services) +config: + appOrigin: "http://localhost:3040" + loginOrigin: "http://localhost:3040" + apiOrigin: "http://localhost:3040" + electricOrigin: "http://electric:3000" + +# Secrets configuration +# IMPORTANT: The 
default values below are for TESTING ONLY and should NOT be used in production +# For production deployments: +# 1. Generate new secrets using: openssl rand -hex 16 +# 2. Override these values in your values.yaml or use external secret management +# 3. Each secret must be exactly 32 hex characters (16 bytes) +secrets: + # Session secret for user authentication (32 hex chars) + sessionSecret: "2818143646516f6fffd707b36f334bbb" + # Magic link secret for passwordless login (32 hex chars) + magicLinkSecret: "44da78b7bbb0dfe709cf38931d25dcdd" + # Encryption key for sensitive data (32 hex chars) + encryptionKey: "f686147ab967943ebbe9ed3b496e465a" + # Worker secret for managed worker authentication (32 hex chars) + managedWorkerSecret: "447c29678f9eaf289e9c4b70d3dd8a7f" + # Object store credentials (change for production) + objectStore: + accessKeyId: "admin" + secretAccessKey: "very-safe-password" + +# PostgreSQL configuration +postgresql: + enabled: true + external: false + image: + registry: docker.io + repository: postgres + tag: "14" + pullPolicy: IfNotPresent + auth: + postgresPassword: "postgres" + username: "postgres" + password: "postgres" + database: "main" + + # podSecurityContext: + # fsGroup: 1000 + + # securityContext: + # runAsNonRoot: true + # runAsUser: 1000 + + primary: + persistence: + enabled: true + size: 10Gi + service: + ports: + postgresql: 5432 + resources: {} + configuration: | + wal_level = logical + # External PostgreSQL connection (when external: true) + externalConnection: + host: "" + port: 5432 + database: "" + username: "" + password: "" + # Extra environment variables for PostgreSQL + extraEnv: + [] + # - name: CUSTOM_VAR + # value: "custom-value" + +# Redis configuration +redis: + enabled: true + external: false + image: + registry: docker.io + repository: redis + tag: "7" + pullPolicy: IfNotPresent + auth: + enabled: false + + # podSecurityContext: + # fsGroup: 1000 + + # securityContext: + # runAsNonRoot: true + # runAsUser: 1000 + + 
master: + persistence: + enabled: true + size: 5Gi + service: + ports: + redis: 6379 + resources: {} + # External Redis connection (when external: true) + externalConnection: + host: "" + port: 6379 + password: "" + # Extra environment variables for Redis + extraEnv: + [] + # - name: CUSTOM_VAR + # value: "custom-value" + +# Electric configuration +electric: + enabled: true + image: + registry: docker.io + repository: electricsql/electric + tag: "1.0.13" + pullPolicy: IfNotPresent + config: + insecure: true + usageReporting: false + + # podSecurityContext: + # fsGroup: 1000 + + # securityContext: + # runAsNonRoot: true + # runAsUser: 1000 + + service: + type: ClusterIP + port: 3000 + targetPort: 3000 + resources: {} + # Extra environment variables for Electric + extraEnv: + [] + # - name: CUSTOM_VAR + # value: "custom-value" + +# ClickHouse configuration +clickhouse: + enabled: true + image: + registry: docker.io + repository: bitnami/clickhouse + tag: "latest" + pullPolicy: IfNotPresent + auth: + adminUser: "default" + adminPassword: "password" + + # podSecurityContext: + # fsGroup: 1000 + + # securityContext: + # runAsNonRoot: true + # runAsUser: 1000 + + persistence: + enabled: true + size: 10Gi + service: + type: ClusterIP + port: 9000 + targetPort: 9000 + resources: {} + # Extra environment variables for ClickHouse + extraEnv: + [] + # - name: CUSTOM_VAR + # value: "custom-value" + +# MinIO configuration +minio: + enabled: true + image: + registry: docker.io + repository: minio/minio + tag: "latest" + pullPolicy: IfNotPresent + auth: + rootUser: "admin" + rootPassword: "very-safe-password" + + # podSecurityContext: + # fsGroup: 1000 + + # securityContext: + # runAsNonRoot: true + # runAsUser: 1000 + + persistence: + enabled: true + size: 10Gi + service: + type: ClusterIP + ports: + api: 9000 + console: 9001 + resources: {} + # Extra environment variables for MinIO + extraEnv: + [] + # - name: CUSTOM_VAR + # value: "custom-value" + +# Docker Registry 
configuration +registry: + enabled: false # EXPERIMENTAL - requires proper TLS setup. Use external: true instead. + external: true + image: + registry: docker.io + repository: registry + tag: "2" + pullPolicy: IfNotPresent + auth: + enabled: true + username: "registry-user" + password: "very-secure-indeed" + + # podSecurityContext: + # fsGroup: 1000 + + # securityContext: + # runAsNonRoot: true + # runAsUser: 1000 + + persistence: + enabled: true + size: 10Gi + service: + type: ClusterIP + port: 5000 + targetPort: 5000 + resources: {} + # External Registry connection (when external: true) + externalConnection: + host: "localhost" + port: 5001 + auth: + enabled: false + username: "" + password: "" + # Extra environment variables for Registry + extraEnv: + [] + # - name: CUSTOM_VAR + # value: "custom-value" + +# Supervisor configuration (Kubernetes worker orchestrator) +supervisor: + enabled: true + image: + registry: ghcr.io + repository: triggerdotdev/supervisor + tag: "" # Defaults to Chart.appVersion when empty + pullPolicy: IfNotPresent + + # podSecurityContext: + # fsGroup: 1000 + + # securityContext: + # runAsNonRoot: true + # runAsUser: 1000 + + service: + type: ClusterIP + ports: + workload: 3000 + metrics: 9088 + resources: {} + config: + # Kubernetes mode configuration + kubernetesForceEnabled: true + kubernetesNamespace: "" # Default: uses release namespace + kubernetesWorkerNodetypeLabel: "" # When set, runs will only be scheduled on nodes with "nodetype=