From 7577dac411f3de00cf04e5414247306ed0b86b0e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Wed, 16 Jul 2025 04:30:41 +0000 Subject: [PATCH 1/3] run CI in local container --- .github/workflows/flow.yml | 1 + docker-compose-e2e.yml | 278 +++++++++++++++++++++++ flow/e2e/clickhouse/clickhouse.go | 17 +- flow/e2e/clickhouse/peer_flow_ch_test.go | 13 ++ flow/e2e/test_utils.go | 8 +- flow/workflows/cdc_flow.go | 2 +- local-ci.sh | 29 +++ 7 files changed, 343 insertions(+), 5 deletions(-) create mode 100644 docker-compose-e2e.yml create mode 100755 local-ci.sh diff --git a/.github/workflows/flow.yml b/.github/workflows/flow.yml index e1d9918ea8..896e69685a 100644 --- a/.github/workflows/flow.yml +++ b/.github/workflows/flow.yml @@ -408,6 +408,7 @@ jobs: CI_MONGO_URI: mongodb://localhost:27017/?replicaSet=rs0&authSource=admin CI_MONGO_USERNAME: "csuser" CI_MONGO_PASSWORD: "cspass" + CI_CLICKHOUSE_CLUSTER: "true" ENABLE_OTEL_METRICS: ${{ (matrix.db-version.pg == '16' || matrix.db-version.mysql == 'mysql-pos') && 'true' || 'false' }} OTEL_EXPORTER_OTLP_METRICS_ENDPOINT: http://localhost:4317 OTEL_EXPORTER_OTLP_METRICS_PROTOCOL: grpc diff --git a/docker-compose-e2e.yml b/docker-compose-e2e.yml new file mode 100644 index 0000000000..a0fe19b74f --- /dev/null +++ b/docker-compose-e2e.yml @@ -0,0 +1,278 @@ +name: peerdb-quickstart-dev + +x-minio-config: &minio-config + PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_ACCESS_KEY_ID: _peerdb_minioadmin + PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_SECRET_ACCESS_KEY: _peerdb_minioadmin + PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_REGION: us-east-1 + PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_ENDPOINT_URL_S3: http://host.docker.internal:9001 + PEERDB_CLICKHOUSE_AWS_S3_BUCKET_NAME: peerdbbucket + +x-catalog-config: &catalog-config + PEERDB_CATALOG_HOST: catalog + PEERDB_CATALOG_PORT: 5432 + PEERDB_CATALOG_USER: postgres + PEERDB_CATALOG_PASSWORD: postgres + PEERDB_CATALOG_DATABASE: postgres + +x-flow-worker-env: &flow-worker-env + # For 
Temporal Cloud, this will look like: + # ..tmprl.cloud:7233 + TEMPORAL_HOST_PORT: temporal:7233 + PEERDB_TEMPORAL_NAMESPACE: default + # For the below 2 cert and key variables, + # paste as base64 encoded strings. + TEMPORAL_CLIENT_CERT: + TEMPORAL_CLIENT_KEY: + # For GCS, these will be your HMAC keys instead + # For more information: + # https://cloud.google.com/storage/docs/authentication/managing-hmackeys + AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID:-} + AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY:-} + # For GCS, set this to "auto" without the quotes + AWS_REGION: ${AWS_REGION:-} + # For GCS, set this as: https://storage.googleapis.com + AWS_ENDPOINT: ${AWS_ENDPOINT:-} + # enables worker profiling using Go's pprof + ENABLE_PROFILING: "true" + PPROF_PORT: "6060" + +services: + catalog: + container_name: catalog + image: postgres:17-alpine@sha256:fbe21607052bb5c298674f2fd8cf044a63aa3ddf50b81627f894f91f40f50bcb + command: -c config_file=/etc/postgresql.conf + ports: + - 9901:5432 + environment: + PGUSER: postgres + POSTGRES_PASSWORD: postgres + POSTGRES_DB: postgres + POSTGRES_INITDB_ARGS: --locale=C.UTF-8 + extra_hosts: + - "host.docker.internal:host-gateway" + volumes: + - pgdata:/var/lib/postgresql/data + - ./volumes/postgresql.conf:/etc/postgresql.conf + - ./volumes/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d + healthcheck: + test: ["CMD", "pg_isready", "-d", "postgres", "-U", "postgres"] + interval: 10s + timeout: 30s + retries: 5 + start_period: 60s + + temporal: + container_name: temporal + depends_on: + catalog: + condition: service_healthy + environment: + - DB=postgres12 + - DB_PORT=5432 + - POSTGRES_USER=postgres + - POSTGRES_PWD=postgres + - POSTGRES_SEEDS=catalog + - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development-sql.yaml + image: temporalio/auto-setup:1.28@sha256:3ee84bf3ec5494f2be6ee0c1fea3b52684e50da3ddc5f997e6fa56ad340da9a8 + ports: + - 7233:7233 + volumes: + - ./volumes/temporal-dynamicconfig:/etc/temporal/config/dynamicconfig 
+ labels: + kompose.volume.type: configMap + + temporal-admin-tools: + container_name: temporal-admin-tools + depends_on: + - temporal + environment: + - TEMPORAL_ADDRESS=temporal:7233 + - TEMPORAL_CLI_ADDRESS=temporal:7233 + - TEMPORAL_CLI_SHOW_STACKS=1 + image: temporalio/admin-tools:1.25.2-tctl-1.18.1-cli-1.1.1@sha256:da0c7a7982b571857173ab8f058e7f139b3054800abb4dcb100445d29a563ee8 + stdin_open: true + tty: true + entrypoint: /etc/temporal/entrypoint.sh + restart: on-failure + healthcheck: + test: ["CMD", "tctl", "workflow", "list"] + interval: 1s + timeout: 5s + retries: 30 + volumes: + - ./scripts/mirror-name-search.sh:/etc/temporal/entrypoint.sh + + temporal-ui: + container_name: temporal-ui + depends_on: + - temporal + environment: + - TEMPORAL_ADDRESS=temporal:7233 + - TEMPORAL_CORS_ORIGINS=http://localhost:3000 + - TEMPORAL_CSRF_COOKIE_INSECURE=true + image: temporalio/ui:2.39.0@sha256:b768f87f18b59663a6749e98a2f7782c266e8e4e4749f92248e2ba41d6330d3f + ports: + - 8085:8080 + + flow-api: + container_name: flow_api + build: + context: . + dockerfile: stacks/flow.Dockerfile + target: flow-api + args: + PEERDB_VERSION_SHA_SHORT: ${PEERDB_VERSION_SHA_SHORT:-} + ports: + - 8112:8112 + - 8113:8113 + environment: + <<: [*catalog-config, *flow-worker-env, *minio-config] + PEERDB_ALLOWED_TARGETS: + PEERDB_CLICKHOUSE_ALLOWED_DOMAINS: + extra_hosts: + - "host.docker.internal:host-gateway" + depends_on: + temporal-admin-tools: + condition: service_healthy + + flow-snapshot-worker: + container_name: flow-snapshot-worker + build: + context: . + dockerfile: stacks/flow.Dockerfile + target: flow-snapshot-worker + environment: + <<: [*catalog-config, *flow-worker-env, *minio-config] + depends_on: + temporal-admin-tools: + condition: service_healthy + + flow-worker: + container_name: flow-worker + build: + context: . 
+ dockerfile: stacks/flow.Dockerfile + target: flow-worker + environment: + <<: [*catalog-config, *flow-worker-env, *minio-config] + extra_hosts: + - "host.docker.internal:host-gateway" + depends_on: + temporal-admin-tools: + condition: service_healthy + + peerdb: + container_name: peerdb-server + stop_signal: SIGINT + build: + context: . + dockerfile: stacks/peerdb-server.Dockerfile + environment: + <<: *catalog-config + PEERDB_PASSWORD: peerdb + PEERDB_FLOW_SERVER_ADDRESS: grpc://flow_api:8112 + RUST_LOG: info + RUST_BACKTRACE: 1 + ports: + - 9900:9900 + depends_on: + catalog: + condition: service_healthy + + peerdb-ui: + container_name: peerdb-ui + build: + context: . + dockerfile: stacks/peerdb-ui.Dockerfile + ports: + - 3000:3000 + env_file: + - path: ./.env + required: false + environment: + <<: *catalog-config + DATABASE_URL: postgres://postgres:postgres@catalog:5432/postgres + PEERDB_FLOW_SERVER_HTTP: http://flow_api:8113 + PEERDB_PASSWORD: + NEXTAUTH_SECRET: __changeme__ + NEXTAUTH_URL: http://localhost:3000 + PEERDB_ALLOWED_TARGETS: + PEERDB_CLICKHOUSE_ALLOWED_DOMAINS: + PEERDB_EXPERIMENTAL_ENABLE_SCRIPTING: true + depends_on: + - flow-api + + minio: + image: minio/minio:RELEASE.2024-11-07T00-52-20Z@sha256:ac591851803a79aee64bc37f66d77c56b0a4b6e12d9e5356380f4105510f2332 + volumes: + - minio-data:/data + ports: + - "9001:9000" + - "9002:36987" + environment: + <<: *minio-config + entrypoint: > + /bin/sh -c " + export MINIO_ROOT_USER=$$PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_ACCESS_KEY_ID; + export MINIO_ROOT_PASSWORD=$$PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_SECRET_ACCESS_KEY; + minio server /data --console-address=":36987" & + sleep 2; + /usr/bin/mc alias set myminiopeerdb http://minio:9000 $$MINIO_ROOT_USER $$MINIO_ROOT_PASSWORD; + /usr/bin/mc mb myminiopeerdb/$$PEERDB_CLICKHOUSE_AWS_S3_BUCKET_NAME; + wait + " + + mysql: + container_name: mysql + build: + context: . 
+ dockerfile: stacks/mysql.Dockerfile + ports: + - 3306:3306 + environment: + MYSQL_ROOT_PASSWORD: example + volumes: + - mydata:/var/lib/mysql + extra_hosts: + - "host.docker.internal:host-gateway" + + clickhouse: + image: clickhouse/clickhouse-server + ports: + - 9000:9000 + environment: + CLICKHOUSE_PASSWORD: example + volumes: + - chdata:/var/lib/clickhouse + extra_hosts: + - "host.docker.internal:host-gateway" + + e2e: + image: golang:1.24-alpine@sha256:ddf52008bce1be455fe2b22d780b6693259aaf97b16383b6372f4b22dd33ad66 + environment: + <<: [*catalog-config, *minio-config] + TEMPORAL_HOST_PORT: temporal:7233 + ENABLE_OTEL_METRICS: 'false' + AWS_ACCESS_KEY_ID: _peerdb_minioadmin + AWS_SECRET_ACCESS_KEY: _peerdb_minioadmin + AWS_REGION: us-east-1 + AWS_ENDPOINT_URL_S3: http://host.docker.internal:9001 + CI_CLICKHOUSE_HOST: clickhouse + CI_CLICKHOUSE_PASSWORD: example + volumes: + - ./flow:/flow + entrypoint: > + /bin/sh -c " + apk add --no-cache gcc geos-dev musl-dev && + cd /flow && + go test -p 16 /flow/e2e/clickhouse/... 
-timeout 900s" + +volumes: + pgdata: + mydata: + chdata: + minio-data: + +networks: + default: + name: peerdb_network diff --git a/flow/e2e/clickhouse/clickhouse.go b/flow/e2e/clickhouse/clickhouse.go index 833d510cc2..2b1d443638 100644 --- a/flow/e2e/clickhouse/clickhouse.go +++ b/flow/e2e/clickhouse/clickhouse.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "math/big" + "os" "reflect" "strings" "testing" @@ -65,6 +66,10 @@ func (s ClickHouseSuite) Suffix() string { } func (s ClickHouseSuite) Peer() *protos.Peer { + host := os.Getenv("CI_CLICKHOUSE_HOST") + if host == "" { + host = "localhost" + } dbname := "e2e_test_" + s.suffix if s.cluster { ret := &protos.Peer{ @@ -72,9 +77,10 @@ func (s ClickHouseSuite) Peer() *protos.Peer { Type: protos.DBType_CLICKHOUSE, Config: &protos.Peer_ClickhouseConfig{ ClickhouseConfig: &protos.ClickhouseConfig{ - Host: "localhost", + Host: host, Port: 9001, Database: dbname, + Password: os.Getenv("CI_CLICKHOUSE_PASSWORD"), DisableTls: true, S3: s.s3Helper.S3Config, Cluster: "cicluster", @@ -90,14 +96,19 @@ func (s ClickHouseSuite) Peer() *protos.Peer { } func (s ClickHouseSuite) PeerForDatabase(dbname string) *protos.Peer { + host := os.Getenv("CI_CLICKHOUSE_HOST") + if host == "" { + host = "localhost" + } ret := &protos.Peer{ Name: e2e.AddSuffix(s, dbname), Type: protos.DBType_CLICKHOUSE, Config: &protos.Peer_ClickhouseConfig{ ClickhouseConfig: &protos.ClickhouseConfig{ - Host: "localhost", + Host: host, Port: 9000, Database: dbname, + Password: os.Getenv("CI_CLICKHOUSE_PASSWORD"), DisableTls: true, S3: s.s3Helper.S3Config, }, @@ -375,7 +386,7 @@ func SetupSuite[TSource e2e.SuiteSource]( t.Helper() source, suffix, err := setupSource(t) - require.NoError(t, err, "failed to setup postgres") + require.NoError(t, err, "failed to setup source") s3Helper, err := e2e_s3.NewS3TestHelper(t.Context(), e2e_s3.Minio) require.NoError(t, err, "failed to setup S3") diff --git a/flow/e2e/clickhouse/peer_flow_ch_test.go 
b/flow/e2e/clickhouse/peer_flow_ch_test.go index d8ff52d8a2..17a3fb531e 100644 --- a/flow/e2e/clickhouse/peer_flow_ch_test.go +++ b/flow/e2e/clickhouse/peer_flow_ch_test.go @@ -4,6 +4,7 @@ import ( "embed" "fmt" "math/big" + "os" "reflect" "regexp" "strconv" @@ -41,6 +42,10 @@ func TestPeerFlowE2ETestSuitePG_CH(t *testing.T) { } func TestPeerFlowE2ETestSuiteMySQL_CH(t *testing.T) { + if os.Getenv("CI_MYSQL_VERSION") == "" { + t.Skip() + } + e2eshared.RunSuite(t, SetupSuite(t, false, func(t *testing.T) (*e2e.MySqlSource, string, error) { t.Helper() suffix := "mych_" + strings.ToLower(shared.RandomString(8)) @@ -50,6 +55,10 @@ func TestPeerFlowE2ETestSuiteMySQL_CH(t *testing.T) { } func TestPeerFlowE2ETestSuitePG_CH_Cluster(t *testing.T) { + if os.Getenv("CI_CLICKHOUSE_CLUSTER") == "" { + t.Skip() + } + e2eshared.RunSuite(t, SetupSuite(t, true, func(t *testing.T) (*e2e.PostgresSource, string, error) { t.Helper() suffix := "pgchcl_" + strings.ToLower(shared.RandomString(8)) @@ -59,6 +68,10 @@ func TestPeerFlowE2ETestSuitePG_CH_Cluster(t *testing.T) { } func TestPeerFlowE2ETestSuiteMySQL_CH_Cluster(t *testing.T) { + if os.Getenv("CI_CLICKHOUSE_CLUSTER") == "" || os.Getenv("CI_MYSQL_VERSION") == "" { + t.Skip() + } + e2eshared.RunSuite(t, SetupSuite(t, true, func(t *testing.T) (*e2e.MySqlSource, string, error) { t.Helper() suffix := "mychcl_" + strings.ToLower(shared.RandomString(8)) diff --git a/flow/e2e/test_utils.go b/flow/e2e/test_utils.go index ad6b721c52..6902f9c690 100644 --- a/flow/e2e/test_utils.go +++ b/flow/e2e/test_utils.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "log/slog" + "os" "slices" "strings" "testing" @@ -600,8 +601,13 @@ func NewTemporalClient(t *testing.T) client.Client { ), )) + hostport := os.Getenv("TEMPORAL_HOST_PORT") + if hostport == "" { + hostport = "localhost:7233" + } + tc, err := client.Dial(client.Options{ - HostPort: "localhost:7233", + HostPort: hostport, Logger: logger, }) if err != nil { diff --git a/flow/workflows/cdc_flow.go 
b/flow/workflows/cdc_flow.go index 7cd3b417d7..9d907034cc 100644 --- a/flow/workflows/cdc_flow.go +++ b/flow/workflows/cdc_flow.go @@ -780,7 +780,7 @@ func CDCFlowWorkflow( "panic in sync flow", slog.Any("error", panicErr.Error()), slog.String("stack", panicErr.StackTrace()), - slog.Any("sleepFor", sleepFor), + slog.Duration("sleepFor", sleepFor), ) } else { // cannot use shared.IsSQLStateError because temporal serialize/deserialize diff --git a/local-ci.sh b/local-ci.sh new file mode 100755 index 0000000000..c766005f78 --- /dev/null +++ b/local-ci.sh @@ -0,0 +1,29 @@ +#!/bin/sh +set -Eeu + +DOCKER="docker" +EXTRA_ARGS="--no-attach temporal --no-attach temporal-ui" +PODMAN_ARGS="" + +if test -n "${USE_PODMAN:=}" +then + # 0 is found, checking for not found so we check for podman then + if $(docker compose &>/dev/null) && [ $? -ne 0 ]; then + if $(podman compose &>/dev/null) && [ $? -eq 0 ]; then + echo "docker could not be found on PATH, using podman compose" + USE_PODMAN=1 + else + echo "docker compose could not be found on PATH" + exit 1 + fi + fi +fi + +if test -n "$USE_PODMAN"; then + DOCKER="podman" + EXTRA_ARGS="" + PODMAN_ARGS="--podman-run-args=--replace" +fi + +export PEERDB_VERSION_SHA_SHORT=local-$(git rev-parse --short HEAD) +exec $DOCKER compose $PODMAN_ARGS -f docker-compose-e2e.yml up --build $EXTRA_ARGS From c7458ea586176cc38b315ddb13907434cddca252 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Wed, 16 Jul 2025 23:18:56 +0000 Subject: [PATCH 2/3] try to pivot --- docker-compose-e2e.yml | 159 ++++++---------------------------- local-ci.sh => local-ci/db.sh | 2 + local-ci/env | 17 ++++ local-ci/flow-worker.sh | 5 ++ local-ci/snapshot-worker.sh | 5 ++ local-ci/test.sh | 5 ++ volumes/postgresql.conf | 1 + 7 files changed, 61 insertions(+), 133 deletions(-) rename local-ci.sh => local-ci/db.sh (97%) create mode 100644 local-ci/env create mode 100755 local-ci/flow-worker.sh create mode 100755 local-ci/snapshot-worker.sh create mode 
100755 local-ci/test.sh diff --git a/docker-compose-e2e.yml b/docker-compose-e2e.yml index a0fe19b74f..91a3f5f332 100644 --- a/docker-compose-e2e.yml +++ b/docker-compose-e2e.yml @@ -1,4 +1,4 @@ -name: peerdb-quickstart-dev +name: peerdb-quickstart-e2e x-minio-config: &minio-config PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_ACCESS_KEY_ID: _peerdb_minioadmin @@ -14,28 +14,6 @@ x-catalog-config: &catalog-config PEERDB_CATALOG_PASSWORD: postgres PEERDB_CATALOG_DATABASE: postgres -x-flow-worker-env: &flow-worker-env - # For Temporal Cloud, this will look like: - # ..tmprl.cloud:7233 - TEMPORAL_HOST_PORT: temporal:7233 - PEERDB_TEMPORAL_NAMESPACE: default - # For the below 2 cert and key variables, - # paste as base64 encoded strings. - TEMPORAL_CLIENT_CERT: - TEMPORAL_CLIENT_KEY: - # For GCS, these will be your HMAC keys instead - # For more information: - # https://cloud.google.com/storage/docs/authentication/managing-hmackeys - AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID:-} - AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY:-} - # For GCS, set this to "auto" without the quotes - AWS_REGION: ${AWS_REGION:-} - # For GCS, set this as: https://storage.googleapis.com - AWS_ENDPOINT: ${AWS_ENDPOINT:-} - # enables worker profiling using Go's pprof - ENABLE_PROFILING: "true" - PPROF_PORT: "6060" - services: catalog: container_name: catalog @@ -51,7 +29,7 @@ services: extra_hosts: - "host.docker.internal:host-gateway" volumes: - - pgdata:/var/lib/postgresql/data + - pgdata-e2e:/var/lib/postgresql/data - ./volumes/postgresql.conf:/etc/postgresql.conf - ./volumes/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d healthcheck: @@ -114,53 +92,6 @@ services: ports: - 8085:8080 - flow-api: - container_name: flow_api - build: - context: . 
- dockerfile: stacks/flow.Dockerfile - target: flow-api - args: - PEERDB_VERSION_SHA_SHORT: ${PEERDB_VERSION_SHA_SHORT:-} - ports: - - 8112:8112 - - 8113:8113 - environment: - <<: [*catalog-config, *flow-worker-env, *minio-config] - PEERDB_ALLOWED_TARGETS: - PEERDB_CLICKHOUSE_ALLOWED_DOMAINS: - extra_hosts: - - "host.docker.internal:host-gateway" - depends_on: - temporal-admin-tools: - condition: service_healthy - - flow-snapshot-worker: - container_name: flow-snapshot-worker - build: - context: . - dockerfile: stacks/flow.Dockerfile - target: flow-snapshot-worker - environment: - <<: [*catalog-config, *flow-worker-env, *minio-config] - depends_on: - temporal-admin-tools: - condition: service_healthy - - flow-worker: - container_name: flow-worker - build: - context: . - dockerfile: stacks/flow.Dockerfile - target: flow-worker - environment: - <<: [*catalog-config, *flow-worker-env, *minio-config] - extra_hosts: - - "host.docker.internal:host-gateway" - depends_on: - temporal-admin-tools: - condition: service_healthy - peerdb: container_name: peerdb-server stop_signal: SIGINT @@ -179,48 +110,18 @@ services: catalog: condition: service_healthy - peerdb-ui: - container_name: peerdb-ui - build: - context: . 
- dockerfile: stacks/peerdb-ui.Dockerfile - ports: - - 3000:3000 - env_file: - - path: ./.env - required: false - environment: - <<: *catalog-config - DATABASE_URL: postgres://postgres:postgres@catalog:5432/postgres - PEERDB_FLOW_SERVER_HTTP: http://flow_api:8113 - PEERDB_PASSWORD: - NEXTAUTH_SECRET: __changeme__ - NEXTAUTH_URL: http://localhost:3000 - PEERDB_ALLOWED_TARGETS: - PEERDB_CLICKHOUSE_ALLOWED_DOMAINS: - PEERDB_EXPERIMENTAL_ENABLE_SCRIPTING: true - depends_on: - - flow-api - minio: - image: minio/minio:RELEASE.2024-11-07T00-52-20Z@sha256:ac591851803a79aee64bc37f66d77c56b0a4b6e12d9e5356380f4105510f2332 + image: bitnami/minio:2025.6.13 volumes: - - minio-data:/data + - minio-data-e2e:/data ports: - "9001:9000" - "9002:36987" environment: - <<: *minio-config - entrypoint: > - /bin/sh -c " - export MINIO_ROOT_USER=$$PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_ACCESS_KEY_ID; - export MINIO_ROOT_PASSWORD=$$PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_SECRET_ACCESS_KEY; - minio server /data --console-address=":36987" & - sleep 2; - /usr/bin/mc alias set myminiopeerdb http://minio:9000 $$MINIO_ROOT_USER $$MINIO_ROOT_PASSWORD; - /usr/bin/mc mb myminiopeerdb/$$PEERDB_CLICKHOUSE_AWS_S3_BUCKET_NAME; - wait - " + MINIO_ROOT_USER: _peerdb_minioadmin + MINIO_ROOT_PASSWORD: _peerdb_minioadmin + MINIO_CONSOLE_PORT_NUMBER: 36987 + MINIO_DEFAULT_BUCKETS: peerdbbucket mysql: container_name: mysql @@ -232,7 +133,7 @@ services: environment: MYSQL_ROOT_PASSWORD: example volumes: - - mydata:/var/lib/mysql + - mydata-e2e:/var/lib/mysql extra_hosts: - "host.docker.internal:host-gateway" @@ -243,35 +144,27 @@ services: environment: CLICKHOUSE_PASSWORD: example volumes: - - chdata:/var/lib/clickhouse + - chdata-e2e:/var/lib/clickhouse extra_hosts: - "host.docker.internal:host-gateway" - e2e: - image: golang:1.24-alpine@sha256:ddf52008bce1be455fe2b22d780b6693259aaf97b16383b6372f4b22dd33ad66 - environment: - <<: [*catalog-config, *minio-config] - TEMPORAL_HOST_PORT: temporal:7233 - 
ENABLE_OTEL_METRICS: 'false' - AWS_ACCESS_KEY_ID: _peerdb_minioadmin - AWS_SECRET_ACCESS_KEY: _peerdb_minioadmin - AWS_REGION: us-east-1 - AWS_ENDPOINT_URL_S3: http://host.docker.internal:9001 - CI_CLICKHOUSE_HOST: clickhouse - CI_CLICKHOUSE_PASSWORD: example - volumes: - - ./flow:/flow - entrypoint: > - /bin/sh -c " - apk add --no-cache gcc geos-dev musl-dev && - cd /flow && - go test -p 16 /flow/e2e/clickhouse/... -timeout 900s" - volumes: - pgdata: - mydata: - chdata: - minio-data: + pgdata-e2e: + driver_opts: + type: tmpfs + device: tmpfs + mydata-e2e: + driver_opts: + type: tmpfs + device: tmpfs + chdata-e2e: + driver_opts: + type: tmpfs + device: tmpfs + minio-data-e2e: + driver_opts: + type: tmpfs + device: tmpfs networks: default: diff --git a/local-ci.sh b/local-ci/db.sh similarity index 97% rename from local-ci.sh rename to local-ci/db.sh index c766005f78..5ba10c1e2a 100755 --- a/local-ci.sh +++ b/local-ci/db.sh @@ -1,6 +1,8 @@ #!/bin/sh set -Eeu +cd "$(dirname "$0")"/.. + DOCKER="docker" EXTRA_ARGS="--no-attach temporal --no-attach temporal-ui" PODMAN_ARGS="" diff --git a/local-ci/env b/local-ci/env new file mode 100644 index 0000000000..ab7c4d71ca --- /dev/null +++ b/local-ci/env @@ -0,0 +1,17 @@ +export PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_ACCESS_KEY_ID=_peerdb_minioadmin +export PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_SECRET_ACCESS_KEY=_peerdb_minioadmin +export PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_REGION=us-east-1 +export PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_ENDPOINT_URL_S3=http://localhost:9001 +export PEERDB_CLICKHOUSE_AWS_S3_BUCKET_NAME=peerdbbucket +export PEERDB_CATALOG_HOST=localhost +export PEERDB_CATALOG_PORT=9901 +export PEERDB_CATALOG_USER=postgres +export PEERDB_CATALOG_PASSWORD=postgres +export PEERDB_CATALOG_DATABASE=postgres +export TEMPORAL_HOST_PORT=localhost:7233 +export ENABLE_OTEL_METRICS=false +export AWS_ACCESS_KEY_ID=_peerdb_minioadmin +export AWS_SECRET_ACCESS_KEY=_peerdb_minioadmin +export AWS_REGION=us-east-1 +export 
AWS_ENDPOINT_URL_S3=http://localhost:9001 +export CI_CLICKHOUSE_PASSWORD=example diff --git a/local-ci/flow-worker.sh b/local-ci/flow-worker.sh new file mode 100755 index 0000000000..ab641a4925 --- /dev/null +++ b/local-ci/flow-worker.sh @@ -0,0 +1,5 @@ +#!/bin/sh +cd "$(dirname "$0")" +. ./env +cd ../flow +exec go run . worker diff --git a/local-ci/snapshot-worker.sh b/local-ci/snapshot-worker.sh new file mode 100755 index 0000000000..3a73e271ad --- /dev/null +++ b/local-ci/snapshot-worker.sh @@ -0,0 +1,5 @@ +#!/bin/sh +cd "$(dirname "$0")" +. ./env +cd ../flow +exec go run . snapshot-worker diff --git a/local-ci/test.sh b/local-ci/test.sh new file mode 100755 index 0000000000..31c13a2e7a --- /dev/null +++ b/local-ci/test.sh @@ -0,0 +1,5 @@ +#!/bin/sh +cd "$(dirname "$0")" +. ./env +cd ../flow +exec go test -v -parallel 1 -p 1 ./e2e/clickhouse/... diff --git a/volumes/postgresql.conf b/volumes/postgresql.conf index 36980f0ae7..2cb654880c 100644 --- a/volumes/postgresql.conf +++ b/volumes/postgresql.conf @@ -1,5 +1,6 @@ listen_addresses = '*' +max_connections = 1000 wal_level = logical max_wal_senders = 4 max_replication_slots = 4 From b0fc882a3db43cc682cb477c11b10030dad52323 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Fri, 25 Jul 2025 14:52:16 +0000 Subject: [PATCH 3/3] avoid container name conflicts --- docker-compose-e2e.yml | 46 +++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/docker-compose-e2e.yml b/docker-compose-e2e.yml index 91a3f5f332..c753b7725d 100644 --- a/docker-compose-e2e.yml +++ b/docker-compose-e2e.yml @@ -8,15 +8,15 @@ x-minio-config: &minio-config PEERDB_CLICKHOUSE_AWS_S3_BUCKET_NAME: peerdbbucket x-catalog-config: &catalog-config - PEERDB_CATALOG_HOST: catalog + PEERDB_CATALOG_HOST: catalog-e2e PEERDB_CATALOG_PORT: 5432 PEERDB_CATALOG_USER: postgres PEERDB_CATALOG_PASSWORD: postgres PEERDB_CATALOG_DATABASE: postgres services: - catalog: - container_name: catalog + 
catalog-e2e: + container_name: catalog-e2e image: postgres:17-alpine@sha256:fbe21607052bb5c298674f2fd8cf044a63aa3ddf50b81627f894f91f40f50bcb command: -c config_file=/etc/postgresql.conf ports: @@ -39,17 +39,17 @@ services: retries: 5 start_period: 60s - temporal: - container_name: temporal + temporal-e2e: + container_name: temporal-e2e depends_on: - catalog: + catalog-e2e: condition: service_healthy environment: - DB=postgres12 - DB_PORT=5432 - POSTGRES_USER=postgres - POSTGRES_PWD=postgres - - POSTGRES_SEEDS=catalog + - POSTGRES_SEEDS=catalog-e2e - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development-sql.yaml image: temporalio/auto-setup:1.28@sha256:3ee84bf3ec5494f2be6ee0c1fea3b52684e50da3ddc5f997e6fa56ad340da9a8 ports: @@ -59,13 +59,13 @@ services: labels: kompose.volume.type: configMap - temporal-admin-tools: - container_name: temporal-admin-tools + temporal-admin-tools-e2e: + container_name: temporal-admin-tools-e2e depends_on: - - temporal + - temporal-e2e environment: - - TEMPORAL_ADDRESS=temporal:7233 - - TEMPORAL_CLI_ADDRESS=temporal:7233 + - TEMPORAL_ADDRESS=temporal-e2e:7233 + - TEMPORAL_CLI_ADDRESS=temporal-e2e:7233 - TEMPORAL_CLI_SHOW_STACKS=1 image: temporalio/admin-tools:1.25.2-tctl-1.18.1-cli-1.1.1@sha256:da0c7a7982b571857173ab8f058e7f139b3054800abb4dcb100445d29a563ee8 stdin_open: true @@ -80,20 +80,20 @@ services: volumes: - ./scripts/mirror-name-search.sh:/etc/temporal/entrypoint.sh - temporal-ui: - container_name: temporal-ui + temporal-ui-e2e: + container_name: temporal-ui-e2e depends_on: - - temporal + - temporal-e2e environment: - - TEMPORAL_ADDRESS=temporal:7233 + - TEMPORAL_ADDRESS=temporal-e2e:7233 - TEMPORAL_CORS_ORIGINS=http://localhost:3000 - TEMPORAL_CSRF_COOKIE_INSECURE=true image: temporalio/ui:2.39.0@sha256:b768f87f18b59663a6749e98a2f7782c266e8e4e4749f92248e2ba41d6330d3f ports: - 8085:8080 - peerdb: - container_name: peerdb-server + peerdb-e2e: + container_name: peerdb-server-e2e stop_signal: SIGINT build: context: . 
@@ -107,10 +107,10 @@ services: ports: - 9900:9900 depends_on: - catalog: + catalog-e2e: condition: service_healthy - minio: + minio-e2e: image: bitnami/minio:2025.6.13 volumes: - minio-data-e2e:/data @@ -123,8 +123,8 @@ services: MINIO_CONSOLE_PORT_NUMBER: 36987 MINIO_DEFAULT_BUCKETS: peerdbbucket - mysql: - container_name: mysql + mysql-e2e: + container_name: mysql-e2e build: context: . dockerfile: stacks/mysql.Dockerfile @@ -137,7 +137,7 @@ services: extra_hosts: - "host.docker.internal:host-gateway" - clickhouse: + clickhouse-e2e: image: clickhouse/clickhouse-server ports: - 9000:9000