Skip to content

Commit f53071f

Browse files
committed
Merge branch 'master' into e2e-tests-on-published-chart
2 parents 2cc7bbd + b478944 commit f53071f

File tree

8 files changed

+38
-48
lines changed

8 files changed

+38
-48
lines changed

.evergreen-tasks.yml

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1116,7 +1116,6 @@ tasks:
11161116

11171117
- name: e2e_multi_cluster_validation
11181118
tags: [ "patch-run" ]
1119-
exec_timeout_secs: 1000
11201119
commands:
11211120
- func: e2e_test
11221121

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
---
2+
kind: other
3+
date: 2025-10-27
4+
---
5+
6+
* **kubectl-mongodb plugin**: `cosign`, the signing tool that is used to sign `kubectl-mongodb` plugin binaries, has been updated to version `3.0.2`. With this change, released binaries will be bundled with `.bundle` files containing both signature and certificate information. For more information on how to verify signatures using the new `cosign` version, please refer to https://github.com/sigstore/cosign/blob/v3.0.2/doc/cosign_verify-blob.md

docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_appdb_disaster_recovery.py

Lines changed: 2 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -152,11 +152,12 @@ def test_delete_om_and_appdb_statefulset_in_failed_cluster(
152152
# delete OM to simulate losing Ops Manager application
153153
# this is only for testing unavailability of the OM application, it's not testing losing OM cluster
154154
# we don't delete here any additional resources (secrets, configmaps) that are required for a proper OM recovery testing
155+
# it will be immediately recreated by the operator, so we cannot check if it was deleted
155156
delete_statefulset(
156157
ops_manager.namespace,
157158
ops_manager.name,
158159
propagation_policy="Background",
159-
api_client=central_cluster_client,
160+
api_client=get_member_cluster_api_client(OM_MEMBER_CLUSTER_NAME),
160161
)
161162
except kubernetes.client.ApiException as e:
162163
if e.status != 404:
@@ -184,14 +185,6 @@ def statefulset_is_deleted(namespace: str, name: str, api_client=Optional[kubern
184185
else:
185186
raise e
186187

187-
run_periodically(
188-
lambda: statefulset_is_deleted(
189-
ops_manager.namespace,
190-
ops_manager.name,
191-
api_client=get_member_cluster_api_client(OM_MEMBER_CLUSTER_NAME),
192-
),
193-
timeout=120,
194-
)
195188
run_periodically(
196189
lambda: statefulset_is_deleted(
197190
ops_manager.namespace,

scripts/funcs/multicluster

Lines changed: 12 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -128,26 +128,6 @@ EOF
128128
sleep 1
129129

130130
local service_account_name="operator-tests-multi-cluster-service-account"
131-
132-
local secret_name
133-
secret_name="$(kubectl --context "${CENTRAL_CLUSTER}" get secret -n "${NAMESPACE}" | { grep "${service_account_name}" || test $? = 1; } | awk '{ print $1 }')"
134-
if [[ "${secret_name}" == "" ]]; then
135-
secret_name="${service_account_name}-token-secret"
136-
create_service_account_token_secret "${CENTRAL_CLUSTER}" "${service_account_name}" "${secret_name}"
137-
fi
138-
139-
local central_cluster_token
140-
central_cluster_token="$(kubectl --context "${CENTRAL_CLUSTER}" get secret "${secret_name}" -o jsonpath='{ .data.token}' -n "${NAMESPACE}" | base64 -d)"
141-
echo "Creating Multi Cluster configuration secret"
142-
143-
configuration_params=(
144-
"--from-literal=central_cluster=${CENTRAL_CLUSTER}"
145-
)
146-
147-
configuration_params+=(
148-
"--from-literal=${CENTRAL_CLUSTER}=${central_cluster_token}"
149-
)
150-
151131
local secret_name
152132
secret_name="$(kubectl --context "${CENTRAL_CLUSTER}" get secret -n "${NAMESPACE}" | { grep "${service_account_name}" || test $? = 1; } | awk '{ print $1 }')"
153133
if [[ "${secret_name}" == "" ]]; then
@@ -175,7 +155,18 @@ EOF
175155
create_service_account_token_secret "${member_cluster}" "${service_account_name}" "${secret_name}"
176156
fi
177157

178-
member_cluster_token="$(kubectl --context "${member_cluster}" get secret "${secret_name}" -o jsonpath='{ .data.token}' -n "${NAMESPACE}" | base64 -d)"
158+
# Retry up to 10 times if .data.token is not yet populated
159+
for _ in {1..10}; do
160+
member_cluster_token="$(kubectl --context "${member_cluster}" get secret "${secret_name}" -o jsonpath='{ .data.token }' -n "${NAMESPACE}" | base64 -d)"
161+
if [[ -n "${member_cluster_token}" ]]; then
162+
break
163+
fi
164+
sleep 1
165+
done
166+
if [[ -z "${member_cluster_token}" ]]; then
167+
echo "Error: .data.token not populated for secret ${secret_name} in cluster ${member_cluster}"
168+
exit 1
169+
fi
179170
# for 2 cluster tests central cluster is the first member, so we cannot add this as it will result in duplicate key and error in create secret
180171
if [[ "${member_cluster}" != "${CENTRAL_CLUSTER}" ]]; then
181172
configuration_params+=(

scripts/release/atomic_pipeline.py

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -77,19 +77,20 @@ def build_image(
7777
# Build the image once with all repository tags
7878
tags = []
7979
for registry in registries:
80-
tag = f"{registry}:{build_configuration.version}"
80+
arch_suffix = ""
81+
if build_configuration.architecture_suffix and len(build_configuration.platforms) == 1:
82+
arch_suffix = f"-{build_configuration.platforms[0].split("/")[1]}"
83+
84+
tag = f"{registry}:{build_configuration.version}{arch_suffix}"
8185
if build_configuration.skip_if_exists and check_if_image_exists(tag):
8286
logger.info(f"Image with tag {tag} already exists. Skipping it.")
8387
else:
8488
tags.append(tag)
8589
if build_configuration.latest_tag:
86-
tags.append(f"{registry}:latest")
90+
tags.append(f"{registry}:latest{arch_suffix}")
8791
if build_configuration.olm_tag:
8892
olm_tag = create_olm_version_tag(build_configuration.version)
89-
tags.append(f"{registry}:{olm_tag}")
90-
if build_configuration.architecture_suffix and len(build_configuration.platforms) == 1:
91-
arch = build_configuration.platforms[0].split("/")[1]
92-
tags.append(f"{tag}-{arch}")
93+
tags.append(f"{registry}:{olm_tag}{arch_suffix}")
9394

9495
if not tags:
9596
logger.info("All specified image tags already exist. Skipping build.")

scripts/release/kubectl_mongodb/sign.sh

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ set -euo pipefail
88
# Sign a binary using garasign credentials
99

1010
ARTIFACT=$1
11-
SIGNATURE="${ARTIFACT}.sig"
11+
SIGNATURE_BUNDLE="${ARTIFACT}.bundle"
1212

1313
TMPDIR=${TMPDIR:-/tmp}
1414
SIGNING_ENVFILE="${TMPDIR}/signing-envfile"
@@ -21,7 +21,7 @@ SIGNING_IMAGE_URI=${SIGNING_IMAGE_URI}
2121
ARTIFACTORY_PASSWORD=${ARTIFACTORY_PASSWORD}
2222
ARTIFACTORY_USERNAME=${ARTIFACTORY_USERNAME}
2323

24-
echo "Signing artifact ${ARTIFACT} and saving signature to ${SIGNATURE}"
24+
echo "Signing artifact ${ARTIFACT} and saving signature bundle to ${SIGNATURE_BUNDLE}"
2525

2626
{
2727
echo "GRS_CONFIG_USER1_USERNAME=${GRS_USERNAME}";
@@ -40,4 +40,4 @@ docker run \
4040
-v "$(pwd)":"$(pwd)" \
4141
-w "$(pwd)" \
4242
"${SIGNING_IMAGE_URI}" \
43-
cosign sign-blob --key "${PKCS11_URI}" --output-signature "${SIGNATURE}" "${ARTIFACT}" --yes
43+
cosign sign-blob --key "${PKCS11_URI}" --tlog-upload=false --use-signing-config=false --bundle "${SIGNATURE_BUNDLE}" "${ARTIFACT}" --yes

scripts/release/kubectl_mongodb/verify.sh

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -2,10 +2,10 @@
22

33
set -euo pipefail
44

5-
# Verify the signature of a binary with the operator's public key
5+
# Verify the signature bundle of a binary with the operator's public key
66

77
ARTIFACT=$1
8-
SIGNATURE="${ARTIFACT}.sig"
8+
SIGNATURE_BUNDLE="${ARTIFACT}.bundle"
99

1010
HOSTED_SIGN_PUBKEY="https://cosign.mongodb.com/mongodb-enterprise-kubernetes-operator.pem" # to complete
1111
TMPDIR=${TMPDIR:-/tmp}
@@ -14,19 +14,19 @@ KEY_FILE="${TMPDIR}/host-public.key"
1414
SIGNING_IMAGE_URI="${SIGNING_IMAGE_URI}"
1515

1616
curl -o "${KEY_FILE}" "${HOSTED_SIGN_PUBKEY}"
17-
echo "Verifying signature ${SIGNATURE} of artifact ${ARTIFACT}"
17+
echo "Verifying signature bundle ${SIGNATURE_BUNDLE} of artifact ${ARTIFACT}"
1818
echo "Keyfile is ${KEY_FILE}"
1919

2020
# When working locally, the following command can be used instead of Docker
21-
# cosign verify-blob --key ${KEY_FILE} --signature ${SIGNATURE} ${ARTIFACT}
21+
# cosign verify-blob --key ${KEY_FILE} --insecure-ignore-tlog --bundle ${SIGNATURE_BUNDLE} ${ARTIFACT}
2222

2323
docker run \
2424
--rm \
2525
-v "$(pwd)":"$(pwd)" \
2626
-v "${KEY_FILE}":"${KEY_FILE}" \
2727
-w "$(pwd)" \
2828
"${SIGNING_IMAGE_URI}" \
29-
cosign verify-blob --key "${KEY_FILE}" --signature "${SIGNATURE}" "${ARTIFACT}"
29+
cosign verify-blob --key "${KEY_FILE}" --insecure-ignore-tlog --bundle "${SIGNATURE_BUNDLE}" "${ARTIFACT}"
3030

31-
# Without below line, Evergreen fails at archiving with "open dist/kubectl-[...]/kubectl-mongodb.sig: permission denied
32-
sudo chmod 666 "${SIGNATURE}"
31+
# Without below line, Evergreen fails at archiving with "open dist/kubectl-[...]/kubectl-mongodb.bundle: permission denied
32+
sudo chmod 666 "${SIGNATURE_BUNDLE}"

scripts/release/publish_helm_chart.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,10 +3,10 @@
33
import subprocess
44

55
import yaml
6-
from release.build.build_scenario import SUPPORTED_SCENARIOS
76

87
from lib.base_logger import logger
98
from scripts.release.build.build_info import *
9+
from scripts.release.build.build_scenario import SUPPORTED_SCENARIOS
1010

1111
CHART_DIR = "helm_chart"
1212

0 commit comments

Comments
 (0)