From 00fc7a90ed4a77c9bcb32fcba6882fa915d8b6dd Mon Sep 17 00:00:00 2001 From: Brad Davidson Date: Mon, 23 Jun 2025 20:28:46 +0000 Subject: [PATCH 1/6] Bump component versions in Dockerfiles Signed-off-by: Brad Davidson --- Dockerfile.dapper | 12 ++++++------ e2e/cluster/local/images/k3s/Dockerfile | 2 +- e2e/cluster/local/scripts/cluster-prepare | 6 +++--- package/Dockerfile | 6 +++--- scripts/e2e-sonobuoy | 2 +- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Dockerfile.dapper b/Dockerfile.dapper index 0d234ba5..10c862a5 100644 --- a/Dockerfile.dapper +++ b/Dockerfile.dapper @@ -1,17 +1,17 @@ -ARG KUBECTL=rancher/kubectl:v1.32.2 +ARG KUBECTL=rancher/kubectl:v1.32.6 FROM ${KUBECTL} AS kubectl -FROM registry.suse.com/bci/golang:1.23 +FROM registry.suse.com/bci/golang:1.24 COPY --from=kubectl /bin/kubectl /usr/local/bin/kubectl ARG DAPPER_HOST_ARCH ENV ARCH $DAPPER_HOST_ARCH -ARG SONOBUOY_VERSION=0.57.1 -RUN zypper -n install expect git jq docker vim less file curl wget iproute2 gawk +ARG SONOBUOY_VERSION=0.57.3 +RUN zypper -n install expect git jq docker vim less file curl wget iproute2 gawk RUN if [ "${ARCH:-$(go env GOARCH)}" = "amd64" ]; then \ - curl -sL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.61.0; \ - curl -sL "https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2Fv5.4.3/kustomize_v5.4.3_linux_amd64.tar.gz" | tar -xz -C /usr/local/bin; \ + curl -sL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.64.8; \ + curl -sL "https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2Fv5.6.0/kustomize_v5.6.0_linux_amd64.tar.gz" | tar -xz -C /usr/local/bin; \ curl -sL "https://github.com/vmware-tanzu/sonobuoy/releases/download/v${SONOBUOY_VERSION}/sonobuoy_${SONOBUOY_VERSION}_linux_${ARCH}.tar.gz" | tar -xz -C /usr/local/bin; \ curl -sL "https://github.com/docker/compose/releases/download/v2.29.1/docker-compose-linux-x86_64" -o 
/usr/local/bin/docker-compose && \ chmod +x /usr/local/bin/docker-compose; \ diff --git a/e2e/cluster/local/images/k3s/Dockerfile b/e2e/cluster/local/images/k3s/Dockerfile index d6b45368..2867108b 100644 --- a/e2e/cluster/local/images/k3s/Dockerfile +++ b/e2e/cluster/local/images/k3s/Dockerfile @@ -3,7 +3,7 @@ ARG BCI="registry.suse.com/bci/bci-base:15.6" FROM ${BCI} AS k3s ARG ARCH -ARG K3S_VERSION="v1.30.3+k3s1" +ARG K3S_VERSION="v1.32.5+k3s1" RUN set -x \ && zypper -n in \ ca-certificates \ diff --git a/e2e/cluster/local/scripts/cluster-prepare b/e2e/cluster/local/scripts/cluster-prepare index 11b57c0a..8688d299 100755 --- a/e2e/cluster/local/scripts/cluster-prepare +++ b/e2e/cluster/local/scripts/cluster-prepare @@ -4,9 +4,9 @@ set -e : "${ARCH?required}" : "${DIST?required}" -: "${BCI_TAG:=15.4}" -: "${KUBECTL_TAG:=v1.30.3}" -: "${SONOBUOY_TAG:=v0.57.1}" +: "${BCI_TAG:=15.6}" +: "${KUBECTL_TAG:=v1.32.6}" +: "${SONOBUOY_TAG:=v0.57.3}" docker-image-save() { echo "Pulling '$1:$2' ..." diff --git a/package/Dockerfile b/package/Dockerfile index ac4dada1..a8d8947e 100644 --- a/package/Dockerfile +++ b/package/Dockerfile @@ -1,10 +1,10 @@ ARG BCI=registry.suse.com/bci/bci-base:15.6 -ARG GOLANG=registry.suse.com/bci/golang:1.23 -ARG ALPINE=alpine:3.21 +ARG GOLANG=registry.suse.com/bci/golang:1.24 +ARG ALPINE=alpine:3.22 FROM ${GOLANG} AS e2e-ginkgo ENV GOBIN=/bin -RUN go install github.com/onsi/ginkgo/v2/ginkgo@v2.20.0 +RUN go install github.com/onsi/ginkgo/v2/ginkgo@v2.23.4 FROM ${BCI} AS e2e-tests ARG TARGETARCH diff --git a/scripts/e2e-sonobuoy b/scripts/e2e-sonobuoy index 140df31d..1f55d88c 100755 --- a/scripts/e2e-sonobuoy +++ b/scripts/e2e-sonobuoy @@ -13,7 +13,7 @@ export PATH="$PWD/e2e/cluster/${E2E_CLUSTER}/scripts:$PATH" e2e-sonobuoy-run() { - : ${SONOBUOY_RUN_SONOBUOY_IMAGE:="sonobuoy/sonobuoy:v0.57.1"} + : ${SONOBUOY_RUN_SONOBUOY_IMAGE:="sonobuoy/sonobuoy:v0.57.3"} : ${SONOBUOY_RUN_IMAGE_PULL_POLICY:=Never} : ${SONOBUOY_RUN_WAIT:=10} From 
5b4dc4b5261e15f85c1dcd26f7ea3bc5456404cd Mon Sep 17 00:00:00 2001 From: Brad Davidson Date: Fri, 20 Jun 2025 23:26:11 +0000 Subject: [PATCH 2/6] Exclude kubectl annotations from propagation Fixes `kubectl.kubernetes.io/last-applied-configuration` annotation being passed through from Plan to Job Signed-off-by: Brad Davidson --- pkg/upgrade/job/job.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/upgrade/job/job.go b/pkg/upgrade/job/job.go index ececb72b..143a9ab0 100644 --- a/pkg/upgrade/job/job.go +++ b/pkg/upgrade/job/job.go @@ -153,7 +153,7 @@ func New(plan *upgradeapiv1.Plan, node *corev1.Node, controllerName string) *bat podAnnotations := labels.Set{} for key, value := range plan.Annotations { - if !strings.Contains(key, "cattle.io/") { + if !strings.Contains(key, "cattle.io/") && !strings.Contains(key, "kubectl.kubernetes.io/") { jobAnnotations[key] = value podAnnotations[key] = value } From a2c062d21ed5b45b0b200faa8f22df1abf1224ef Mon Sep 17 00:00:00 2001 From: Brad Davidson Date: Sat, 21 Jun 2025 00:03:42 +0000 Subject: [PATCH 3/6] Migrate CRD generation to controller-gen Signed-off-by: Brad Davidson --- Dockerfile.dapper | 1 + e2e/suite/job_generate_test.go | 4 +- go.mod | 26 +- go.sum | 59 +- hack/crdgen.go | 18 - main.go | 3 +- pkg/apis/upgrade.cattle.io/v1/types.go | 121 +- .../v1/zz_generated_deepcopy.go | 7 +- pkg/crds/crds.go | 84 ++ .../generated/upgrade.cattle.io_plans.yaml | 1187 +++++++++++++++++ .../clientset/versioned/clientset.go | 4 +- .../versioned/fake/clientset_generated.go | 6 +- .../upgrade.cattle.io/v1/fake/fake_plan.go | 131 +- .../v1/fake/fake_upgrade.cattle.io_client.go | 2 +- .../typed/upgrade.cattle.io/v1/plan.go | 163 +-- .../v1/upgrade.cattle.io_client.go | 10 +- pkg/upgrade/controller.go | 25 +- pkg/upgrade/job/job.go | 39 +- pkg/upgrade/job/job_suite_test.go | 32 +- pkg/upgrade/plan/plan.go | 27 +- scripts/package-controller | 2 +- 21 files changed, 1529 insertions(+), 422 deletions(-) delete mode 100644 
hack/crdgen.go create mode 100644 pkg/crds/crds.go create mode 100644 pkg/crds/yaml/generated/upgrade.cattle.io_plans.yaml diff --git a/Dockerfile.dapper b/Dockerfile.dapper index 10c862a5..3e2d0287 100644 --- a/Dockerfile.dapper +++ b/Dockerfile.dapper @@ -19,6 +19,7 @@ RUN if [ "${ARCH:-$(go env GOARCH)}" = "amd64" ]; then \ RUN mkdir -p /usr/local/lib/docker/cli-plugins; \ curl -o /usr/local/lib/docker/cli-plugins/docker-buildx -fsSL "https://github.com/docker/buildx/releases/download/v0.17.1/buildx-v0.17.1.linux-amd64"; \ chmod +x /usr/local/lib/docker/cli-plugins/docker-buildx +RUN go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.17.3 ENV DAPPER_ENV REPO TAG DRONE_TAG ENV DAPPER_SOURCE /go/src/github.com/rancher/system-upgrade-controller/ ENV DAPPER_OUTPUT ./bin ./dist diff --git a/e2e/suite/job_generate_test.go b/e2e/suite/job_generate_test.go index 68bcd9b7..6ed79071 100644 --- a/e2e/suite/job_generate_test.go +++ b/e2e/suite/job_generate_test.go @@ -174,7 +174,7 @@ var _ = Describe("Job Generation", func() { plan.Spec.Concurrency = 1 plan.Spec.ServiceAccountName = e2e.Namespace.Name plan.Spec.Window = &upgradeapiv1.TimeWindowSpec{ - Days: []string{"never"}, + Days: []upgradeapiv1.Day{"never"}, StartTime: "00:00:00", EndTime: "23:59:59", TimeZone: "UTC", @@ -193,7 +193,7 @@ var _ = Describe("Job Generation", func() { Expect(upgradeapiv1.PlanSpecValidated.IsTrue(plan)).To(BeFalse()) Expect(upgradeapiv1.PlanSpecValidated.GetMessage(plan)).To(ContainSubstring("spec.window is invalid")) - plan.Spec.Window.Days = []string{"su", "mo", "tu", "we", "th", "fr", "sa"} + plan.Spec.Window.Days = []upgradeapiv1.Day{"su", "mo", "tu", "we", "th", "fr", "sa"} plan, err = e2e.UpdatePlan(plan) Expect(err).ToNot(HaveOccurred()) diff --git a/go.mod b/go.mod index 1bb9eb91..49fa73da 100644 --- a/go.mod +++ b/go.mod @@ -44,12 +44,13 @@ require ( github.com/kubereboot/kured v1.13.1 github.com/onsi/ginkgo/v2 v2.21.0 github.com/onsi/gomega v1.35.1 - 
github.com/rancher/lasso v0.0.0-20240924233157-8f384efc8813 + github.com/rancher/lasso v0.2.2 github.com/rancher/system-upgrade-controller/pkg/apis v0.0.0 - github.com/rancher/wrangler/v3 v3.1.0 + github.com/rancher/wrangler/v3 v3.2.1 github.com/sirupsen/logrus v1.9.3 github.com/urfave/cli v1.22.15 k8s.io/api v0.32.2 + k8s.io/apiextensions-apiserver v0.32.1 k8s.io/apimachinery v0.32.2 k8s.io/client-go v0.32.2 k8s.io/kubectl v0.32.2 @@ -80,9 +81,9 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/euank/go-kmsg-parser v2.0.0+incompatible // indirect - github.com/evanphx/json-patch v5.9.0+incompatible // indirect + github.com/evanphx/json-patch v5.9.11+incompatible // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect @@ -90,7 +91,7 @@ require ( github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect @@ -99,7 +100,7 @@ require ( github.com/google/btree v1.0.1 // indirect github.com/google/cadvisor v0.51.0 // indirect github.com/google/cel-go v0.22.0 // indirect - github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect @@ -152,25 +153,24 @@ require ( go.uber.org/zap 
v1.27.0 // indirect golang.org/x/crypto v0.35.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/mod v0.21.0 // indirect - golang.org/x/net v0.30.0 // indirect + golang.org/x/mod v0.23.0 // indirect + golang.org/x/net v0.35.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.11.0 // indirect golang.org/x/sys v0.30.0 // indirect golang.org/x/term v0.29.0 // indirect golang.org/x/text v0.22.0 // indirect golang.org/x/time v0.7.0 // indirect - golang.org/x/tools v0.26.0 // indirect + golang.org/x/tools v0.30.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect google.golang.org/grpc v1.65.0 // indirect - google.golang.org/protobuf v1.35.1 // indirect + google.golang.org/protobuf v1.35.2 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.31.1 // indirect k8s.io/apiserver v0.32.2 // indirect k8s.io/cloud-provider v0.30.3 // indirect k8s.io/code-generator v0.32.2 // indirect @@ -181,7 +181,7 @@ require ( k8s.io/cri-client v0.0.0 // indirect k8s.io/csi-translation-lib v0.0.0 // indirect k8s.io/dynamic-resource-allocation v0.0.0 // indirect - k8s.io/gengo v0.0.0-20240826214909-a7b603a56eb7 // indirect + k8s.io/gengo v0.0.0-20250130153323-76c5745d3511 // indirect k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kms v0.32.2 // indirect @@ -191,6 +191,6 @@ require ( k8s.io/mount-utils v0.0.0 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + 
sigs.k8s.io/structured-merge-diff/v4 v4.4.3 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 3f697917..937a4782 100644 --- a/go.sum +++ b/go.sum @@ -37,7 +37,6 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.3.4 h1:VBWugsJh2ZxJmLFSM06/0qzQyiQX2Qs0ViKrUAcqdZ8= github.com/cyphar/filepath-securejoin v0.3.4/go.mod h1:8s/MCNJREmFK0H02MF6Ihv1nakJe4L/w3WZLHNkvlYM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -56,12 +55,12 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= -github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= 
-github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8= +github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= @@ -78,12 +77,10 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= @@ -103,8 +100,8 @@ github.com/google/cadvisor v0.51.0 
h1:BspqSPdZoLKrnvuZNOvM/KiJ/A+RdixwagN20n+2H8 github.com/google/cadvisor v0.51.0/go.mod h1:czGE/c/P/i0QFpVNKTFrIEzord9Y10YfpwuaSWXELc0= github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= @@ -142,7 +139,6 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -201,10 +197,10 @@ github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJN github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= 
-github.com/rancher/lasso v0.0.0-20240924233157-8f384efc8813 h1:V/LY8pUHZG9Kc+xEDWDOryOnCU6/Q+Lsr9QQEQnshpU= -github.com/rancher/lasso v0.0.0-20240924233157-8f384efc8813/go.mod h1:IxgTBO55lziYhTEETyVKiT8/B5Rg92qYiRmcIIYoPgI= -github.com/rancher/wrangler/v3 v3.1.0 h1:8ETBnQOEcZaR6WBmUSysWW7WnERBOiNTMJr4Dj3UG/s= -github.com/rancher/wrangler/v3 v3.1.0/go.mod h1:gUPHS1ANs2NyByfeERHwkGiQ1rlIa8BpTJZtNSgMlZw= +github.com/rancher/lasso v0.2.2 h1:oKP5d4+eSupwHftLMTWo6QS1tYYZ5XK+ZjP+VNhwmk8= +github.com/rancher/lasso v0.2.2/go.mod h1:KSV3jBXfdXqdCuMm2uC8kKB9q/wuDYb3h0eHZoRjShM= +github.com/rancher/wrangler/v3 v3.2.1 h1:V51PnoGb8bZ5jJdxFlqKQApzWdSp4sEy5OPGuEGqVbI= +github.com/rancher/wrangler/v3 v3.2.1/go.mod h1:RV8kkv5br5HaxXWamIbr95pOjvVeoC5CeBldcdw5Fv0= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -230,8 +226,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= 
github.com/urfave/cli v1.22.15 h1:nuqt+pdC/KqswQKhETJjo7pvn/k4xMUxgW6liI7XpnM= @@ -278,8 +275,8 @@ go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeX go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= -go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= +go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= @@ -293,14 +290,14 @@ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= -golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= +golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= +golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -327,8 +324,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= +golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -341,8 +338,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= 
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -387,8 +384,8 @@ k8s.io/csi-translation-lib v0.32.2 h1:aLzAyaoJUc5rgtLi8Xd4No1tet6UpvUsGIgRoGnPSS k8s.io/csi-translation-lib v0.32.2/go.mod h1:PlOKan6Vc0G6a+giQbm36plJ+E1LH+GPRLAVMQMSMcY= k8s.io/dynamic-resource-allocation v0.32.2 h1:6wP8/GGvhhvTJLrzwPSoMJDnspmosFj1CKmfrAH6m5U= k8s.io/dynamic-resource-allocation v0.32.2/go.mod h1:+3qnQfvikLHVZrdZ0/gYkRiV96weUR9j7+Ph3Ui/hYU= -k8s.io/gengo v0.0.0-20240826214909-a7b603a56eb7 h1:HCbtr1pVu/ElMcTTs18KdMtH5y6f7PQvrjh1QZj3qCI= -k8s.io/gengo v0.0.0-20240826214909-a7b603a56eb7/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20250130153323-76c5745d3511 h1:4eL6zr5VCj71nu2nOuQ6j6m/kqh5WueXBN8daZkNe90= +k8s.io/gengo v0.0.0-20250130153323-76c5745d3511/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4= k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= @@ -416,8 +413,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcp 
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.3 h1:sCP7Vv3xx/CWIuTPVN38lUPx0uw0lcLfzaiDa8Ja01A= +sigs.k8s.io/structured-merge-diff/v4 v4.4.3/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/crdgen.go b/hack/crdgen.go deleted file mode 100644 index ce47e917..00000000 --- a/hack/crdgen.go +++ /dev/null @@ -1,18 +0,0 @@ -package main - -import ( - "os" - - _ "github.com/rancher/system-upgrade-controller/pkg/generated/controllers/upgrade.cattle.io/v1" - "github.com/rancher/system-upgrade-controller/pkg/upgrade/plan" - "github.com/rancher/wrangler/v3/pkg/crd" -) - -func main() { - planCrd, err := plan.CRD() - if err != nil { - print(err) - return - } - crd.Print(os.Stdout, []crd.CRD{*planCrd}) -} diff --git a/main.go b/main.go index 19eab155..d97fd237 100644 --- a/main.go +++ b/main.go @@ -1,6 +1,7 @@ //go:generate go run pkg/codegen/cleanup/cleanup.go -//go:generate rm -rf pkg/generated +//go:generate rm -rf pkg/generated pkg/crds/yaml/generated //go:generate go run pkg/codegen/codegen.go +//go:generate controller-gen crd:generateEmbeddedObjectMeta=true paths=./pkg/apis/... 
output:crd:dir=./pkg/crds/yaml/generated package main diff --git a/pkg/apis/upgrade.cattle.io/v1/types.go b/pkg/apis/upgrade.cattle.io/v1/types.go index 2b106159..ec46c276 100644 --- a/pkg/apis/upgrade.cattle.io/v1/types.go +++ b/pkg/apis/upgrade.cattle.io/v1/types.go @@ -23,9 +23,13 @@ var ( ) // +genclient +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Image",type=string,JSONPath=`.spec.upgrade.image` +// +kubebuilder:printcolumn:name="Channel",type=string,JSONPath=`.spec.channel` +// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version` // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// Plan represents a "JobSet" of ApplyingNodes +// Plan represents a set of Jobs to apply an upgrade (or other operation) to set of Nodes. type Plan struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -36,38 +40,71 @@ type Plan struct { // PlanSpec represents the user-configurable details of a Plan. type PlanSpec struct { - Concurrency int64 `json:"concurrency,omitempty"` - JobActiveDeadlineSecs int64 `json:"jobActiveDeadlineSecs,omitempty"` - NodeSelector *metav1.LabelSelector `json:"nodeSelector,omitempty"` - ServiceAccountName string `json:"serviceAccountName,omitempty"` - - Channel string `json:"channel,omitempty"` - Version string `json:"version,omitempty"` + // The maximum number of concurrent nodes to apply this update on. + Concurrency int64 `json:"concurrency,omitempty"` + // Sets ActiveDeadlineSeconds on Jobs generated to apply this Plan. + // If the Job does not complete within this time, the Plan will stop processing until it is updated to trigger a redeploy. + // If set to 0, Jobs have no deadline. If not set, the controller default value is used. + JobActiveDeadlineSecs *int64 `json:"jobActiveDeadlineSecs,omitempty"` + // Select which nodes this plan can be applied to. 
+ NodeSelector *metav1.LabelSelector `json:"nodeSelector,omitempty"` + // The service account for the pod to use. As with normal pods, if not specified the default service account from the namespace will be assigned. + ServiceAccountName string `json:"serviceAccountName,omitempty"` + // A URL that returns HTTP 302 with the last path element of the value returned in the Location header assumed to be an image tag (after munging "+" to "-"). + Channel string `json:"channel,omitempty"` + // Providing a value for version will prevent polling/resolution of the channel if specified. + Version string `json:"version,omitempty"` + // Secrets to be mounted into the Job Pod. Secrets []SecretSpec `json:"secrets,omitempty"` - + // Specify which node taints should be tolerated by pods applying the upgrade. + // Anything specified here is appended to the default of: + // - {key: node.kubernetes.io/unschedulable, effect: NoSchedule, operator: Exists} Tolerations []corev1.Toleration `json:"tolerations,omitempty"` - + // Jobs for exclusive plans cannot be run alongside any other exclusive plan. Exclusive bool `json:"exclusive,omitempty"` - - Window *TimeWindowSpec `json:"window,omitempty"` - Prepare *ContainerSpec `json:"prepare,omitempty"` - Cordon bool `json:"cordon,omitempty"` - Drain *DrainSpec `json:"drain,omitempty"` - Upgrade *ContainerSpec `json:"upgrade,omitempty" wrangler:"required"` - ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` - PostCompleteDelay *metav1.Duration `json:"postCompleteDelay,omitempty"` + // A time window in which to execute Jobs for this Plan. + // Jobs will not be generated outside this time window, but may continue executing into the window once started. + Window *TimeWindowSpec `json:"window,omitempty"` + // The prepare init container, if specified, is run before cordon/drain which is run before the upgrade container. + // Shares the same format as the upgrade container. 
+ // If no tag is included in the image name, the tag portion of the image will be the value from .status.latestVersion a.k.a. the resolved version for this plan. + Prepare *ContainerSpec `json:"prepare,omitempty"` + // If drain is specified, the value for cordon is ignored. + // If neither drain nor cordon are specified and the node is marked as schedulable=false it will not be marked as schedulable=true when the Job completes. + Cordon bool `json:"cordon,omitempty"` + // If left unspecified, no drain will be performed. See: + // - https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/ + // - https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#drain + Drain *DrainSpec `json:"drain,omitempty"` + // The upgrade container. + // If no tag is included in the image name, the tag portion of the image will be the value from .status.latestVersion a.k.a. the resolved version for this plan. + Upgrade *ContainerSpec `json:"upgrade,omitempty"` + // Image Pull Secrets, used to pull images for the Job. + ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + // Time after a Job for one Node is complete before a new Job will be created for the next Node. + PostCompleteDelay *metav1.Duration `json:"postCompleteDelay,omitempty"` } // PlanStatus represents the resulting state from processing Plan events. type PlanStatus struct { - Conditions []genericcondition.GenericCondition `json:"conditions,omitempty"` - LatestVersion string `json:"latestVersion,omitempty"` - LatestHash string `json:"latestHash,omitempty"` - Applying []string `json:"applying,omitempty"` + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []genericcondition.GenericCondition `json:"conditions,omitempty"` + + // The latest version, as resolved from .spec.version, or the channel server. 
+ LatestVersion string `json:"latestVersion,omitempty"` + // The hash of the most recently applied plan .spec. + LatestHash string `json:"latestHash,omitempty"` + // List of Node names that the Plan is currently being applied on. + Applying []string `json:"applying,omitempty"` } // ContainerSpec is a simplified container template. type ContainerSpec struct { + // Image name. If the tag is omitted, the value from .status.latestVersion will be used. Image string `json:"image,omitempty"` Command []string `json:"command,omitempty"` Args []string `json:"args,omitempty"` @@ -77,13 +114,17 @@ type ContainerSpec struct { SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"` } +// HostPath volume to mount into the pod type VolumeSpec struct { - Name string `json:"name,omitempty"` - Source string `json:"source,omitempty"` + // Name of the Volume as it will appear within the Pod spec. + Name string `json:"name,omitempty"` + // Path on the host to mount. + Source string `json:"source,omitempty"` + // Path to mount the Volume at within the Pod. Destination string `json:"destination,omitempty"` } -// DrainSpec encapsulates `kubectl drain` parameters minus node/pod selectors. +// DrainSpec encapsulates kubectl drain parameters minus node/pod selectors. type DrainSpec struct { Timeout *time.Duration `json:"timeout,omitempty"` GracePeriod *int32 `json:"gracePeriod,omitempty"` @@ -98,21 +139,35 @@ type DrainSpec struct { // SecretSpec describes a secret to be mounted for prepare/upgrade containers. type SecretSpec struct { - Name string `json:"name,omitempty"` - Path string `json:"path,omitempty"` - IgnoreUpdates bool `json:"ignoreUpdates,omitempty"` + // Secret name + Name string `json:"name,omitempty"` + // Path to mount the Secret volume within the Pod. + Path string `json:"path,omitempty"` + // If set to true, the Secret contents will not be hashed, and changes to the Secret will not trigger new application of the Plan. 
+ IgnoreUpdates bool `json:"ignoreUpdates,omitempty"` } // TimeWindowSpec describes a time window in which a Plan should be processed. type TimeWindowSpec struct { - Days []string `json:"days,omitempty"` - StartTime string `json:"startTime,omitempty"` - EndTime string `json:"endTime,omitempty"` - TimeZone string `json:"timeZone,omitempty"` + // Days that this time window is valid for + Days []Day `json:"days,omitempty"` + // Start of the time window. + StartTime string `json:"startTime,omitempty"` + // End of the time window. + EndTime string `json:"endTime,omitempty"` + // Time zone for the time window; if not specified UTC will be used. + TimeZone string `json:"timeZone,omitempty"` } +// +kubebuilder:validation:Enum={"0","su","sun","sunday","1","mo","mon","monday","2","tu","tue","tuesday","3","we","wed","wednesday","4","th","thu","thursday","5","fr","fri","friday","6","sa","sat","saturday"} +type Day string + func (tws *TimeWindowSpec) Contains(t time.Time) bool { - tw, err := timewindow.New(tws.Days, tws.StartTime, tws.EndTime, tws.TimeZone) + days := make([]string, len(tws.Days)) + for i, day := range tws.Days { + days[i] = string(day) + } + tw, err := timewindow.New(days, tws.StartTime, tws.EndTime, tws.TimeZone) if err != nil { return false } diff --git a/pkg/apis/upgrade.cattle.io/v1/zz_generated_deepcopy.go b/pkg/apis/upgrade.cattle.io/v1/zz_generated_deepcopy.go index b379098a..9f568e1e 100644 --- a/pkg/apis/upgrade.cattle.io/v1/zz_generated_deepcopy.go +++ b/pkg/apis/upgrade.cattle.io/v1/zz_generated_deepcopy.go @@ -190,6 +190,11 @@ func (in *PlanList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PlanSpec) DeepCopyInto(out *PlanSpec) { *out = *in + if in.JobActiveDeadlineSecs != nil { + in, out := &in.JobActiveDeadlineSecs, &out.JobActiveDeadlineSecs + *out = new(int64) + **out = **in + } if in.NodeSelector != nil { in, out := &in.NodeSelector, &out.NodeSelector *out = new(metav1.LabelSelector) @@ -297,7 +302,7 @@ func (in *TimeWindowSpec) DeepCopyInto(out *TimeWindowSpec) { *out = *in if in.Days != nil { in, out := &in.Days, &out.Days - *out = make([]string, len(*in)) + *out = make([]Day, len(*in)) copy(*out, *in) } return diff --git a/pkg/crds/crds.go b/pkg/crds/crds.go new file mode 100644 index 00000000..35d69792 --- /dev/null +++ b/pkg/crds/crds.go @@ -0,0 +1,84 @@ +package crds + +import ( + "embed" + "fmt" + "path/filepath" + + "github.com/rancher/wrangler/v3/pkg/yaml" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" +) + +const ( + baseDir = "." + crdKind = "CustomResourceDefinition" +) + +var ( + //go:embed yaml + crdFS embed.FS + + errDuplicate = fmt.Errorf("duplicate CRD") +) + +func List() ([]*apiextv1.CustomResourceDefinition, error) { + crdMap, err := crdsFromDir(baseDir) + if err != nil { + return nil, err + } + crds := make([]*apiextv1.CustomResourceDefinition, 0, len(crdMap)) + for _, crd := range crdMap { + crds = append(crds, crd) + } + return crds, nil +} + +// crdsFromDir recursively traverses the embedded yaml directory and find all CRD yamls. 
+// cribbed from https://github.com/rancher/rancher/blob/v2.11.2/pkg/crds/crds.go
+func crdsFromDir(dirName string) (map[string]*apiextv1.CustomResourceDefinition, error) {
+	// read all entries in the embedded directory
+	crdFiles, err := crdFS.ReadDir(dirName)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read embedded dir '%s': %w", dirName, err)
+	}
+
+	allCRDs := map[string]*apiextv1.CustomResourceDefinition{}
+	for _, dirEntry := range crdFiles {
+		fullPath := filepath.Join(dirName, dirEntry.Name())
+		if dirEntry.IsDir() {
+			// if the entry is the dir recurse into that folder to get all crds
+			subCRDs, err := crdsFromDir(fullPath)
+			if err != nil {
+				return nil, err
+			}
+			for k, v := range subCRDs {
+				if _, ok := allCRDs[k]; ok {
+					return nil, fmt.Errorf("%w for '%s'", errDuplicate, k)
+				}
+				allCRDs[k] = v
+			}
+			continue
+		}
+
+		// read the file and convert it to a crd object
+		file, err := crdFS.Open(fullPath)
+		if err != nil {
+			return nil, fmt.Errorf("failed to open embedded file '%s': %w", fullPath, err)
+		}
+		crdObjs, err := yaml.UnmarshalWithJSONDecoder[*apiextv1.CustomResourceDefinition](file)
+		if err != nil {
+			return nil, fmt.Errorf("failed to convert embedded file '%s' to yaml: %w", fullPath, err)
+		}
+		for _, crdObj := range crdObjs {
+			if crdObj.Kind != crdKind {
+				// if the yaml is not a CRD return an error
+				return nil, fmt.Errorf("decoded object is not '%s' instead found Kind='%s'", crdKind, crdObj.Kind)
+			}
+			if _, ok := allCRDs[crdObj.Name]; ok {
+				return nil, fmt.Errorf("%w for '%s'", errDuplicate, crdObj.Name)
+			}
+			allCRDs[crdObj.Name] = crdObj
+		}
+	}
+	return allCRDs, nil
+}
diff --git a/pkg/crds/yaml/generated/upgrade.cattle.io_plans.yaml b/pkg/crds/yaml/generated/upgrade.cattle.io_plans.yaml
new file mode 100644
index 00000000..2e5753b3
--- /dev/null
+++ b/pkg/crds/yaml/generated/upgrade.cattle.io_plans.yaml
@@ -0,0 +1,1187 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    
controller-gen.kubebuilder.io/version: v0.17.3 + name: plans.upgrade.cattle.io +spec: + group: upgrade.cattle.io + names: + kind: Plan + listKind: PlanList + plural: plans + singular: plan + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.upgrade.image + name: Image + type: string + - jsonPath: .spec.channel + name: Channel + type: string + - jsonPath: .spec.version + name: Version + type: string + name: v1 + schema: + openAPIV3Schema: + description: Plan represents a set of Jobs to apply an upgrade (or other operation) + to set of Nodes. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PlanSpec represents the user-configurable details of a Plan. + properties: + channel: + description: A URL that returns HTTP 302 with the last path element + of the value returned in the Location header assumed to be an image + tag (after munging "+" to "-"). + type: string + concurrency: + description: The maximum number of concurrent nodes to apply this + update on. + format: int64 + type: integer + cordon: + description: |- + If drain is specified, the value for cordon is ignored. + If neither drain nor cordon are specified and the node is marked as schedulable=false it will not be marked as schedulable=true when the Job completes. 
+ type: boolean + drain: + description: |- + If left unspecified, no drain will be performed. See: + - https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/ + - https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#drain + properties: + deleteEmptydirData: + type: boolean + deleteLocalData: + type: boolean + disableEviction: + type: boolean + force: + type: boolean + gracePeriod: + format: int32 + type: integer + ignoreDaemonSets: + type: boolean + podSelector: + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + skipWaitForDeleteTimeout: + type: integer + timeout: + description: |- + A Duration represents the elapsed time between two instants + as an int64 nanosecond count. The representation limits the + largest representable duration to approximately 290 years. + format: int64 + type: integer + type: object + exclusive: + description: Jobs for exclusive plans cannot be run alongside any + other exclusive plan. + type: boolean + imagePullSecrets: + description: Image Pull Secrets, used to pull images for the Job. + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + jobActiveDeadlineSecs: + description: |- + Sets ActiveDeadlineSeconds on Jobs generated to apply this Plan. + If the Job does not complete within this time, the Plan will stop processing until it is updated to trigger a redeploy. + If set to 0, Jobs have no deadline. If not set, the controller default value is used. + format: int64 + type: integer + nodeSelector: + description: Select which nodes this plan can be applied to. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + postCompleteDelay: + description: Time after a Job for one Node is complete before a new + Job will be created for the next Node. + type: string + prepare: + description: |- + The prepare init container, if specified, is run before cordon/drain which is run before the upgrade container. + Shares the same format as the upgrade container. + If no tag is included in the image name, the tag portion of the image will be the value from .status.latestVersion a.k.a. the resolved version for this plan. 
+ properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + envFrom: + items: + description: EnvFromSource represents the source of a set of + ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + envs: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. 
Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + description: Image name. If the tag is omitted, the value from + .status.latestVersion will be used. + type: string + securityContext: + description: |- + SecurityContext holds security configuration that will be applied to a container. + Some fields are present in both SecurityContext and PodSecurityContext. When both + are set, the values in SecurityContext take precedence. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. 
This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. 
+ type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. 
+ If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. 
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + volumes: + items: + description: HostPath volume to mount into the pod + properties: + destination: + description: Path to mount the Volume at within the Pod. + type: string + name: + description: Name of the Volume as it will appear within + the Pod spec. + type: string + source: + description: Path on the host to mount. + type: string + type: object + type: array + type: object + secrets: + description: Secrets to be mounted into the Job Pod. + items: + description: SecretSpec describes a secret to be mounted for prepare/upgrade + containers. 
+ properties: + ignoreUpdates: + description: If set to true, the Secret contents will not be + hashed, and changes to the Secret will not trigger new application + of the Plan. + type: boolean + name: + description: Secret name + type: string + path: + description: Path to mount the Secret volume within the Pod. + type: string + type: object + type: array + serviceAccountName: + description: The service account for the pod to use. As with normal + pods, if not specified the default service account from the namespace + will be assigned. + type: string + tolerations: + description: |- + Specify which node taints should be tolerated by pods applying the upgrade. + Anything specified here is appended to the default of: + - {key: node.kubernetes.io/unschedulable, effect: NoSchedule, operator: Exists} + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). 
Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + upgrade: + description: |- + The upgrade container. + If no tag is included in the image name, the tag portion of the image will be the value from .status.latestVersion a.k.a. the resolved version for this plan. + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + envFrom: + items: + description: EnvFromSource represents the source of a set of + ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + envs: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + description: Image name. If the tag is omitted, the value from + .status.latestVersion will be used. + type: string + securityContext: + description: |- + SecurityContext holds security configuration that will be applied to a container. + Some fields are present in both SecurityContext and PodSecurityContext. When both + are set, the values in SecurityContext take precedence. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. 
+ Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. 
If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). 
+ In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + volumes: + items: + description: HostPath volume to mount into the pod + properties: + destination: + description: Path to mount the Volume at within the Pod. + type: string + name: + description: Name of the Volume as it will appear within + the Pod spec. + type: string + source: + description: Path on the host to mount. + type: string + type: object + type: array + type: object + version: + description: Providing a value for version will prevent polling/resolution + of the channel if specified. + type: string + window: + description: |- + A time window in which to execute Jobs for this Plan. + Jobs will not be generated outside this time window, but may continue executing into the window once started. + properties: + days: + description: Days that this time window is valid for + items: + enum: + - "0" + - su + - sun + - sunday + - "1" + - mo + - mon + - monday + - "2" + - tu + - tue + - tuesday + - "3" + - we + - wed + - wednesday + - "4" + - th + - thu + - thursday + - "5" + - fr + - fri + - friday + - "6" + - sa + - sat + - saturday + type: string + type: array + endTime: + description: End of the time window. + type: string + startTime: + description: Start of the time window. + type: string + timeZone: + description: Time zone for the time window; if not specified UTC + will be used. + type: string + type: object + type: object + status: + description: PlanStatus represents the resulting state from processing + Plan events. 
+ properties: + applying: + description: List of Node names that the Plan is currently being applied + on. + items: + type: string + type: array + conditions: + items: + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + type: string + lastUpdateTime: + description: The last time this condition was updated. + type: string + message: + description: Human-readable message indicating details about + last transition + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of cluster condition. + type: string + required: + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + latestHash: + description: The hash of the most recently applied plan .spec. + type: string + latestVersion: + description: The latest version, as resolved from .spec.version, or + the channel server. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/pkg/generated/clientset/versioned/clientset.go b/pkg/generated/clientset/versioned/clientset.go index 189d7e44..f7f8b325 100644 --- a/pkg/generated/clientset/versioned/clientset.go +++ b/pkg/generated/clientset/versioned/clientset.go @@ -19,8 +19,8 @@ limitations under the License. 
package versioned import ( - "fmt" - "net/http" + fmt "fmt" + http "net/http" upgradev1 "github.com/rancher/system-upgrade-controller/pkg/generated/clientset/versioned/typed/upgrade.cattle.io/v1" discovery "k8s.io/client-go/discovery" diff --git a/pkg/generated/clientset/versioned/fake/clientset_generated.go b/pkg/generated/clientset/versioned/fake/clientset_generated.go index 7487af9e..2401086d 100644 --- a/pkg/generated/clientset/versioned/fake/clientset_generated.go +++ b/pkg/generated/clientset/versioned/fake/clientset_generated.go @@ -31,8 +31,12 @@ import ( // NewSimpleClientset returns a clientset that will respond with the provided objects. // It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement // for a real clientset and is mostly useful in simple unit tests. +// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). func NewSimpleClientset(objects ...runtime.Object) *Clientset { o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) for _, obj := range objects { diff --git a/pkg/generated/clientset/versioned/typed/upgrade.cattle.io/v1/fake/fake_plan.go b/pkg/generated/clientset/versioned/typed/upgrade.cattle.io/v1/fake/fake_plan.go index bfba6b29..4a76c5c7 100644 --- a/pkg/generated/clientset/versioned/typed/upgrade.cattle.io/v1/fake/fake_plan.go +++ b/pkg/generated/clientset/versioned/typed/upgrade.cattle.io/v1/fake/fake_plan.go @@ -19,123 +19,30 @@ limitations under the License. 
package fake import ( - "context" - v1 "github.com/rancher/system-upgrade-controller/pkg/apis/upgrade.cattle.io/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" + upgradecattleiov1 "github.com/rancher/system-upgrade-controller/pkg/generated/clientset/versioned/typed/upgrade.cattle.io/v1" + gentype "k8s.io/client-go/gentype" ) -// FakePlans implements PlanInterface -type FakePlans struct { +// fakePlans implements PlanInterface +type fakePlans struct { + *gentype.FakeClientWithList[*v1.Plan, *v1.PlanList] Fake *FakeUpgradeV1 - ns string -} - -var plansResource = v1.SchemeGroupVersion.WithResource("plans") - -var plansKind = v1.SchemeGroupVersion.WithKind("Plan") - -// Get takes name of the plan, and returns the corresponding plan object, and an error if there is any. -func (c *FakePlans) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Plan, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(plansResource, c.ns, name), &v1.Plan{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Plan), err -} - -// List takes label and field selectors, and returns the list of Plans that match those selectors. -func (c *FakePlans) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PlanList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(plansResource, plansKind, c.ns, opts), &v1.PlanList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.PlanList{ListMeta: obj.(*v1.PlanList).ListMeta} - for _, item := range obj.(*v1.PlanList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested plans. 
-func (c *FakePlans) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(plansResource, c.ns, opts)) - -} - -// Create takes the representation of a plan and creates it. Returns the server's representation of the plan, and an error, if there is any. -func (c *FakePlans) Create(ctx context.Context, plan *v1.Plan, opts metav1.CreateOptions) (result *v1.Plan, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(plansResource, c.ns, plan), &v1.Plan{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Plan), err -} - -// Update takes the representation of a plan and updates it. Returns the server's representation of the plan, and an error, if there is any. -func (c *FakePlans) Update(ctx context.Context, plan *v1.Plan, opts metav1.UpdateOptions) (result *v1.Plan, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(plansResource, c.ns, plan), &v1.Plan{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Plan), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePlans) UpdateStatus(ctx context.Context, plan *v1.Plan, opts metav1.UpdateOptions) (*v1.Plan, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(plansResource, "status", c.ns, plan), &v1.Plan{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Plan), err -} - -// Delete takes name of the plan and deletes it. Returns an error if one occurs. -func (c *FakePlans) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(plansResource, c.ns, name, opts), &v1.Plan{}) - - return err } -// DeleteCollection deletes a collection of objects. 
-func (c *FakePlans) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(plansResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.PlanList{}) - return err -} - -// Patch applies the patch and returns the patched plan. -func (c *FakePlans) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Plan, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(plansResource, c.ns, name, pt, data, subresources...), &v1.Plan{}) - - if obj == nil { - return nil, err +func newFakePlans(fake *FakeUpgradeV1, namespace string) upgradecattleiov1.PlanInterface { + return &fakePlans{ + gentype.NewFakeClientWithList[*v1.Plan, *v1.PlanList]( + fake.Fake, + namespace, + v1.SchemeGroupVersion.WithResource("plans"), + v1.SchemeGroupVersion.WithKind("Plan"), + func() *v1.Plan { return &v1.Plan{} }, + func() *v1.PlanList { return &v1.PlanList{} }, + func(dst, src *v1.PlanList) { dst.ListMeta = src.ListMeta }, + func(list *v1.PlanList) []*v1.Plan { return gentype.ToPointerSlice(list.Items) }, + func(list *v1.PlanList, items []*v1.Plan) { list.Items = gentype.FromPointerSlice(items) }, + ), + fake, } - return obj.(*v1.Plan), err } diff --git a/pkg/generated/clientset/versioned/typed/upgrade.cattle.io/v1/fake/fake_upgrade.cattle.io_client.go b/pkg/generated/clientset/versioned/typed/upgrade.cattle.io/v1/fake/fake_upgrade.cattle.io_client.go index 641d1b67..1faf8961 100644 --- a/pkg/generated/clientset/versioned/typed/upgrade.cattle.io/v1/fake/fake_upgrade.cattle.io_client.go +++ b/pkg/generated/clientset/versioned/typed/upgrade.cattle.io/v1/fake/fake_upgrade.cattle.io_client.go @@ -29,7 +29,7 @@ type FakeUpgradeV1 struct { } func (c *FakeUpgradeV1) Plans(namespace string) v1.PlanInterface { - return &FakePlans{c, namespace} + return newFakePlans(c, namespace) } // RESTClient 
returns a RESTClient that is used to communicate diff --git a/pkg/generated/clientset/versioned/typed/upgrade.cattle.io/v1/plan.go b/pkg/generated/clientset/versioned/typed/upgrade.cattle.io/v1/plan.go index 6135b6bc..154d2dbd 100644 --- a/pkg/generated/clientset/versioned/typed/upgrade.cattle.io/v1/plan.go +++ b/pkg/generated/clientset/versioned/typed/upgrade.cattle.io/v1/plan.go @@ -19,15 +19,14 @@ limitations under the License. package v1 import ( - "context" - "time" + context "context" - v1 "github.com/rancher/system-upgrade-controller/pkg/apis/upgrade.cattle.io/v1" + upgradecattleiov1 "github.com/rancher/system-upgrade-controller/pkg/apis/upgrade.cattle.io/v1" scheme "github.com/rancher/system-upgrade-controller/pkg/generated/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // PlansGetter has a method to return a PlanInterface. @@ -38,158 +37,34 @@ type PlansGetter interface { // PlanInterface has methods to work with Plan resources. type PlanInterface interface { - Create(ctx context.Context, plan *v1.Plan, opts metav1.CreateOptions) (*v1.Plan, error) - Update(ctx context.Context, plan *v1.Plan, opts metav1.UpdateOptions) (*v1.Plan, error) - UpdateStatus(ctx context.Context, plan *v1.Plan, opts metav1.UpdateOptions) (*v1.Plan, error) + Create(ctx context.Context, plan *upgradecattleiov1.Plan, opts metav1.CreateOptions) (*upgradecattleiov1.Plan, error) + Update(ctx context.Context, plan *upgradecattleiov1.Plan, opts metav1.UpdateOptions) (*upgradecattleiov1.Plan, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, plan *upgradecattleiov1.Plan, opts metav1.UpdateOptions) (*upgradecattleiov1.Plan, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Plan, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.PlanList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*upgradecattleiov1.Plan, error) + List(ctx context.Context, opts metav1.ListOptions) (*upgradecattleiov1.PlanList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Plan, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *upgradecattleiov1.Plan, err error) PlanExpansion } // plans implements PlanInterface type plans struct { - client rest.Interface - ns string + *gentype.ClientWithList[*upgradecattleiov1.Plan, *upgradecattleiov1.PlanList] } // newPlans returns a Plans func newPlans(c *UpgradeV1Client, namespace string) *plans { return &plans{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithList[*upgradecattleiov1.Plan, *upgradecattleiov1.PlanList]( + "plans", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *upgradecattleiov1.Plan { return &upgradecattleiov1.Plan{} }, + func() *upgradecattleiov1.PlanList { return &upgradecattleiov1.PlanList{} }, + ), } } - -// Get takes name of the plan, and returns the corresponding plan object, and an error if there is any. -func (c *plans) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Plan, err error) { - result = &v1.Plan{} - err = c.client.Get(). - Namespace(c.ns). - Resource("plans"). 
- Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Plans that match those selectors. -func (c *plans) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PlanList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PlanList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("plans"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested plans. -func (c *plans) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("plans"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a plan and creates it. Returns the server's representation of the plan, and an error, if there is any. -func (c *plans) Create(ctx context.Context, plan *v1.Plan, opts metav1.CreateOptions) (result *v1.Plan, err error) { - result = &v1.Plan{} - err = c.client.Post(). - Namespace(c.ns). - Resource("plans"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(plan). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a plan and updates it. Returns the server's representation of the plan, and an error, if there is any. -func (c *plans) Update(ctx context.Context, plan *v1.Plan, opts metav1.UpdateOptions) (result *v1.Plan, err error) { - result = &v1.Plan{} - err = c.client.Put(). - Namespace(c.ns). - Resource("plans"). - Name(plan.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(plan). - Do(ctx). 
- Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *plans) UpdateStatus(ctx context.Context, plan *v1.Plan, opts metav1.UpdateOptions) (result *v1.Plan, err error) { - result = &v1.Plan{} - err = c.client.Put(). - Namespace(c.ns). - Resource("plans"). - Name(plan.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(plan). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the plan and deletes it. Returns an error if one occurs. -func (c *plans) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("plans"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *plans) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("plans"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched plan. -func (c *plans) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Plan, err error) { - result = &v1.Plan{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("plans"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/pkg/generated/clientset/versioned/typed/upgrade.cattle.io/v1/upgrade.cattle.io_client.go b/pkg/generated/clientset/versioned/typed/upgrade.cattle.io/v1/upgrade.cattle.io_client.go index 82410309..3dd7d3e1 100644 --- a/pkg/generated/clientset/versioned/typed/upgrade.cattle.io/v1/upgrade.cattle.io_client.go +++ b/pkg/generated/clientset/versioned/typed/upgrade.cattle.io/v1/upgrade.cattle.io_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "github.com/rancher/system-upgrade-controller/pkg/apis/upgrade.cattle.io/v1" - "github.com/rancher/system-upgrade-controller/pkg/generated/clientset/versioned/scheme" + upgradecattleiov1 "github.com/rancher/system-upgrade-controller/pkg/apis/upgrade.cattle.io/v1" + scheme "github.com/rancher/system-upgrade-controller/pkg/generated/clientset/versioned/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *UpgradeV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := upgradecattleiov1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/pkg/upgrade/controller.go b/pkg/upgrade/controller.go index 08d4aca9..972a4146 100644 --- a/pkg/upgrade/controller.go +++ b/pkg/upgrade/controller.go @@ -7,8 +7,8 @@ import ( "os" "time" + "github.com/rancher/system-upgrade-controller/pkg/crds" upgradectl "github.com/rancher/system-upgrade-controller/pkg/generated/controllers/upgrade.cattle.io" - upgradeplan "github.com/rancher/system-upgrade-controller/pkg/upgrade/plan" "github.com/rancher/system-upgrade-controller/pkg/version" "github.com/rancher/wrangler/v3/pkg/apply" 
"github.com/rancher/wrangler/v3/pkg/crd" @@ -19,6 +19,7 @@ import ( "github.com/rancher/wrangler/v3/pkg/start" "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" @@ -27,6 +28,11 @@ import ( "k8s.io/client-go/tools/record" ) +const ( + // readyDuration time to wait for CRDs to be ready. + readyDuration = time.Minute * 1 +) + var ( ErrPlanNotReady = errors.New("plan is not valid and resolved") ErrOutsideWindow = errors.New("current time is not within configured window") @@ -176,21 +182,14 @@ func (ctl *Controller) Start(ctx context.Context, threads int) error { } func (ctl *Controller) registerCRD(ctx context.Context) error { - factory, err := crd.NewFactoryFromClient(ctl.cfg) + crds, err := crds.List() if err != nil { return err } - - var crds []crd.CRD - for _, crdFn := range []func() (*crd.CRD, error){ - upgradeplan.CRD, - } { - crdef, err := crdFn() - if err != nil { - return err - } - crds = append(crds, *crdef) + client, err := clientset.NewForConfig(ctl.cfg) + if err != nil { + return err } - return factory.BatchCreateCRDs(ctx, crds...).BatchWait() + return crd.BatchCreateCRDs(ctx, client.ApiextensionsV1().CustomResourceDefinitions(), nil, readyDuration, crds) } diff --git a/pkg/upgrade/job/job.go b/pkg/upgrade/job/job.go index 143a9ab0..e9cb9e74 100644 --- a/pkg/upgrade/job/job.go +++ b/pkg/upgrade/job/job.go @@ -417,23 +417,36 @@ func New(plan *upgradeapiv1.Plan, node *corev1.Node, controllerName string) *bat ), } - activeDeadlineSeconds := ActiveDeadlineSeconds - - if plan.Spec.JobActiveDeadlineSecs > 0 { - activeDeadlineSeconds = plan.Spec.JobActiveDeadlineSecs + if plan.Spec.JobActiveDeadlineSecs == nil { + // nil means default from controller + job.Spec.ActiveDeadlineSeconds = pointer.Int64(ActiveDeadlineSeconds) + } else { + if *plan.Spec.JobActiveDeadlineSecs < 0 { + // < 0 
means default from controller + job.Spec.ActiveDeadlineSeconds = pointer.Int64(ActiveDeadlineSeconds) + } else if *plan.Spec.JobActiveDeadlineSecs > 0 { + // > 0 means value as set + job.Spec.ActiveDeadlineSeconds = plan.Spec.JobActiveDeadlineSecs + } else { + // 0 means nil, no deadline + job.Spec.ActiveDeadlineSeconds = nil + } } - // If configured with a maximum deadline via "SYSTEM_UPGRADE_JOB_ACTIVE_DEADLINE_SECONDS_MAX", - // clamp the Plan's given deadline to the maximum. - if ActiveDeadlineSecondsMax > 0 && activeDeadlineSeconds > ActiveDeadlineSecondsMax { - activeDeadlineSeconds = ActiveDeadlineSecondsMax + if job.Spec.ActiveDeadlineSeconds == nil { + // ActiveDeadlineSeconds cannot be nil if a max is configured + if ActiveDeadlineSecondsMax > 0 { + job.Spec.ActiveDeadlineSeconds = pointer.Int64(ActiveDeadlineSecondsMax) + } + } else { + // Clamp configured ActiveDeadlineSeconds to max + if ActiveDeadlineSecondsMax > 0 && *job.Spec.ActiveDeadlineSeconds > ActiveDeadlineSecondsMax { + job.Spec.ActiveDeadlineSeconds = pointer.Int64(ActiveDeadlineSecondsMax) + } } - if activeDeadlineSeconds > 0 { - job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds - if drain != nil && drain.Timeout != nil && drain.Timeout.Milliseconds() > ActiveDeadlineSeconds*1000 { - logrus.Warnf("drain timeout exceeds active deadline seconds") - } + if drain != nil && drain.Timeout != nil && job.Spec.ActiveDeadlineSeconds != nil && drain.Timeout.Milliseconds() > *job.Spec.ActiveDeadlineSeconds*1000 { + logrus.Warnf("Plan %s/%s drain timeout exceeds active deadline seconds", plan.Namespace, plan.Name) } return job diff --git a/pkg/upgrade/job/job_suite_test.go b/pkg/upgrade/job/job_suite_test.go index 191229da..17317ccb 100644 --- a/pkg/upgrade/job/job_suite_test.go +++ b/pkg/upgrade/job/job_suite_test.go @@ -5,10 +5,12 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/gstruct" upgradev1 "github.com/rancher/system-upgrade-controller/pkg/apis/upgrade.cattle.io/v1" sucjob "github.com/rancher/system-upgrade-controller/pkg/upgrade/job" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" ) func TestJob(t *testing.T) { @@ -45,21 +47,33 @@ var _ = Describe("Jobs", func() { Describe("Setting the batchv1.Job ActiveDeadlineSeconds field", func() { Context("When the Plan has a positive non-zero value for deadline", func() { It("Constructs the batchv1.Job with the Plan's given value", func() { - plan.Spec.JobActiveDeadlineSecs = 12345 + plan.Spec.JobActiveDeadlineSecs = pointer.Int64(12345) job := sucjob.New(plan, node, "foo") - Expect(*job.Spec.ActiveDeadlineSeconds).To(Equal(int64(12345))) + Expect(job.Spec.ActiveDeadlineSeconds).To(PointTo(Equal(int64(12345)))) }) }) - Context("When the Plan has a zero-value given as its deadline", func() { + Context("When the Plan has a nil given as its deadline", func() { It("Constructs the batchv1.Job with a global default", func() { oldActiveDeadlineSeconds := sucjob.ActiveDeadlineSeconds sucjob.ActiveDeadlineSeconds = 300 defer func() { sucjob.ActiveDeadlineSeconds = oldActiveDeadlineSeconds }() - plan.Spec.JobActiveDeadlineSecs = 0 + plan.Spec.JobActiveDeadlineSecs = nil job := sucjob.New(plan, node, "bar") - Expect(*job.Spec.ActiveDeadlineSeconds).To(Equal(int64(300))) + Expect(job.Spec.ActiveDeadlineSeconds).To(PointTo(Equal(int64(300)))) + }) + }) + + Context("When the Plan has a zero value given as its deadline", func() { + It("Constructs the batchv1.Job with no deadline", func() { + oldActiveDeadlineSeconds := sucjob.ActiveDeadlineSeconds + sucjob.ActiveDeadlineSeconds = 300 + defer func() { sucjob.ActiveDeadlineSeconds = oldActiveDeadlineSeconds }() + + plan.Spec.JobActiveDeadlineSecs = pointer.Int64(0) + job := sucjob.New(plan, node, "bar") + Expect(job.Spec.ActiveDeadlineSeconds).To(BeNil()) }) }) @@ -69,9 +83,9 @@ var _ 
= Describe("Jobs", func() { sucjob.ActiveDeadlineSeconds = 3600 defer func() { sucjob.ActiveDeadlineSeconds = oldActiveDeadlineSeconds }() - plan.Spec.JobActiveDeadlineSecs = -1 + plan.Spec.JobActiveDeadlineSecs = pointer.Int64(-1) job := sucjob.New(plan, node, "baz") - Expect(*job.Spec.ActiveDeadlineSeconds).To(Equal(int64(3600))) + Expect(job.Spec.ActiveDeadlineSeconds).To(PointTo(Equal(int64(3600)))) }) }) @@ -81,9 +95,9 @@ var _ = Describe("Jobs", func() { sucjob.ActiveDeadlineSecondsMax = 300 defer func() { sucjob.ActiveDeadlineSecondsMax = oldActiveDeadlineSecondsMax }() - plan.Spec.JobActiveDeadlineSecs = 600 + plan.Spec.JobActiveDeadlineSecs = pointer.Int64(600) job := sucjob.New(plan, node, "foobar") - Expect(*job.Spec.ActiveDeadlineSeconds).To(Equal(int64(300))) + Expect(job.Spec.ActiveDeadlineSeconds).To(PointTo(Equal(int64(300)))) }) }) diff --git a/pkg/upgrade/plan/plan.go b/pkg/upgrade/plan/plan.go index f7e2054d..14194325 100644 --- a/pkg/upgrade/plan/plan.go +++ b/pkg/upgrade/plan/plan.go @@ -15,11 +15,9 @@ import ( "github.com/kubereboot/kured/pkg/timewindow" upgradeapi "github.com/rancher/system-upgrade-controller/pkg/apis/upgrade.cattle.io" upgradeapiv1 "github.com/rancher/system-upgrade-controller/pkg/apis/upgrade.cattle.io/v1" - "github.com/rancher/wrangler/v3/pkg/crd" "github.com/rancher/wrangler/v3/pkg/data" corectlv1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1" "github.com/rancher/wrangler/v3/pkg/merr" - "github.com/rancher/wrangler/v3/pkg/schemas/openapi" "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -50,25 +48,6 @@ var ( }(defaultPollingInterval) ) -func CRD() (*crd.CRD, error) { - prototype := upgradeapiv1.NewPlan("", "", upgradeapiv1.Plan{}) - schema, err := openapi.ToOpenAPIFromStruct(*prototype) - if err != nil { - return nil, err - } - plan := crd.CRD{ - GVK: prototype.GroupVersionKind(), - PluralName: upgradeapiv1.PlanResourceName, - Status: true, - 
Schema: schema, - Categories: []string{"upgrade"}, - }. - WithColumn("Image", ".spec.upgrade.image"). - WithColumn("Channel", ".spec.channel"). - WithColumn("Version", ".spec.version") - return &plan, nil -} - func DigestStatus(plan *upgradeapiv1.Plan, secretCache corectlv1.SecretCache) (upgradeapiv1.PlanStatus, error) { if upgradeapiv1.PlanLatestResolved.GetReason(plan) != "Error" { h := sha256.New224() @@ -257,7 +236,11 @@ func Validate(plan *upgradeapiv1.Plan, secretCache corectlv1.SecretCache) error } } if windowSpec := plan.Spec.Window; windowSpec != nil { - if _, err := timewindow.New(windowSpec.Days, windowSpec.StartTime, windowSpec.EndTime, windowSpec.TimeZone); err != nil { + days := make([]string, len(windowSpec.Days)) + for i, day := range windowSpec.Days { + days[i] = string(day) + } + if _, err := timewindow.New(days, windowSpec.StartTime, windowSpec.EndTime, windowSpec.TimeZone); err != nil { return merr.NewErrors(ErrInvalidWindow, err) } } diff --git a/scripts/package-controller b/scripts/package-controller index 7f992415..da89c225 100755 --- a/scripts/package-controller +++ b/scripts/package-controller @@ -31,5 +31,5 @@ if [ "$ARCH" = "amd64" ]; then trap reset-kustomization EXIT kustomize edit set image "rancher/system-upgrade-controller=${REPO}/system-upgrade-controller:${VERSION}" kustomize build --output ./dist/artifacts/system-upgrade-controller.yaml - go run hack/crdgen.go > ./dist/artifacts/crd.yaml + cat ./pkg/crds/yaml/*/* > dist/artifacts/crd.yaml fi From 9cdb0e079d310436df788f75f1c201a656127deb Mon Sep 17 00:00:00 2001 From: Brad Davidson Date: Sat, 21 Jun 2025 04:58:56 +0000 Subject: [PATCH 4/6] Add API docs Also clean up some of the api spec for better documentation, and fix tests. 
Signed-off-by: Brad Davidson --- Dockerfile.dapper | 3 +- README.md | 4 + crd-ref-docs.yaml | 5 + doc/plan.md | 201 ++++++++++++++++++ e2e/suite/job_generate_test.go | 13 +- e2e/suite/plan_create_test.go | 2 +- main.go | 1 + pkg/apis/upgrade.cattle.io/v1/types.go | 52 +++-- .../v1/zz_generated_deepcopy.go | 10 +- .../generated/upgrade.cattle.io_plans.yaml | 43 ++-- 10 files changed, 280 insertions(+), 54 deletions(-) create mode 100644 crd-ref-docs.yaml create mode 100644 doc/plan.md diff --git a/Dockerfile.dapper b/Dockerfile.dapper index 3e2d0287..4827dced 100644 --- a/Dockerfile.dapper +++ b/Dockerfile.dapper @@ -19,7 +19,8 @@ RUN if [ "${ARCH:-$(go env GOARCH)}" = "amd64" ]; then \ RUN mkdir -p /usr/local/lib/docker/cli-plugins; \ curl -o /usr/local/lib/docker/cli-plugins/docker-buildx -fsSL "https://github.com/docker/buildx/releases/download/v0.17.1/buildx-v0.17.1.linux-amd64"; \ chmod +x /usr/local/lib/docker/cli-plugins/docker-buildx -RUN go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.17.3 +RUN go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.17.3 && \ + go install github.com/elastic/crd-ref-docs@v0.1.0 ENV DAPPER_ENV REPO TAG DRONE_TAG ENV DAPPER_SOURCE /go/src/github.com/rancher/system-upgrade-controller/ ENV DAPPER_OUTPUT ./bin ./dist diff --git a/README.md b/README.md index 4b5b426e..a0204161 100644 --- a/README.md +++ b/README.md @@ -52,6 +52,10 @@ But in the time-honored tradition of `curl ${script} | sudo sh -` here is a nice kubectl apply -k github.com/rancher/system-upgrade-controller ``` +## API Documentation + +Autogenerated API docs for `upgrade.cattle.io/v1 Plan` are available at [doc/plan.md](doc/plan.md#Plan) + ### Example Plans - [examples/k3s-upgrade.yaml](examples/k3s-upgrade.yaml) diff --git a/crd-ref-docs.yaml b/crd-ref-docs.yaml new file mode 100644 index 00000000..2873f458 --- /dev/null +++ b/crd-ref-docs.yaml @@ -0,0 +1,5 @@ +processor: + ignoreFields: + - "TypeMeta$" +render: + kubernetesVersion: 1.32 diff 
--git a/doc/plan.md b/doc/plan.md new file mode 100644 index 00000000..ce4750a5 --- /dev/null +++ b/doc/plan.md @@ -0,0 +1,201 @@ +# API Reference + +## Packages +- [upgrade.cattle.io/v1](#upgradecattleiov1) + + +## upgrade.cattle.io/v1 + + + + + + +#### ContainerSpec + + + +ContainerSpec is a simplified container template spec, used to configure the prepare and upgrade +containers of the Job Pod. + + + +_Appears in:_ +- [PlanSpec](#planspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `image` _string_ | Image name. If the tag is omitted, the value from .status.latestVersion will be used. | | Required: \{\}
| +| `command` _string array_ | | | | +| `args` _string array_ | | | | +| `envs` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#envvar-v1-core) array_ | | | | +| `envFrom` _[EnvFromSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#envfromsource-v1-core) array_ | | | | +| `volumes` _[VolumeSpec](#volumespec) array_ | | | | +| `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#securitycontext-v1-core)_ | | | | + + +#### Day + +_Underlying type:_ _string_ + + + +_Validation:_ +- Enum: [0 su sun sunday 1 mo mon monday 2 tu tue tuesday 3 we wed wednesday 4 th thu thursday 5 fr fri friday 6 sa sat saturday] + +_Appears in:_ +- [TimeWindowSpec](#timewindowspec) + + + +#### DrainSpec + + + +DrainSpec encapsulates kubectl drain parameters minus node/pod selectors. See: +- https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/ +- https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#drain + + + +_Appears in:_ +- [PlanSpec](#planspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `timeout` _[Duration](#duration)_ | | | | +| `gracePeriod` _integer_ | | | | +| `deleteLocalData` _boolean_ | | | | +| `deleteEmptydirData` _boolean_ | | | | +| `ignoreDaemonSets` _boolean_ | | | | +| `force` _boolean_ | | | | +| `disableEviction` _boolean_ | | | | +| `skipWaitForDeleteTimeout` _integer_ | | | | +| `podSelector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#labelselector-v1-meta)_ | | | | + + +#### Plan + + + +Plan represents a set of Jobs to apply an upgrade (or other operation) to set of Nodes. 
+ + + +_Appears in:_ +- [PlanList](#planlist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[PlanSpec](#planspec)_ | | | | +| `status` _[PlanStatus](#planstatus)_ | | | | + + + + +#### PlanSpec + + + +PlanSpec represents the user-configurable details of a Plan. + + + +_Appears in:_ +- [Plan](#plan) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `concurrency` _integer_ | The maximum number of concurrent nodes to apply this update on. | | | +| `jobActiveDeadlineSecs` _integer_ | Sets ActiveDeadlineSeconds on Jobs generated to apply this Plan.
If the Job does not complete within this time, the Plan will stop processing until it is updated to trigger a redeploy.
If set to 0, Jobs have no deadline. If not set, the controller default value is used. | | | +| `nodeSelector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#labelselector-v1-meta)_ | Select which nodes this plan can be applied to. | | | +| `serviceAccountName` _string_ | The service account for the pod to use. As with normal pods, if not specified the default service account from the namespace will be assigned. | | | +| `channel` _string_ | A URL that returns HTTP 302 with the last path element of the value returned in the Location header assumed to be an image tag (after munging "+" to "-"). | | | +| `version` _string_ | Providing a value for version will prevent polling/resolution of the channel if specified. | | | +| `secrets` _[SecretSpec](#secretspec) array_ | Secrets to be mounted into the Job Pod. | | | +| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) array_ | Specify which node taints should be tolerated by pods applying the upgrade.
Anything specified here is appended to the default of:
- `\{key: node.kubernetes.io/unschedulable, effect: NoSchedule, operator: Exists\}` | | | +| `exclusive` _boolean_ | Jobs for exclusive plans cannot be run alongside any other exclusive plan. | | | +| `window` _[TimeWindowSpec](#timewindowspec)_ | A time window in which to execute Jobs for this Plan.
Jobs will not be generated outside this time window, but may continue executing into the window once started. | | | +| `prepare` _[ContainerSpec](#containerspec)_ | The prepare init container, if specified, is run before cordon/drain which is run before the upgrade container. | | | +| `upgrade` _[ContainerSpec](#containerspec)_ | The upgrade container; must be specified. | | | +| `cordon` _boolean_ | If Cordon is true, the node is cordoned before the upgrade container is run.
If drain is specified, the value for cordon is ignored, and the node is cordoned.
If neither drain nor cordon are specified and the node is marked as schedulable=false it will not be marked as schedulable=true when the Job completes. | | | +| `drain` _[DrainSpec](#drainspec)_ | Configuration for draining nodes prior to upgrade. If left unspecified, no drain will be performed. | | | +| `imagePullSecrets` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#localobjectreference-v1-core) array_ | Image Pull Secrets, used to pull images for the Job. | | | +| `postCompleteDelay` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#duration-v1-meta)_ | Time after a Job for one Node is complete before a new Job will be created for the next Node. | | | + + +#### PlanStatus + + + +PlanStatus represents the resulting state from processing Plan events. + + + +_Appears in:_ +- [Plan](#plan) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `conditions` _GenericCondition array_ | | | | +| `latestVersion` _string_ | The latest version, as resolved from .spec.version, or the channel server. | | | +| `latestHash` _string_ | The hash of the most recently applied plan .spec. | | | +| `applying` _string array_ | List of Node names that the Plan is currently being applied on. | | | + + +#### SecretSpec + + + +SecretSpec describes a Secret to be mounted for prepare/upgrade containers. + + + +_Appears in:_ +- [PlanSpec](#planspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `name` _string_ | Secret name | | Required: \{\}
| +| `path` _string_ | Path to mount the Secret volume within the Pod. | | Required: \{\}
| +| `ignoreUpdates` _boolean_ | If set to true, the Secret contents will not be hashed, and changes to the Secret will not trigger new application of the Plan. | | | + + +#### TimeWindowSpec + + + +TimeWindowSpec describes a time window in which a Plan should be processed. + + + +_Appears in:_ +- [PlanSpec](#planspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `days` _[Day](#day) array_ | Days that this time window is valid for | | Enum: [0 su sun sunday 1 mo mon monday 2 tu tue tuesday 3 we wed wednesday 4 th thu thursday 5 fr fri friday 6 sa sat saturday]
MinItems: 1
| +| `startTime` _string_ | Start of the time window. | | | +| `endTime` _string_ | End of the time window. | | | +| `timeZone` _string_ | Time zone for the time window; if not specified UTC will be used. | | | + + +#### VolumeSpec + + + +HostPath volume to mount into the pod + + + +_Appears in:_ +- [ContainerSpec](#containerspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `name` _string_ | Name of the Volume as it will appear within the Pod spec. | | Required: \{\}
| +| `source` _string_ | Path on the host to mount. | | Required: \{\}
| +| `destination` _string_ | Path to mount the Volume at within the Pod. | | Required: \{\}
| + + diff --git a/e2e/suite/job_generate_test.go b/e2e/suite/job_generate_test.go index 6ed79071..b7ffe93e 100644 --- a/e2e/suite/job_generate_test.go +++ b/e2e/suite/job_generate_test.go @@ -185,16 +185,11 @@ var _ = Describe("Job Generation", func() { Operator: metav1.LabelSelectorOpDoesNotExist, }}, } - plan, err = e2e.CreatePlan(plan) - Expect(err).ToNot(HaveOccurred()) - - plan, err = e2e.WaitForPlanCondition(plan.Name, upgradeapiv1.PlanSpecValidated, 30*time.Second) - Expect(err).ToNot(HaveOccurred()) - Expect(upgradeapiv1.PlanSpecValidated.IsTrue(plan)).To(BeFalse()) - Expect(upgradeapiv1.PlanSpecValidated.GetMessage(plan)).To(ContainSubstring("spec.window is invalid")) + _, err = e2e.CreatePlan(plan) + Expect(err).To(MatchError(ContainSubstring("invalid: spec.window.days"))) plan.Spec.Window.Days = []upgradeapiv1.Day{"su", "mo", "tu", "we", "th", "fr", "sa"} - plan, err = e2e.UpdatePlan(plan) + plan, err = e2e.CreatePlan(plan) Expect(err).ToNot(HaveOccurred()) plan, err = e2e.WaitForPlanCondition(plan.Name, upgradeapiv1.PlanSpecValidated, 30*time.Second) @@ -205,7 +200,7 @@ var _ = Describe("Job Generation", func() { Expect(err).ToNot(HaveOccurred()) Expect(jobs).To(HaveLen(1)) }) - It("should apply successfully after edit", func() { + It("should apply successfully when valid", func() { Expect(jobs).To(HaveLen(1)) Expect(jobs[0].Status.Succeeded).To(BeNumerically("==", 1)) Expect(jobs[0].Status.Active).To(BeNumerically("==", 0)) diff --git a/e2e/suite/plan_create_test.go b/e2e/suite/plan_create_test.go index beb59683..bcaee7e1 100644 --- a/e2e/suite/plan_create_test.go +++ b/e2e/suite/plan_create_test.go @@ -20,7 +20,7 @@ var _ = Describe("Plan Creation", func() { plan, err = e2e.CreatePlan(plan) }) It("should return an error if upgrade in nil", func() { - Expect(err).Should(HaveOccurred()) + Expect(err).To(HaveOccurred()) }) }) }) diff --git a/main.go b/main.go index d97fd237..e9d6b73c 100644 --- a/main.go +++ b/main.go @@ -2,6 +2,7 @@ //go:generate rm 
-rf pkg/generated pkg/crds/yaml/generated //go:generate go run pkg/codegen/codegen.go //go:generate controller-gen crd:generateEmbeddedObjectMeta=true paths=./pkg/apis/... output:crd:dir=./pkg/crds/yaml/generated +//go:generate crd-ref-docs --config=crd-ref-docs.yaml --renderer=markdown --output-path=doc/plan.md package main diff --git a/pkg/apis/upgrade.cattle.io/v1/types.go b/pkg/apis/upgrade.cattle.io/v1/types.go index ec46c276..5bdc1396 100644 --- a/pkg/apis/upgrade.cattle.io/v1/types.go +++ b/pkg/apis/upgrade.cattle.io/v1/types.go @@ -50,7 +50,7 @@ type PlanSpec struct { NodeSelector *metav1.LabelSelector `json:"nodeSelector,omitempty"` // The service account for the pod to use. As with normal pods, if not specified the default service account from the namespace will be assigned. ServiceAccountName string `json:"serviceAccountName,omitempty"` - // A URL that returns HTTP 302 with the last path element of the value returned in the Location header assumed to be an image tag (after munging "+" to "-"). + // A URL that returns HTTP 302 with the last path element of the value returned in the Location header assumed to be an image tag (after munging "+" to "-"). Channel string `json:"channel,omitempty"` // Providing a value for version will prevent polling/resolution of the channel if specified. Version string `json:"version,omitempty"` @@ -58,7 +58,7 @@ type PlanSpec struct { Secrets []SecretSpec `json:"secrets,omitempty"` // Specify which node taints should be tolerated by pods applying the upgrade. // Anything specified here is appended to the default of: - // - {key: node.kubernetes.io/unschedulable, effect: NoSchedule, operator: Exists} + // - `{key: node.kubernetes.io/unschedulable, effect: NoSchedule, operator: Exists}` Tolerations []corev1.Toleration `json:"tolerations,omitempty"` // Jobs for exclusive plans cannot be run alongside any other exclusive plan. 
Exclusive bool `json:"exclusive,omitempty"` @@ -66,19 +66,15 @@ type PlanSpec struct { // Jobs will not be generated outside this time window, but may continue executing into the window once started. Window *TimeWindowSpec `json:"window,omitempty"` // The prepare init container, if specified, is run before cordon/drain which is run before the upgrade container. - // Shares the same format as the upgrade container. - // If no tag is included in the image name, the tag portion of the image will be the value from .status.latestVersion a.k.a. the resolved version for this plan. Prepare *ContainerSpec `json:"prepare,omitempty"` - // If drain is specified, the value for cordon is ignored. + // The upgrade container; must be specified. + Upgrade *ContainerSpec `json:"upgrade"` + // If Cordon is true, the node is cordoned before the upgrade container is run. + // If drain is specified, the value for cordon is ignored, and the node is cordoned. // If neither drain nor cordon are specified and the node is marked as schedulable=false it will not be marked as schedulable=true when the Job completes. Cordon bool `json:"cordon,omitempty"` - // If left unspecified, no drain will be performed. See: - // - https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/ - // - https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#drain + // Configuration for draining nodes prior to upgrade. If left unspecified, no drain will be performed. Drain *DrainSpec `json:"drain,omitempty"` - // The upgrade container. - // If no tag is included in the image name, the tag portion of the image will be the value from .status.latestVersion a.k.a. the resolved version for this plan. - Upgrade *ContainerSpec `json:"upgrade,omitempty"` // Image Pull Secrets, used to pull images for the Job. ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` // Time after a Job for one Node is complete before a new Job will be created for the next Node. 
@@ -102,10 +98,12 @@ type PlanStatus struct { Applying []string `json:"applying,omitempty"` } -// ContainerSpec is a simplified container template. +// ContainerSpec is a simplified container template spec, used to configure the prepare and upgrade +// containers of the Job Pod. type ContainerSpec struct { // Image name. If the tag is omitted, the value from .status.latestVersion will be used. - Image string `json:"image,omitempty"` + // +kubebuilder:validation:Required + Image string `json:"image"` Command []string `json:"command,omitempty"` Args []string `json:"args,omitempty"` Env []corev1.EnvVar `json:"envs,omitempty"` @@ -117,14 +115,19 @@ type ContainerSpec struct { // HostPath volume to mount into the pod type VolumeSpec struct { // Name of the Volume as it will appear within the Pod spec. - Name string `json:"name,omitempty"` + // +kubebuilder:validation:Required + Name string `json:"name"` // Path on the host to mount. - Source string `json:"source,omitempty"` + // +kubebuilder:validation:Required + Source string `json:"source"` // Path to mount the Volume at within the Pod. - Destination string `json:"destination,omitempty"` + // +kubebuilder:validation:Required + Destination string `json:"destination"` } -// DrainSpec encapsulates kubectl drain parameters minus node/pod selectors. +// DrainSpec encapsulates kubectl drain parameters minus node/pod selectors. See: +// - https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/ +// - https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#drain type DrainSpec struct { Timeout *time.Duration `json:"timeout,omitempty"` GracePeriod *int32 `json:"gracePeriod,omitempty"` @@ -137,19 +140,25 @@ type DrainSpec struct { PodSelector *metav1.LabelSelector `json:"podSelector,omitempty"` } -// SecretSpec describes a secret to be mounted for prepare/upgrade containers. +// SecretSpec describes a Secret to be mounted for prepare/upgrade containers. 
type SecretSpec struct { // Secret name - Name string `json:"name,omitempty"` + // +kubebuilder:validation:Required + Name string `json:"name"` // Path to mount the Secret volume within the Pod. - Path string `json:"path,omitempty"` + // +kubebuilder:validation:Required + Path string `json:"path"` // If set to true, the Secret contents will not be hashed, and changes to the Secret will not trigger new application of the Plan. IgnoreUpdates bool `json:"ignoreUpdates,omitempty"` } +// +kubebuilder:validation:Enum={"0","su","sun","sunday","1","mo","mon","monday","2","tu","tue","tuesday","3","we","wed","wednesday","4","th","thu","thursday","5","fr","fri","friday","6","sa","sat","saturday"} +type Day string + // TimeWindowSpec describes a time window in which a Plan should be processed. type TimeWindowSpec struct { // Days that this time window is valid for + // +kubebuilder:validation:MinItems=1 Days []Day `json:"days,omitempty"` // Start of the time window. StartTime string `json:"startTime,omitempty"` @@ -159,9 +168,6 @@ type TimeWindowSpec struct { TimeZone string `json:"timeZone,omitempty"` } -// +kubebuilder:validation:Enum={"0","su","sun","sunday","1","mo","mon","monday","2","tu","tue","tuesday","3","we","wed","wednesday","4","th","thu","thursday","5","fr","fri","friday","6","sa","sat","saturday"} -type Day string - func (tws *TimeWindowSpec) Contains(t time.Time) bool { days := make([]string, len(tws.Days)) for i, day := range tws.Days { diff --git a/pkg/apis/upgrade.cattle.io/v1/zz_generated_deepcopy.go b/pkg/apis/upgrade.cattle.io/v1/zz_generated_deepcopy.go index 9f568e1e..8a9bcf01 100644 --- a/pkg/apis/upgrade.cattle.io/v1/zz_generated_deepcopy.go +++ b/pkg/apis/upgrade.cattle.io/v1/zz_generated_deepcopy.go @@ -222,16 +222,16 @@ func (in *PlanSpec) DeepCopyInto(out *PlanSpec) { *out = new(ContainerSpec) (*in).DeepCopyInto(*out) } - if in.Drain != nil { - in, out := &in.Drain, &out.Drain - *out = new(DrainSpec) - (*in).DeepCopyInto(*out) - } if in.Upgrade != 
nil { in, out := &in.Upgrade, &out.Upgrade *out = new(ContainerSpec) (*in).DeepCopyInto(*out) } + if in.Drain != nil { + in, out := &in.Drain, &out.Drain + *out = new(DrainSpec) + (*in).DeepCopyInto(*out) + } if in.ImagePullSecrets != nil { in, out := &in.ImagePullSecrets, &out.ImagePullSecrets *out = make([]corev1.LocalObjectReference, len(*in)) diff --git a/pkg/crds/yaml/generated/upgrade.cattle.io_plans.yaml b/pkg/crds/yaml/generated/upgrade.cattle.io_plans.yaml index 2e5753b3..ec741ed3 100644 --- a/pkg/crds/yaml/generated/upgrade.cattle.io_plans.yaml +++ b/pkg/crds/yaml/generated/upgrade.cattle.io_plans.yaml @@ -52,7 +52,7 @@ spec: properties: channel: description: A URL that returns HTTP 302 with the last path element - of the value returned in the Location header assumed to be an image + of the value returned in the Location header assumed to be an image tag (after munging "+" to "-"). type: string concurrency: @@ -62,14 +62,13 @@ spec: type: integer cordon: description: |- - If drain is specified, the value for cordon is ignored. + If Cordon is true, the node is cordoned before the upgrade container is run. + If drain is specified, the value for cordon is ignored, and the node is cordoned. If neither drain nor cordon are specified and the node is marked as schedulable=false it will not be marked as schedulable=true when the Job completes. type: boolean drain: - description: |- - If left unspecified, no drain will be performed. See: - - https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/ - - https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#drain + description: Configuration for draining nodes prior to upgrade. If + left unspecified, no drain will be performed. properties: deleteEmptydirData: type: boolean @@ -224,10 +223,8 @@ spec: Job will be created for the next Node. 
type: string prepare: - description: |- - The prepare init container, if specified, is run before cordon/drain which is run before the upgrade container. - Shares the same format as the upgrade container. - If no tag is included in the image name, the tag portion of the image will be the value from .status.latestVersion a.k.a. the resolved version for this plan. + description: The prepare init container, if specified, is run before + cordon/drain which is run before the upgrade container. properties: args: items: @@ -614,13 +611,19 @@ spec: source: description: Path on the host to mount. type: string + required: + - destination + - name + - source type: object type: array + required: + - image type: object secrets: description: Secrets to be mounted into the Job Pod. items: - description: SecretSpec describes a secret to be mounted for prepare/upgrade + description: SecretSpec describes a Secret to be mounted for prepare/upgrade containers. properties: ignoreUpdates: @@ -634,6 +637,9 @@ spec: path: description: Path to mount the Secret volume within the Pod. type: string + required: + - name + - path type: object type: array serviceAccountName: @@ -645,7 +651,7 @@ spec: description: |- Specify which node taints should be tolerated by pods applying the upgrade. Anything specified here is appended to the default of: - - {key: node.kubernetes.io/unschedulable, effect: NoSchedule, operator: Exists} + - `{key: node.kubernetes.io/unschedulable, effect: NoSchedule, operator: Exists}` items: description: |- The pod this Toleration is attached to tolerates any taint that matches @@ -684,9 +690,7 @@ spec: type: object type: array upgrade: - description: |- - The upgrade container. - If no tag is included in the image name, the tag portion of the image will be the value from .status.latestVersion a.k.a. the resolved version for this plan. + description: The upgrade container; must be specified. 
properties: args: items: @@ -1073,8 +1077,14 @@ spec: source: description: Path on the host to mount. type: string + required: + - destination + - name + - source type: object type: array + required: + - image type: object version: description: Providing a value for version will prevent polling/resolution @@ -1118,6 +1128,7 @@ spec: - sat - saturday type: string + minItems: 1 type: array endTime: description: End of the time window. @@ -1130,6 +1141,8 @@ spec: will be used. type: string type: object + required: + - upgrade type: object status: description: PlanStatus represents the resulting state from processing From fa163957d09890593643a296ba70130038542fcb Mon Sep 17 00:00:00 2001 From: Brad Davidson Date: Tue, 24 Jun 2025 09:02:18 +0000 Subject: [PATCH 5/6] Add message to event and update status condition when job fails Signed-off-by: Brad Davidson --- e2e/framework/controller/deployment.go | 3 + e2e/suite/channel_resolve_test.go | 1 + e2e/suite/job_generate_test.go | 155 ++++++++++++++----------- e2e/suite/plan_create_test.go | 1 + e2e/suite/plan_resolve_test.go | 5 + pkg/upgrade/handle_batch.go | 19 ++- pkg/upgrade/handle_upgrade.go | 8 +- 7 files changed, 114 insertions(+), 78 deletions(-) diff --git a/e2e/framework/controller/deployment.go b/e2e/framework/controller/deployment.go index 8c5bb803..e5110422 100644 --- a/e2e/framework/controller/deployment.go +++ b/e2e/framework/controller/deployment.go @@ -51,6 +51,9 @@ func NewDeployment(name string, opt ...DeploymentOption) *appsv1.Deployment { }, { Name: "SYSTEM_UPGRADE_CONTROLLER_LEADER_ELECT", Value: "true", + }, { + Name: "SYSTEM_UPGRADE_CONTROLLER_DEBUG", + Value: "true", }, { Name: "SYSTEM_UPGRADE_CONTROLLER_NAMESPACE", ValueFrom: &corev1.EnvVarSource{ diff --git a/e2e/suite/channel_resolve_test.go b/e2e/suite/channel_resolve_test.go index 3519b59c..0e03d7e6 100644 --- a/e2e/suite/channel_resolve_test.go +++ b/e2e/suite/channel_resolve_test.go @@ -65,5 +65,6 @@ var _ = Describe("Resolve channel", func() 
{ Expect(err).To(HaveOccurred()) Expect(latest).To(BeEmpty()) }) + AfterEach(CollectLogsOnFailure(e2e)) }) }) diff --git a/e2e/suite/job_generate_test.go b/e2e/suite/job_generate_test.go index b7ffe93e..2904047d 100644 --- a/e2e/suite/job_generate_test.go +++ b/e2e/suite/job_generate_test.go @@ -8,13 +8,17 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + format "github.com/onsi/gomega/format" batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" + "k8s.io/utils/pointer" "k8s.io/utils/ptr" "github.com/rancher/system-upgrade-controller/e2e/framework" upgradeapiv1 "github.com/rancher/system-upgrade-controller/pkg/apis/upgrade.cattle.io/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + _ "k8s.io/kubernetes/test/utils/format" ) var _ = Describe("Job Generation", func() { @@ -137,29 +141,7 @@ var _ = Describe("Job Generation", func() { Expect(jobs[0].Spec.Template.Spec.InitContainers[0].Args).To(ContainElement(ContainSubstring("!upgrade.cattle.io/controller"))) Expect(jobs[0].Spec.Template.Spec.InitContainers[0].Args).To(ContainElement(ContainSubstring("component notin (sonobuoy)"))) }) - AfterEach(func() { - if CurrentSpecReport().Failed() { - podList, _ := e2e.ClientSet.CoreV1().Pods(e2e.Namespace.Name).List(context.Background(), metav1.ListOptions{}) - for _, pod := range podList.Items { - containerNames := []string{} - for _, container := range pod.Spec.InitContainers { - containerNames = append(containerNames, container.Name) - } - for _, container := range pod.Spec.Containers { - containerNames = append(containerNames, container.Name) - } - for _, container := range containerNames { - reportName := fmt.Sprintf("podlogs-%s-%s", pod.Name, container) - logs := e2e.ClientSet.CoreV1().Pods(e2e.Namespace.Name).GetLogs(pod.Name, &v1.PodLogOptions{Container: container}) - if logStreamer, err := logs.Stream(context.Background()); err == nil { - if podLogs, err := io.ReadAll(logStreamer); err == nil { - AddReportEntry(reportName, string(podLogs)) - 
} - } - } - } - } - }) + AfterEach(CollectLogsOnFailure(e2e)) }) When("fails because of invalid time window", func() { @@ -206,29 +188,7 @@ var _ = Describe("Job Generation", func() { Expect(jobs[0].Status.Active).To(BeNumerically("==", 0)) Expect(jobs[0].Status.Failed).To(BeNumerically("==", 0)) }) - AfterEach(func() { - if CurrentSpecReport().Failed() { - podList, _ := e2e.ClientSet.CoreV1().Pods(e2e.Namespace.Name).List(context.Background(), metav1.ListOptions{}) - for _, pod := range podList.Items { - containerNames := []string{} - for _, container := range pod.Spec.InitContainers { - containerNames = append(containerNames, container.Name) - } - for _, container := range pod.Spec.Containers { - containerNames = append(containerNames, container.Name) - } - for _, container := range containerNames { - reportName := fmt.Sprintf("podlogs-%s-%s", pod.Name, container) - logs := e2e.ClientSet.CoreV1().Pods(e2e.Namespace.Name).GetLogs(pod.Name, &v1.PodLogOptions{Container: container}) - if logStreamer, err := logs.Stream(context.Background()); err == nil { - if podLogs, err := io.ReadAll(logStreamer); err == nil { - AddReportEntry(reportName, string(podLogs)) - } - } - } - } - } - }) + AfterEach(CollectLogsOnFailure(e2e)) }) When("fails because of invalid post complete delay", func() { @@ -275,32 +235,10 @@ var _ = Describe("Job Generation", func() { Expect(jobs[0].Status.Active).To(BeNumerically("==", 0)) Expect(jobs[0].Status.Failed).To(BeNumerically("==", 0)) }) - AfterEach(func() { - if CurrentSpecReport().Failed() { - podList, _ := e2e.ClientSet.CoreV1().Pods(e2e.Namespace.Name).List(context.Background(), metav1.ListOptions{}) - for _, pod := range podList.Items { - containerNames := []string{} - for _, container := range pod.Spec.InitContainers { - containerNames = append(containerNames, container.Name) - } - for _, container := range pod.Spec.Containers { - containerNames = append(containerNames, container.Name) - } - for _, container := range containerNames { - 
reportName := fmt.Sprintf("podlogs-%s-%s", pod.Name, container) - logs := e2e.ClientSet.CoreV1().Pods(e2e.Namespace.Name).GetLogs(pod.Name, &v1.PodLogOptions{Container: container}) - if logStreamer, err := logs.Stream(context.Background()); err == nil { - if podLogs, err := io.ReadAll(logStreamer); err == nil { - AddReportEntry(reportName, string(podLogs)) - } - } - } - } - } - }) + AfterEach(CollectLogsOnFailure(e2e)) }) - When("updated secret should not change hash", func() { + When("updated secret does not change hash", func() { var ( err error plan *upgradeapiv1.Plan @@ -347,5 +285,82 @@ var _ = Describe("Job Generation", func() { It("hash should be equal", func() { Expect(plan.Status.LatestHash).Should(Equal(hash)) }) + AfterEach(CollectLogsOnFailure(e2e)) + }) + + When("job failure message is reflected in plan status condition", func() { + var ( + err error + plan *upgradeapiv1.Plan + jobs []batchv1.Job + ) + BeforeEach(func() { + plan = e2e.NewPlan("job-deadline-", "library/alpine:3.18", []string{"sh", "-c"}, "sleep 3600") + plan.Spec.JobActiveDeadlineSecs = pointer.Int64(15) + plan.Spec.Version = "latest" + plan.Spec.Concurrency = 1 + plan.Spec.ServiceAccountName = e2e.Namespace.Name + plan.Spec.NodeSelector = &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: "node-role.kubernetes.io/control-plane", + Operator: metav1.LabelSelectorOpDoesNotExist, + }}, + } + plan, err = e2e.CreatePlan(plan) + Expect(err).ToNot(HaveOccurred()) + + plan, err = e2e.WaitForPlanCondition(plan.Name, upgradeapiv1.PlanLatestResolved, 30*time.Second) + Expect(err).ToNot(HaveOccurred()) + }) + It("message should contain deadline reason and message", func() { + jobs, err = e2e.WaitForPlanJobs(plan, 1, 120*time.Second) + Expect(err).ToNot(HaveOccurred()) + Expect(jobs).To(HaveLen(1)) + Expect(jobs[0].Status.Succeeded).To(BeNumerically("==", 0)) + Expect(jobs[0].Status.Active).To(BeNumerically("==", 0)) + 
Expect(jobs[0].Status.Failed).To(BeNumerically(">=", 1)) + + Eventually(e2e.GetPlan). + WithArguments(plan.Name, metav1.GetOptions{}). + WithTimeout(30 * time.Second). + Should(SatisfyAll( + WithTransform(upgradeapiv1.PlanComplete.IsTrue, BeFalse()), + WithTransform(upgradeapiv1.PlanComplete.GetReason, Equal("JobFailed")), + WithTransform(upgradeapiv1.PlanComplete.GetMessage, ContainSubstring("DeadlineExceeded: Job was active longer than specified deadline")), + )) + }) + AfterEach(CollectLogsOnFailure(e2e)) }) }) + +func CollectLogsOnFailure(e2e *framework.Client) func() { + return func() { + if CurrentSpecReport().Failed() { + planList, _ := e2e.UpgradeClientSet.UpgradeV1().Plans(e2e.Namespace.Name).List(context.Background(), metav1.ListOptions{}) + AddReportEntry("plans", format.Object(planList, 0)) + + jobList, _ := e2e.ClientSet.BatchV1().Jobs(e2e.Namespace.Name).List(context.Background(), metav1.ListOptions{}) + AddReportEntry("jobs", format.Object(jobList, 0)) + + podList, _ := e2e.ClientSet.CoreV1().Pods(e2e.Namespace.Name).List(context.Background(), metav1.ListOptions{}) + for _, pod := range podList.Items { + containerNames := []string{} + for _, container := range pod.Spec.InitContainers { + containerNames = append(containerNames, container.Name) + } + for _, container := range pod.Spec.Containers { + containerNames = append(containerNames, container.Name) + } + for _, container := range containerNames { + reportName := fmt.Sprintf("podlogs-%s-%s", pod.Name, container) + logs := e2e.ClientSet.CoreV1().Pods(e2e.Namespace.Name).GetLogs(pod.Name, &v1.PodLogOptions{Container: container}) + if logStreamer, err := logs.Stream(context.Background()); err == nil { + if podLogs, err := io.ReadAll(logStreamer); err == nil { + AddReportEntry(reportName, string(podLogs)) + } + } + } + } + } + } +} diff --git a/e2e/suite/plan_create_test.go b/e2e/suite/plan_create_test.go index bcaee7e1..0e9ccff2 100644 --- a/e2e/suite/plan_create_test.go +++ 
b/e2e/suite/plan_create_test.go @@ -22,5 +22,6 @@ var _ = Describe("Plan Creation", func() { It("should return an error if upgrade in nil", func() { Expect(err).To(HaveOccurred()) }) + AfterEach(CollectLogsOnFailure(e2e)) }) }) diff --git a/e2e/suite/plan_resolve_test.go b/e2e/suite/plan_resolve_test.go index 8f11e4d3..0733dce7 100644 --- a/e2e/suite/plan_resolve_test.go +++ b/e2e/suite/plan_resolve_test.go @@ -33,6 +33,7 @@ var _ = Describe("Plan Resolution", func() { Expect(plan.Status.LatestVersion).To(BeEmpty()) Expect(plan.Status.LatestHash).To(BeEmpty()) }) + AfterEach(CollectLogsOnFailure(e2e)) }) When("has version", func() { @@ -56,6 +57,7 @@ var _ = Describe("Plan Resolution", func() { Expect(plan.Status.LatestVersion).To(Equal(plan.Spec.Version)) Expect(plan.Status.LatestHash).ToNot(BeEmpty()) }) + AfterEach(CollectLogsOnFailure(e2e)) }) When("has version with semver+metadata", func() { @@ -82,6 +84,7 @@ var _ = Describe("Plan Resolution", func() { It("should munge the semver", func() { Expect(plan.Status.LatestVersion).ToNot(ContainSubstring(`+`)) }) + AfterEach(CollectLogsOnFailure(e2e)) }) When("has channel", func() { @@ -114,6 +117,7 @@ var _ = Describe("Plan Resolution", func() { Expect(plan.Status.LatestVersion).To(Equal(channelTag)) Expect(plan.Status.LatestHash).ToNot(BeEmpty()) }) + AfterEach(CollectLogsOnFailure(e2e)) }) When("has channel with semver+metadata", func() { @@ -148,5 +152,6 @@ var _ = Describe("Plan Resolution", func() { It("should munge the semver", func() { Expect(plan.Status.LatestVersion).ToNot(ContainSubstring(`+`)) }) + AfterEach(CollectLogsOnFailure(e2e)) }) }) diff --git a/pkg/upgrade/handle_batch.go b/pkg/upgrade/handle_batch.go index 97692e82..004ff1e7 100644 --- a/pkg/upgrade/handle_batch.go +++ b/pkg/upgrade/handle_batch.go @@ -2,18 +2,20 @@ package upgrade import ( "context" + "errors" "fmt" "sort" "strconv" "time" upgradeapi "github.com/rancher/system-upgrade-controller/pkg/apis/upgrade.cattle.io" + upgradeapiv1 
"github.com/rancher/system-upgrade-controller/pkg/apis/upgrade.cattle.io/v1" upgradejob "github.com/rancher/system-upgrade-controller/pkg/upgrade/job" batchctlv1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/batch/v1" "github.com/sirupsen/logrus" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" ) @@ -50,7 +52,7 @@ func (ctl *Controller) handleJobs(ctx context.Context) error { // get the plan being applied plan, err := plans.Get(obj.Namespace, planName, metav1.GetOptions{}) switch { - case errors.IsNotFound(err): + case apierrors.IsNotFound(err): // plan is gone, delete return obj, deleteJob(jobs, obj, metav1.DeletePropagationBackground) case err != nil: @@ -73,7 +75,7 @@ func (ctl *Controller) handleJobs(ctx context.Context) error { // get the node that the plan is being applied to node, err := nodes.Cache().Get(nodeName) switch { - case errors.IsNotFound(err): + case apierrors.IsNotFound(err): // node is gone, delete return obj, deleteJob(jobs, obj, metav1.DeletePropagationBackground) case err != nil: @@ -85,7 +87,16 @@ func (ctl *Controller) handleJobs(ctx context.Context) error { if failedTime.IsZero() { return obj, fmt.Errorf("condition %q missing field %q", upgradejob.ConditionFailed, "LastTransitionTime") } - ctl.recorder.Eventf(plan, corev1.EventTypeWarning, "JobFailed", "Job failed on Node %s", node.Name) + message := fmt.Sprintf("Job %s/%s failed on Node %s: %s: %s", + obj.Namespace, obj.Name, nodeName, + upgradejob.ConditionFailed.GetReason(obj), + upgradejob.ConditionFailed.GetMessage(obj), + ) + ctl.recorder.Event(plan, corev1.EventTypeWarning, "JobFailed", message) + upgradeapiv1.PlanComplete.SetError(plan, "JobFailed", errors.New(message)) + if plan, err = plans.UpdateStatus(plan); err != nil { + return obj, err + } return obj, enqueueOrDelete(jobs, obj, failedTime)
} // if the job has completed tag the node then enqueue-or-delete depending on the TTL window diff --git a/pkg/upgrade/handle_upgrade.go b/pkg/upgrade/handle_upgrade.go index 2a0bfa7a..3aa56edb 100644 --- a/pkg/upgrade/handle_upgrade.go +++ b/pkg/upgrade/handle_upgrade.go @@ -162,11 +162,11 @@ func (ctl *Controller) handlePlans(ctx context.Context) error { if !slices.Equal(obj.Status.Applying, concurrentNodeNames) { recorder.Eventf(obj, corev1.EventTypeNormal, "SyncJob", "Jobs synced for version %s on Nodes %s. Hash: %s", obj.Status.LatestVersion, strings.Join(concurrentNodeNames, ","), obj.Status.LatestHash) + obj.Status.Applying = concurrentNodeNames[:] + complete.False(obj) + complete.Message(obj, "") + complete.Reason(obj, "SyncJob") } - obj.Status.Applying = concurrentNodeNames[:] - complete.False(obj) - complete.Message(obj, "") - complete.Reason(obj, "SyncJob") } else { // set PlanComplete to true when no nodes have been selected, // and emit an event if the plan just completed From 04f3372a4fbfb646664a38bd5a7f64c09aa3f171 Mon Sep 17 00:00:00 2001 From: Brad Davidson Date: Mon, 30 Jun 2025 23:33:48 +0000 Subject: [PATCH 6/6] Improve docs and table output for conditions Signed-off-by: Brad Davidson --- doc/plan.md | 2 +- pkg/apis/upgrade.cattle.io/v1/types.go | 6 ++++++ .../yaml/generated/upgrade.cattle.io_plans.yaml | 14 ++++++++++++++ 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/doc/plan.md b/doc/plan.md index ce4750a5..f747c4cf 100644 --- a/doc/plan.md +++ b/doc/plan.md @@ -138,7 +138,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `conditions` _GenericCondition array_ | | | | +| `conditions` _GenericCondition array_ | `LatestResolved` indicates that the latest version as per the spec has been determined.
`Validated` indicates that the plan spec has been validated.
`Complete` indicates that the latest version of the plan has completed on all selected nodes. If any Jobs for the Plan fail to complete, this condition will remain false, and the reason and message will reflect the source of the error. | | | | `latestVersion` _string_ | The latest version, as resolved from .spec.version, or the channel server. | | | | `latestHash` _string_ | The hash of the most recently applied plan .spec. | | | | `applying` _string array_ | List of Node names that the Plan is currently being applied on. | | | diff --git a/pkg/apis/upgrade.cattle.io/v1/types.go b/pkg/apis/upgrade.cattle.io/v1/types.go index 5bdc1396..58f4f323 100644 --- a/pkg/apis/upgrade.cattle.io/v1/types.go +++ b/pkg/apis/upgrade.cattle.io/v1/types.go @@ -27,6 +27,9 @@ var ( // +kubebuilder:printcolumn:name="Image",type=string,JSONPath=`.spec.upgrade.image` // +kubebuilder:printcolumn:name="Channel",type=string,JSONPath=`.spec.channel` // +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version` +// +kubebuilder:printcolumn:name="Complete",type=string,JSONPath=`.status.conditions[?(@.type=='Complete')].status` +// +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.conditions[?(@.message!='')].message` +// +kubebuilder:printcolumn:name="Applying",type=string,JSONPath=`.status.applying`,priority=10 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Plan represents a set of Jobs to apply an upgrade (or other operation) to set of Nodes. @@ -83,6 +86,9 @@ type PlanSpec struct { // PlanStatus represents the resulting state from processing Plan events. type PlanStatus struct { + // `LatestResolved` indicates that the latest version as per the spec has been determined. + // `Validated` indicates that the plan spec has been validated. + // `Complete` indicates that the latest version of the plan has completed on all selected nodes. 
If any Jobs for the Plan fail to complete, this condition will remain false, and the reason and message will reflect the source of the error. // +optional // +patchMergeKey=type // +patchStrategy=merge diff --git a/pkg/crds/yaml/generated/upgrade.cattle.io_plans.yaml b/pkg/crds/yaml/generated/upgrade.cattle.io_plans.yaml index ec741ed3..e04535ff 100644 --- a/pkg/crds/yaml/generated/upgrade.cattle.io_plans.yaml +++ b/pkg/crds/yaml/generated/upgrade.cattle.io_plans.yaml @@ -24,6 +24,16 @@ spec: - jsonPath: .spec.version name: Version type: string + - jsonPath: .status.conditions[?(@.type=='Complete')].status + name: Complete + type: string + - jsonPath: .status.conditions[?(@.message!='')].message + name: Message + type: string + - jsonPath: .status.applying + name: Applying + priority: 10 + type: string name: v1 schema: openAPIV3Schema: @@ -1155,6 +1165,10 @@ spec: type: string type: array conditions: + description: |- + `LatestResolved` indicates that the latest version as per the spec has been determined. + `Validated` indicates that the plan spec has been validated. + `Complete` indicates that the latest version of the plan has completed on all selected nodes. If any Jobs for the Plan fail to complete, this condition will remain false, and the reason and message will reflect the source of the error. items: properties: lastTransitionTime: