From 8a7f633d5acfd0ff88b316bd662eefc9279c65d6 Mon Sep 17 00:00:00 2001 From: Dhairya Arora Date: Mon, 2 Mar 2026 18:29:33 +0530 Subject: [PATCH 1/9] enable multiple control plane classes - ClusterClass now supports multiple classes for control-plane - similar to workers. - Cluster topology now includes a field "class" for control-plane which references to the control. Signed-off-by: Dhairya Arora --- .github/workflows/release.yaml | 40 +- .github/workflows/tag-release.yaml | 36 + Makefile | 54 +- README.md | 21 +- api/core/v1beta1/cluster_types.go | 9 + api/core/v1beta1/clusterclass_types.go | 53 +- api/core/v1beta1/zz_generated.conversion.go | 58 + api/core/v1beta1/zz_generated.deepcopy.go | 32 + api/core/v1beta2/cluster_types.go | 9 + api/core/v1beta2/clusterclass_types.go | 54 +- api/core/v1beta2/zz_generated.deepcopy.go | 32 + api/core/v1beta2/zz_generated.openapi.go | 81 +- .../topologymutation_variable_types.go | 5 + .../hooks/v1alpha1/zz_generated.openapi.go | 7 + cmd/clusterctl/client/cluster/objectgraph.go | 29 +- .../cluster.x-k8s.io_clusterclasses.yaml | 1021 +++++++++++++++++ .../crd/bases/cluster.x-k8s.io_clusters.yaml | 18 + exp/topology/desiredstate/desired_state.go | 37 +- exp/topology/scope/blueprint.go | 52 +- .../clusterclass/clusterclass_controller.go | 12 +- .../controllers/topology/cluster/blueprint.go | 19 +- .../topology/cluster/patches/engine.go | 3 +- .../patches/inline/json_patch_generator.go | 30 + .../cluster/patches/variables/variables.go | 6 +- .../patches/variables/variables_test.go | 2 +- .../topology/cluster/reconcile_state.go | 40 +- internal/topology/check/compatibility.go | 94 +- internal/topology/check/compatibility_test.go | 72 +- internal/topology/selectors/selectors.go | 31 +- internal/webhooks/cluster.go | 33 +- internal/webhooks/clusterclass.go | 74 +- internal/webhooks/patch_validation.go | 42 + 32 files changed, 1966 insertions(+), 140 deletions(-) create mode 100644 .github/workflows/tag-release.yaml diff --git 
a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 1cc8354051b1..89b8fc0091e6 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -1,4 +1,4 @@ -name: Create Release +name: Push Release Tags on: push: @@ -8,13 +8,11 @@ on: - 'CHANGELOG/*.md' permissions: - contents: write # Allow to push a tag, create a release branch and publish a draft release. + contents: write # Allow to push a tag and create a release branch. jobs: push_release_tags: runs-on: ubuntu-latest - outputs: - release_tag: ${{ steps.release-version.outputs.release_version }} steps: - name: Checkout code uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 @@ -78,37 +76,3 @@ jobs: git push origin ${RELEASE_VERSION} git push origin test/${RELEASE_VERSION} echo "Created tags $RELEASE_VERSION and test/${RELEASE_VERSION}" - release: - name: create draft release - runs-on: ubuntu-latest - needs: push_release_tags - steps: - - name: Set env - run: echo "RELEASE_TAG=${RELEASE_TAG}" >> $GITHUB_ENV - env: - RELEASE_TAG: ${{needs.push_release_tags.outputs.release_tag}} - - name: checkout code - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 - with: - fetch-depth: 0 - ref: ${{ env.RELEASE_TAG }} - - name: Calculate go version - run: echo "go_version=$(make go-version)" >> $GITHUB_ENV - - name: Set up Go - uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # tag=v6.4.0 - with: - go-version: ${{ env.go_version }} - - name: generate release artifacts - run: | - make release - - name: get release notes - run: | - curl -L "https://raw.githubusercontent.com/${{ github.repository }}/main/CHANGELOG/${{ env.RELEASE_TAG }}.md" \ - -o "${{ env.RELEASE_TAG }}.md" - - name: Release - uses: softprops/action-gh-release@153bb8e04406b158c6c84fc1615b65b24149a1fe # tag=v2.6.1 - with: - draft: true - files: out/* - body_path: ${{ env.RELEASE_TAG }}.md - tag_name: ${{ env.RELEASE_TAG }} diff --git 
a/.github/workflows/tag-release.yaml b/.github/workflows/tag-release.yaml new file mode 100644 index 000000000000..ed3452b48f08 --- /dev/null +++ b/.github/workflows/tag-release.yaml @@ -0,0 +1,36 @@ +name: Create Release + +on: + push: + tags: + - "v*" + +permissions: + contents: write + +jobs: + release: + name: create draft release + runs-on: ubuntu-latest + steps: + - name: checkout code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 + with: + fetch-depth: 0 + ref: ${{ github.ref_name }} + - name: Calculate go version + run: echo "go_version=$(make go-version)" >> $GITHUB_ENV + - name: Set up Go + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # tag=v5.5.0 + with: + go-version: ${{ env.go_version }} + - name: generate release artifacts + run: | + make release + - name: Release + uses: softprops/action-gh-release@72f2c25fcb47643c292f7107632f7a47c1df5cd8 # tag=v2.3.2 + with: + draft: true + files: out/* + name: ${{ github.ref_name }} + tag_name: ${{ github.ref_name }} diff --git a/Makefile b/Makefile index 38d5a3957a08..39b074c5ae0e 100644 --- a/Makefile +++ b/Makefile @@ -222,11 +222,14 @@ TILT_PREPARE_BIN := tilt-prepare TILT_PREPARE := $(abspath $(TOOLS_BIN_DIR)/$(TILT_PREPARE_BIN)) # Define Docker related variables. Releases should modify and double check these vars. 
-REGISTRY ?= gcr.io/$(shell gcloud config get-value project) +REGISTRY ?= gcr.io/xxxxxx + +# For string inside YAML files (in "out" directory) PROD_REGISTRY ?= registry.k8s.io/cluster-api +# For string inside YAML files (in "out" directory) STAGING_REGISTRY ?= gcr.io/k8s-staging-cluster-api -STAGING_BUCKET ?= k8s-staging-cluster-api +#STAGING_BUCKET ?= k8s-staging-cluster-api # core IMAGE_NAME ?= cluster-api-controller @@ -260,7 +263,7 @@ CAPI_KIND_CLUSTER_NAME ?= capi-test TAG ?= dev ARCH ?= $(shell go env GOARCH) -ALL_ARCH ?= amd64 arm arm64 ppc64le s390x +ALL_ARCH ?= amd64 # Allow overriding the imagePullPolicy PULL_POLICY ?= Always @@ -830,10 +833,11 @@ docker-build-%: # Choice of images to build/push ALL_DOCKER_BUILD ?= core kubeadm-bootstrap kubeadm-control-plane docker-infrastructure test-extension clusterctl +SYSELF_RELEVANT_DOCKER_BUILD ?= core .PHONY: docker-build docker-build: docker-pull-prerequisites ## Run docker-build-* targets for all the images - $(MAKE) ARCH=$(ARCH) $(addprefix docker-build-,$(ALL_DOCKER_BUILD)) + $(MAKE) ARCH=$(ARCH) $(addprefix docker-build-,$(SYSELF_RELEVANT_DOCKER_BUILD)) ALL_DOCKER_BUILD_E2E = core kubeadm-bootstrap kubeadm-control-plane docker-infrastructure test-extension @@ -1056,9 +1060,9 @@ $(RELEASE_NOTES_DIR): .PHONY: release release: clean-release ## Build and push container images using the latest git tag for the commit - @if [ -z "${RELEASE_TAG}" ]; then echo "RELEASE_TAG is not set"; exit 1; fi - @if ! [ -z "$$(git status --porcelain)" ]; then echo "Your local git repository contains uncommitted changes, use git clean before proceeding."; exit 1; fi - git checkout "${RELEASE_TAG}" + #@if [ -z "${RELEASE_TAG}" ]; then echo "RELEASE_TAG is not set"; exit 1; fi + #@if ! [ -z "$$(git status --porcelain)" ]; then echo "Your local git repository contains uncommitted changes, use git clean before proceeding."; exit 1; fi + #git checkout "${RELEASE_TAG}" # Build binaries first. 
GIT_VERSION=$(RELEASE_TAG) $(MAKE) release-binaries # Set the manifest images to the staging/production bucket and Builds the manifests to publish with a release. @@ -1134,11 +1138,11 @@ release-manifests-dev: $(RELEASE_DIR) $(KUSTOMIZE) ## Build the development mani .PHONY: release-binaries release-binaries: ## Build the binaries to publish with a release RELEASE_BINARY=clusterctl-linux-amd64 BUILD_PATH=./cmd/clusterctl GOOS=linux GOARCH=amd64 $(MAKE) release-binary - RELEASE_BINARY=clusterctl-linux-arm64 BUILD_PATH=./cmd/clusterctl GOOS=linux GOARCH=arm64 $(MAKE) release-binary - RELEASE_BINARY=clusterctl-darwin-amd64 BUILD_PATH=./cmd/clusterctl GOOS=darwin GOARCH=amd64 $(MAKE) release-binary - RELEASE_BINARY=clusterctl-darwin-arm64 BUILD_PATH=./cmd/clusterctl GOOS=darwin GOARCH=arm64 $(MAKE) release-binary - RELEASE_BINARY=clusterctl-windows-amd64.exe BUILD_PATH=./cmd/clusterctl GOOS=windows GOARCH=amd64 $(MAKE) release-binary - RELEASE_BINARY=clusterctl-linux-ppc64le BUILD_PATH=./cmd/clusterctl GOOS=linux GOARCH=ppc64le $(MAKE) release-binary +# RELEASE_BINARY=clusterctl-linux-arm64 BUILD_PATH=./cmd/clusterctl GOOS=linux GOARCH=arm64 $(MAKE) release-binary +# RELEASE_BINARY=clusterctl-darwin-amd64 BUILD_PATH=./cmd/clusterctl GOOS=darwin GOARCH=amd64 $(MAKE) release-binary +# RELEASE_BINARY=clusterctl-darwin-arm64 BUILD_PATH=./cmd/clusterctl GOOS=darwin GOARCH=arm64 $(MAKE) release-binary +# RELEASE_BINARY=clusterctl-windows-amd64.exe BUILD_PATH=./cmd/clusterctl GOOS=windows GOARCH=amd64 $(MAKE) release-binary +# RELEASE_BINARY=clusterctl-linux-ppc64le BUILD_PATH=./cmd/clusterctl GOOS=linux GOARCH=ppc64le $(MAKE) release-binary .PHONY: release-binary release-binary: $(RELEASE_DIR) @@ -1147,9 +1151,11 @@ release-binary: $(RELEASE_DIR) -e CGO_ENABLED=0 \ -e GOOS=$(GOOS) \ -e GOARCH=$(GOARCH) \ - -e GOCACHE=/tmp/ \ + -e GOCACHE=/go/build-cache/ \ --user $$(id -u):$$(id -g) \ -v "$$(pwd):/workspace$(DOCKER_VOL_OPTS)" \ + -v "$$(go env GOMODCACHE):/go/pkg/mod" \ + -v 
"$$(go env GOCACHE):/go/build-cache" \ -w /workspace \ golang:$(GO_VERSION) \ go build -a -trimpath -gcflags "$(GCFLAGS)" -ldflags "$(LDFLAGS) -extldflags '-static'" \ @@ -1171,7 +1177,8 @@ release-staging: ## Build and push container images to the staging bucket $(MAKE) release-manifests-dev # Example manifest location: https://storage.googleapis.com/k8s-staging-cluster-api/components/main/core-components.yaml # Please note that these files are deleted after a certain period, at the time of this writing 60 days after file creation. - gsutil cp $(RELEASE_DIR)/* gs://$(STAGING_BUCKET)/components/$(RELEASE_ALIAS_TAG) + + ##gsutil cp $(RELEASE_DIR)/* gs://$(STAGING_BUCKET)/components/$(RELEASE_ALIAS_TAG) .PHONY: release-staging-nightly release-staging-nightly: ## Tag and push container images to the staging bucket. Example image tag: cluster-api-controller:nightly_main_20210121 @@ -1188,16 +1195,17 @@ release-staging-nightly: ## Tag and push container images to the staging bucket. $(MAKE) release-manifests-dev # Example manifest location: https://storage.googleapis.com/k8s-staging-cluster-api/components/nightly_main_20240425/core-components.yaml # Please note that these files are deleted after a certain period, at the time of this writing 60 days after file creation. 
- gsutil cp $(RELEASE_DIR)/* gs://$(STAGING_BUCKET)/components/$(NEW_RELEASE_ALIAS_TAG) + #gsutil cp $(RELEASE_DIR)/* gs://$(STAGING_BUCKET)/components/$(NEW_RELEASE_ALIAS_TAG) .PHONY: release-alias-tag release-alias-tag: ## Add the release alias tag to the last build tag - gcloud container images add-tag $(CONTROLLER_IMG):$(TAG) $(CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) - gcloud container images add-tag $(KUBEADM_BOOTSTRAP_CONTROLLER_IMG):$(TAG) $(KUBEADM_BOOTSTRAP_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) - gcloud container images add-tag $(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG):$(TAG) $(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) - gcloud container images add-tag $(CLUSTERCTL_IMG):$(TAG) $(CLUSTERCTL_IMG):$(RELEASE_ALIAS_TAG) - gcloud container images add-tag $(CAPD_CONTROLLER_IMG):$(TAG) $(CAPD_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) - gcloud container images add-tag $(TEST_EXTENSION_IMG):$(TAG) $(TEST_EXTENSION_IMG):$(RELEASE_ALIAS_TAG) + echo "Syself: skipping" +# gcloud container images add-tag $(CONTROLLER_IMG):$(TAG) $(CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) +# gcloud container images add-tag $(KUBEADM_BOOTSTRAP_CONTROLLER_IMG):$(TAG) $(KUBEADM_BOOTSTRAP_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) +# gcloud container images add-tag $(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG):$(TAG) $(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) +# gcloud container images add-tag $(CLUSTERCTL_IMG):$(TAG) $(CLUSTERCTL_IMG):$(RELEASE_ALIAS_TAG) +# gcloud container images add-tag $(CAPD_CONTROLLER_IMG):$(TAG) $(CAPD_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) +# gcloud container images add-tag $(TEST_EXTENSION_IMG):$(TAG) $(TEST_EXTENSION_IMG):$(RELEASE_ALIAS_TAG) .PHONY: release-notes-tool release-notes-tool: @@ -1233,13 +1241,13 @@ docker-image-verify: ## Verifies all built images to contain the correct binary .PHONY: docker-push-all docker-push-all: $(addprefix docker-push-,$(ALL_ARCH)) ## Push the docker images to be included in the release for all architectures + related 
multiarch manifests - $(MAKE) ALL_ARCH="$(ALL_ARCH)" $(addprefix docker-push-manifest-,$(ALL_DOCKER_BUILD)) + $(MAKE) ALL_ARCH="$(ALL_ARCH)" $(addprefix docker-push-manifest-,$(SYSELF_RELEVANT_DOCKER_BUILD)) docker-push-%: $(MAKE) ARCH=$* docker-push .PHONY: docker-push -docker-push: $(addprefix docker-push-,$(ALL_DOCKER_BUILD)) ## Push the docker images to be included in the release +docker-push: $(addprefix docker-push-,$(SYSELF_RELEVANT_DOCKER_BUILD)) ## Push the docker images to be included in the release .PHONY: docker-push-core docker-push-core: ## Push the core docker image diff --git a/README.md b/README.md index 2f87ecc9b480..586597ac88f2 100644 --- a/README.md +++ b/README.md @@ -62,4 +62,23 @@ Participation in the Kubernetes community is governed by the [Kubernetes Code of [Good first issue]: https://github.com/kubernetes-sigs/cluster-api/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22 [Help wanted]: https://github.com/kubernetes-sigs/cluster-api/issues?utf8=%E2%9C%93&q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22+ - +# Release/Development (Syself Fork) + +```console +export RELEASE_TAG=v1.11.6-syself.XX && git tag -a $RELEASE_TAG -m $RELEASE_TAG && git push origin $RELEASE_TAG +``` + +Then a Github Action starts and builds a draft release. + +You can get notified when the action is finished like this: + +```console +gh run watch -i 20 ; music +``` + +Then open Git repo `autopilot`. Use branch `main` for deploy to prod and branch `syself/oci` for +deploy to testing-cluster. + +Update the capi version. + +Follow the Autpilot release docs: [autopilot README](https://github.com/syself/autopilot/). 
diff --git a/api/core/v1beta1/cluster_types.go b/api/core/v1beta1/cluster_types.go index ab232262de63..4bed37caa049 100644 --- a/api/core/v1beta1/cluster_types.go +++ b/api/core/v1beta1/cluster_types.go @@ -596,6 +596,15 @@ type ControlPlaneTopology struct { // +optional Metadata ObjectMeta `json:"metadata,omitempty"` + // class is the name of the ControlPlaneClass used to create the set of control plane nodes. + // This should match one of the control plane classes defined in the ClusterClass object. + // If left empty `clusterclass.Spec.ControlPlane` is used. + // syself new field. + // +optional + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + Class string `json:"class,omitempty"` + // replicas is the number of control plane nodes. // If the value is nil, the ControlPlane object is created without the number of Replicas // and it's assumed that the control plane controller does not implement support for this field. diff --git a/api/core/v1beta1/clusterclass_types.go b/api/core/v1beta1/clusterclass_types.go index 9bee4bb133f9..979d850fd816 100644 --- a/api/core/v1beta1/clusterclass_types.go +++ b/api/core/v1beta1/clusterclass_types.go @@ -116,6 +116,18 @@ type ClusterClassSpec struct { // +optional ControlPlane ControlPlaneClass `json:"controlPlane,omitempty"` + // controlPlaneClasses is a list of named control plane classes that can be referenced + // from the Cluster topology. Each class defines a distinct control plane + // configuration. The class name MUST be unique within this list. + // When classes is defined, the Cluster topology can reference a specific + // control plane class by name. + // syself new field. + // +optional + // +listType=map + // +listMapKey=class + // +kubebuilder:validation:MaxItems=100 + ControlPlaneClasses []ControlPlaneClass `json:"controlPlaneClasses,omitempty"` + // workers describes the worker nodes for the cluster. 
// It is a collection of node types which can be used to create // the worker nodes of the cluster. @@ -164,6 +176,15 @@ type ControlPlaneClass struct { // +optional Metadata ObjectMeta `json:"metadata,omitempty"` + // class denotes a type of control-plane node present in the cluster. + // When used in ControlPlaneTopologyClass.Classes, this name MUST be unique + // within the list and can be referenced from the Cluster topology. + // syself new field. + // +optional + // +default="" + // +kubebuilder:validation:MaxLength=1024 + Class string `json:"class,omitempty"` //nolint:kubeapilinter + // LocalObjectTemplate contains the reference to the control plane provider. LocalObjectTemplate `json:",inline"` @@ -1108,6 +1129,12 @@ type PatchSelectorMatch struct { // +optional InfrastructureCluster bool `json:"infrastructureCluster,omitempty"` + // controlPlaneClass selects templates referenced in specific ControlPlaneClasses in + // .spec.controlPlane.classes. + // syself new field. + // +optional + ControlPlaneClass *PatchSelectorMatchControlPlaneClass `json:"controlPlaneClass,omitempty"` + // machineDeploymentClass selects templates referenced in specific MachineDeploymentClasses in // .spec.workers.machineDeployments. // +optional @@ -1119,8 +1146,30 @@ type PatchSelectorMatch struct { MachinePoolClass *PatchSelectorMatchMachinePoolClass `json:"machinePoolClass,omitempty"` } -// PatchSelectorMatchMachineDeploymentClass selects templates referenced -// in specific MachineDeploymentClasses in .spec.workers.machineDeployments. +// PatchSelectorMatchControlPlaneClass provides a way to target patch operations +// at templates that are associated with specific ControlPlane classes. In a +// ClusterClass definition, the .spec.controlPlane.classes field defines one or +// more named classes, each of which references infrastructure and bootstrap +// templates. 
This selector lets you narrow down which of those classes (and +// therefore which templates) a given patch should apply to, rather than +// applying the patch to all control plane templates indiscriminately. +// syself new type. +type PatchSelectorMatchControlPlaneClass struct { + // names selects templates by class names. + // +optional + // +kubebuilder:validation:MaxItems=100 + // +kubebuilder:validation:items:MinLength=1 + // +kubebuilder:validation:items:MaxLength=256 + Names []string `json:"names,omitempty"` +} + +// PatchSelectorMatchMachineDeploymentClass provides a way to target patch +// operations at templates associated with specific MachineDeployment classes. +// In a ClusterClass definition, .spec.workers.machineDeployments defines named +// classes that each reference infrastructure and bootstrap templates for worker +// nodes. This selector lets you scope a patch so it only affects the templates +// tied to particular MachineDeployment classes. +// syself change in comment. type PatchSelectorMatchMachineDeploymentClass struct { // names selects templates by class names. 
// +optional diff --git a/api/core/v1beta1/zz_generated.conversion.go b/api/core/v1beta1/zz_generated.conversion.go index 4bd292dce442..d04441795678 100644 --- a/api/core/v1beta1/zz_generated.conversion.go +++ b/api/core/v1beta1/zz_generated.conversion.go @@ -489,6 +489,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*PatchSelectorMatchControlPlaneClass)(nil), (*v1beta2.PatchSelectorMatchControlPlaneClass)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_PatchSelectorMatchControlPlaneClass_To_v1beta2_PatchSelectorMatchControlPlaneClass(a.(*PatchSelectorMatchControlPlaneClass), b.(*v1beta2.PatchSelectorMatchControlPlaneClass), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.PatchSelectorMatchControlPlaneClass)(nil), (*PatchSelectorMatchControlPlaneClass)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_PatchSelectorMatchControlPlaneClass_To_v1beta1_PatchSelectorMatchControlPlaneClass(a.(*v1beta2.PatchSelectorMatchControlPlaneClass), b.(*PatchSelectorMatchControlPlaneClass), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*PatchSelectorMatchMachineDeploymentClass)(nil), (*v1beta2.PatchSelectorMatchMachineDeploymentClass)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_PatchSelectorMatchMachineDeploymentClass_To_v1beta2_PatchSelectorMatchMachineDeploymentClass(a.(*PatchSelectorMatchMachineDeploymentClass), b.(*v1beta2.PatchSelectorMatchMachineDeploymentClass), scope) }); err != nil { @@ -1206,6 +1216,17 @@ func autoConvert_v1beta1_ClusterClassSpec_To_v1beta2_ClusterClassSpec(in *Cluste if err := Convert_v1beta1_ControlPlaneClass_To_v1beta2_ControlPlaneClass(&in.ControlPlane, &out.ControlPlane, s); err != nil { return err } + if in.ControlPlaneClasses != nil { + in, out := &in.ControlPlaneClasses, 
&out.ControlPlaneClasses + *out = make([]v1beta2.ControlPlaneClass, len(*in)) + for i := range *in { + if err := Convert_v1beta1_ControlPlaneClass_To_v1beta2_ControlPlaneClass(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.ControlPlaneClasses = nil + } if err := Convert_v1beta1_WorkersClass_To_v1beta2_WorkersClass(&in.Workers, &out.Workers, s); err != nil { return err } @@ -1246,6 +1267,17 @@ func autoConvert_v1beta2_ClusterClassSpec_To_v1beta1_ClusterClassSpec(in *v1beta if err := Convert_v1beta2_ControlPlaneClass_To_v1beta1_ControlPlaneClass(&in.ControlPlane, &out.ControlPlane, s); err != nil { return err } + if in.ControlPlaneClasses != nil { + in, out := &in.ControlPlaneClasses, &out.ControlPlaneClasses + *out = make([]ControlPlaneClass, len(*in)) + for i := range *in { + if err := Convert_v1beta2_ControlPlaneClass_To_v1beta1_ControlPlaneClass(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.ControlPlaneClasses = nil + } if err := Convert_v1beta2_WorkersClass_To_v1beta1_WorkersClass(&in.Workers, &out.Workers, s); err != nil { return err } @@ -1713,6 +1745,7 @@ func autoConvert_v1beta1_ControlPlaneClass_To_v1beta2_ControlPlaneClass(in *Cont if err := Convert_v1beta1_ObjectMeta_To_v1beta2_ObjectMeta(&in.Metadata, &out.Metadata, s); err != nil { return err } + out.Class = in.Class // WARNING: in.LocalObjectTemplate requires manual conversion: does not exist in peer-type // WARNING: in.MachineInfrastructure requires manual conversion: inconvertible types (*sigs.k8s.io/cluster-api/api/core/v1beta1.LocalObjectTemplate vs sigs.k8s.io/cluster-api/api/core/v1beta2.ControlPlaneClassMachineInfrastructureTemplate) // WARNING: in.MachineHealthCheck requires manual conversion: does not exist in peer-type @@ -1729,6 +1762,7 @@ func autoConvert_v1beta2_ControlPlaneClass_To_v1beta1_ControlPlaneClass(in *v1be if err := Convert_v1beta2_ObjectMeta_To_v1beta1_ObjectMeta(&in.Metadata, &out.Metadata, s); err != nil { return 
err } + out.Class = in.Class // WARNING: in.TemplateRef requires manual conversion: does not exist in peer-type // WARNING: in.MachineInfrastructure requires manual conversion: inconvertible types (sigs.k8s.io/cluster-api/api/core/v1beta2.ControlPlaneClassMachineInfrastructureTemplate vs *sigs.k8s.io/cluster-api/api/core/v1beta1.LocalObjectTemplate) // WARNING: in.HealthCheck requires manual conversion: does not exist in peer-type @@ -1743,6 +1777,7 @@ func autoConvert_v1beta1_ControlPlaneTopology_To_v1beta2_ControlPlaneTopology(in if err := Convert_v1beta1_ObjectMeta_To_v1beta2_ObjectMeta(&in.Metadata, &out.Metadata, s); err != nil { return err } + out.Class = in.Class out.Replicas = (*int32)(unsafe.Pointer(in.Replicas)) if err := Convert_v1beta1_ControlPlaneTopologyRolloutSpec_To_v1beta2_ControlPlaneTopologyRolloutSpec(&in.Rollout, &out.Rollout, s); err != nil { return err @@ -1761,6 +1796,7 @@ func autoConvert_v1beta2_ControlPlaneTopology_To_v1beta1_ControlPlaneTopology(in if err := Convert_v1beta2_ObjectMeta_To_v1beta1_ObjectMeta(&in.Metadata, &out.Metadata, s); err != nil { return err } + out.Class = in.Class out.Replicas = (*int32)(unsafe.Pointer(in.Replicas)) if err := Convert_v1beta2_ControlPlaneTopologyRolloutSpec_To_v1beta1_ControlPlaneTopologyRolloutSpec(&in.Rollout, &out.Rollout, s); err != nil { return err @@ -3591,6 +3627,7 @@ func autoConvert_v1beta1_PatchSelectorMatch_To_v1beta2_PatchSelectorMatch(in *Pa if err := v1.Convert_bool_To_Pointer_bool(&in.InfrastructureCluster, &out.InfrastructureCluster, s); err != nil { return err } + out.ControlPlaneClass = (*v1beta2.PatchSelectorMatchControlPlaneClass)(unsafe.Pointer(in.ControlPlaneClass)) out.MachineDeploymentClass = (*v1beta2.PatchSelectorMatchMachineDeploymentClass)(unsafe.Pointer(in.MachineDeploymentClass)) out.MachinePoolClass = (*v1beta2.PatchSelectorMatchMachinePoolClass)(unsafe.Pointer(in.MachinePoolClass)) return nil @@ -3608,6 +3645,7 @@ func 
autoConvert_v1beta2_PatchSelectorMatch_To_v1beta1_PatchSelectorMatch(in *v1 if err := v1.Convert_Pointer_bool_To_bool(&in.InfrastructureCluster, &out.InfrastructureCluster, s); err != nil { return err } + out.ControlPlaneClass = (*PatchSelectorMatchControlPlaneClass)(unsafe.Pointer(in.ControlPlaneClass)) out.MachineDeploymentClass = (*PatchSelectorMatchMachineDeploymentClass)(unsafe.Pointer(in.MachineDeploymentClass)) out.MachinePoolClass = (*PatchSelectorMatchMachinePoolClass)(unsafe.Pointer(in.MachinePoolClass)) return nil @@ -3618,6 +3656,26 @@ func Convert_v1beta2_PatchSelectorMatch_To_v1beta1_PatchSelectorMatch(in *v1beta return autoConvert_v1beta2_PatchSelectorMatch_To_v1beta1_PatchSelectorMatch(in, out, s) } +func autoConvert_v1beta1_PatchSelectorMatchControlPlaneClass_To_v1beta2_PatchSelectorMatchControlPlaneClass(in *PatchSelectorMatchControlPlaneClass, out *v1beta2.PatchSelectorMatchControlPlaneClass, s conversion.Scope) error { + out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) + return nil +} + +// Convert_v1beta1_PatchSelectorMatchControlPlaneClass_To_v1beta2_PatchSelectorMatchControlPlaneClass is an autogenerated conversion function. +func Convert_v1beta1_PatchSelectorMatchControlPlaneClass_To_v1beta2_PatchSelectorMatchControlPlaneClass(in *PatchSelectorMatchControlPlaneClass, out *v1beta2.PatchSelectorMatchControlPlaneClass, s conversion.Scope) error { + return autoConvert_v1beta1_PatchSelectorMatchControlPlaneClass_To_v1beta2_PatchSelectorMatchControlPlaneClass(in, out, s) +} + +func autoConvert_v1beta2_PatchSelectorMatchControlPlaneClass_To_v1beta1_PatchSelectorMatchControlPlaneClass(in *v1beta2.PatchSelectorMatchControlPlaneClass, out *PatchSelectorMatchControlPlaneClass, s conversion.Scope) error { + out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) + return nil +} + +// Convert_v1beta2_PatchSelectorMatchControlPlaneClass_To_v1beta1_PatchSelectorMatchControlPlaneClass is an autogenerated conversion function. 
+func Convert_v1beta2_PatchSelectorMatchControlPlaneClass_To_v1beta1_PatchSelectorMatchControlPlaneClass(in *v1beta2.PatchSelectorMatchControlPlaneClass, out *PatchSelectorMatchControlPlaneClass, s conversion.Scope) error { + return autoConvert_v1beta2_PatchSelectorMatchControlPlaneClass_To_v1beta1_PatchSelectorMatchControlPlaneClass(in, out, s) +} + func autoConvert_v1beta1_PatchSelectorMatchMachineDeploymentClass_To_v1beta2_PatchSelectorMatchMachineDeploymentClass(in *PatchSelectorMatchMachineDeploymentClass, out *v1beta2.PatchSelectorMatchMachineDeploymentClass, s conversion.Scope) error { out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) return nil diff --git a/api/core/v1beta1/zz_generated.deepcopy.go b/api/core/v1beta1/zz_generated.deepcopy.go index 973774e851f1..dfe8b084d1f6 100644 --- a/api/core/v1beta1/zz_generated.deepcopy.go +++ b/api/core/v1beta1/zz_generated.deepcopy.go @@ -217,6 +217,13 @@ func (in *ClusterClassSpec) DeepCopyInto(out *ClusterClassSpec) { (*in).DeepCopyInto(*out) } in.ControlPlane.DeepCopyInto(&out.ControlPlane) + if in.ControlPlaneClasses != nil { + in, out := &in.ControlPlaneClasses, &out.ControlPlaneClasses + *out = make([]ControlPlaneClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } in.Workers.DeepCopyInto(&out.Workers) if in.Variables != nil { in, out := &in.Variables, &out.Variables @@ -2906,6 +2913,11 @@ func (in *PatchSelector) DeepCopy() *PatchSelector { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PatchSelectorMatch) DeepCopyInto(out *PatchSelectorMatch) { *out = *in + if in.ControlPlaneClass != nil { + in, out := &in.ControlPlaneClass, &out.ControlPlaneClass + *out = new(PatchSelectorMatchControlPlaneClass) + (*in).DeepCopyInto(*out) + } if in.MachineDeploymentClass != nil { in, out := &in.MachineDeploymentClass, &out.MachineDeploymentClass *out = new(PatchSelectorMatchMachineDeploymentClass) @@ -2928,6 +2940,26 @@ func (in *PatchSelectorMatch) DeepCopy() *PatchSelectorMatch { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchSelectorMatchControlPlaneClass) DeepCopyInto(out *PatchSelectorMatchControlPlaneClass) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchSelectorMatchControlPlaneClass. +func (in *PatchSelectorMatchControlPlaneClass) DeepCopy() *PatchSelectorMatchControlPlaneClass { + if in == nil { + return nil + } + out := new(PatchSelectorMatchControlPlaneClass) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PatchSelectorMatchMachineDeploymentClass) DeepCopyInto(out *PatchSelectorMatchMachineDeploymentClass) { *out = *in diff --git a/api/core/v1beta2/cluster_types.go b/api/core/v1beta2/cluster_types.go index 9348ce5f5573..bfd1675d29bb 100644 --- a/api/core/v1beta2/cluster_types.go +++ b/api/core/v1beta2/cluster_types.go @@ -625,6 +625,15 @@ type ControlPlaneTopology struct { // +optional Metadata ObjectMeta `json:"metadata,omitempty,omitzero"` + // class is the name of the ControlPlaneClass used to create the set of control plane nodes. + // This should match one of the control plane classes defined in the ClusterClass object. 
+ // If left empty `clusterclass.Spec.ControlPlane` is used. + // syself new field. + // +optional + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + Class string `json:"class,omitempty"` + // replicas is the number of control plane nodes. // If the value is not set, the ControlPlane object is created without the number of Replicas // and it's assumed that the control plane controller does not implement support for this field. diff --git a/api/core/v1beta2/clusterclass_types.go b/api/core/v1beta2/clusterclass_types.go index 17e09e48a0cb..f87b078f8adc 100644 --- a/api/core/v1beta2/clusterclass_types.go +++ b/api/core/v1beta2/clusterclass_types.go @@ -113,6 +113,18 @@ type ClusterClassSpec struct { // +required ControlPlane ControlPlaneClass `json:"controlPlane,omitempty,omitzero"` + // controlPlaneClasses is a list of named control plane classes that can be referenced + // from the Cluster topology. Each class defines a distinct control plane + // configuration. The class name MUST be unique within this list. + // When classes is defined, the Cluster topology can reference a specific + // control plane class by name. + // syself new field. + // +optional + // +listType=map + // +listMapKey=class + // +kubebuilder:validation:MaxItems=100 + ControlPlaneClasses []ControlPlaneClass `json:"controlPlaneClasses,omitempty"` + // workers describes the worker nodes for the cluster. // It is a collection of node types which can be used to create // the worker nodes of the cluster. @@ -176,6 +188,15 @@ type ControlPlaneClass struct { // +optional Metadata ObjectMeta `json:"metadata,omitempty,omitzero"` + // class denotes a type of control-plane node present in the cluster. + // When used in ControlPlaneTopologyClass.Classes, this name MUST be unique + // within the list and can be referenced from the Cluster topology. + // syself new field. 
+ // +optional + // +default="" + // +kubebuilder:validation:MaxLength=1024 + Class string `json:"class,omitempty"` //nolint:kubeapilinter + // templateRef contains the reference to a provider-specific control plane template. // +required TemplateRef ClusterClassTemplateReference `json:"templateRef,omitempty,omitzero"` @@ -1403,6 +1424,12 @@ type PatchSelectorMatch struct { // +optional InfrastructureCluster *bool `json:"infrastructureCluster,omitempty"` + // controlPlaneClass selects templates referenced in specific ControlPlaneClasses in + // .spec.controlPlane.classes. + // syself new field. + // +optional + ControlPlaneClass *PatchSelectorMatchControlPlaneClass `json:"controlPlaneClass,omitempty"` + // machineDeploymentClass selects templates referenced in specific MachineDeploymentClasses in // .spec.workers.machineDeployments. // +optional @@ -1414,8 +1441,31 @@ type PatchSelectorMatch struct { MachinePoolClass *PatchSelectorMatchMachinePoolClass `json:"machinePoolClass,omitempty"` } -// PatchSelectorMatchMachineDeploymentClass selects templates referenced -// in specific MachineDeploymentClasses in .spec.workers.machineDeployments. +// PatchSelectorMatchControlPlaneClass provides a way to target patch operations +// at templates that are associated with specific ControlPlane classes. In a +// ClusterClass definition, the .spec.controlPlane.classes field defines one or +// more named classes, each of which references infrastructure and bootstrap +// templates. This selector lets you narrow down which of those classes (and +// therefore which templates) a given patch should apply to, rather than +// applying the patch to all control plane templates indiscriminately. +// syself new type. +type PatchSelectorMatchControlPlaneClass struct { + // names selects templates by class names. 
+ // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=100 + // +kubebuilder:validation:items:MinLength=1 + // +kubebuilder:validation:items:MaxLength=256 + Names []string `json:"names,omitempty"` +} + +// PatchSelectorMatchMachineDeploymentClass provides a way to target patch +// operations at templates associated with specific MachineDeployment classes. +// In a ClusterClass definition, .spec.workers.machineDeployments defines named +// classes that each reference infrastructure and bootstrap templates for worker +// nodes. This selector lets you scope a patch so it only affects the templates +// tied to particular MachineDeployment classes. +// syself change in comment. type PatchSelectorMatchMachineDeploymentClass struct { // names selects templates by class names. // +optional diff --git a/api/core/v1beta2/zz_generated.deepcopy.go b/api/core/v1beta2/zz_generated.deepcopy.go index 4a7c37c12b09..9619e49b1478 100644 --- a/api/core/v1beta2/zz_generated.deepcopy.go +++ b/api/core/v1beta2/zz_generated.deepcopy.go @@ -238,6 +238,13 @@ func (in *ClusterClassSpec) DeepCopyInto(out *ClusterClassSpec) { } out.Infrastructure = in.Infrastructure in.ControlPlane.DeepCopyInto(&out.ControlPlane) + if in.ControlPlaneClasses != nil { + in, out := &in.ControlPlaneClasses, &out.ControlPlaneClasses + *out = make([]ControlPlaneClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } in.Workers.DeepCopyInto(&out.Workers) if in.Variables != nil { in, out := &in.Variables, &out.Variables @@ -3717,6 +3724,11 @@ func (in *PatchSelectorMatch) DeepCopyInto(out *PatchSelectorMatch) { *out = new(bool) **out = **in } + if in.ControlPlaneClass != nil { + in, out := &in.ControlPlaneClass, &out.ControlPlaneClass + *out = new(PatchSelectorMatchControlPlaneClass) + (*in).DeepCopyInto(*out) + } if in.MachineDeploymentClass != nil { in, out := &in.MachineDeploymentClass, &out.MachineDeploymentClass *out = new(PatchSelectorMatchMachineDeploymentClass) 
@@ -3739,6 +3751,26 @@ func (in *PatchSelectorMatch) DeepCopy() *PatchSelectorMatch { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchSelectorMatchControlPlaneClass) DeepCopyInto(out *PatchSelectorMatchControlPlaneClass) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchSelectorMatchControlPlaneClass. +func (in *PatchSelectorMatchControlPlaneClass) DeepCopy() *PatchSelectorMatchControlPlaneClass { + if in == nil { + return nil + } + out := new(PatchSelectorMatchControlPlaneClass) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PatchSelectorMatchMachineDeploymentClass) DeepCopyInto(out *PatchSelectorMatchMachineDeploymentClass) { *out = *in diff --git a/api/core/v1beta2/zz_generated.openapi.go b/api/core/v1beta2/zz_generated.openapi.go index 717fbf09017a..cc8d394a6e3f 100644 --- a/api/core/v1beta2/zz_generated.openapi.go +++ b/api/core/v1beta2/zz_generated.openapi.go @@ -172,6 +172,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchDefinition": schema_cluster_api_api_core_v1beta2_PatchDefinition(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelector": schema_cluster_api_api_core_v1beta2_PatchSelector(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatch": schema_cluster_api_api_core_v1beta2_PatchSelectorMatch(ref), + "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatchControlPlaneClass": schema_cluster_api_api_core_v1beta2_PatchSelectorMatchControlPlaneClass(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatchMachineDeploymentClass": 
schema_cluster_api_api_core_v1beta2_PatchSelectorMatchMachineDeploymentClass(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatchMachinePoolClass": schema_cluster_api_api_core_v1beta2_PatchSelectorMatchMachinePoolClass(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.Topology": schema_cluster_api_api_core_v1beta2_Topology(ref), @@ -577,6 +578,28 @@ func schema_cluster_api_api_core_v1beta2_ClusterClassSpec(ref common.ReferenceCa Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.ControlPlaneClass"), }, }, + "controlPlaneClasses": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "class", + }, + "x-kubernetes-list-type": "map", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "controlPlaneClasses is a list of named control plane classes that can be referenced from the Cluster topology. Each class defines a distinct control plane configuration. The class name MUST be unique within this list. When classes is defined, the Cluster topology can reference a specific control plane class by name. syself new field.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.ControlPlaneClass"), + }, + }, + }, + }, + }, "workers": { SchemaProps: spec.SchemaProps{ Description: "workers describes the worker nodes for the cluster. It is a collection of node types which can be used to create the worker nodes of the cluster.", @@ -1562,6 +1585,14 @@ func schema_cluster_api_api_core_v1beta2_ControlPlaneClass(ref common.ReferenceC Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.ObjectMeta"), }, }, + "class": { + SchemaProps: spec.SchemaProps{ + Description: "class denotes a type of control-plane node present in the cluster. 
When used in ControlPlaneTopologyClass.Classes, this name MUST be unique within the list and can be referenced from the Cluster topology. syself new field.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, "templateRef": { SchemaProps: spec.SchemaProps{ Description: "templateRef contains the reference to a provider-specific control plane template.", @@ -1888,6 +1919,13 @@ func schema_cluster_api_api_core_v1beta2_ControlPlaneTopology(ref common.Referen Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.ObjectMeta"), }, }, + "class": { + SchemaProps: spec.SchemaProps{ + Description: "class is the name of the ControlPlaneClass used to create the set of control plane nodes. This should match one of the control plane classes defined in the ClusterClass object. If left empty `clusterclass.Spec.ControlPlane` is used. syself new field.", + Type: []string{"string"}, + Format: "", + }, + }, "replicas": { SchemaProps: spec.SchemaProps{ Description: "replicas is the number of control plane nodes. If the value is not set, the ControlPlane object is created without the number of Replicas and it's assumed that the control plane controller does not implement support for this field. When specified against a control plane provider that lacks support for this field, this value will be ignored.", @@ -6779,6 +6817,12 @@ func schema_cluster_api_api_core_v1beta2_PatchSelectorMatch(ref common.Reference Format: "", }, }, + "controlPlaneClass": { + SchemaProps: spec.SchemaProps{ + Description: "controlPlaneClass selects templates referenced in specific ControlPlaneClasses in .spec.controlPlane.classes. 
syself new field.", + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatchControlPlaneClass"), + }, + }, "machineDeploymentClass": { SchemaProps: spec.SchemaProps{ Description: "machineDeploymentClass selects templates referenced in specific MachineDeploymentClasses in .spec.workers.machineDeployments.", @@ -6795,7 +6839,40 @@ func schema_cluster_api_api_core_v1beta2_PatchSelectorMatch(ref common.Reference }, }, Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatchMachineDeploymentClass", "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatchMachinePoolClass"}, + "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatchControlPlaneClass", "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatchMachineDeploymentClass", "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatchMachinePoolClass"}, + } +} + +func schema_cluster_api_api_core_v1beta2_PatchSelectorMatchControlPlaneClass(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PatchSelectorMatchControlPlaneClass provides a way to target patch operations at templates that are associated with specific ControlPlane classes. In a ClusterClass definition, the .spec.controlPlane.classes field defines one or more named classes, each of which references infrastructure and bootstrap templates. This selector lets you narrow down which of those classes (and therefore which templates) a given patch should apply to, rather than applying the patch to all control plane templates indiscriminately. 
syself new type.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "names": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "names selects templates by class names.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, } } @@ -6803,7 +6880,7 @@ func schema_cluster_api_api_core_v1beta2_PatchSelectorMatchMachineDeploymentClas return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PatchSelectorMatchMachineDeploymentClass selects templates referenced in specific MachineDeploymentClasses in .spec.workers.machineDeployments.", + Description: "PatchSelectorMatchMachineDeploymentClass provides a way to target patch operations at templates associated with specific MachineDeployment classes. In a ClusterClass definition, .spec.workers.machineDeployments defines named classes that each reference infrastructure and bootstrap templates for worker nodes. This selector lets you scope a patch so it only affects the templates tied to particular MachineDeployment classes. syself change in comment.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "names": { diff --git a/api/runtime/hooks/v1alpha1/topologymutation_variable_types.go b/api/runtime/hooks/v1alpha1/topologymutation_variable_types.go index 7248afea6ad1..e6d73097ea10 100644 --- a/api/runtime/hooks/v1alpha1/topologymutation_variable_types.go +++ b/api/runtime/hooks/v1alpha1/topologymutation_variable_types.go @@ -139,6 +139,11 @@ type ControlPlaneBuiltins struct { // +optional Name string `json:"name,omitempty"` + // class is the class name of the ControlPlane, + // to which the current template belongs to. 
+ // +optional + Class string `json:"class,omitempty"` + // replicas is the value of the replicas field of the ControlPlane object. // +optional Replicas *int32 `json:"replicas,omitempty"` diff --git a/api/runtime/hooks/v1alpha1/zz_generated.openapi.go b/api/runtime/hooks/v1alpha1/zz_generated.openapi.go index 7fe814045182..38f1a96788fb 100644 --- a/api/runtime/hooks/v1alpha1/zz_generated.openapi.go +++ b/api/runtime/hooks/v1alpha1/zz_generated.openapi.go @@ -1909,6 +1909,13 @@ func schema_api_runtime_hooks_v1alpha1_ControlPlaneBuiltins(ref common.Reference Format: "", }, }, + "class": { + SchemaProps: spec.SchemaProps{ + Description: "class is the class name of the ControlPlane, to which the current template belongs to.", + Type: []string{"string"}, + Format: "", + }, + }, "replicas": { SchemaProps: spec.SchemaProps{ Description: "replicas is the value of the replicas field of the ControlPlane object.", diff --git a/cmd/clusterctl/client/cluster/objectgraph.go b/cmd/clusterctl/client/cluster/objectgraph.go index 83bd433c2a03..1bab8633a77c 100644 --- a/cmd/clusterctl/client/cluster/objectgraph.go +++ b/cmd/clusterctl/client/cluster/objectgraph.go @@ -43,9 +43,11 @@ import ( secretutil "sigs.k8s.io/cluster-api/util/secret" ) -const clusterTopologyNameKey = "cluster.spec.topology.class" -const clusterTopologyNamespaceKey = "cluster.spec.topology.classNamespace" -const clusterResourceSetBindingClusterNameKey = "clusterresourcesetbinding.spec.clustername" +const ( + clusterTopologyNameKey = "cluster.spec.topology.class" + clusterTopologyNamespaceKey = "cluster.spec.topology.classNamespace" + clusterResourceSetBindingClusterNameKey = "clusterresourcesetbinding.spec.clustername" +) type empty struct{} @@ -523,12 +525,29 @@ func (o *objectGraph) Discovery(ctx context.Context, namespace string) error { errs := []error{} _, err = o.fetchRef(ctx, discoveryBackoff, cc.Spec.Infrastructure.TemplateRef.ToObjectReference(cc.Namespace)) errs = append(errs, err) - _, err = 
o.fetchRef(ctx, discoveryBackoff, cc.Spec.ControlPlane.TemplateRef.ToObjectReference(cc.Namespace)) - errs = append(errs, err) + + // syself change. + // Fetch inline control plane refs (if defined). + if cc.Spec.ControlPlane.TemplateRef.IsDefined() { + _, err = o.fetchRef(ctx, discoveryBackoff, cc.Spec.ControlPlane.TemplateRef.ToObjectReference(cc.Namespace)) + errs = append(errs, err) + } _, err = o.fetchRef(ctx, discoveryBackoff, cc.Spec.ControlPlane.MachineInfrastructure.TemplateRef.ToObjectReference(cc.Namespace)) errs = append(errs, err) + // Fetch refs from named control plane classes. + for _, cpClass := range cc.Spec.ControlPlaneClasses { + if cpClass.TemplateRef.IsDefined() { + _, err = o.fetchRef(ctx, discoveryBackoff, cpClass.TemplateRef.ToObjectReference(cc.Namespace)) + errs = append(errs, err) + } + if cpClass.MachineInfrastructure.TemplateRef.IsDefined() { + _, err = o.fetchRef(ctx, discoveryBackoff, cpClass.MachineInfrastructure.TemplateRef.ToObjectReference(cc.Namespace)) + errs = append(errs, err) + } + } + for _, mdClass := range cc.Spec.Workers.MachineDeployments { _, err = o.fetchRef(ctx, discoveryBackoff, mdClass.Infrastructure.TemplateRef.ToObjectReference(cc.Namespace)) errs = append(errs, err) diff --git a/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml b/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml index e04f1fa552bc..a8df4539f3df 100644 --- a/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml +++ b/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml @@ -94,6 +94,15 @@ spec: controlPlane is a reference to a local struct that holds the details for provisioning the Control Plane for the Cluster. properties: + class: + default: "" + description: |- + class denotes a type of control-plane node present in the cluster. + When used in ControlPlaneTopologyClass.Classes, this name MUST be unique + within the list and can be referenced from the Cluster topology. + syself new field. 
+ maxLength: 1024 + type: string machineHealthCheck: description: |- machineHealthCheck defines a MachineHealthCheck for this ControlPlaneClass. @@ -542,6 +551,480 @@ spec: required: - ref type: object + controlPlaneClasses: + description: |- + controlPlaneClasses is a list of named control plane classes that can be referenced + from the Cluster topology. Each class defines a distinct control plane + configuration. The class name MUST be unique within this list. + When classes is defined, the Cluster topology can reference a specific + control plane class by name. + syself new field. + items: + description: ControlPlaneClass defines the class for the control + plane. + properties: + class: + default: "" + description: |- + class denotes a type of control-plane node present in the cluster. + When used in ControlPlaneTopologyClass.Classes, this name MUST be unique + within the list and can be referenced from the Cluster topology. + syself new field. + maxLength: 1024 + type: string + machineHealthCheck: + description: |- + machineHealthCheck defines a MachineHealthCheck for this ControlPlaneClass. + This field is supported if and only if the ControlPlane provider template + referenced above is Machine based and supports setting replicas. + properties: + maxUnhealthy: + anyOf: + - type: integer + - type: string + description: |- + maxUnhealthy specifies the maximum number of unhealthy machines allowed. + Any further remediation is only allowed if at most "maxUnhealthy" machines selected by + "selector" are not healthy. + x-kubernetes-int-or-string: true + nodeStartupTimeout: + description: |- + nodeStartupTimeout allows to set the maximum time for MachineHealthCheck + to consider a Machine unhealthy if a corresponding Node isn't associated + through a `Spec.ProviderID` field. 
+ + The duration set in this field is compared to the greatest of: + - Cluster's infrastructure ready condition timestamp (if and when available) + - Control Plane's initialized condition timestamp (if and when available) + - Machine's infrastructure ready condition timestamp (if and when available) + - Machine's metadata creation timestamp + + Defaults to 10 minutes. + If you wish to disable this feature, set the value explicitly to 0. + type: string + remediationTemplate: + description: |- + remediationTemplate is a reference to a remediation template + provided by an infrastructure provider. + + This field is completely optional, when filled, the MachineHealthCheck controller + creates a new object from the template referenced and hands off remediation of the machine to + a controller that lives outside of Cluster API. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + unhealthyConditions: + description: |- + unhealthyConditions contains a list of the conditions that determine + whether a node is considered unhealthy. The conditions are combined in a + logical OR, i.e. if any of the conditions is met, the node is unhealthy. + items: + description: |- + UnhealthyCondition represents a Node condition type and value with a timeout + specified as a duration. When the named condition has been in the given + status for at least the timeout value, a node is considered unhealthy. + properties: + status: + description: status of the condition, one of True, + False, Unknown. + minLength: 1 + type: string + timeout: + description: |- + timeout is the duration that a node must be in a given status for, + after which the node is considered unhealthy. + For example, with a value of "1h", the node must match the status + for at least 1 hour before being considered unhealthy. + type: string + type: + description: type of Node condition + minLength: 1 + type: string + required: + - status + - timeout + - type + type: object + maxItems: 100 + type: array + unhealthyMachineConditions: + description: |- + unhealthyMachineConditions contains a list of the machine conditions that determine + whether a machine is considered unhealthy. The conditions are combined in a + logical OR, i.e. if any of the conditions is met, the machine is unhealthy. 
+ items: + description: |- + UnhealthyMachineCondition represents a Machine condition type and value with a timeout + specified as a duration. When the named condition has been in the given + status for at least the timeout value, a machine is considered unhealthy. + properties: + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + timeout: + description: |- + timeout is the duration that a Machine must be in a given status for, + after which the Machine is considered unhealthy. + For example, with a value of "1h", the Machine must match the status + for at least 1 hour before being considered unhealthy. + type: string + type: + description: type of Machine condition + maxLength: 316 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + x-kubernetes-validations: + - message: 'type must not be one of: Ready, Available, + HealthCheckSucceeded, OwnerRemediated, ExternallyRemediated' + rule: '!(self in [''Ready'',''Available'',''HealthCheckSucceeded'',''OwnerRemediated'',''ExternallyRemediated''])' + required: + - status + - timeout + - type + type: object + maxItems: 100 + minItems: 1 + type: array + x-kubernetes-list-type: atomic + unhealthyRange: + description: |- + unhealthyRange specifies the range of unhealthy machines allowed. + Any further remediation is only allowed if the number of machines selected by "selector" as not healthy + is within the range of "unhealthyRange". Takes precedence over maxUnhealthy. + Eg. 
"[3-5]" - This means that remediation will be allowed only when: + (a) there are at least 3 unhealthy machines (and) + (b) there are at most 5 unhealthy machines + maxLength: 32 + minLength: 1 + pattern: ^\[[0-9]+-[0-9]+\]$ + type: string + type: object + machineInfrastructure: + description: |- + machineInfrastructure defines the metadata and infrastructure information + for control plane machines. + + This field is supported if and only if the control plane provider template + referenced above is Machine based and supports setting replicas. + properties: + ref: + description: |- + ref is a required reference to a custom resource + offered by a provider. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + required: + - ref + type: object + metadata: + description: |- + metadata is the metadata applied to the ControlPlane and the Machines of the ControlPlane + if the ControlPlaneTemplate referenced is machine based. If not, it is applied only to the + ControlPlane. + At runtime this metadata is merged with the corresponding metadata from the topology. + + This field is supported if and only if the control plane provider template + referenced is Machine based. + properties: + annotations: + additionalProperties: + type: string + description: |- + annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + labels is a map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + type: object + namingStrategy: + description: namingStrategy allows changing the naming pattern + used when creating the control plane provider object. + properties: + template: + description: |- + template defines the template to use for generating the name of the ControlPlane object. + If not defined, it will fallback to `{{ .cluster.name }}-{{ .random }}`. 
+ If the templated string exceeds 63 characters, it will be trimmed to 58 characters and will + get concatenated with a random suffix of length 5. + The templating mechanism provides the following arguments: + * `.cluster.name`: The name of the cluster object. + * `.random`: A random alphanumeric string, without vowels, of length 5. + maxLength: 1024 + minLength: 1 + type: string + type: object + nodeDeletionTimeout: + description: |- + nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. + Defaults to 10 seconds. + NOTE: This value can be overridden while defining a Cluster.Topology. + type: string + nodeDrainTimeout: + description: |- + nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + NOTE: This value can be overridden while defining a Cluster.Topology. + type: string + nodeVolumeDetachTimeout: + description: |- + nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. + NOTE: This value can be overridden while defining a Cluster.Topology. + type: string + readinessGates: + description: |- + readinessGates specifies additional conditions to include when evaluating Machine Ready condition. + + This field can be used e.g. to instruct the machine controller to include in the computation for Machine's ready + computation a condition, managed by an external controllers, reporting the status of special software/hardware installed on the Machine. + + NOTE: This field is considered only for computing v1beta2 conditions. 
+ NOTE: If a Cluster defines a custom list of readinessGates for the control plane, + such list overrides readinessGates defined in this field. + NOTE: Specific control plane provider implementations might automatically extend the list of readinessGates; + e.g. the kubeadm control provider adds ReadinessGates for the APIServerPodHealthy, SchedulerPodHealthy conditions, etc. + items: + description: MachineReadinessGate contains the type of a Machine + condition to be used as a readiness gate. + properties: + conditionType: + description: |- + conditionType refers to a condition with matching type in the Machine's condition list. + If the conditions doesn't exist, it will be treated as unknown. + Note: Both Cluster API conditions or conditions added by 3rd party controllers can be used as readiness gates. + maxLength: 316 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + polarity: + description: |- + polarity of the conditionType specified in this readinessGate. + Valid values are Positive, Negative and omitted. + When omitted, the default behaviour will be Positive. + A positive polarity means that the condition should report a true status under normal conditions. + A negative polarity means that the condition should report a false status under normal conditions. + enum: + - Positive + - Negative + type: string + required: + - conditionType + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - conditionType + x-kubernetes-list-type: map + ref: + description: |- + ref is a required reference to a custom resource + offered by a provider. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + taints: + description: |- + taints are the node taints that Cluster API will manage. + This list is not necessarily complete: other Kubernetes components may add or remove other taints from nodes, + e.g. the node controller might add the node.kubernetes.io/not-ready taint. + Only those taints defined in this list will be added or removed by core Cluster API controllers. + + There can be at most 64 taints. + A pod would have to tolerate all existing taints to run on the corresponding node. + + NOTE: This list is implemented as a "map" type, meaning that individual elements can be managed by different owners. 
+ items: + description: MachineTaint defines a taint equivalent to corev1.Taint, + but additionally having a propagation field. + properties: + effect: + description: effect is the effect for the taint. Valid + values are NoSchedule, PreferNoSchedule and NoExecute. + enum: + - NoSchedule + - PreferNoSchedule + - NoExecute + type: string + key: + description: |- + key is the taint key to be applied to a node. + Must be a valid qualified name of maximum size 63 characters + with an optional subdomain prefix of maximum size 253 characters, + separated by a `/`. + maxLength: 317 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/)?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ + type: string + propagation: + description: |- + propagation defines how this taint should be propagated to nodes. + Valid values are 'Always' and 'OnInitialization'. + Always: The taint will be continuously reconciled. If it is not set for a node, it will be added during reconciliation. + OnInitialization: The taint will be added during node initialization. If it gets removed from the node later on it will not get added again. + enum: + - Always + - OnInitialization + type: string + value: + description: |- + value is the taint value corresponding to the taint key. + It must be a valid label value of maximum size 63 characters. 
+ maxLength: 63 + minLength: 1 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + required: + - effect + - key + - propagation + type: object + maxItems: 64 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - key + - effect + x-kubernetes-list-type: map + required: + - ref + type: object + maxItems: 100 + type: array + x-kubernetes-list-map-keys: + - class + x-kubernetes-list-type: map infrastructure: description: |- infrastructure is a reference to a provider-specific template that holds @@ -736,6 +1219,22 @@ spec: Note: this will match the controlPlane and also the controlPlane machineInfrastructure (depending on the kind and apiVersion). type: boolean + controlPlaneClass: + description: |- + controlPlaneClass selects templates referenced in specific ControlPlaneClasses in + .spec.controlPlane.classes. + syself new field. + properties: + names: + description: names selects templates by class + names. + items: + maxLength: 256 + minLength: 1 + type: string + maxItems: 100 + type: array + type: object infrastructureCluster: description: infrastructureCluster selects templates referenced in .spec.infrastructure. @@ -2817,6 +3316,15 @@ spec: controlPlane is a reference to a local struct that holds the details for provisioning the Control Plane for the Cluster. properties: + class: + default: "" + description: |- + class denotes a type of control-plane node present in the cluster. + When used in ControlPlaneTopologyClass.Classes, this name MUST be unique + within the list and can be referenced from the Cluster topology. + syself new field. + maxLength: 1024 + type: string deletion: description: deletion contains configuration options for Machine deletion. @@ -3287,6 +3795,502 @@ spec: required: - templateRef type: object + controlPlaneClasses: + description: |- + controlPlaneClasses is a list of named control plane classes that can be referenced + from the Cluster topology. Each class defines a distinct control plane + configuration. 
The class name MUST be unique within this list. + When classes is defined, the Cluster topology can reference a specific + control plane class by name. + syself new field. + items: + description: ControlPlaneClass defines the class for the control + plane. + properties: + class: + default: "" + description: |- + class denotes a type of control-plane node present in the cluster. + When used in ControlPlaneTopologyClass.Classes, this name MUST be unique + within the list and can be referenced from the Cluster topology. + syself new field. + maxLength: 1024 + type: string + deletion: + description: deletion contains configuration options for Machine + deletion. + minProperties: 1 + properties: + nodeDeletionTimeoutSeconds: + description: |- + nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine + hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. + Defaults to 10 seconds. + NOTE: This value can be overridden while defining a Cluster.Topology. + format: int32 + minimum: 0 + type: integer + nodeDrainTimeoutSeconds: + description: |- + nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` + NOTE: This value can be overridden while defining a Cluster.Topology. + format: int32 + minimum: 0 + type: integer + nodeVolumeDetachTimeoutSeconds: + description: |- + nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes + to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. + NOTE: This value can be overridden while defining a Cluster.Topology. 
+ format: int32 + minimum: 0 + type: integer + type: object + healthCheck: + description: |- + healthCheck defines a MachineHealthCheck for this ControlPlaneClass. + This field is supported if and only if the ControlPlane provider template + referenced above is Machine based and supports setting replicas. + minProperties: 1 + properties: + checks: + description: |- + checks are the checks that are used to evaluate if a Machine is healthy. + + Independent of this configuration the MachineHealthCheck controller will always + flag Machines with `cluster.x-k8s.io/remediate-machine` annotation and + Machines with deleted Nodes as unhealthy. + + Furthermore, if checks.nodeStartupTimeoutSeconds is not set it + is defaulted to 10 minutes and evaluated accordingly. + minProperties: 1 + properties: + nodeStartupTimeoutSeconds: + description: |- + nodeStartupTimeoutSeconds allows to set the maximum time for MachineHealthCheck + to consider a Machine unhealthy if a corresponding Node isn't associated + through a `Spec.ProviderID` field. + + The duration set in this field is compared to the greatest of: + - Cluster's infrastructure ready condition timestamp (if and when available) + - Control Plane's initialized condition timestamp (if and when available) + - Machine's infrastructure ready condition timestamp (if and when available) + - Machine's metadata creation timestamp + + Defaults to 10 minutes. + If you wish to disable this feature, set the value explicitly to 0. + format: int32 + minimum: 0 + type: integer + unhealthyMachineConditions: + description: |- + unhealthyMachineConditions contains a list of the machine conditions that determine + whether a machine is considered unhealthy. The conditions are combined in a + logical OR, i.e. if any of the conditions is met, the machine is unhealthy. + items: + description: |- + UnhealthyMachineCondition represents a Machine condition type and value with a timeout + specified as a duration. 
When the named condition has been in the given + status for at least the timeout value, a machine is considered unhealthy. + properties: + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + timeoutSeconds: + description: |- + timeoutSeconds is the duration that a machine must be in a given status for, + after which the machine is considered unhealthy. + For example, with a value of "3600", the machine must match the status + for at least 1 hour before being considered unhealthy. + format: int32 + minimum: 0 + type: integer + type: + description: type of Machine condition + maxLength: 316 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + x-kubernetes-validations: + - message: 'type must not be one of: Ready, Available, + HealthCheckSucceeded, OwnerRemediated, ExternallyRemediated' + rule: '!(self in [''Ready'',''Available'',''HealthCheckSucceeded'',''OwnerRemediated'',''ExternallyRemediated''])' + required: + - status + - timeoutSeconds + - type + type: object + maxItems: 100 + minItems: 1 + type: array + x-kubernetes-list-type: atomic + unhealthyNodeConditions: + description: |- + unhealthyNodeConditions contains a list of conditions that determine + whether a node is considered unhealthy. The conditions are combined in a + logical OR, i.e. if any of the conditions is met, the node is unhealthy. + items: + description: |- + UnhealthyNodeCondition represents a Node condition type and value with a timeout + specified as a duration. When the named condition has been in the given + status for at least the timeout value, a node is considered unhealthy. + properties: + status: + description: status of the condition, one of True, + False, Unknown. 
+ minLength: 1 + type: string + timeoutSeconds: + description: |- + timeoutSeconds is the duration that a node must be in a given status for, + after which the node is considered unhealthy. + For example, with a value of "3600", the node must match the status + for at least 1 hour before being considered unhealthy. + format: int32 + minimum: 0 + type: integer + type: + description: type of Node condition + minLength: 1 + type: string + required: + - status + - timeoutSeconds + - type + type: object + maxItems: 100 + minItems: 1 + type: array + x-kubernetes-list-type: atomic + type: object + remediation: + description: |- + remediation configures if and how remediations are triggered if a Machine is unhealthy. + + If remediation or remediation.triggerIf is not set, + remediation will always be triggered for unhealthy Machines. + + If remediation or remediation.templateRef is not set, + the OwnerRemediated condition will be set on unhealthy Machines to trigger remediation via + the owner of the Machines, for example a MachineSet or a KubeadmControlPlane. + minProperties: 1 + properties: + templateRef: + description: |- + templateRef is a reference to a remediation template + provided by an infrastructure provider. + + This field is completely optional, when filled, the MachineHealthCheck controller + creates a new object from the template referenced and hands off remediation of the machine to + a controller that lives outside of Cluster API. + properties: + apiVersion: + description: |- + apiVersion of the remediation template. + apiVersion must be fully qualified domain name followed by / and a version. + NOTE: This field must be kept in sync with the APIVersion of the remediation template. + maxLength: 317 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[a-z]([-a-z0-9]*[a-z0-9])?$ + type: string + kind: + description: |- + kind of the remediation template. 
+ kind must consist of alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: |- + name of the remediation template. + name must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - apiVersion + - kind + - name + type: object + triggerIf: + description: |- + triggerIf configures if remediations are triggered. + If this field is not set, remediations are always triggered. + minProperties: 1 + properties: + unhealthyInRange: + description: |- + unhealthyInRange specifies that remediations are only triggered if the number of + unhealthy Machines is in the configured range. + Takes precedence over unhealthyLessThanOrEqualTo. + Eg. "[3-5]" - This means that remediation will be allowed only when: + (a) there are at least 3 unhealthy Machines (and) + (b) there are at most 5 unhealthy Machines + maxLength: 32 + minLength: 1 + pattern: ^\[[0-9]+-[0-9]+\]$ + type: string + unhealthyLessThanOrEqualTo: + anyOf: + - type: integer + - type: string + description: |- + unhealthyLessThanOrEqualTo specifies that remediations are only triggered if the number of + unhealthy Machines is less than or equal to the configured value. + unhealthyInRange takes precedence if set. + x-kubernetes-int-or-string: true + type: object + type: object + type: object + machineInfrastructure: + description: |- + machineInfrastructure defines the metadata and infrastructure information + for control plane machines. + + This field is supported if and only if the control plane provider template + referenced above is Machine based and supports setting replicas. 
+ properties: + templateRef: + description: templateRef is a required reference to the + template for a MachineInfrastructure of a ControlPlane. + properties: + apiVersion: + description: |- + apiVersion of the template. + apiVersion must be fully qualified domain name followed by / and a version. + maxLength: 317 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[a-z]([-a-z0-9]*[a-z0-9])?$ + type: string + kind: + description: |- + kind of the template. + kind must consist of alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: |- + name of the template. + name must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - apiVersion + - kind + - name + type: object + required: + - templateRef + type: object + metadata: + description: |- + metadata is the metadata applied to the ControlPlane and the Machines of the ControlPlane + if the ControlPlaneTemplate referenced is machine based. If not, it is applied only to the + ControlPlane. + At runtime this metadata is merged with the corresponding metadata from the topology. + + This field is supported if and only if the control plane provider template + referenced is Machine based. + minProperties: 1 + properties: + annotations: + additionalProperties: + type: string + description: |- + annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. 
+ More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + labels is a map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + type: object + naming: + description: naming allows changing the naming pattern used + when creating the control plane provider object. + minProperties: 1 + properties: + template: + description: |- + template defines the template to use for generating the name of the ControlPlane object. + If not defined, it will fallback to `{{ .cluster.name }}-{{ .random }}`. + If the templated string exceeds 63 characters, it will be trimmed to 58 characters and will + get concatenated with a random suffix of length 5. + The templating mechanism provides the following arguments: + * `.cluster.name`: The name of the cluster object. + * `.random`: A random alphanumeric string, without vowels, of length 5. + maxLength: 1024 + minLength: 1 + type: string + type: object + readinessGates: + description: |- + readinessGates specifies additional conditions to include when evaluating Machine Ready condition. + + This field can be used e.g. to instruct the machine controller to include in the computation for Machine's ready + computation a condition, managed by an external controllers, reporting the status of special software/hardware installed on the Machine. + + NOTE: If a Cluster defines a custom list of readinessGates for the control plane, + such list overrides readinessGates defined in this field. + NOTE: Specific control plane provider implementations might automatically extend the list of readinessGates; + e.g. the kubeadm control provider adds ReadinessGates for the APIServerPodHealthy, SchedulerPodHealthy conditions, etc. 
+ items: + description: MachineReadinessGate contains the type of a Machine + condition to be used as a readiness gate. + properties: + conditionType: + description: |- + conditionType refers to a condition with matching type in the Machine's condition list. + If the conditions doesn't exist, it will be treated as unknown. + Note: Both Cluster API conditions or conditions added by 3rd party controllers can be used as readiness gates. + maxLength: 316 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + polarity: + description: |- + polarity of the conditionType specified in this readinessGate. + Valid values are Positive, Negative and omitted. + When omitted, the default behaviour will be Positive. + A positive polarity means that the condition should report a true status under normal conditions. + A negative polarity means that the condition should report a false status under normal conditions. + enum: + - Positive + - Negative + type: string + required: + - conditionType + type: object + maxItems: 32 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - conditionType + x-kubernetes-list-type: map + taints: + description: |- + taints are the node taints that Cluster API will manage. + This list is not necessarily complete: other Kubernetes components may add or remove other taints from nodes, + e.g. the node controller might add the node.kubernetes.io/not-ready taint. + Only those taints defined in this list will be added or removed by core Cluster API controllers. + + There can be at most 64 taints. + A pod would have to tolerate all existing taints to run on the corresponding node. + + NOTE: This list is implemented as a "map" type, meaning that individual elements can be managed by different owners. + items: + description: MachineTaint defines a taint equivalent to corev1.Taint, + but additionally having a propagation field. 
+ properties: + effect: + description: effect is the effect for the taint. Valid + values are NoSchedule, PreferNoSchedule and NoExecute. + enum: + - NoSchedule + - PreferNoSchedule + - NoExecute + type: string + key: + description: |- + key is the taint key to be applied to a node. + Must be a valid qualified name of maximum size 63 characters + with an optional subdomain prefix of maximum size 253 characters, + separated by a `/`. + maxLength: 317 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/)?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ + type: string + propagation: + description: |- + propagation defines how this taint should be propagated to nodes. + Valid values are 'Always' and 'OnInitialization'. + Always: The taint will be continuously reconciled. If it is not set for a node, it will be added during reconciliation. + OnInitialization: The taint will be added during node initialization. If it gets removed from the node later on it will not get added again. + enum: + - Always + - OnInitialization + type: string + value: + description: |- + value is the taint value corresponding to the taint key. + It must be a valid label value of maximum size 63 characters. + maxLength: 63 + minLength: 1 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + required: + - effect + - key + - propagation + type: object + maxItems: 64 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - key + - effect + x-kubernetes-list-type: map + templateRef: + description: templateRef contains the reference to a provider-specific + control plane template. + properties: + apiVersion: + description: |- + apiVersion of the template. + apiVersion must be fully qualified domain name followed by / and a version. + maxLength: 317 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[a-z]([-a-z0-9]*[a-z0-9])?$ + type: string + kind: + description: |- + kind of the template. 
+ kind must consist of alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: |- + name of the template. + name must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - apiVersion + - kind + - name + type: object + required: + - templateRef + type: object + maxItems: 100 + type: array + x-kubernetes-list-map-keys: + - class + x-kubernetes-list-type: map infrastructure: description: |- infrastructure is a reference to a local struct that holds the details @@ -3476,6 +4480,23 @@ spec: Note: this will match the controlPlane and also the controlPlane machineInfrastructure (depending on the kind and apiVersion). type: boolean + controlPlaneClass: + description: |- + controlPlaneClass selects templates referenced in specific ControlPlaneClasses in + .spec.controlPlane.classes. + syself new field. + properties: + names: + description: names selects templates by class + names. + items: + maxLength: 256 + minLength: 1 + type: string + maxItems: 100 + type: array + x-kubernetes-list-type: atomic + type: object infrastructureCluster: description: infrastructureCluster selects templates referenced in .spec.infrastructure. diff --git a/config/crd/bases/cluster.x-k8s.io_clusters.yaml b/config/crd/bases/cluster.x-k8s.io_clusters.yaml index 9f0b51e70452..b81b380e0c98 100644 --- a/config/crd/bases/cluster.x-k8s.io_clusters.yaml +++ b/config/crd/bases/cluster.x-k8s.io_clusters.yaml @@ -280,6 +280,15 @@ spec: controlPlane: description: controlPlane describes the cluster control plane. properties: + class: + description: |- + class is the name of the ControlPlaneClass used to create the set of control plane nodes. 
+                      This should match one of the control plane classes defined in the ClusterClass object.
+                      If left empty `clusterclass.Spec.ControlPlane` is used.
+                      syself new field.
+                    maxLength: 256
+                    minLength: 1
+                    type: string
                   machineHealthCheck:
                     description: |-
                       machineHealthCheck allows to enable, disable and override
@@ -2081,6 +2090,15 @@ spec:
               description: controlPlane describes the cluster control plane.
               minProperties: 1
               properties:
+                class:
+                  description: |-
+                    class is the name of the ControlPlaneClass used to create the set of control plane nodes.
+                    This should match one of the control plane classes defined in the ClusterClass object.
+                    If left empty `clusterclass.Spec.ControlPlane` is used.
+                    syself new field.
+                  maxLength: 256
+                  minLength: 1
+                  type: string
                 deletion:
                   description: deletion contains configuration options
                     for Machine deletion.
diff --git a/exp/topology/desiredstate/desired_state.go b/exp/topology/desiredstate/desired_state.go
index 6d788c94cef7..000ee60c5ea2 100644
--- a/exp/topology/desiredstate/desired_state.go
+++ b/exp/topology/desiredstate/desired_state.go
@@ -274,7 +274,11 @@ func computeInfrastructureCluster(_ context.Context, s *scope.Scope) (*unstructu
 // that should be referenced by the ControlPlane object.
 func (g *generator) computeControlPlaneInfrastructureMachineTemplate(ctx context.Context, s *scope.Scope) (*unstructured.Unstructured, error) {
 	template := s.Blueprint.ControlPlane.InfrastructureMachineTemplate
-	templateClonedFromRef := s.Blueprint.ClusterClass.Spec.ControlPlane.MachineInfrastructure.TemplateRef.ToObjectReference(s.Blueprint.ClusterClass.Namespace)
+
+	// syself change
+	//
+	// no nil check for s.Blueprint.ControlPlaneClass because we already resolved the reference in internal/controllers/topology/cluster/blueprint.go's resolveControlPlaneClass function.
+ templateClonedFromRef := s.Blueprint.ControlPlaneClass.MachineInfrastructure.TemplateRef.ToObjectReference(s.Blueprint.ClusterClass.Namespace) cluster := s.Current.Cluster // Check if the current control plane object has a machineTemplate.infrastructureRef already defined. @@ -319,7 +323,9 @@ func (g *generator) computeControlPlaneInfrastructureMachineTemplate(ctx context // corresponding template defined in the blueprint. func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, infrastructureMachineTemplate *unstructured.Unstructured) (*unstructured.Unstructured, error) { template := s.Blueprint.ControlPlane.Template - templateClonedFromRef := s.Blueprint.ClusterClass.Spec.ControlPlane.TemplateRef.ToObjectReference(s.Blueprint.ClusterClass.Namespace) + + // syself change + templateClonedFromRef := s.Blueprint.ControlPlaneClass.TemplateRef.ToObjectReference(s.Blueprint.ClusterClass.Namespace) cluster := s.Current.Cluster currentRef := cluster.Spec.ControlPlaneRef @@ -327,7 +333,7 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf // We merge the labels and annotations from topology and ClusterClass. // We also add the cluster-name and the topology owned labels, so they are propagated down. 
topologyMetadata := s.Blueprint.Topology.ControlPlane.Metadata - clusterClassMetadata := s.Blueprint.ClusterClass.Spec.ControlPlane.Metadata + clusterClassMetadata := s.Blueprint.ControlPlaneClass.Metadata controlPlaneLabels := util.MergeMap(topologyMetadata.Labels, clusterClassMetadata.Labels) if controlPlaneLabels == nil { @@ -339,8 +345,8 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf controlPlaneAnnotations := util.MergeMap(topologyMetadata.Annotations, clusterClassMetadata.Annotations) nameTemplate := "{{ .cluster.name }}-{{ .random }}" - if s.Blueprint.ClusterClass.Spec.ControlPlane.Naming.Template != "" { - nameTemplate = s.Blueprint.ClusterClass.Spec.ControlPlane.Naming.Template + if s.Blueprint.ControlPlaneClass.Naming.Template != "" { + nameTemplate = s.Blueprint.ControlPlaneClass.Naming.Template } controlPlane, err := templateToObject(templateToInput{ @@ -447,8 +453,9 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf if err := contract.ControlPlane().MachineTemplate().ReadinessGates(contractVersion).Set(controlPlane, s.Blueprint.Topology.ControlPlane.ReadinessGates); err != nil { return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().MachineTemplate().ReadinessGates(contractVersion).Path()) } - } else if s.Blueprint.ClusterClass.Spec.ControlPlane.ReadinessGates != nil { - if err := contract.ControlPlane().MachineTemplate().ReadinessGates(contractVersion).Set(controlPlane, s.Blueprint.ClusterClass.Spec.ControlPlane.ReadinessGates); err != nil { + // syself change + } else if s.Blueprint.ControlPlaneClass.ReadinessGates != nil { + if err := contract.ControlPlane().MachineTemplate().ReadinessGates(contractVersion).Set(controlPlane, s.Blueprint.ControlPlaneClass.ReadinessGates); err != nil { return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", 
contract.ControlPlane().MachineTemplate().ReadinessGates(contractVersion).Path()) } } @@ -460,14 +467,14 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf if err := contract.ControlPlane().MachineTemplate().Taints().Set(controlPlane, s.Blueprint.Topology.ControlPlane.Taints); err != nil { return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().MachineTemplate().Taints().Path()) } - } else if s.Blueprint.ClusterClass.Spec.ControlPlane.Taints != nil { - if err := contract.ControlPlane().MachineTemplate().Taints().Set(controlPlane, s.Blueprint.ClusterClass.Spec.ControlPlane.Taints); err != nil { + } else if s.Blueprint.ControlPlaneClass.Taints != nil { + if err := contract.ControlPlane().MachineTemplate().Taints().Set(controlPlane, s.Blueprint.ControlPlaneClass.Taints); err != nil { return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().MachineTemplate().Taints().Path()) } } - // If it is required to manage the NodeDrainTimeoutSeconds for the control plane, set the corresponding field. - nodeDrainTimeout := s.Blueprint.ClusterClass.Spec.ControlPlane.Deletion.NodeDrainTimeoutSeconds + // If it is required to manage the NodeDrainTimeout for the control plane, set the corresponding field. + nodeDrainTimeout := s.Blueprint.ControlPlaneClass.Deletion.NodeDrainTimeoutSeconds if s.Blueprint.Topology.ControlPlane.Deletion.NodeDrainTimeoutSeconds != nil { nodeDrainTimeout = s.Blueprint.Topology.ControlPlane.Deletion.NodeDrainTimeoutSeconds } @@ -483,8 +490,8 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf } } - // If it is required to manage the NodeVolumeDetachTimeoutSeconds for the control plane, set the corresponding field. 
- nodeVolumeDetachTimeout := s.Blueprint.ClusterClass.Spec.ControlPlane.Deletion.NodeVolumeDetachTimeoutSeconds + // If it is required to manage the NodeVolumeDetachTimeout for the control plane, set the corresponding field. + nodeVolumeDetachTimeout := s.Blueprint.ControlPlaneClass.Deletion.NodeVolumeDetachTimeoutSeconds if s.Blueprint.Topology.ControlPlane.Deletion.NodeVolumeDetachTimeoutSeconds != nil { nodeVolumeDetachTimeout = s.Blueprint.Topology.ControlPlane.Deletion.NodeVolumeDetachTimeoutSeconds } @@ -500,8 +507,8 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf } } - // If it is required to manage the NodeDeletionTimeoutSeconds for the control plane, set the corresponding field. - nodeDeletionTimeout := s.Blueprint.ClusterClass.Spec.ControlPlane.Deletion.NodeDeletionTimeoutSeconds + // If it is required to manage the NodeDeletionTimeout for the control plane, set the corresponding field. + nodeDeletionTimeout := s.Blueprint.ControlPlaneClass.Deletion.NodeDeletionTimeoutSeconds if s.Blueprint.Topology.ControlPlane.Deletion.NodeDeletionTimeoutSeconds != nil { nodeDeletionTimeout = s.Blueprint.Topology.ControlPlane.Deletion.NodeDeletionTimeoutSeconds } diff --git a/exp/topology/scope/blueprint.go b/exp/topology/scope/blueprint.go index 5a6786519dc7..448176e786dd 100644 --- a/exp/topology/scope/blueprint.go +++ b/exp/topology/scope/blueprint.go @@ -31,6 +31,12 @@ type ClusterBlueprint struct { // ClusterClass holds the ClusterClass object referenced from Cluster.Spec.Topology. ClusterClass *clusterv1.ClusterClass + // syself change + // ControlPlaneClass holds the resolved ControlPlaneClass from the ClusterClass. + // This is the ControlPlaneClass selected based on the Cluster topology's control plane class field. + // If the topology does not specify a class, this is the inline ControlPlaneClass from ClusterClass.Spec.ControlPlane. 
+ ControlPlaneClass *clusterv1.ControlPlaneClass + // InfrastructureClusterTemplate holds the InfrastructureClusterTemplate referenced from ClusterClass. InfrastructureClusterTemplate *unstructured.Unstructured @@ -93,7 +99,12 @@ type MachinePoolBlueprint struct { // HasControlPlaneInfrastructureMachine checks whether the clusterClass mandates the controlPlane has infrastructureMachines. func (b *ClusterBlueprint) HasControlPlaneInfrastructureMachine() bool { - return b.ClusterClass.Spec.ControlPlane.MachineInfrastructure.TemplateRef.IsDefined() + // syself change. + if b.ControlPlaneClass == nil { + return b.ClusterClass.Spec.ControlPlane.MachineInfrastructure.TemplateRef.IsDefined() + } + + return b.ControlPlaneClass.MachineInfrastructure.TemplateRef.IsDefined() } // IsControlPlaneMachineHealthCheckEnabled returns true if a MachineHealthCheck should be created for the control plane. @@ -102,18 +113,37 @@ func (b *ClusterBlueprint) IsControlPlaneMachineHealthCheckEnabled() bool { if !b.HasControlPlaneInfrastructureMachine() { return false } - // If no MachineHealthCheck is defined in the ClusterClass or in the Cluster Topology then return false. - if !b.ClusterClass.Spec.ControlPlane.HealthCheck.IsDefined() && !b.Topology.ControlPlane.HealthCheck.IsDefined() { - return false + + // syself change. + // If no MachineHealthCheck is defined in the resolved ControlPlaneClass or in the Cluster Topology then return false. + cpClassMHC := b.controlPlaneClassMachineHealthCheck() + if !b.Topology.ControlPlane.HealthCheck.IsDefined() { + if cpClassMHC == nil { + return false + } + if !cpClassMHC.IsDefined() { + return false + } } + // If `enable` is not set then consider it as true. A MachineHealthCheck will be created from either ClusterClass or Cluster Topology. if b.Topology.ControlPlane.HealthCheck.Enabled == nil { return true } + // If `enable` is explicitly set, use the value. 
return *b.Topology.ControlPlane.HealthCheck.Enabled } +// controlPlaneClassMachineHealthCheck returns the MachineHealthCheck from the resolved ControlPlaneClass. +// syself change. +func (b *ClusterBlueprint) controlPlaneClassMachineHealthCheck() *clusterv1.ControlPlaneClassHealthCheck { + if b.ControlPlaneClass == nil { + return &b.ClusterClass.Spec.ControlPlane.HealthCheck + } + return &b.ControlPlaneClass.HealthCheck +} + // ControlPlaneMachineHealthCheckClass returns the MachineHealthCheckClass that should be used to create the MachineHealthCheck object. func (b *ClusterBlueprint) ControlPlaneMachineHealthCheckClass() (clusterv1.MachineHealthCheckChecks, clusterv1.MachineHealthCheckRemediation) { if b.Topology.ControlPlane.HealthCheck.IsDefined() { @@ -143,9 +173,19 @@ func (b *ClusterBlueprint) ControlPlaneMachineHealthCheckClass() (clusterv1.Mach } } -// HasControlPlaneMachineHealthCheck returns true if the ControlPlaneClass has both MachineInfrastructure and a MachineHealthCheck defined. +// HasControlPlaneMachineHealthCheck returns true if the resolved ControlPlaneClass has both MachineInfrastructure and a MachineHealthCheck defined. func (b *ClusterBlueprint) HasControlPlaneMachineHealthCheck() bool { - return b.HasControlPlaneInfrastructureMachine() && b.ClusterClass.Spec.ControlPlane.HealthCheck.IsDefined() + // syself change. + if !b.HasControlPlaneInfrastructureMachine() { + return false + } + + mhc := b.controlPlaneClassMachineHealthCheck() + if mhc == nil { + return false + } + + return mhc.IsDefined() } // IsMachineDeploymentMachineHealthCheckEnabled returns true if a MachineHealthCheck should be created for the MachineDeployment. 
diff --git a/internal/controllers/clusterclass/clusterclass_controller.go b/internal/controllers/clusterclass/clusterclass_controller.go index 98ff2932f5ac..a32bca714b3b 100644 --- a/internal/controllers/clusterclass/clusterclass_controller.go +++ b/internal/controllers/clusterclass/clusterclass_controller.go @@ -95,7 +95,6 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt ). WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue)). Complete(r) - if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") } @@ -213,6 +212,14 @@ func (r *Reconciler) reconcileExternalReferences(ctx context.Context, s *scope) clusterClass.Spec.ControlPlane.TemplateRef, } refs = append(refs, clusterClass.Spec.ControlPlane.MachineInfrastructure.TemplateRef) + + // Also collect refs from ControlPlaneClasses so that ownership and API version checks + // are applied to templates referenced by named control plane classes. + // syself change. 
+ for _, cpClass := range clusterClass.Spec.ControlPlaneClasses { + refs = append(refs, cpClass.TemplateRef, cpClass.MachineInfrastructure.TemplateRef) + } + for _, mdClass := range clusterClass.Spec.Workers.MachineDeployments { refs = append(refs, mdClass.Bootstrap.TemplateRef, mdClass.Infrastructure.TemplateRef) } @@ -377,7 +384,8 @@ func addNewStatusVariable(variable clusterv1.ClusterClassVariable, from string) DeprecatedV1Beta1Metadata: variable.DeprecatedV1Beta1Metadata, Schema: variable.Schema, }, - }} + }, + } } func addDefinitionToExistingStatusVariable(variable clusterv1.ClusterClassVariable, from string, existingVariable *clusterv1.ClusterClassStatusVariable) *clusterv1.ClusterClassStatusVariable { diff --git a/internal/controllers/topology/cluster/blueprint.go b/internal/controllers/topology/cluster/blueprint.go index e1f95b0e4738..1d0cc42fde78 100644 --- a/internal/controllers/topology/cluster/blueprint.go +++ b/internal/controllers/topology/cluster/blueprint.go @@ -24,6 +24,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/exp/topology/scope" + topologyselectors "sigs.k8s.io/cluster-api/internal/topology/selectors" ) // getBlueprint gets a ClusterBlueprint with the ClusterClass and the referenced templates to be used for a managed Cluster topology. @@ -44,16 +45,26 @@ func (r *Reconciler) getBlueprint(ctx context.Context, cluster *clusterv1.Cluste return nil, errors.Wrapf(err, "failed to get infrastructure cluster template for ClusterClass %s", klog.KObj(blueprint.ClusterClass)) } - // Get ClusterClass.spec.controlPlane. + // syself change + // Resolve the ControlPlaneClass to use. + // If the Cluster topology specifies a control plane class, look it up from ClusterClass.spec.controlPlane.classes. + // Otherwise, fall back to the inline ClusterClass.spec.controlPlane definition. 
+ controlPlaneClass, err := topologyselectors.ResolveControlPlaneClass(cluster, clusterClass) + if err != nil { + return nil, errors.Wrapf(err, "failed to resolve control plane class for ClusterClass %s", klog.KObj(blueprint.ClusterClass)) + } + + blueprint.ControlPlaneClass = controlPlaneClass + blueprint.ControlPlane = &scope.ControlPlaneBlueprint{} - blueprint.ControlPlane.Template, err = r.getReference(ctx, blueprint.ClusterClass.Spec.ControlPlane.TemplateRef.ToObjectReference(clusterClass.Namespace)) + blueprint.ControlPlane.Template, err = r.getReference(ctx, controlPlaneClass.TemplateRef.ToObjectReference(clusterClass.Namespace)) if err != nil { return nil, errors.Wrapf(err, "failed to get control plane template for ClusterClass %s", klog.KObj(blueprint.ClusterClass)) } // If the clusterClass mandates the controlPlane has infrastructureMachines, read it. if blueprint.HasControlPlaneInfrastructureMachine() { - blueprint.ControlPlane.InfrastructureMachineTemplate, err = r.getReference(ctx, blueprint.ClusterClass.Spec.ControlPlane.MachineInfrastructure.TemplateRef.ToObjectReference(clusterClass.Namespace)) + blueprint.ControlPlane.InfrastructureMachineTemplate, err = r.getReference(ctx, controlPlaneClass.MachineInfrastructure.TemplateRef.ToObjectReference(clusterClass.Namespace)) if err != nil { return nil, errors.Wrapf(err, "failed to get control plane's machine template for ClusterClass %s", klog.KObj(blueprint.ClusterClass)) } @@ -61,7 +72,7 @@ func (r *Reconciler) getBlueprint(ctx context.Context, cluster *clusterv1.Cluste // If the clusterClass defines a valid MachineHealthCheck (including a defined MachineInfrastructure) set the blueprint MachineHealthCheck. 
if blueprint.HasControlPlaneMachineHealthCheck() { - blueprint.ControlPlane.HealthCheck = blueprint.ClusterClass.Spec.ControlPlane.HealthCheck + blueprint.ControlPlane.HealthCheck = blueprint.ControlPlaneClass.HealthCheck } // Loop over the machine deployments classes in ClusterClass diff --git a/internal/controllers/topology/cluster/patches/engine.go b/internal/controllers/topology/cluster/patches/engine.go index 87509208312f..4f53da751001 100644 --- a/internal/controllers/topology/cluster/patches/engine.go +++ b/internal/controllers/topology/cluster/patches/engine.go @@ -176,8 +176,9 @@ func addVariablesForPatch(blueprint *scope.ClusterBlueprint, desired *scope.Clus } req.Variables = globalVariables + // syself change // Calculate the Control Plane variables. - controlPlaneVariables, err := variables.ControlPlane(&blueprint.Topology.ControlPlane, desired.ControlPlane.Object, desired.ControlPlane.InfrastructureMachineTemplate, patchVariableDefinitions) + controlPlaneVariables, err := variables.ControlPlane(&blueprint.Topology.ControlPlane, desired.ControlPlane.Object, desired.ControlPlane.InfrastructureMachineTemplate, blueprint.Topology.ControlPlane.Class, patchVariableDefinitions) if err != nil { return errors.Wrapf(err, "failed to calculate ControlPlane variables") } diff --git a/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go b/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go index 9fccf0a9ba19..8cd40a9ed5ea 100644 --- a/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go +++ b/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go @@ -161,6 +161,36 @@ func matchesSelector(req *runtimehooksv1.GeneratePatchesRequestItem, templateVar } } + // ControlPlaneClass selector targets templates belonging to a specific named control plane class + // from ClusterClass.spec.controlPlaneClasses. 
It reads the controlPlane.class variable set on the + // template to determine which class the template belongs to, then checks whether that class name matches + // one of the names listed in the selector (exact match, or wildcard prefix/suffix with "*"). + // This mirrors how MachineDeploymentClass selectors work for worker node templates. + if selector.MatchResources.ControlPlaneClass != nil { + if (req.HolderReference.Kind == "Cluster" && req.HolderReference.FieldPath == "spec.controlPlaneRef") || + req.HolderReference.FieldPath == strings.Join(contract.ControlPlane().MachineTemplate().InfrastructureRef().Path(), ".") { + // Read the builtin.controlPlane.class variable. + templateCPClassJSON, err := patchvariables.GetVariableValue(templateVariables, "builtin.controlPlane.class") + + // If the builtin variable could be read. + if err == nil { + // If templateCPClass matches one of the configured ControlPlaneClasses. + for _, cpClass := range selector.MatchResources.ControlPlaneClass.Names { + if cpClass == "*" || string(templateCPClassJSON.Raw) == strconv.Quote(cpClass) { + return true + } + unquoted, _ := strconv.Unquote(string(templateCPClassJSON.Raw)) + if strings.HasPrefix(cpClass, "*") && strings.HasSuffix(unquoted, strings.TrimPrefix(cpClass, "*")) { + return true + } + if strings.HasSuffix(cpClass, "*") && strings.HasPrefix(unquoted, strings.TrimSuffix(cpClass, "*")) { + return true + } + } + } + } + } + // Check if the request is for a BootstrapConfigTemplate or an InfrastructureMachineTemplate // of one of the configured MachineDeploymentClasses. 
if selector.MatchResources.MachineDeploymentClass != nil { diff --git a/internal/controllers/topology/cluster/patches/variables/variables.go b/internal/controllers/topology/cluster/patches/variables/variables.go index aa53b5a0b127..d54fb1a5bd62 100644 --- a/internal/controllers/topology/cluster/patches/variables/variables.go +++ b/internal/controllers/topology/cluster/patches/variables/variables.go @@ -103,7 +103,7 @@ func Global(clusterTopology clusterv1.Topology, cluster *clusterv1.Cluster, patc } // ControlPlane returns variables that apply to templates belonging to the ControlPlane. -func ControlPlane(cpTopology *clusterv1.ControlPlaneTopology, cp, cpInfrastructureMachineTemplate *unstructured.Unstructured, patchVariableDefinitions map[string]bool) ([]runtimehooksv1.Variable, error) { +func ControlPlane(cpTopology *clusterv1.ControlPlaneTopology, cp, cpInfrastructureMachineTemplate *unstructured.Unstructured, controlPlaneClass string, patchVariableDefinitions map[string]bool) ([]runtimehooksv1.Variable, error) { variables := []runtimehooksv1.Variable{} // Add variables overrides for the ControlPlane. @@ -114,10 +114,12 @@ func ControlPlane(cpTopology *clusterv1.ControlPlaneTopology, cp, cpInfrastructu } } + // syself change // Construct builtin variable. 
builtin := runtimehooksv1.Builtins{ ControlPlane: &runtimehooksv1.ControlPlaneBuiltins{ - Name: cp.GetName(), + Name: cp.GetName(), + Class: controlPlaneClass, }, } diff --git a/internal/controllers/topology/cluster/patches/variables/variables_test.go b/internal/controllers/topology/cluster/patches/variables/variables_test.go index 08a01a2e0192..38207adf6411 100644 --- a/internal/controllers/topology/cluster/patches/variables/variables_test.go +++ b/internal/controllers/topology/cluster/patches/variables/variables_test.go @@ -658,7 +658,7 @@ func TestControlPlane(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got, err := ControlPlane(tt.controlPlaneTopology, tt.controlPlane, tt.controlPlaneInfrastructureMachineTemplate, tt.variableDefinitionsForPatch) + got, err := ControlPlane(tt.controlPlaneTopology, tt.controlPlane, tt.controlPlaneInfrastructureMachineTemplate, "", tt.variableDefinitionsForPatch) g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(BeComparableTo(tt.want)) }) diff --git a/internal/controllers/topology/cluster/reconcile_state.go b/internal/controllers/topology/cluster/reconcile_state.go index 89e1790de7ef..c75caaffcd04 100644 --- a/internal/controllers/topology/cluster/reconcile_state.go +++ b/internal/controllers/topology/cluster/reconcile_state.go @@ -361,13 +361,36 @@ func (r *Reconciler) reconcileControlPlane(ctx context.Context, s *scope.Scope) } } - // Create or update the MachineInfrastructureTemplate of the control plane. + // syself change: determine if control plane class has changed. 
+ currentCPInfraMachineTemplate := s.Current.ControlPlane.InfrastructureMachineTemplate + cpInfraKindChanged := false + if s.Current.ControlPlane.InfrastructureMachineTemplate != nil && + s.Desired.ControlPlane.InfrastructureMachineTemplate != nil && + s.Current.ControlPlane.InfrastructureMachineTemplate.GetKind() != s.Desired.ControlPlane.InfrastructureMachineTemplate.GetKind() { + cpInfraKindChanged = true + log.Info( + "Control plane infrastructure kind changed", + "currentKind", s.Current.ControlPlane.InfrastructureMachineTemplate.GetKind(), + "desiredKind", s.Desired.ControlPlane.InfrastructureMachineTemplate.GetKind(), + ) + + // Setting currentCPInfraMachineTemplate as nil so that method reconcileReferencedTemplate do not + // try to patch the existing template. Otherwise patching will fail as we cannot patch the `Kind` + // of an object. + currentCPInfraMachineTemplate = nil + } + + compatibilityChecker := check.ObjectsAreCompatible + if cpInfraKindChanged { + compatibilityChecker = check.ObjectsAreInTheSameNamespace + } + createdInfrastructureTemplate, err := r.reconcileReferencedTemplate(ctx, reconcileReferencedTemplateInput{ cluster: s.Current.Cluster, ref: cpInfraRef, - current: s.Current.ControlPlane.InfrastructureMachineTemplate, + current: currentCPInfraMachineTemplate, desired: s.Desired.ControlPlane.InfrastructureMachineTemplate, - compatibilityChecker: check.ObjectsAreCompatible, + compatibilityChecker: compatibilityChecker, templateNamePrefix: topologynames.ControlPlaneInfrastructureMachineTemplateNamePrefix(s.Current.Cluster.Name), }) if err != nil { @@ -1194,10 +1217,15 @@ func (r *Reconciler) reconcileReferencedObject(ctx context.Context, in reconcile return true, nil } + // syself change + // ObjectsAreStrictlyCompatible is intentionally skipped here. + // When switching a cluster's control plane class (e.g. from hcloud VMs to bare-metal), + // the InfrastructureMachineTemplate kind changes, which would fail the strict compatibility check. 
+ // Skipping it allows rolling a control plane from one infrastructure type to another. // Check if the current and desired referenced object are compatible. - if allErrs := check.ObjectsAreStrictlyCompatible(in.current, in.desired); len(allErrs) > 0 { - return false, allErrs.ToAggregate() - } + // if allErrs := check.ObjectsAreStrictlyCompatible(in.current, in.desired); len(allErrs) > 0 { + // return false, allErrs.ToAggregate() + // } log = log.WithValues(in.current.GetKind(), klog.KObj(in.current)) ctx = ctrl.LoggerInto(ctx, log) diff --git a/internal/topology/check/compatibility.go b/internal/topology/check/compatibility.go index e6935f72523e..b05f837c7f4c 100644 --- a/internal/topology/check/compatibility.go +++ b/internal/topology/check/compatibility.go @@ -177,21 +177,30 @@ func ClusterClassesAreCompatible(current, desired *clusterv1.ClusterClass) field field.NewPath("spec", "infrastructure", "templateRef"))...) // Validate control plane changes desired a compatible way. - allErrs = append(allErrs, ClusterClassTemplateAreCompatible(current.Spec.ControlPlane.TemplateRef, desired.Spec.ControlPlane.TemplateRef, - field.NewPath("spec", "controlPlane", "templateRef"))...) - if desired.Spec.ControlPlane.MachineInfrastructure.TemplateRef.IsDefined() && !current.Spec.ControlPlane.MachineInfrastructure.TemplateRef.IsDefined() { - allErrs = append(allErrs, ClusterClassTemplateAreCompatible(current.Spec.ControlPlane.MachineInfrastructure.TemplateRef, desired.Spec.ControlPlane.MachineInfrastructure.TemplateRef, - field.NewPath("spec", "controlPlane", "machineInfrastructure", "templateRef"))...) 
- } - if !desired.Spec.ControlPlane.MachineInfrastructure.TemplateRef.IsDefined() && current.Spec.ControlPlane.MachineInfrastructure.TemplateRef.IsDefined() { - allErrs = append(allErrs, ClusterClassTemplateAreCompatible(current.Spec.ControlPlane.MachineInfrastructure.TemplateRef, desired.Spec.ControlPlane.MachineInfrastructure.TemplateRef, - field.NewPath("spec", "controlPlane", "machineInfrastructure", "templateRef"))...) - } - if desired.Spec.ControlPlane.MachineInfrastructure.TemplateRef.IsDefined() && current.Spec.ControlPlane.MachineInfrastructure.TemplateRef.IsDefined() { - allErrs = append(allErrs, ClusterClassTemplateAreCompatible(current.Spec.ControlPlane.MachineInfrastructure.TemplateRef, desired.Spec.ControlPlane.MachineInfrastructure.TemplateRef, - field.NewPath("spec", "controlPlane", "machineInfrastructure", "templateRef"))...) + // syself change. + if current.Spec.ControlPlane.TemplateRef.IsDefined() && desired.Spec.ControlPlane.TemplateRef.IsDefined() { + allErrs = append(allErrs, ClusterClassTemplateAreCompatible(current.Spec.ControlPlane.TemplateRef, desired.Spec.ControlPlane.TemplateRef, + field.NewPath("spec", "controlPlane"))...) + if desired.Spec.ControlPlane.MachineInfrastructure.TemplateRef.IsDefined() && current.Spec.ControlPlane.MachineInfrastructure.TemplateRef.IsDefined() { + allErrs = append(allErrs, ClusterClassTemplateAreCompatible(current.Spec.ControlPlane.MachineInfrastructure.TemplateRef, desired.Spec.ControlPlane.MachineInfrastructure.TemplateRef, + field.NewPath("spec", "controlPlane", "machineInfrastructure"))...) + } } + // Validate named control plane class changes in a compatible way. + // syself change.
+ for i, desiredClass := range desired.Spec.ControlPlaneClasses { + for _, currentClass := range current.Spec.ControlPlaneClasses { + if desiredClass.Class == currentClass.Class { + classPath := field.NewPath("spec", "controlPlaneClasses").Index(i) + allErrs = append(allErrs, ClusterClassTemplateAreCompatible(currentClass.TemplateRef, desiredClass.TemplateRef, classPath)...) + if desiredClass.MachineInfrastructure.TemplateRef.IsDefined() && currentClass.MachineInfrastructure.TemplateRef.IsDefined() { + allErrs = append(allErrs, ClusterClassTemplateAreCompatible(currentClass.MachineInfrastructure.TemplateRef, desiredClass.MachineInfrastructure.TemplateRef, + classPath.Child("machineInfrastructure"))...) + } + } + } + } // Validate changes to MachineDeployments. allErrs = append(allErrs, MachineDeploymentClassesAreCompatible(current, desired)...) @@ -223,6 +232,26 @@ func MachineDeploymentClassesAreCompatible(current, desired *clusterv1.ClusterCl return allErrs } +// ControlPlaneClassesAreUnique checks that no two ControlPlaneClasses in a ClusterClass share a name. +// syself change. +func ControlPlaneClassesAreUnique(clusterClass *clusterv1.ClusterClass) field.ErrorList { + var allErrs field.ErrorList + classes := sets.Set[string]{} + for i, class := range clusterClass.Spec.ControlPlaneClasses { + if classes.Has(class.Class) { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("spec", "controlPlaneClasses").Index(i).Child("class"), + class.Class, + fmt.Sprintf("ControlPlane class must be unique. ControlPlane with class %q is defined more than once", class.Class), + ), + ) + } + classes.Insert(class.Class) + } + return allErrs +} + // MachineDeploymentClassesAreUnique checks that no two MachineDeploymentClasses in a ClusterClass share a name.
func MachineDeploymentClassesAreUnique(clusterClass *clusterv1.ClusterClass) field.ErrorList { var allErrs field.ErrorList @@ -379,8 +408,33 @@ func MachinePoolTopologiesAreValidAndDefinedInClusterClass(desired *clusterv1.Cl return allErrs } -// ClusterClassTemplatesAreValid checks that each template reference in the ClusterClass is valid . -func ClusterClassTemplatesAreValid(clusterClass *clusterv1.ClusterClass) field.ErrorList { +// ControlPlaneTopologyClassIsDefinedInClusterClass checks that the control plane class referenced +// in the Cluster topology (if set) is defined in the ClusterClass. +// syself change. +func ControlPlaneTopologyClassIsDefinedInClusterClass(desired *clusterv1.Cluster, clusterClass *clusterv1.ClusterClass) field.ErrorList { + var allErrs field.ErrorList + cpClass := desired.Spec.Topology.ControlPlane.Class + if cpClass == "" { + return nil + } + for _, class := range clusterClass.Spec.ControlPlaneClasses { + if class.Class == cpClass { + return nil + } + } + allErrs = append(allErrs, + field.Invalid( + field.NewPath("spec", "topology", "controlPlane", "class"), + cpClass, + fmt.Sprintf("ControlPlaneClass with name %q does not exist in ClusterClass %q", + cpClass, clusterClass.Name), + ), + ) + return allErrs +} + +// ClusterClassReferencesAreValid checks that each template reference in the ClusterClass is valid. +func ClusterClassReferencesAreValid(clusterClass *clusterv1.ClusterClass) field.ErrorList { var allErrs field.ErrorList allErrs = append(allErrs, ClusterClassTemplateIsValid(clusterClass.Spec.Infrastructure.TemplateRef, field.NewPath("spec", "infrastructure"))...) @@ -389,6 +443,16 @@ func ClusterClassTemplatesAreValid(clusterClass *clusterv1.ClusterClass) field.E allErrs = append(allErrs, ClusterClassTemplateIsValid(clusterClass.Spec.ControlPlane.MachineInfrastructure.TemplateRef, field.NewPath("spec", "controlPlane", "machineInfrastructure"))...) } + // validate each named control plane class. + // syself change. 
+ for i, cpc := range clusterClass.Spec.ControlPlaneClasses { + classPath := field.NewPath("spec", "controlPlaneClasses").Index(i) + allErrs = append(allErrs, ClusterClassTemplateIsValid(cpc.TemplateRef, classPath)...) + if cpc.MachineInfrastructure.TemplateRef.IsDefined() { + allErrs = append(allErrs, ClusterClassTemplateIsValid(cpc.MachineInfrastructure.TemplateRef, classPath.Child("machineInfrastructure"))...) + } + } + for i := range clusterClass.Spec.Workers.MachineDeployments { mdc := clusterClass.Spec.Workers.MachineDeployments[i] allErrs = append(allErrs, ClusterClassTemplateIsValid(mdc.Bootstrap.TemplateRef, field.NewPath("spec", "workers", "machineDeployments").Index(i).Child("template", "bootstrap"))...) diff --git a/internal/topology/check/compatibility_test.go b/internal/topology/check/compatibility_test.go index e655be703951..8bf438bd811d 100644 --- a/internal/topology/check/compatibility_test.go +++ b/internal/topology/check/compatibility_test.go @@ -960,6 +960,74 @@ func TestMachinePoolClassesAreCompatible(t *testing.T) { } } +// syself change.
+func TestControlPlaneClassesAreUnique(t *testing.T) { + tests := []struct { + name string + clusterClass *clusterv1.ClusterClass + wantErr bool + }{ + { + name: "pass if ControlPlaneClasses are unique", + clusterClass: &clusterv1.ClusterClass{ + Spec: clusterv1.ClusterClassSpec{ + ControlPlaneClasses: []clusterv1.ControlPlaneClass{ + {Class: "aa"}, + {Class: "bb"}, + }, + }, + }, + wantErr: false, + }, + { + name: "pass if no ControlPlaneClasses are defined", + clusterClass: &clusterv1.ClusterClass{ + Spec: clusterv1.ClusterClassSpec{ + ControlPlane: clusterv1.ControlPlaneClass{}, + }, + }, + wantErr: false, + }, + { + name: "fail if ControlPlaneClasses are duplicated", + clusterClass: &clusterv1.ClusterClass{ + Spec: clusterv1.ClusterClassSpec{ + ControlPlaneClasses: []clusterv1.ControlPlaneClass{ + {Class: "aa"}, + {Class: "aa"}, + }, + }, + }, + wantErr: true, + }, + { + name: "fail if multiple ControlPlaneClasses are identical", + clusterClass: &clusterv1.ClusterClass{ + Spec: clusterv1.ClusterClassSpec{ + ControlPlaneClasses: []clusterv1.ControlPlaneClass{ + {Class: "aa"}, + {Class: "aa"}, + {Class: "aa"}, + {Class: "aa"}, + }, + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + allErrs := ControlPlaneClassesAreUnique(tt.clusterClass) + if tt.wantErr { + g.Expect(allErrs).ToNot(BeEmpty()) + return + } + g.Expect(allErrs).To(BeEmpty()) + }) + } +} + func TestMachineDeploymentClassesAreUnique(t *testing.T) { tests := []struct { name string @@ -1530,7 +1598,7 @@ func TestMachinePoolTopologiesAreUniqueAndDefinedInClusterClass(t *testing.T) { } } -func TestClusterClassTemplatesAreValid(t *testing.T) { +func TestClusterClassReferencesAreValid(t *testing.T) { ref := &clusterv1.ClusterClassTemplateReference{ APIVersion: "group.test.io/foo", Kind: "barTemplate", @@ -1665,7 +1733,7 @@ func TestClusterClassTemplatesAreValid(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) 
{ g := NewWithT(t) - allErrs := ClusterClassTemplatesAreValid(tt.clusterClass) + allErrs := ClusterClassReferencesAreValid(tt.clusterClass) if tt.wantErr { g.Expect(allErrs).ToNot(BeEmpty()) return diff --git a/internal/topology/selectors/selectors.go b/internal/topology/selectors/selectors.go index 510257a9391d..0f0ab1f0fbd9 100644 --- a/internal/topology/selectors/selectors.go +++ b/internal/topology/selectors/selectors.go @@ -18,6 +18,8 @@ limitations under the License. package selectors import ( + "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" @@ -27,11 +29,32 @@ import ( func ForMachineDeploymentMHC(md *clusterv1.MachineDeployment) *metav1.LabelSelector { // The selector returned here is the minimal common selector for all MachineSets belonging to a MachineDeployment. // It does not include any labels set in ClusterClass, Cluster Topology or elsewhere. - return &metav1.LabelSelector{MatchLabels: map[string]string{ - clusterv1.ClusterTopologyOwnedLabel: "", - clusterv1.ClusterTopologyMachineDeploymentNameLabel: md.Spec.Selector.MatchLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel], - }, + return &metav1.LabelSelector{ + MatchLabels: map[string]string{ + clusterv1.ClusterTopologyOwnedLabel: "", + clusterv1.ClusterTopologyMachineDeploymentNameLabel: md.Spec.Selector.MatchLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel], + }, + } +} + +// ResolveControlPlaneClass returns the ControlPlaneClass to use for the given Cluster. +// If the topology specifies a control plane class name, it is looked up from +// ClusterClass.spec.controlPlaneClasses. Otherwise the inline ClusterClass.spec.controlPlane is used. +// syself change. 
+func ResolveControlPlaneClass(cluster *clusterv1.Cluster, clusterClass *clusterv1.ClusterClass) (*clusterv1.ControlPlaneClass, error) { + if cluster.Spec.Topology.ControlPlane.Class == "" { + return &clusterClass.Spec.ControlPlane, nil + } + for i := range clusterClass.Spec.ControlPlaneClasses { + if clusterClass.Spec.ControlPlaneClasses[i].Class == cluster.Spec.Topology.ControlPlane.Class { + return &clusterClass.Spec.ControlPlaneClasses[i], nil + } } + return nil, fmt.Errorf("control plane class %q not found in ClusterClass %s/%s", + cluster.Spec.Topology.ControlPlane.Class, + clusterClass.Namespace, + clusterClass.Name, + ) } // ForControlPlaneMHC generates a selector for control plane MHCs. diff --git a/internal/webhooks/cluster.go b/internal/webhooks/cluster.go index a822ff0e809e..0635c48c2622 100644 --- a/internal/webhooks/cluster.go +++ b/internal/webhooks/cluster.go @@ -43,6 +43,7 @@ import ( "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/hooks" "sigs.k8s.io/cluster-api/internal/topology/check" + topologyselectors "sigs.k8s.io/cluster-api/internal/topology/selectors" "sigs.k8s.io/cluster-api/internal/topology/variables" "sigs.k8s.io/cluster-api/internal/util/taints" "sigs.k8s.io/cluster-api/util/conditions" @@ -77,8 +78,10 @@ type Cluster struct { decoder admission.Decoder } -var _ admission.Defaulter[*clusterv1.Cluster] = &Cluster{} -var _ admission.Validator[*clusterv1.Cluster] = &Cluster{} +var ( + _ admission.Defaulter[*clusterv1.Cluster] = &Cluster{} + _ admission.Validator[*clusterv1.Cluster] = &Cluster{} +) // Default satisfies the defaulting webhook interface. 
func (webhook *Cluster) Default(ctx context.Context, cluster *clusterv1.Cluster) error { @@ -665,10 +668,16 @@ func validateMachineHealthChecks(cluster *clusterv1.Cluster, clusterClass *clust fldPath := field.NewPath("spec", "topology", "controlPlane", "healthCheck") + cpClass, err := topologyselectors.ResolveControlPlaneClass(cluster, clusterClass) + if err != nil { + allErrs = append(allErrs, field.InternalError(fldPath, err)) + return allErrs + } + // Validate ControlPlane MachineHealthCheck if defined. if cluster.Spec.Topology.ControlPlane.HealthCheck.IsDefined() { - // Ensure ControlPlane does not define a MachineHealthCheck if the ClusterClass does not define MachineInfrastructure. - if !clusterClass.Spec.ControlPlane.MachineInfrastructure.TemplateRef.IsDefined() { + // Ensure ControlPlane does not define a MachineHealthCheck if the ControlPlaneClass does not define MachineInfrastructure. + if !cpClass.MachineInfrastructure.TemplateRef.IsDefined() { allErrs = append(allErrs, field.Forbidden( fldPath, "can be only set if spec.controlPlane.machineInfrastructure is set in ClusterClass", @@ -685,7 +694,7 @@ func validateMachineHealthChecks(cluster *clusterv1.Cluster, clusterClass *clust // Check if the machineHealthCheck is explicitly enabled in the ControlPlaneTopology. if cluster.Spec.Topology.ControlPlane.HealthCheck.Enabled != nil && *cluster.Spec.Topology.ControlPlane.HealthCheck.Enabled { // Ensure the MHC is defined in at least one of the ControlPlaneTopology of the Cluster or the ControlPlaneClass of the ClusterClass. 
- if !cluster.Spec.Topology.ControlPlane.HealthCheck.IsDefined() && !clusterClass.Spec.ControlPlane.HealthCheck.IsDefined() { + if !cluster.Spec.Topology.ControlPlane.HealthCheck.IsDefined() && !cpClass.HealthCheck.IsDefined() { allErrs = append(allErrs, field.Forbidden( fldPath.Child("enable"), fmt.Sprintf("cannot be set to %t as healthCheck definition is not available in the Cluster topology or the ClusterClass", *cluster.Spec.Topology.ControlPlane.HealthCheck.Enabled), @@ -737,6 +746,16 @@ func machineDeploymentClassOfName(clusterClass *clusterv1.ClusterClass, name str return nil } +// syself change. +func controlPlaneClassOfName(clusterClass *clusterv1.ClusterClass, name string) *clusterv1.ControlPlaneClass { + for _, cpClass := range clusterClass.Spec.ControlPlaneClasses { + if cpClass.Class == name { + return &cpClass + } + } + return nil +} + // validateCIDRBlocks ensures the passed CIDR is valid. func validateCIDRBlocks(fldPath *field.Path, cidrs []string) field.ErrorList { var allErrs field.ErrorList @@ -922,6 +941,8 @@ func ValidateClusterForClusterClass(cluster *clusterv1.Cluster, clusterClass *cl )) } } + // syself change + allErrs = append(allErrs, check.ControlPlaneTopologyClassIsDefinedInClusterClass(cluster, clusterClass)...) allErrs = append(allErrs, check.MachineDeploymentTopologiesAreValidAndDefinedInClusterClass(cluster, clusterClass)...) @@ -965,7 +986,7 @@ func (webhook *Cluster) validateClusterClassExistsAndIsReconciled(ctx context.Co // pollClusterClassForCluster will retry getting the ClusterClass referenced in the Cluster for two seconds. 
func (webhook *Cluster) pollClusterClassForCluster(ctx context.Context, cluster *clusterv1.Cluster) (_ *clusterv1.ClusterClass, clusterClassNotReconciled, clusterClassNotFound bool, _ error) { - var errClusterClassNotReconciled = errors.New("ClusterClass is not successfully reconciled") + errClusterClassNotReconciled := errors.New("ClusterClass is not successfully reconciled") clusterClass := &clusterv1.ClusterClass{} var clusterClassPollErr error diff --git a/internal/webhooks/clusterclass.go b/internal/webhooks/clusterclass.go index 197085ccafd8..ddd36b352f11 100644 --- a/internal/webhooks/clusterclass.go +++ b/internal/webhooks/clusterclass.go @@ -97,7 +97,7 @@ func (webhook *ClusterClass) validate(ctx context.Context, oldClusterClass, newC var allErrs field.ErrorList // Ensure all template references are valid. - allErrs = append(allErrs, check.ClusterClassTemplatesAreValid(newClusterClass)...) + allErrs = append(allErrs, check.ClusterClassReferencesAreValid(newClusterClass)...) // Ensure all MachineDeployment classes are unique. allErrs = append(allErrs, check.MachineDeploymentClassesAreUnique(newClusterClass)...) @@ -200,6 +200,43 @@ func validateUpdatesToMachineHealthCheckClasses(clusters []clusterv1.Cluster, ol } } + // syself change + // For each ControlPlaneClass check if the MachineHealthCheck definition is dropped. + for _, newCPClass := range newClusterClass.Spec.ControlPlaneClasses { + oldCPClass := controlPlaneClassOfName(oldClusterClass, newCPClass.Class) + if oldCPClass == nil { + // New ControlPlaneClass. Nothing to validate. + continue + } + + // If the MachineHealthCheck was dropped then check that no cluster is using it. 
+ if oldCPClass.HealthCheck.IsDefined() && !newCPClass.HealthCheck.IsDefined() { + clustersUsingMHC := []string{} + + for _, cluster := range clusters { + if cluster.Spec.Topology.ControlPlane.Class != newCPClass.Class { + continue + } + + if cluster.Spec.Topology.ControlPlane.HealthCheck.Enabled != nil && + *cluster.Spec.Topology.ControlPlane.HealthCheck.Enabled && + !cluster.Spec.Topology.ControlPlane.HealthCheck.IsDefined() { + clustersUsingMHC = append(clustersUsingMHC, cluster.Name) + } + } + + if len(clustersUsingMHC) != 0 { + allErrs = append(allErrs, field.Forbidden( + field.NewPath("spec", "controlPlaneClasses").Key(newCPClass.Class).Child("machineHealthCheck"), + fmt.Sprintf( + "MachineHealthCheck cannot be deleted because it is used by Cluster(s) %q", + strings.Join(clustersUsingMHC, ","), + ), + )) + } + } + } + // For each MachineDeploymentClass check if the MachineHealthCheck definition is dropped. for _, newMdClass := range newClusterClass.Spec.Workers.MachineDeployments { oldMdClass := machineDeploymentClassOfName(oldClusterClass, newMdClass.Class) @@ -456,12 +493,34 @@ func validateNamingStrategies(clusterClass *clusterv1.ClusterClass) field.ErrorL } } - for _, md := range clusterClass.Spec.Workers.MachineDeployments { + // syself change + // Validate naming strategies for each control plane class + for i, cp := range clusterClass.Spec.ControlPlaneClasses { + if cp.Naming.Template == "" { + continue + } + name, err := topologynames.ControlPlaneNameGenerator(cp.Naming.Template, "cluster").GenerateName() + templateFldPath := field.NewPath("spec", "controlPlaneClasses").Index(i).Child("namingStrategy", "template") + if err != nil { + allErrs = append(allErrs, + field.Invalid( + templateFldPath, + cp.Naming.Template, + fmt.Sprintf("invalid ControlPlaneClass name template: %v", err), + )) + } else { + for _, err := range validation.IsDNS1123Subdomain(name) { + allErrs = append(allErrs, field.Invalid(templateFldPath, cp.Naming.Template, err)) + } + } + } 
+ + for i, md := range clusterClass.Spec.Workers.MachineDeployments { if md.Naming.Template == "" { continue } name, err := topologynames.MachineDeploymentNameGenerator(md.Naming.Template, "cluster", "mdtopology").GenerateName() - templateFldPath := field.NewPath("spec", "workers", "machineDeployments").Key(md.Class).Child("naming", "template") + templateFldPath := field.NewPath("spec", "workers", "machineDeployments").Index(i).Child("namingStrategy", "template") if err != nil { allErrs = append(allErrs, field.Invalid( @@ -505,6 +564,15 @@ func validateClusterClassMetadata(clusterClass *clusterv1.ClusterClass) field.Er for _, m := range clusterClass.Spec.Workers.MachineDeployments { allErrs = append(allErrs, m.Metadata.Validate(field.NewPath("spec", "workers", "machineDeployments").Key(m.Class).Child("template", "metadata"))...) } + + // syself change + // Validate metadata for each control plane class + for i, cp := range clusterClass.Spec.ControlPlaneClasses { + allErrs = append(allErrs, + cp.Metadata.Validate( + field.NewPath("spec", "controlPlaneClasses").Index(i).Child("metadata"))...) + } + for _, m := range clusterClass.Spec.Workers.MachinePools { allErrs = append(allErrs, m.Metadata.Validate(field.NewPath("spec", "workers", "machinePools").Key(m.Class).Child("template", "metadata"))...) } diff --git a/internal/webhooks/patch_validation.go b/internal/webhooks/patch_validation.go index a1d447bbb714..1b1933567f2d 100644 --- a/internal/webhooks/patch_validation.go +++ b/internal/webhooks/patch_validation.go @@ -167,6 +167,7 @@ func validateSelectors(selector clusterv1.PatchSelector, class *clusterv1.Cluste // Return an error if none of the possible selectors are enabled. 
if !ptr.Deref(selector.MatchResources.InfrastructureCluster, false) && !ptr.Deref(selector.MatchResources.ControlPlane, false) && + (selector.MatchResources.ControlPlaneClass == nil || len(selector.MatchResources.ControlPlaneClass.Names) == 0) && (selector.MatchResources.MachineDeploymentClass == nil || len(selector.MatchResources.MachineDeploymentClass.Names) == 0) && (selector.MatchResources.MachinePoolClass == nil || len(selector.MatchResources.MachinePoolClass.Names) == 0) { return append(allErrs, @@ -205,6 +206,47 @@ func validateSelectors(selector clusterv1.PatchSelector, class *clusterv1.Cluste } } + // Validate selectors for control plane classes + // syself change. + if selector.MatchResources.ControlPlaneClass != nil && len(selector.MatchResources.ControlPlaneClass.Names) > 0 { + for i, name := range selector.MatchResources.ControlPlaneClass.Names { + match := false + err := validateSelectorName(name, path, "controlPlaneClass", i) + if err != nil { + allErrs = append(allErrs, err) + break + } + for _, cp := range class.Spec.ControlPlaneClasses { + var matches bool + // "*" matches every control plane class (apply patch to all classes). + // "*suffix" matches any class whose name ends with "suffix". + // "prefix*" matches any class whose name starts with "prefix". 
+ if cp.Class == name || name == "*" { + matches = true + } else if strings.HasPrefix(name, "*") && strings.HasSuffix(cp.Class, strings.TrimPrefix(name, "*")) { + matches = true + } else if strings.HasSuffix(name, "*") && strings.HasPrefix(cp.Class, strings.TrimSuffix(name, "*")) { + matches = true + } + + if matches { + if selectorMatchTemplate(selector, cp.TemplateRef) || + selectorMatchTemplate(selector, cp.MachineInfrastructure.TemplateRef) { + match = true + break + } + } + } + if !match { + allErrs = append(allErrs, field.Invalid( + path.Child("matchResources", "controlPlaneClass", "names").Index(i), + name, + "selector is enabled but matches neither the controlPlane ref nor the controlPlane machineInfrastructure ref of a ControlPlane class", + )) + } + } + } + if selector.MatchResources.MachineDeploymentClass != nil && len(selector.MatchResources.MachineDeploymentClass.Names) > 0 { for i, name := range selector.MatchResources.MachineDeploymentClass.Names { match := false From be525ef67b596ca495e409006db2f25bbeedd173 Mon Sep 17 00:00:00 2001 From: Dhairya Arora Date: Thu, 30 Apr 2026 16:23:02 +0530 Subject: [PATCH 2/9] add development guide for our fork --- README.md | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 59 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 586597ac88f2..26d3d442f076 100644 --- a/README.md +++ b/README.md @@ -62,7 +62,65 @@ Participation in the Kubernetes community is governed by the [Kubernetes Code of [Good first issue]: https://github.com/kubernetes-sigs/cluster-api/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22 [Help wanted]: https://github.com/kubernetes-sigs/cluster-api/issues?utf8=%E2%9C%93&q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22+ -# Release/Development (Syself Fork) +# Development (Syself Fork) + +## Enabling Multiple Control Plane Classes on top of CAPI v1.13.1 + +This repository maintains a fork of Cluster API to support **multiple control plane 
classes**, a feature not available in upstream. + +Every time a new upstream release is adopted, the fork must be rebased and the feature reapplied. + +## Workflow: Updating the Fork to a New Cluster API Release + +Follow the steps below to update the fork to a new upstream version (example: `v1.13.1`): + +1. **Configure upstream (if not already configured)** + + ```sh + git remote add upstream https://github.com/kubernetes-sigs/cluster-api.git + ``` + +2. **Fetch latest tags from upstream** + + ```sh + git fetch upstream --tags + ``` + +3. **Create a tracking branch from the upstream release tag** + + ```sh + git checkout -b syself-1.13.1 v1.13.1 + ``` + + This branch represents a clean base aligned with the upstream release. + +4. **Push the tracking branch to origin** + + ```sh + git push origin syself-1.13.1 + ``` + +5. **Create a feature branch for applying Syself-specific changes** + + ```sh + git checkout -b 1-13-1-cp-classes + ``` + +6. **Reapply the multiple control plane classes feature** + + Cherry-pick the relevant commit(s) from the previous release branch: + + ```sh + git cherry-pick + ``` + + > [!NOTE] Resolve any conflicts carefully, especially around API changes between versions. + +7. **Create a pull request** + + Open a pull request from your feature branch to the tracking branch (`syself-1.13.1`). + +# Release (Syself Fork) ```console export RELEASE_TAG=v1.11.6-syself.XX && git tag -a $RELEASE_TAG -m $RELEASE_TAG && git push origin $RELEASE_TAG From dcbacae6aeedf05261aa17fe5796cba72192f9bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20G=C3=BCttler?= Date: Fri, 24 Apr 2026 09:41:11 +0200 Subject: [PATCH 3/9] create container images in CI. 
Fix container image push failure in CI - Changed REGISTRY from ghcr.io/syself/cluster-api-prod to ghcr.io/syself - This fixes the docker push failure by using the standard GHCR image naming format - Images will now be pushed as ghcr.io/syself/cluster-api-controller-amd64:v1.11.6-syself.1.2 --- .github/workflows/release.yaml | 108 ++++++++++++++++++++++++++++++++- Makefile | 6 +- 2 files changed, 112 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 89b8fc0091e6..5e672683d36e 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -4,14 +4,18 @@ on: push: branches: - main + tags: + - 'v*.*.*' paths: - 'CHANGELOG/*.md' permissions: - contents: write # Allow to push a tag and create a release branch. + contents: write # Allow to push a tag, create a release branch and publish a draft release. + packages: write jobs: push_release_tags: + if: github.ref == 'refs/heads/main' runs-on: ubuntu-latest steps: - name: Checkout code @@ -76,3 +80,105 @@ jobs: git push origin ${RELEASE_VERSION} git push origin test/${RELEASE_VERSION} echo "Created tags $RELEASE_VERSION and test/${RELEASE_VERSION}" + release: + name: create draft release + if: github.ref == 'refs/heads/main' + runs-on: ubuntu-latest + needs: push_release_tags + steps: + - name: Set env + run: echo "RELEASE_TAG=${RELEASE_TAG}" >> $GITHUB_ENV + env: + RELEASE_TAG: ${{needs.push_release_tags.outputs.release_tag}} + - name: checkout code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 + with: + fetch-depth: 0 + ref: ${{ env.RELEASE_TAG }} + - name: Calculate go version + run: echo "go_version=$(make go-version)" >> $GITHUB_ENV + - name: Set up Go + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # tag=v5.5.0 + with: + go-version: ${{ env.go_version }} + - name: generate release artifacts + run: | + make release + - name: get release notes + run: | + curl -L 
"https://raw.githubusercontent.com/${{ github.repository }}/main/CHANGELOG/${{ env.RELEASE_TAG }}.md" \ + -o "${{ env.RELEASE_TAG }}.md" + - name: Release + uses: softprops/action-gh-release@72f2c25fcb47643c292f7107632f7a47c1df5cd8 # tag=v2.3.2 + with: + draft: true + files: out/* + body_path: ${{ env.RELEASE_TAG }}.md + tag_name: ${{ env.RELEASE_TAG }} + push_release_images: + name: build and push ${{ matrix.arch }} images + if: startsWith(github.ref, 'refs/tags/v') + runs-on: ubuntu-latest + timeout-minutes: 90 + strategy: + fail-fast: false + matrix: + arch: [amd64, arm, arm64, ppc64le, s390x] + env: + REGISTRY: ghcr.io/syself + ALL_DOCKER_BUILD: core + steps: + - name: Checkout code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 + with: + fetch-depth: 0 + - name: Calculate go version + id: vars + run: echo "go_version=$(make go-version)" >> $GITHUB_OUTPUT + - name: Set up Go + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # tag=v5.5.0 + with: + go-version: ${{ steps.vars.outputs.go_version }} + - name: Build images + run: | + make REGISTRY="${REGISTRY}" TAG="${GITHUB_REF_NAME}" ARCH="${{ matrix.arch }}" ALL_DOCKER_BUILD="${ALL_DOCKER_BUILD}" docker-build + - name: Log in to ghcr.io + uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # tag=v4.1.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ github.token }} + - name: Push images + run: | + make REGISTRY="${REGISTRY}" TAG="${GITHUB_REF_NAME}" ARCH="${{ matrix.arch }}" ALL_DOCKER_BUILD="${ALL_DOCKER_BUILD}" docker-push + push_release_image_manifests: + name: push multi-arch manifests + if: startsWith(github.ref, 'refs/tags/v') + runs-on: ubuntu-latest + timeout-minutes: 30 + needs: push_release_images + env: + REGISTRY: ghcr.io/syself + ALL_DOCKER_BUILD: core + ALL_ARCH: amd64 arm arm64 ppc64le s390x + steps: + - name: Checkout code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 + with: 
+ fetch-depth: 0 + - name: Calculate go version + id: vars + run: echo "go_version=$(make go-version)" >> $GITHUB_OUTPUT + - name: Set up Go + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # tag=v5.5.0 + with: + go-version: ${{ steps.vars.outputs.go_version }} + - name: Log in to ghcr.io + uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # tag=v4.1.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ github.token }} + - name: Push multi-arch manifests + run: | + make REGISTRY="${REGISTRY}" TAG="${GITHUB_REF_NAME}" ALL_ARCH="${ALL_ARCH}" ALL_DOCKER_BUILD="${ALL_DOCKER_BUILD}" docker-push-manifests diff --git a/Makefile b/Makefile index 39b074c5ae0e..be4c6658e786 100644 --- a/Makefile +++ b/Makefile @@ -1241,7 +1241,11 @@ docker-image-verify: ## Verifies all built images to contain the correct binary .PHONY: docker-push-all docker-push-all: $(addprefix docker-push-,$(ALL_ARCH)) ## Push the docker images to be included in the release for all architectures + related multiarch manifests - $(MAKE) ALL_ARCH="$(ALL_ARCH)" $(addprefix docker-push-manifest-,$(SYSELF_RELEVANT_DOCKER_BUILD)) + $(MAKE) ALL_ARCH="$(ALL_ARCH)" docker-push-manifests + +.PHONY: docker-push-manifests +docker-push-manifests: ## Push only the related multiarch manifests for all docker images + $(MAKE) ALL_ARCH="$(ALL_ARCH)" $(addprefix docker-push-manifest-,$(ALL_DOCKER_BUILD)) docker-push-%: $(MAKE) ARCH=$* docker-push From 765c18d8ae9e7fd4a7f4ead4986866bee22d7fa3 Mon Sep 17 00:00:00 2001 From: Dhairya Arora Date: Mon, 4 May 2026 11:48:57 +0530 Subject: [PATCH 4/9] fix bug: wrong image in manifests --- .github/workflows/release.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 5e672683d36e..f553c614ee73 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -17,6 +17,8 @@ jobs: push_release_tags: if: github.ref == 
'refs/heads/main' runs-on: ubuntu-latest + outputs: + release_tag: ${{ steps.release-version.outputs.RELEASE_VERSION }} steps: - name: Checkout code uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 @@ -103,7 +105,7 @@ jobs: go-version: ${{ env.go_version }} - name: generate release artifacts run: | - make release + make RELEASE_TAG=${{ env.RELEASE_TAG }} release - name: get release notes run: | curl -L "https://raw.githubusercontent.com/${{ github.repository }}/main/CHANGELOG/${{ env.RELEASE_TAG }}.md" \ From c8fc26983808423dbfadb396914b5a7abdb364bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20G=C3=BCttler?= Date: Wed, 22 Apr 2026 13:04:05 +0200 Subject: [PATCH 5/9] Create GitHub release on tag push --- .github/workflows/release.yaml | 107 +---------------------------- .github/workflows/tag-release.yaml | 4 +- 2 files changed, 3 insertions(+), 108 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index f553c614ee73..32d900db63fc 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -10,15 +10,12 @@ on: - 'CHANGELOG/*.md' permissions: - contents: write # Allow to push a tag, create a release branch and publish a draft release. - packages: write + contents: write # Allow to push a tag and create a release branch. 
jobs: push_release_tags: if: github.ref == 'refs/heads/main' runs-on: ubuntu-latest - outputs: - release_tag: ${{ steps.release-version.outputs.RELEASE_VERSION }} steps: - name: Checkout code uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2 @@ -82,105 +79,3 @@ jobs: git push origin ${RELEASE_VERSION} git push origin test/${RELEASE_VERSION} echo "Created tags $RELEASE_VERSION and test/${RELEASE_VERSION}" - release: - name: create draft release - if: github.ref == 'refs/heads/main' - runs-on: ubuntu-latest - needs: push_release_tags - steps: - - name: Set env - run: echo "RELEASE_TAG=${RELEASE_TAG}" >> $GITHUB_ENV - env: - RELEASE_TAG: ${{needs.push_release_tags.outputs.release_tag}} - - name: checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 - with: - fetch-depth: 0 - ref: ${{ env.RELEASE_TAG }} - - name: Calculate go version - run: echo "go_version=$(make go-version)" >> $GITHUB_ENV - - name: Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # tag=v5.5.0 - with: - go-version: ${{ env.go_version }} - - name: generate release artifacts - run: | - make RELEASE_TAG=${{ env.RELEASE_TAG }} release - - name: get release notes - run: | - curl -L "https://raw.githubusercontent.com/${{ github.repository }}/main/CHANGELOG/${{ env.RELEASE_TAG }}.md" \ - -o "${{ env.RELEASE_TAG }}.md" - - name: Release - uses: softprops/action-gh-release@72f2c25fcb47643c292f7107632f7a47c1df5cd8 # tag=v2.3.2 - with: - draft: true - files: out/* - body_path: ${{ env.RELEASE_TAG }}.md - tag_name: ${{ env.RELEASE_TAG }} - push_release_images: - name: build and push ${{ matrix.arch }} images - if: startsWith(github.ref, 'refs/tags/v') - runs-on: ubuntu-latest - timeout-minutes: 90 - strategy: - fail-fast: false - matrix: - arch: [amd64, arm, arm64, ppc64le, s390x] - env: - REGISTRY: ghcr.io/syself - ALL_DOCKER_BUILD: core - steps: - - name: Checkout code - uses: 
actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 - with: - fetch-depth: 0 - - name: Calculate go version - id: vars - run: echo "go_version=$(make go-version)" >> $GITHUB_OUTPUT - - name: Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # tag=v5.5.0 - with: - go-version: ${{ steps.vars.outputs.go_version }} - - name: Build images - run: | - make REGISTRY="${REGISTRY}" TAG="${GITHUB_REF_NAME}" ARCH="${{ matrix.arch }}" ALL_DOCKER_BUILD="${ALL_DOCKER_BUILD}" docker-build - - name: Log in to ghcr.io - uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # tag=v4.1.0 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ github.token }} - - name: Push images - run: | - make REGISTRY="${REGISTRY}" TAG="${GITHUB_REF_NAME}" ARCH="${{ matrix.arch }}" ALL_DOCKER_BUILD="${ALL_DOCKER_BUILD}" docker-push - push_release_image_manifests: - name: push multi-arch manifests - if: startsWith(github.ref, 'refs/tags/v') - runs-on: ubuntu-latest - timeout-minutes: 30 - needs: push_release_images - env: - REGISTRY: ghcr.io/syself - ALL_DOCKER_BUILD: core - ALL_ARCH: amd64 arm arm64 ppc64le s390x - steps: - - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 - with: - fetch-depth: 0 - - name: Calculate go version - id: vars - run: echo "go_version=$(make go-version)" >> $GITHUB_OUTPUT - - name: Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # tag=v5.5.0 - with: - go-version: ${{ steps.vars.outputs.go_version }} - - name: Log in to ghcr.io - uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # tag=v4.1.0 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ github.token }} - - name: Push multi-arch manifests - run: | - make REGISTRY="${REGISTRY}" TAG="${GITHUB_REF_NAME}" ALL_ARCH="${ALL_ARCH}" ALL_DOCKER_BUILD="${ALL_DOCKER_BUILD}" docker-push-manifests diff --git 
a/.github/workflows/tag-release.yaml b/.github/workflows/tag-release.yaml index ed3452b48f08..b0ce58b843cf 100644 --- a/.github/workflows/tag-release.yaml +++ b/.github/workflows/tag-release.yaml @@ -21,14 +21,14 @@ jobs: - name: Calculate go version run: echo "go_version=$(make go-version)" >> $GITHUB_ENV - name: Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # tag=v5.5.0 + uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # tag=v5.3.0 with: go-version: ${{ env.go_version }} - name: generate release artifacts run: | make release - name: Release - uses: softprops/action-gh-release@72f2c25fcb47643c292f7107632f7a47c1df5cd8 # tag=v2.3.2 + uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # tag=v2.2.1 with: draft: true files: out/* From 4ef9dc9b519f8fc8416e98e3dd281a4dbba5a2b4 Mon Sep 17 00:00:00 2001 From: Dhairya Arora Date: Mon, 4 May 2026 12:19:05 +0530 Subject: [PATCH 6/9] fix: pass RELEASE_TAG explicitly in tag-release workflow Prevents git describe --abbrev=0 from picking up an older tag when multiple tags point to the same commit. 
Co-Authored-By: Claude Sonnet 4.6 --- .github/workflows/tag-release.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tag-release.yaml b/.github/workflows/tag-release.yaml index b0ce58b843cf..5a8e4d339379 100644 --- a/.github/workflows/tag-release.yaml +++ b/.github/workflows/tag-release.yaml @@ -26,7 +26,7 @@ jobs: go-version: ${{ env.go_version }} - name: generate release artifacts run: | - make release + make release RELEASE_TAG=${{ github.ref_name }} - name: Release uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # tag=v2.2.1 with: From 06820d3f79614be67f3d707f87a2e7f0aedc9192 Mon Sep 17 00:00:00 2001 From: Dhairya Arora Date: Mon, 4 May 2026 12:25:05 +0530 Subject: [PATCH 7/9] feat: build and push release images on tag push Adds push_release_images and push_release_image_manifests jobs to the tag-release workflow to build and push multi-arch container images to ghcr.io/syself on every version tag. Co-Authored-By: Claude Sonnet 4.6 --- .github/workflows/tag-release.yaml | 67 ++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/.github/workflows/tag-release.yaml b/.github/workflows/tag-release.yaml index 5a8e4d339379..c5a027f431ee 100644 --- a/.github/workflows/tag-release.yaml +++ b/.github/workflows/tag-release.yaml @@ -7,8 +7,75 @@ on: permissions: contents: write + packages: write jobs: + push_release_images: + name: build and push ${{ matrix.arch }} images + runs-on: ubuntu-latest + timeout-minutes: 90 + strategy: + fail-fast: false + matrix: + arch: [amd64, arm, arm64, ppc64le, s390x] + env: + REGISTRY: ghcr.io/syself + steps: + - name: Checkout code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 + with: + fetch-depth: 0 + - name: Calculate go version + id: vars + run: echo "go_version=$(make go-version)" >> $GITHUB_OUTPUT + - name: Set up Go + uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # tag=v5.3.0 + with: + 
go-version: ${{ steps.vars.outputs.go_version }} + - name: Build images + run: | + make REGISTRY="${REGISTRY}" TAG="${GITHUB_REF_NAME}" ARCH="${{ matrix.arch }}" docker-build + - name: Log in to ghcr.io + uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # tag=v4.1.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ github.token }} + - name: Push images + run: | + make REGISTRY="${REGISTRY}" TAG="${GITHUB_REF_NAME}" ARCH="${{ matrix.arch }}" docker-push + + push_release_image_manifests: + name: push multi-arch manifests + runs-on: ubuntu-latest + timeout-minutes: 30 + needs: push_release_images + env: + REGISTRY: ghcr.io/syself + ALL_ARCH: amd64 arm arm64 ppc64le s390x + ALL_DOCKER_BUILD: core + steps: + - name: Checkout code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 + with: + fetch-depth: 0 + - name: Calculate go version + id: vars + run: echo "go_version=$(make go-version)" >> $GITHUB_OUTPUT + - name: Set up Go + uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # tag=v5.3.0 + with: + go-version: ${{ steps.vars.outputs.go_version }} + - name: Log in to ghcr.io + uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # tag=v4.1.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ github.token }} + - name: Push multi-arch manifests + run: | + make REGISTRY="${REGISTRY}" TAG="${GITHUB_REF_NAME}" ALL_ARCH="${ALL_ARCH}" ALL_DOCKER_BUILD="${ALL_DOCKER_BUILD}" docker-push-manifests + release: name: create draft release runs-on: ubuntu-latest From f948959ee8bff5ccd2ba67f5e9e7a7022e54c672 Mon Sep 17 00:00:00 2001 From: Dhairya Arora Date: Tue, 5 May 2026 18:29:33 +0530 Subject: [PATCH 8/9] (bug): fix matchselector to match also the v1beta1 ref to controlplane's infra machine template --- .../topology/cluster/patches/inline/json_patch_generator.go | 1 + 1 file changed, 1 insertion(+) diff --git 
a/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go b/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go index 8cd40a9ed5ea..4a55e2264c2a 100644 --- a/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go +++ b/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go @@ -168,6 +168,7 @@ func matchesSelector(req *runtimehooksv1.GeneratePatchesRequestItem, templateVar // This mirrors how MachineDeploymentClass selectors work for worker node templates. if selector.MatchResources.ControlPlaneClass != nil { if (req.HolderReference.Kind == "Cluster" && req.HolderReference.FieldPath == "spec.controlPlaneRef") || + req.HolderReference.FieldPath == strings.Join(contract.ControlPlane().MachineTemplate().InfrastructureV1Beta1Ref().Path(), ".") || req.HolderReference.FieldPath == strings.Join(contract.ControlPlane().MachineTemplate().InfrastructureRef().Path(), ".") { // Read the builtin.controlPlane.class variable. templateCPClassJSON, err := patchvariables.GetVariableValue(templateVariables, "builtin.controlPlane.class") From 5f777099fb134ab83e9ac41d747b99ce2b738fa8 Mon Sep 17 00:00:00 2001 From: Dhairya Arora Date: Fri, 8 May 2026 15:30:04 +0530 Subject: [PATCH 9/9] implement feedback from claude --- api/core/v1beta2/clusterclass_types.go | 2 +- config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml | 4 ++-- internal/topology/check/compatibility.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/api/core/v1beta2/clusterclass_types.go b/api/core/v1beta2/clusterclass_types.go index f87b078f8adc..2660305ea9ed 100644 --- a/api/core/v1beta2/clusterclass_types.go +++ b/api/core/v1beta2/clusterclass_types.go @@ -194,7 +194,7 @@ type ControlPlaneClass struct { // syself new field. 
// +optional // +default="" - // +kubebuilder:validation:MaxLength=1024 + // +kubebuilder:validation:MaxLength=256 Class string `json:"class,omitempty"` //nolint:kubeapilinter // templateRef contains the reference to a provider-specific control plane template. diff --git a/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml b/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml index a8df4539f3df..9fbbf1aca20c 100644 --- a/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml +++ b/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml @@ -3323,7 +3323,7 @@ spec: When used in ControlPlaneTopologyClass.Classes, this name MUST be unique within the list and can be referenced from the Cluster topology. syself new field. - maxLength: 1024 + maxLength: 256 type: string deletion: description: deletion contains configuration options for Machine @@ -3814,7 +3814,7 @@ spec: When used in ControlPlaneTopologyClass.Classes, this name MUST be unique within the list and can be referenced from the Cluster topology. syself new field. - maxLength: 1024 + maxLength: 256 type: string deletion: description: deletion contains configuration options for Machine diff --git a/internal/topology/check/compatibility.go b/internal/topology/check/compatibility.go index b05f837c7f4c..02f93c31742d 100644 --- a/internal/topology/check/compatibility.go +++ b/internal/topology/check/compatibility.go @@ -241,7 +241,7 @@ func ControlPlaneClassesAreUnique(clusterClass *clusterv1.ClusterClass) field.Er if classes.Has(class.Class) { allErrs = append(allErrs, field.Invalid( - field.NewPath("spec", "controlplane", "classes").Index(i).Child("class"), + field.NewPath("spec", "controlplane", "controlPlaneClasses").Index(i), class.Class, fmt.Sprintf("ControlPlane class must be unique. ControlPlane with class %q is defined more than once", class.Class), ),