diff --git a/.github/workflows/pr-golangci-lint.yaml b/.github/workflows/pr-golangci-lint.yaml index 8a65e8c4a18c..e0ffe23ce75a 100644 --- a/.github/workflows/pr-golangci-lint.yaml +++ b/.github/workflows/pr-golangci-lint.yaml @@ -1,4 +1,4 @@ -name: PR CI +name: PR golangci-lint on: pull_request: @@ -34,51 +34,3 @@ jobs: working-directory: ${{matrix.working-directory}} - name: Lint API run: make lint-api - - unit-tests: - name: unit-tests (${{ matrix.name }}) - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - include: - - name: cluster-api - target: test-junit - - name: docker-infrastructure - target: test-docker-infrastructure-junit - - name: test-extension - target: test-test-extension-junit - - name: test-framework - target: test-framework-junit - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 - - name: Calculate go version - id: vars - run: echo "go_version=$(make go-version)" >> $GITHUB_OUTPUT - - name: Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # tag=v5.5.0 - with: - go-version: ${{ steps.vars.outputs.go_version }} - - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # tag=v4.2.3 - name: Restore go cache - with: - path: | - ~/.cache/go-build - ~/go/pkg/mod - hack/tools/bin - key: ${{ runner.os }}-go-${{ steps.vars.outputs.go_version }}-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go-${{ steps.vars.outputs.go_version }}- - ${{ runner.os }}-go- - - name: Create artifacts directory - run: mkdir -p _artifacts - - name: Run unit tests - run: make ${{ matrix.target }} - - name: Upload test artifacts - if: failure() - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # tag=v4.6.2 - with: - name: unit-tests-${{ matrix.name }} - path: _artifacts - if-no-files-found: ignore - retention-days: 7 diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index d1a0e5041b08..9a3dacfbae83 100644 --- 
a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -1,4 +1,4 @@ -name: Create Release +name: Push Release Tags on: push: @@ -8,13 +8,11 @@ on: - 'CHANGELOG/*.md' permissions: - contents: write # Allow to push a tag, create a release branch and publish a draft release. + contents: write # Allow to push a tag and create a release branch. jobs: push_release_tags: runs-on: ubuntu-latest - outputs: - release_tag: ${{ steps.release-version.outputs.release_version }} steps: - name: Checkout code uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 @@ -78,37 +76,3 @@ jobs: git push origin ${RELEASE_VERSION} git push origin test/${RELEASE_VERSION} echo "Created tags $RELEASE_VERSION and test/${RELEASE_VERSION}" - release: - name: create draft release - runs-on: ubuntu-latest - needs: push_release_tags - steps: - - name: Set env - run: echo "RELEASE_TAG=${RELEASE_TAG}" >> $GITHUB_ENV - env: - RELEASE_TAG: ${{needs.push_release_tags.outputs.release_tag}} - - name: checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 - with: - fetch-depth: 0 - ref: ${{ env.RELEASE_TAG }} - - name: Calculate go version - run: echo "go_version=$(make go-version)" >> $GITHUB_ENV - - name: Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # tag=v5.5.0 - with: - go-version: ${{ env.go_version }} - - name: generate release artifacts - run: | - make release - - name: get release notes - run: | - curl -L "https://raw.githubusercontent.com/${{ github.repository }}/main/CHANGELOG/${{ env.RELEASE_TAG }}.md" \ - -o "${{ env.RELEASE_TAG }}.md" - - name: Release - uses: softprops/action-gh-release@72f2c25fcb47643c292f7107632f7a47c1df5cd8 # tag=v2.3.2 - with: - draft: true - files: out/* - body_path: ${{ env.RELEASE_TAG }}.md - tag_name: ${{ env.RELEASE_TAG }} diff --git a/.github/workflows/tag-release.yaml b/.github/workflows/tag-release.yaml new file mode 100644 index 000000000000..ed3452b48f08 
--- /dev/null +++ b/.github/workflows/tag-release.yaml @@ -0,0 +1,36 @@ +name: Create Release + +on: + push: + tags: + - "v*" + +permissions: + contents: write + +jobs: + release: + name: create draft release + runs-on: ubuntu-latest + steps: + - name: checkout code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 + with: + fetch-depth: 0 + ref: ${{ github.ref_name }} + - name: Calculate go version + run: echo "go_version=$(make go-version)" >> $GITHUB_ENV + - name: Set up Go + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # tag=v5.5.0 + with: + go-version: ${{ env.go_version }} + - name: generate release artifacts + run: | + make release + - name: Release + uses: softprops/action-gh-release@72f2c25fcb47643c292f7107632f7a47c1df5cd8 # tag=v2.3.2 + with: + draft: true + files: out/* + name: ${{ github.ref_name }} + tag_name: ${{ github.ref_name }} diff --git a/Makefile b/Makefile index 3178843912ea..525e1c7d00a8 100644 --- a/Makefile +++ b/Makefile @@ -214,11 +214,14 @@ TILT_PREPARE_BIN := tilt-prepare TILT_PREPARE := $(abspath $(TOOLS_BIN_DIR)/$(TILT_PREPARE_BIN)) # Define Docker related variables. Releases should modify and double check these vars. 
-REGISTRY ?= gcr.io/$(shell gcloud config get-value project) +REGISTRY ?= gcr.io/xxxxxx + +# For string inside YAML files (in "out" directory) PROD_REGISTRY ?= registry.k8s.io/cluster-api +# For string inside YAML files (in "out" directory) STAGING_REGISTRY ?= gcr.io/k8s-staging-cluster-api -STAGING_BUCKET ?= k8s-staging-cluster-api +#STAGING_BUCKET ?= k8s-staging-cluster-api # core IMAGE_NAME ?= cluster-api-controller @@ -252,7 +255,7 @@ CAPI_KIND_CLUSTER_NAME ?= capi-test TAG ?= dev ARCH ?= $(shell go env GOARCH) -ALL_ARCH ?= amd64 arm arm64 ppc64le s390x +ALL_ARCH ?= amd64 # Allow overriding the imagePullPolicy PULL_POLICY ?= Always @@ -851,10 +854,11 @@ docker-build-%: # Choice of images to build/push ALL_DOCKER_BUILD ?= core kubeadm-bootstrap kubeadm-control-plane docker-infrastructure test-extension clusterctl +SYSELF_RELEVANT_DOCKER_BUILD ?= core .PHONY: docker-build docker-build: docker-pull-prerequisites ## Run docker-build-* targets for all the images - $(MAKE) ARCH=$(ARCH) $(addprefix docker-build-,$(ALL_DOCKER_BUILD)) + $(MAKE) ARCH=$(ARCH) $(addprefix docker-build-,$(SYSELF_RELEVANT_DOCKER_BUILD)) ALL_DOCKER_BUILD_E2E = core kubeadm-bootstrap kubeadm-control-plane docker-infrastructure test-extension @@ -1069,9 +1073,9 @@ $(RELEASE_NOTES_DIR): .PHONY: release release: clean-release ## Build and push container images using the latest git tag for the commit - @if [ -z "${RELEASE_TAG}" ]; then echo "RELEASE_TAG is not set"; exit 1; fi - @if ! [ -z "$$(git status --porcelain)" ]; then echo "Your local git repository contains uncommitted changes, use git clean before proceeding."; exit 1; fi - git checkout "${RELEASE_TAG}" + #@if [ -z "${RELEASE_TAG}" ]; then echo "RELEASE_TAG is not set"; exit 1; fi + #@if ! [ -z "$$(git status --porcelain)" ]; then echo "Your local git repository contains uncommitted changes, use git clean before proceeding."; exit 1; fi + #git checkout "${RELEASE_TAG}" # Build binaries first. 
GIT_VERSION=$(RELEASE_TAG) $(MAKE) release-binaries # Set the manifest images to the staging/production bucket and Builds the manifests to publish with a release. @@ -1147,11 +1151,11 @@ release-manifests-dev: $(RELEASE_DIR) $(KUSTOMIZE) ## Build the development mani .PHONY: release-binaries release-binaries: ## Build the binaries to publish with a release RELEASE_BINARY=clusterctl-linux-amd64 BUILD_PATH=./cmd/clusterctl GOOS=linux GOARCH=amd64 $(MAKE) release-binary - RELEASE_BINARY=clusterctl-linux-arm64 BUILD_PATH=./cmd/clusterctl GOOS=linux GOARCH=arm64 $(MAKE) release-binary - RELEASE_BINARY=clusterctl-darwin-amd64 BUILD_PATH=./cmd/clusterctl GOOS=darwin GOARCH=amd64 $(MAKE) release-binary - RELEASE_BINARY=clusterctl-darwin-arm64 BUILD_PATH=./cmd/clusterctl GOOS=darwin GOARCH=arm64 $(MAKE) release-binary - RELEASE_BINARY=clusterctl-windows-amd64.exe BUILD_PATH=./cmd/clusterctl GOOS=windows GOARCH=amd64 $(MAKE) release-binary - RELEASE_BINARY=clusterctl-linux-ppc64le BUILD_PATH=./cmd/clusterctl GOOS=linux GOARCH=ppc64le $(MAKE) release-binary +# RELEASE_BINARY=clusterctl-linux-arm64 BUILD_PATH=./cmd/clusterctl GOOS=linux GOARCH=arm64 $(MAKE) release-binary +# RELEASE_BINARY=clusterctl-darwin-amd64 BUILD_PATH=./cmd/clusterctl GOOS=darwin GOARCH=amd64 $(MAKE) release-binary +# RELEASE_BINARY=clusterctl-darwin-arm64 BUILD_PATH=./cmd/clusterctl GOOS=darwin GOARCH=arm64 $(MAKE) release-binary +# RELEASE_BINARY=clusterctl-windows-amd64.exe BUILD_PATH=./cmd/clusterctl GOOS=windows GOARCH=amd64 $(MAKE) release-binary +# RELEASE_BINARY=clusterctl-linux-ppc64le BUILD_PATH=./cmd/clusterctl GOOS=linux GOARCH=ppc64le $(MAKE) release-binary .PHONY: release-binary release-binary: $(RELEASE_DIR) @@ -1160,9 +1164,11 @@ release-binary: $(RELEASE_DIR) -e CGO_ENABLED=0 \ -e GOOS=$(GOOS) \ -e GOARCH=$(GOARCH) \ - -e GOCACHE=/tmp/ \ + -e GOCACHE=/go/build-cache/ \ --user $$(id -u):$$(id -g) \ -v "$$(pwd):/workspace$(DOCKER_VOL_OPTS)" \ + -v "$$(go env GOMODCACHE):/go/pkg/mod" \ + -v 
"$$(go env GOCACHE):/go/build-cache" \ -w /workspace \ golang:$(GO_VERSION) \ go build -a -trimpath -ldflags "$(LDFLAGS) -extldflags '-static'" \ @@ -1184,7 +1190,8 @@ release-staging: ## Build and push container images to the staging bucket $(MAKE) release-manifests-dev # Example manifest location: https://storage.googleapis.com/k8s-staging-cluster-api/components/main/core-components.yaml # Please note that these files are deleted after a certain period, at the time of this writing 60 days after file creation. - gsutil cp $(RELEASE_DIR)/* gs://$(STAGING_BUCKET)/components/$(RELEASE_ALIAS_TAG) + + ##gsutil cp $(RELEASE_DIR)/* gs://$(STAGING_BUCKET)/components/$(RELEASE_ALIAS_TAG) .PHONY: release-staging-nightly release-staging-nightly: ## Tag and push container images to the staging bucket. Example image tag: cluster-api-controller:nightly_main_20210121 @@ -1201,16 +1208,17 @@ release-staging-nightly: ## Tag and push container images to the staging bucket. $(MAKE) release-manifests-dev # Example manifest location: https://storage.googleapis.com/k8s-staging-cluster-api/components/nightly_main_20240425/core-components.yaml # Please note that these files are deleted after a certain period, at the time of this writing 60 days after file creation. 
- gsutil cp $(RELEASE_DIR)/* gs://$(STAGING_BUCKET)/components/$(NEW_RELEASE_ALIAS_TAG) + #gsutil cp $(RELEASE_DIR)/* gs://$(STAGING_BUCKET)/components/$(NEW_RELEASE_ALIAS_TAG) .PHONY: release-alias-tag release-alias-tag: ## Add the release alias tag to the last build tag - gcloud container images add-tag $(CONTROLLER_IMG):$(TAG) $(CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) - gcloud container images add-tag $(KUBEADM_BOOTSTRAP_CONTROLLER_IMG):$(TAG) $(KUBEADM_BOOTSTRAP_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) - gcloud container images add-tag $(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG):$(TAG) $(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) - gcloud container images add-tag $(CLUSTERCTL_IMG):$(TAG) $(CLUSTERCTL_IMG):$(RELEASE_ALIAS_TAG) - gcloud container images add-tag $(CAPD_CONTROLLER_IMG):$(TAG) $(CAPD_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) - gcloud container images add-tag $(TEST_EXTENSION_IMG):$(TAG) $(TEST_EXTENSION_IMG):$(RELEASE_ALIAS_TAG) + echo "Syself: skipping" +# gcloud container images add-tag $(CONTROLLER_IMG):$(TAG) $(CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) +# gcloud container images add-tag $(KUBEADM_BOOTSTRAP_CONTROLLER_IMG):$(TAG) $(KUBEADM_BOOTSTRAP_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) +# gcloud container images add-tag $(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG):$(TAG) $(KUBEADM_CONTROL_PLANE_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) +# gcloud container images add-tag $(CLUSTERCTL_IMG):$(TAG) $(CLUSTERCTL_IMG):$(RELEASE_ALIAS_TAG) +# gcloud container images add-tag $(CAPD_CONTROLLER_IMG):$(TAG) $(CAPD_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) +# gcloud container images add-tag $(TEST_EXTENSION_IMG):$(TAG) $(TEST_EXTENSION_IMG):$(RELEASE_ALIAS_TAG) .PHONY: release-notes-tool release-notes-tool: @@ -1246,13 +1254,13 @@ docker-image-verify: ## Verifies all built images to contain the correct binary .PHONY: docker-push-all docker-push-all: $(addprefix docker-push-,$(ALL_ARCH)) ## Push the docker images to be included in the release for all architectures + related 
multiarch manifests - $(MAKE) ALL_ARCH="$(ALL_ARCH)" $(addprefix docker-push-manifest-,$(ALL_DOCKER_BUILD)) + $(MAKE) ALL_ARCH="$(ALL_ARCH)" $(addprefix docker-push-manifest-,$(SYSELF_RELEVANT_DOCKER_BUILD)) docker-push-%: $(MAKE) ARCH=$* docker-push .PHONY: docker-push -docker-push: $(addprefix docker-push-,$(ALL_DOCKER_BUILD)) ## Push the docker images to be included in the release +docker-push: $(addprefix docker-push-,$(SYSELF_RELEVANT_DOCKER_BUILD)) ## Push the docker images to be included in the release .PHONY: docker-push-core docker-push-core: ## Push the core docker image diff --git a/README.md b/README.md index 2f87ecc9b480..586597ac88f2 100644 --- a/README.md +++ b/README.md @@ -62,4 +62,23 @@ Participation in the Kubernetes community is governed by the [Kubernetes Code of [Good first issue]: https://github.com/kubernetes-sigs/cluster-api/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22 [Help wanted]: https://github.com/kubernetes-sigs/cluster-api/issues?utf8=%E2%9C%93&q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22+ - +# Release/Development (Syself Fork) + +```console +export RELEASE_TAG=v1.11.6-syself.XX && git tag -a $RELEASE_TAG -m $RELEASE_TAG && git push origin $RELEASE_TAG +``` + +Then a GitHub Action starts and builds a draft release. + +You can get notified when the action is finished like this: + +```console +gh run watch -i 20 ; music +``` + +Then open Git repo `autopilot`. Use branch `main` for deploy to prod and branch `syself/oci` for +deploy to testing-cluster. + +Update the capi version. + +Follow the Autopilot release docs: [autopilot README](https://github.com/syself/autopilot/). 
diff --git a/api/core/v1beta1/cluster_types.go b/api/core/v1beta1/cluster_types.go index c6ff0853b180..64ad23f65beb 100644 --- a/api/core/v1beta1/cluster_types.go +++ b/api/core/v1beta1/cluster_types.go @@ -597,6 +597,15 @@ type ControlPlaneTopology struct { // +optional Metadata ObjectMeta `json:"metadata,omitempty"` + // class is the name of the ControlPlaneClass used to create the set of control plane nodes. + // This should match one of the control plane classes defined in the ClusterClass object. + // If left empty `clusterclass.Spec.ControlPlane` is used. + // syself new field. + // +optional + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + Class string `json:"class,omitempty"` + // replicas is the number of control plane nodes. // If the value is nil, the ControlPlane object is created without the number of Replicas // and it's assumed that the control plane controller does not implement support for this field. diff --git a/api/core/v1beta1/clusterclass_types.go b/api/core/v1beta1/clusterclass_types.go index 058507504fe0..905f4f6edb53 100644 --- a/api/core/v1beta1/clusterclass_types.go +++ b/api/core/v1beta1/clusterclass_types.go @@ -116,6 +116,18 @@ type ClusterClassSpec struct { // +optional ControlPlane ControlPlaneClass `json:"controlPlane,omitempty"` + // controlPlaneClasses is a list of named control plane classes that can be referenced + // from the Cluster topology. Each class defines a distinct control plane + // configuration. The class name MUST be unique within this list. + // When classes is defined, the Cluster topology can reference a specific + // control plane class by name. + // syself new field. + // +optional + // +listType=map + // +listMapKey=class + // +kubebuilder:validation:MaxItems=100 + ControlPlaneClasses []ControlPlaneClass `json:"controlPlaneClasses,omitempty"` + // workers describes the worker nodes for the cluster. 
// It is a collection of node types which can be used to create // the worker nodes of the cluster. @@ -148,6 +160,15 @@ type ControlPlaneClass struct { // +optional Metadata ObjectMeta `json:"metadata,omitempty"` + // class denotes a type of control-plane node present in the cluster. + // When used in ControlPlaneTopologyClass.Classes, this name MUST be unique + // within the list and can be referenced from the Cluster topology. + // syself new field. + // +optional + // +default="" + // +kubebuilder:validation:MaxLength=1024 + Class string `json:"class,omitempty"` //nolint:kubeapilinter + // LocalObjectTemplate contains the reference to the control plane provider. LocalObjectTemplate `json:",inline"` @@ -1013,6 +1034,12 @@ type PatchSelectorMatch struct { // +optional InfrastructureCluster bool `json:"infrastructureCluster,omitempty"` + // controlPlaneClass selects templates referenced in specific ControlPlaneClasses in + // .spec.controlPlane.classes. + // syself new field. + // +optional + ControlPlaneClass *PatchSelectorMatchControlPlaneClass `json:"controlPlaneClass,omitempty"` + // machineDeploymentClass selects templates referenced in specific MachineDeploymentClasses in // .spec.workers.machineDeployments. // +optional @@ -1024,8 +1051,30 @@ type PatchSelectorMatch struct { MachinePoolClass *PatchSelectorMatchMachinePoolClass `json:"machinePoolClass,omitempty"` } -// PatchSelectorMatchMachineDeploymentClass selects templates referenced -// in specific MachineDeploymentClasses in .spec.workers.machineDeployments. +// PatchSelectorMatchControlPlaneClass provides a way to target patch operations +// at templates that are associated with specific ControlPlane classes. In a +// ClusterClass definition, the .spec.controlPlane.classes field defines one or +// more named classes, each of which references infrastructure and bootstrap +// templates. 
This selector lets you narrow down which of those classes (and +// therefore which templates) a given patch should apply to, rather than +// applying the patch to all control plane templates indiscriminately. +// syself new type. +type PatchSelectorMatchControlPlaneClass struct { + // names selects templates by class names. + // +optional + // +kubebuilder:validation:MaxItems=100 + // +kubebuilder:validation:items:MinLength=1 + // +kubebuilder:validation:items:MaxLength=256 + Names []string `json:"names,omitempty"` +} + +// PatchSelectorMatchMachineDeploymentClass provides a way to target patch +// operations at templates associated with specific MachineDeployment classes. +// In a ClusterClass definition, .spec.workers.machineDeployments defines named +// classes that each reference infrastructure and bootstrap templates for worker +// nodes. This selector lets you scope a patch so it only affects the templates +// tied to particular MachineDeployment classes. +// syself change in comment. type PatchSelectorMatchMachineDeploymentClass struct { // names selects templates by class names. 
// +optional diff --git a/api/core/v1beta1/zz_generated.conversion.go b/api/core/v1beta1/zz_generated.conversion.go index eb54f254dd5a..e95d82a16e25 100644 --- a/api/core/v1beta1/zz_generated.conversion.go +++ b/api/core/v1beta1/zz_generated.conversion.go @@ -444,6 +444,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*PatchSelectorMatchControlPlaneClass)(nil), (*v1beta2.PatchSelectorMatchControlPlaneClass)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_PatchSelectorMatchControlPlaneClass_To_v1beta2_PatchSelectorMatchControlPlaneClass(a.(*PatchSelectorMatchControlPlaneClass), b.(*v1beta2.PatchSelectorMatchControlPlaneClass), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.PatchSelectorMatchControlPlaneClass)(nil), (*PatchSelectorMatchControlPlaneClass)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_PatchSelectorMatchControlPlaneClass_To_v1beta1_PatchSelectorMatchControlPlaneClass(a.(*v1beta2.PatchSelectorMatchControlPlaneClass), b.(*PatchSelectorMatchControlPlaneClass), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*PatchSelectorMatchMachineDeploymentClass)(nil), (*v1beta2.PatchSelectorMatchMachineDeploymentClass)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_PatchSelectorMatchMachineDeploymentClass_To_v1beta2_PatchSelectorMatchMachineDeploymentClass(a.(*PatchSelectorMatchMachineDeploymentClass), b.(*v1beta2.PatchSelectorMatchMachineDeploymentClass), scope) }); err != nil { @@ -1146,6 +1156,17 @@ func autoConvert_v1beta1_ClusterClassSpec_To_v1beta2_ClusterClassSpec(in *Cluste if err := Convert_v1beta1_ControlPlaneClass_To_v1beta2_ControlPlaneClass(&in.ControlPlane, &out.ControlPlane, s); err != nil { return err } + if in.ControlPlaneClasses != nil { + in, out := &in.ControlPlaneClasses, 
&out.ControlPlaneClasses + *out = make([]v1beta2.ControlPlaneClass, len(*in)) + for i := range *in { + if err := Convert_v1beta1_ControlPlaneClass_To_v1beta2_ControlPlaneClass(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.ControlPlaneClasses = nil + } if err := Convert_v1beta1_WorkersClass_To_v1beta2_WorkersClass(&in.Workers, &out.Workers, s); err != nil { return err } @@ -1182,6 +1203,17 @@ func autoConvert_v1beta2_ClusterClassSpec_To_v1beta1_ClusterClassSpec(in *v1beta if err := Convert_v1beta2_ControlPlaneClass_To_v1beta1_ControlPlaneClass(&in.ControlPlane, &out.ControlPlane, s); err != nil { return err } + if in.ControlPlaneClasses != nil { + in, out := &in.ControlPlaneClasses, &out.ControlPlaneClasses + *out = make([]ControlPlaneClass, len(*in)) + for i := range *in { + if err := Convert_v1beta2_ControlPlaneClass_To_v1beta1_ControlPlaneClass(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.ControlPlaneClasses = nil + } if err := Convert_v1beta2_WorkersClass_To_v1beta1_WorkersClass(&in.Workers, &out.Workers, s); err != nil { return err } @@ -1601,6 +1633,7 @@ func autoConvert_v1beta1_ControlPlaneClass_To_v1beta2_ControlPlaneClass(in *Cont if err := Convert_v1beta1_ObjectMeta_To_v1beta2_ObjectMeta(&in.Metadata, &out.Metadata, s); err != nil { return err } + out.Class = in.Class // WARNING: in.LocalObjectTemplate requires manual conversion: does not exist in peer-type // WARNING: in.MachineInfrastructure requires manual conversion: inconvertible types (*sigs.k8s.io/cluster-api/api/core/v1beta1.LocalObjectTemplate vs sigs.k8s.io/cluster-api/api/core/v1beta2.ControlPlaneClassMachineInfrastructureTemplate) // WARNING: in.MachineHealthCheck requires manual conversion: does not exist in peer-type @@ -1616,6 +1649,7 @@ func autoConvert_v1beta2_ControlPlaneClass_To_v1beta1_ControlPlaneClass(in *v1be if err := Convert_v1beta2_ObjectMeta_To_v1beta1_ObjectMeta(&in.Metadata, &out.Metadata, s); err != nil { return 
err } + out.Class = in.Class // WARNING: in.TemplateRef requires manual conversion: does not exist in peer-type // WARNING: in.MachineInfrastructure requires manual conversion: inconvertible types (sigs.k8s.io/cluster-api/api/core/v1beta2.ControlPlaneClassMachineInfrastructureTemplate vs *sigs.k8s.io/cluster-api/api/core/v1beta1.LocalObjectTemplate) // WARNING: in.HealthCheck requires manual conversion: does not exist in peer-type @@ -1629,6 +1663,7 @@ func autoConvert_v1beta1_ControlPlaneTopology_To_v1beta2_ControlPlaneTopology(in if err := Convert_v1beta1_ObjectMeta_To_v1beta2_ObjectMeta(&in.Metadata, &out.Metadata, s); err != nil { return err } + out.Class = in.Class out.Replicas = (*int32)(unsafe.Pointer(in.Replicas)) // WARNING: in.MachineHealthCheck requires manual conversion: does not exist in peer-type // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type @@ -1643,6 +1678,7 @@ func autoConvert_v1beta2_ControlPlaneTopology_To_v1beta1_ControlPlaneTopology(in if err := Convert_v1beta2_ObjectMeta_To_v1beta1_ObjectMeta(&in.Metadata, &out.Metadata, s); err != nil { return err } + out.Class = in.Class out.Replicas = (*int32)(unsafe.Pointer(in.Replicas)) // WARNING: in.HealthCheck requires manual conversion: does not exist in peer-type // WARNING: in.Deletion requires manual conversion: does not exist in peer-type @@ -3390,6 +3426,7 @@ func autoConvert_v1beta1_PatchSelectorMatch_To_v1beta2_PatchSelectorMatch(in *Pa if err := v1.Convert_bool_To_Pointer_bool(&in.InfrastructureCluster, &out.InfrastructureCluster, s); err != nil { return err } + out.ControlPlaneClass = (*v1beta2.PatchSelectorMatchControlPlaneClass)(unsafe.Pointer(in.ControlPlaneClass)) out.MachineDeploymentClass = (*v1beta2.PatchSelectorMatchMachineDeploymentClass)(unsafe.Pointer(in.MachineDeploymentClass)) out.MachinePoolClass = (*v1beta2.PatchSelectorMatchMachinePoolClass)(unsafe.Pointer(in.MachinePoolClass)) return nil @@ -3407,6 +3444,7 @@ func 
autoConvert_v1beta2_PatchSelectorMatch_To_v1beta1_PatchSelectorMatch(in *v1 if err := v1.Convert_Pointer_bool_To_bool(&in.InfrastructureCluster, &out.InfrastructureCluster, s); err != nil { return err } + out.ControlPlaneClass = (*PatchSelectorMatchControlPlaneClass)(unsafe.Pointer(in.ControlPlaneClass)) out.MachineDeploymentClass = (*PatchSelectorMatchMachineDeploymentClass)(unsafe.Pointer(in.MachineDeploymentClass)) out.MachinePoolClass = (*PatchSelectorMatchMachinePoolClass)(unsafe.Pointer(in.MachinePoolClass)) return nil @@ -3417,6 +3455,26 @@ func Convert_v1beta2_PatchSelectorMatch_To_v1beta1_PatchSelectorMatch(in *v1beta return autoConvert_v1beta2_PatchSelectorMatch_To_v1beta1_PatchSelectorMatch(in, out, s) } +func autoConvert_v1beta1_PatchSelectorMatchControlPlaneClass_To_v1beta2_PatchSelectorMatchControlPlaneClass(in *PatchSelectorMatchControlPlaneClass, out *v1beta2.PatchSelectorMatchControlPlaneClass, s conversion.Scope) error { + out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) + return nil +} + +// Convert_v1beta1_PatchSelectorMatchControlPlaneClass_To_v1beta2_PatchSelectorMatchControlPlaneClass is an autogenerated conversion function. +func Convert_v1beta1_PatchSelectorMatchControlPlaneClass_To_v1beta2_PatchSelectorMatchControlPlaneClass(in *PatchSelectorMatchControlPlaneClass, out *v1beta2.PatchSelectorMatchControlPlaneClass, s conversion.Scope) error { + return autoConvert_v1beta1_PatchSelectorMatchControlPlaneClass_To_v1beta2_PatchSelectorMatchControlPlaneClass(in, out, s) +} + +func autoConvert_v1beta2_PatchSelectorMatchControlPlaneClass_To_v1beta1_PatchSelectorMatchControlPlaneClass(in *v1beta2.PatchSelectorMatchControlPlaneClass, out *PatchSelectorMatchControlPlaneClass, s conversion.Scope) error { + out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) + return nil +} + +// Convert_v1beta2_PatchSelectorMatchControlPlaneClass_To_v1beta1_PatchSelectorMatchControlPlaneClass is an autogenerated conversion function. 
+func Convert_v1beta2_PatchSelectorMatchControlPlaneClass_To_v1beta1_PatchSelectorMatchControlPlaneClass(in *v1beta2.PatchSelectorMatchControlPlaneClass, out *PatchSelectorMatchControlPlaneClass, s conversion.Scope) error { + return autoConvert_v1beta2_PatchSelectorMatchControlPlaneClass_To_v1beta1_PatchSelectorMatchControlPlaneClass(in, out, s) +} + func autoConvert_v1beta1_PatchSelectorMatchMachineDeploymentClass_To_v1beta2_PatchSelectorMatchMachineDeploymentClass(in *PatchSelectorMatchMachineDeploymentClass, out *v1beta2.PatchSelectorMatchMachineDeploymentClass, s conversion.Scope) error { out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) return nil diff --git a/api/core/v1beta1/zz_generated.deepcopy.go b/api/core/v1beta1/zz_generated.deepcopy.go index a1b090669cb4..f981c0daa218 100644 --- a/api/core/v1beta1/zz_generated.deepcopy.go +++ b/api/core/v1beta1/zz_generated.deepcopy.go @@ -217,6 +217,13 @@ func (in *ClusterClassSpec) DeepCopyInto(out *ClusterClassSpec) { (*in).DeepCopyInto(*out) } in.ControlPlane.DeepCopyInto(&out.ControlPlane) + if in.ControlPlaneClasses != nil { + in, out := &in.ControlPlaneClasses, &out.ControlPlaneClasses + *out = make([]ControlPlaneClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } in.Workers.DeepCopyInto(&out.Workers) if in.Variables != nil { in, out := &in.Variables, &out.Variables @@ -2775,6 +2782,11 @@ func (in *PatchSelector) DeepCopy() *PatchSelector { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PatchSelectorMatch) DeepCopyInto(out *PatchSelectorMatch) { *out = *in + if in.ControlPlaneClass != nil { + in, out := &in.ControlPlaneClass, &out.ControlPlaneClass + *out = new(PatchSelectorMatchControlPlaneClass) + (*in).DeepCopyInto(*out) + } if in.MachineDeploymentClass != nil { in, out := &in.MachineDeploymentClass, &out.MachineDeploymentClass *out = new(PatchSelectorMatchMachineDeploymentClass) @@ -2797,6 +2809,26 @@ func (in *PatchSelectorMatch) DeepCopy() *PatchSelectorMatch { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchSelectorMatchControlPlaneClass) DeepCopyInto(out *PatchSelectorMatchControlPlaneClass) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchSelectorMatchControlPlaneClass. +func (in *PatchSelectorMatchControlPlaneClass) DeepCopy() *PatchSelectorMatchControlPlaneClass { + if in == nil { + return nil + } + out := new(PatchSelectorMatchControlPlaneClass) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PatchSelectorMatchMachineDeploymentClass) DeepCopyInto(out *PatchSelectorMatchMachineDeploymentClass) { *out = *in diff --git a/api/core/v1beta1/zz_generated.openapi.go b/api/core/v1beta1/zz_generated.openapi.go index 13a78b23719c..2380852ff2a6 100644 --- a/api/core/v1beta1/zz_generated.openapi.go +++ b/api/core/v1beta1/zz_generated.openapi.go @@ -116,6 +116,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "sigs.k8s.io/cluster-api/api/core/v1beta1.PatchDefinition": schema_cluster_api_api_core_v1beta1_PatchDefinition(ref), "sigs.k8s.io/cluster-api/api/core/v1beta1.PatchSelector": schema_cluster_api_api_core_v1beta1_PatchSelector(ref), "sigs.k8s.io/cluster-api/api/core/v1beta1.PatchSelectorMatch": schema_cluster_api_api_core_v1beta1_PatchSelectorMatch(ref), + "sigs.k8s.io/cluster-api/api/core/v1beta1.PatchSelectorMatchControlPlaneClass": schema_cluster_api_api_core_v1beta1_PatchSelectorMatchControlPlaneClass(ref), "sigs.k8s.io/cluster-api/api/core/v1beta1.PatchSelectorMatchMachineDeploymentClass": schema_cluster_api_api_core_v1beta1_PatchSelectorMatchMachineDeploymentClass(ref), "sigs.k8s.io/cluster-api/api/core/v1beta1.PatchSelectorMatchMachinePoolClass": schema_cluster_api_api_core_v1beta1_PatchSelectorMatchMachinePoolClass(ref), "sigs.k8s.io/cluster-api/api/core/v1beta1.RemediationStrategy": schema_cluster_api_api_core_v1beta1_RemediationStrategy(ref), @@ -475,6 +476,28 @@ func schema_cluster_api_api_core_v1beta1_ClusterClassSpec(ref common.ReferenceCa Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta1.ControlPlaneClass"), }, }, + "controlPlaneClasses": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "class", + }, + "x-kubernetes-list-type": "map", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "controlPlaneClasses is a list of named control plane classes that can be referenced from the Cluster topology. 
Each class defines a distinct control plane configuration. The class name MUST be unique within this list. When classes is defined, the Cluster topology can reference a specific control plane class by name. syself new field.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta1.ControlPlaneClass"), + }, + }, + }, + }, + }, "workers": { SchemaProps: spec.SchemaProps{ Description: "workers describes the worker nodes for the cluster. It is a collection of node types which can be used to create the worker nodes of the cluster.", @@ -1262,6 +1285,14 @@ func schema_cluster_api_api_core_v1beta1_ControlPlaneClass(ref common.ReferenceC Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta1.ObjectMeta"), }, }, + "class": { + SchemaProps: spec.SchemaProps{ + Description: "class denotes a type of control-plane node present in the cluster. When used in ControlPlaneTopologyClass.Classes, this name MUST be unique within the list and can be referenced from the Cluster topology. syself new field.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, "ref": { SchemaProps: spec.SchemaProps{ Description: "ref is a required reference to a custom resource offered by a provider.", @@ -1369,6 +1400,13 @@ func schema_cluster_api_api_core_v1beta1_ControlPlaneTopology(ref common.Referen Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta1.ObjectMeta"), }, }, + "class": { + SchemaProps: spec.SchemaProps{ + Description: "class is the name of the ControlPlaneClass used to create the set of control plane nodes. This should match one of the control plane classes defined in the ClusterClass object. If left empty `clusterclass.Spec.ControlPlane` is used. syself new field.", + Type: []string{"string"}, + Format: "", + }, + }, "replicas": { SchemaProps: spec.SchemaProps{ Description: "replicas is the number of control plane nodes. 
If the value is nil, the ControlPlane object is created without the number of Replicas and it's assumed that the control plane controller does not implement support for this field. When specified against a control plane provider that lacks support for this field, this value will be ignored.", @@ -4925,6 +4963,12 @@ func schema_cluster_api_api_core_v1beta1_PatchSelectorMatch(ref common.Reference Format: "", }, }, + "controlPlaneClass": { + SchemaProps: spec.SchemaProps{ + Description: "controlPlaneClass selects templates referenced in specific ControlPlaneClasses in .spec.controlPlane.classes. syself new field.", + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta1.PatchSelectorMatchControlPlaneClass"), + }, + }, "machineDeploymentClass": { SchemaProps: spec.SchemaProps{ Description: "machineDeploymentClass selects templates referenced in specific MachineDeploymentClasses in .spec.workers.machineDeployments.", @@ -4941,7 +4985,35 @@ func schema_cluster_api_api_core_v1beta1_PatchSelectorMatch(ref common.Reference }, }, Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/core/v1beta1.PatchSelectorMatchMachineDeploymentClass", "sigs.k8s.io/cluster-api/api/core/v1beta1.PatchSelectorMatchMachinePoolClass"}, + "sigs.k8s.io/cluster-api/api/core/v1beta1.PatchSelectorMatchControlPlaneClass", "sigs.k8s.io/cluster-api/api/core/v1beta1.PatchSelectorMatchMachineDeploymentClass", "sigs.k8s.io/cluster-api/api/core/v1beta1.PatchSelectorMatchMachinePoolClass"}, + } +} + +func schema_cluster_api_api_core_v1beta1_PatchSelectorMatchControlPlaneClass(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PatchSelectorMatchControlPlaneClass provides a way to target patch operations at templates that are associated with specific ControlPlane classes. 
In a ClusterClass definition, the .spec.controlPlane.classes field defines one or more named classes, each of which references infrastructure and bootstrap templates. This selector lets you narrow down which of those classes (and therefore which templates) a given patch should apply to, rather than applying the patch to all control plane templates indiscriminately. syself new type", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "names": { + SchemaProps: spec.SchemaProps{ + Description: "names selects templates by class names.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, } } @@ -4949,7 +5021,7 @@ func schema_cluster_api_api_core_v1beta1_PatchSelectorMatchMachineDeploymentClas return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PatchSelectorMatchMachineDeploymentClass selects templates referenced in specific MachineDeploymentClasses in .spec.workers.machineDeployments.", + Description: "PatchSelectorMatchMachineDeploymentClass provides a way to target patch operations at templates associated with specific MachineDeployment classes. In a ClusterClass definition, .spec.workers.machineDeployments defines named classes that each reference infrastructure and bootstrap templates for worker nodes. This selector lets you scope a patch so it only affects the templates tied to particular MachineDeployment classes. 
syself change in comment.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "names": { diff --git a/api/core/v1beta2/cluster_types.go b/api/core/v1beta2/cluster_types.go index af0969853155..12dfb0b73e36 100644 --- a/api/core/v1beta2/cluster_types.go +++ b/api/core/v1beta2/cluster_types.go @@ -607,6 +607,15 @@ type ControlPlaneTopology struct { // +optional Metadata ObjectMeta `json:"metadata,omitempty,omitzero"` + // class is the name of the ControlPlaneClass used to create the set of control plane nodes. + // This should match one of the control plane classes defined in the ClusterClass object. + // If left empty `clusterclass.Spec.ControlPlane` is used. + // syself new field. + // +optional + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + Class string `json:"class,omitempty"` + // replicas is the number of control plane nodes. // If the value is not set, the ControlPlane object is created without the number of Replicas // and it's assumed that the control plane controller does not implement support for this field. diff --git a/api/core/v1beta2/clusterclass_types.go b/api/core/v1beta2/clusterclass_types.go index 80d78f358553..36177fcd6c86 100644 --- a/api/core/v1beta2/clusterclass_types.go +++ b/api/core/v1beta2/clusterclass_types.go @@ -113,6 +113,18 @@ type ClusterClassSpec struct { // +required ControlPlane ControlPlaneClass `json:"controlPlane,omitempty,omitzero"` + // controlPlaneClasses is a list of named control plane classes that can be referenced + // from the Cluster topology. Each class defines a distinct control plane + // configuration. The class name MUST be unique within this list. + // When classes is defined, the Cluster topology can reference a specific + // control plane class by name. + // syself new field. 
+ // +optional + // +listType=map + // +listMapKey=class + // +kubebuilder:validation:MaxItems=100 + ControlPlaneClasses []ControlPlaneClass `json:"controlPlaneClasses,omitempty"` + // workers describes the worker nodes for the cluster. // It is a collection of node types which can be used to create // the worker nodes of the cluster. @@ -160,6 +172,15 @@ type ControlPlaneClass struct { // +optional Metadata ObjectMeta `json:"metadata,omitempty,omitzero"` + // class denotes a type of control-plane node present in the cluster. + // When used in ControlPlaneTopologyClass.Classes, this name MUST be unique + // within the list and can be referenced from the Cluster topology. + // syself new field. + // +optional + // +default="" + // +kubebuilder:validation:MaxLength=1024 + Class string `json:"class,omitempty"` //nolint:kubeapilinter + // templateRef contains the reference to a provider-specific control plane template. // +required TemplateRef ClusterClassTemplateReference `json:"templateRef,omitempty,omitzero"` @@ -1298,6 +1319,12 @@ type PatchSelectorMatch struct { // +optional InfrastructureCluster *bool `json:"infrastructureCluster,omitempty"` + // controlPlaneClass selects templates referenced in specific ControlPlaneClasses in + // .spec.controlPlane.classes. + // syself new field. + // +optional + ControlPlaneClass *PatchSelectorMatchControlPlaneClass `json:"controlPlaneClass,omitempty"` + // machineDeploymentClass selects templates referenced in specific MachineDeploymentClasses in // .spec.workers.machineDeployments. // +optional @@ -1309,8 +1336,31 @@ type PatchSelectorMatch struct { MachinePoolClass *PatchSelectorMatchMachinePoolClass `json:"machinePoolClass,omitempty"` } -// PatchSelectorMatchMachineDeploymentClass selects templates referenced -// in specific MachineDeploymentClasses in .spec.workers.machineDeployments. 
+// PatchSelectorMatchControlPlaneClass provides a way to target patch operations +// at templates that are associated with specific ControlPlane classes. In a +// ClusterClass definition, the .spec.controlPlane.classes field defines one or +// more named classes, each of which references infrastructure and bootstrap +// templates. This selector lets you narrow down which of those classes (and +// therefore which templates) a given patch should apply to, rather than +// applying the patch to all control plane templates indiscriminately. +// syself new type. +type PatchSelectorMatchControlPlaneClass struct { + // names selects templates by class names. + // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=100 + // +kubebuilder:validation:items:MinLength=1 + // +kubebuilder:validation:items:MaxLength=256 + Names []string `json:"names,omitempty"` +} + +// PatchSelectorMatchMachineDeploymentClass provides a way to target patch +// operations at templates associated with specific MachineDeployment classes. +// In a ClusterClass definition, .spec.workers.machineDeployments defines named +// classes that each reference infrastructure and bootstrap templates for worker +// nodes. This selector lets you scope a patch so it only affects the templates +// tied to particular MachineDeployment classes. +// syself change in comment. type PatchSelectorMatchMachineDeploymentClass struct { // names selects templates by class names. 
// +optional diff --git a/api/core/v1beta2/zz_generated.deepcopy.go b/api/core/v1beta2/zz_generated.deepcopy.go index 49d1f6655253..5739068a2b99 100644 --- a/api/core/v1beta2/zz_generated.deepcopy.go +++ b/api/core/v1beta2/zz_generated.deepcopy.go @@ -238,6 +238,13 @@ func (in *ClusterClassSpec) DeepCopyInto(out *ClusterClassSpec) { } out.Infrastructure = in.Infrastructure in.ControlPlane.DeepCopyInto(&out.ControlPlane) + if in.ControlPlaneClasses != nil { + in, out := &in.ControlPlaneClasses, &out.ControlPlaneClasses + *out = make([]ControlPlaneClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } in.Workers.DeepCopyInto(&out.Workers) if in.Variables != nil { in, out := &in.Variables, &out.Variables @@ -3577,6 +3584,11 @@ func (in *PatchSelectorMatch) DeepCopyInto(out *PatchSelectorMatch) { *out = new(bool) **out = **in } + if in.ControlPlaneClass != nil { + in, out := &in.ControlPlaneClass, &out.ControlPlaneClass + *out = new(PatchSelectorMatchControlPlaneClass) + (*in).DeepCopyInto(*out) + } if in.MachineDeploymentClass != nil { in, out := &in.MachineDeploymentClass, &out.MachineDeploymentClass *out = new(PatchSelectorMatchMachineDeploymentClass) @@ -3599,6 +3611,26 @@ func (in *PatchSelectorMatch) DeepCopy() *PatchSelectorMatch { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchSelectorMatchControlPlaneClass) DeepCopyInto(out *PatchSelectorMatchControlPlaneClass) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchSelectorMatchControlPlaneClass. 
+func (in *PatchSelectorMatchControlPlaneClass) DeepCopy() *PatchSelectorMatchControlPlaneClass { + if in == nil { + return nil + } + out := new(PatchSelectorMatchControlPlaneClass) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PatchSelectorMatchMachineDeploymentClass) DeepCopyInto(out *PatchSelectorMatchMachineDeploymentClass) { *out = *in diff --git a/api/core/v1beta2/zz_generated.openapi.go b/api/core/v1beta2/zz_generated.openapi.go index 6ada26c78cd5..836d7d20027c 100644 --- a/api/core/v1beta2/zz_generated.openapi.go +++ b/api/core/v1beta2/zz_generated.openapi.go @@ -168,6 +168,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchDefinition": schema_cluster_api_api_core_v1beta2_PatchDefinition(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelector": schema_cluster_api_api_core_v1beta2_PatchSelector(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatch": schema_cluster_api_api_core_v1beta2_PatchSelectorMatch(ref), + "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatchControlPlaneClass": schema_cluster_api_api_core_v1beta2_PatchSelectorMatchControlPlaneClass(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatchMachineDeploymentClass": schema_cluster_api_api_core_v1beta2_PatchSelectorMatchMachineDeploymentClass(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatchMachinePoolClass": schema_cluster_api_api_core_v1beta2_PatchSelectorMatchMachinePoolClass(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.Topology": schema_cluster_api_api_core_v1beta2_Topology(ref), @@ -572,6 +573,28 @@ func schema_cluster_api_api_core_v1beta2_ClusterClassSpec(ref common.ReferenceCa Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.ControlPlaneClass"), }, }, + "controlPlaneClasses": { + VendorExtensible: spec.VendorExtensible{ + 
Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "class", + }, + "x-kubernetes-list-type": "map", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "controlPlaneClasses is a list of named control plane classes that can be referenced from the Cluster topology. Each class defines a distinct control plane configuration. The class name MUST be unique within this list. When classes is defined, the Cluster topology can reference a specific control plane class by name. syself new field.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.ControlPlaneClass"), + }, + }, + }, + }, + }, "workers": { SchemaProps: spec.SchemaProps{ Description: "workers describes the worker nodes for the cluster. It is a collection of node types which can be used to create the worker nodes of the cluster.", @@ -1488,6 +1511,14 @@ func schema_cluster_api_api_core_v1beta2_ControlPlaneClass(ref common.ReferenceC Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.ObjectMeta"), }, }, + "class": { + SchemaProps: spec.SchemaProps{ + Description: "class denotes a type of control-plane node present in the cluster. When used in ControlPlaneTopologyClass.Classes, this name MUST be unique within the list and can be referenced from the Cluster topology. syself new field.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, "templateRef": { SchemaProps: spec.SchemaProps{ Description: "templateRef contains the reference to a provider-specific control plane template.", @@ -1772,6 +1803,13 @@ func schema_cluster_api_api_core_v1beta2_ControlPlaneTopology(ref common.Referen Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.ObjectMeta"), }, }, + "class": { + SchemaProps: spec.SchemaProps{ + Description: "class is the name of the ControlPlaneClass used to create the set of control plane nodes. 
This should match one of the control plane classes defined in the ClusterClass object. If left empty `clusterclass.Spec.ControlPlane` is used. syself new field.", + Type: []string{"string"}, + Format: "", + }, + }, "replicas": { SchemaProps: spec.SchemaProps{ Description: "replicas is the number of control plane nodes. If the value is not set, the ControlPlane object is created without the number of Replicas and it's assumed that the control plane controller does not implement support for this field. When specified against a control plane provider that lacks support for this field, this value will be ignored.", @@ -6366,6 +6404,12 @@ func schema_cluster_api_api_core_v1beta2_PatchSelectorMatch(ref common.Reference Format: "", }, }, + "controlPlaneClass": { + SchemaProps: spec.SchemaProps{ + Description: "controlPlaneClass selects templates referenced in specific ControlPlaneClasses in .spec.controlPlane.classes. syself new field.", + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatchControlPlaneClass"), + }, + }, "machineDeploymentClass": { SchemaProps: spec.SchemaProps{ Description: "machineDeploymentClass selects templates referenced in specific MachineDeploymentClasses in .spec.workers.machineDeployments.", @@ -6382,7 +6426,35 @@ func schema_cluster_api_api_core_v1beta2_PatchSelectorMatch(ref common.Reference }, }, Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatchMachineDeploymentClass", "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatchMachinePoolClass"}, + "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatchControlPlaneClass", "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatchMachineDeploymentClass", "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatchMachinePoolClass"}, + } +} + +func schema_cluster_api_api_core_v1beta2_PatchSelectorMatchControlPlaneClass(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + 
SchemaProps: spec.SchemaProps{ + Description: "PatchSelectorMatchControlPlaneClass provides a way to target patch operations at templates that are associated with specific ControlPlane classes. In a ClusterClass definition, the .spec.controlPlane.classes field defines one or more named classes, each of which references infrastructure and bootstrap templates. This selector lets you narrow down which of those classes (and therefore which templates) a given patch should apply to, rather than applying the patch to all control plane templates indiscriminately. syself new type.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "names": { + SchemaProps: spec.SchemaProps{ + Description: "names selects templates by class names.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, } } @@ -6390,7 +6462,7 @@ func schema_cluster_api_api_core_v1beta2_PatchSelectorMatchMachineDeploymentClas return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PatchSelectorMatchMachineDeploymentClass selects templates referenced in specific MachineDeploymentClasses in .spec.workers.machineDeployments.", + Description: "PatchSelectorMatchMachineDeploymentClass provides a way to target patch operations at templates associated with specific MachineDeployment classes. In a ClusterClass definition, .spec.workers.machineDeployments defines named classes that each reference infrastructure and bootstrap templates for worker nodes. This selector lets you scope a patch so it only affects the templates tied to particular MachineDeployment classes. 
syself change in comment.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "names": { diff --git a/api/runtime/hooks/v1alpha1/topologymutation_variable_types.go b/api/runtime/hooks/v1alpha1/topologymutation_variable_types.go index 2a9c9aa917bc..791c71ea3ad2 100644 --- a/api/runtime/hooks/v1alpha1/topologymutation_variable_types.go +++ b/api/runtime/hooks/v1alpha1/topologymutation_variable_types.go @@ -141,6 +141,11 @@ type ControlPlaneBuiltins struct { // +optional Name string `json:"name,omitempty"` + // class is the class name of the ControlPlane, + // to which the current template belongs to. + // +optional + Class string `json:"class,omitempty"` + // replicas is the value of the replicas field of the ControlPlane object. // +optional Replicas *int32 `json:"replicas,omitempty"` diff --git a/api/runtime/hooks/v1alpha1/zz_generated.openapi.go b/api/runtime/hooks/v1alpha1/zz_generated.openapi.go index e72d12416126..a7bb47f45e10 100644 --- a/api/runtime/hooks/v1alpha1/zz_generated.openapi.go +++ b/api/runtime/hooks/v1alpha1/zz_generated.openapi.go @@ -1061,6 +1061,13 @@ func schema_api_runtime_hooks_v1alpha1_ControlPlaneBuiltins(ref common.Reference Format: "", }, }, + "class": { + SchemaProps: spec.SchemaProps{ + Description: "class is the class name of the ControlPlane, to which the current template belongs to.", + Type: []string{"string"}, + Format: "", + }, + }, "replicas": { SchemaProps: spec.SchemaProps{ Description: "replicas is the value of the replicas field of the ControlPlane object.", diff --git a/cmd/clusterctl/client/cluster/objectgraph.go b/cmd/clusterctl/client/cluster/objectgraph.go index 83bd433c2a03..1bab8633a77c 100644 --- a/cmd/clusterctl/client/cluster/objectgraph.go +++ b/cmd/clusterctl/client/cluster/objectgraph.go @@ -43,9 +43,11 @@ import ( secretutil "sigs.k8s.io/cluster-api/util/secret" ) -const clusterTopologyNameKey = "cluster.spec.topology.class" -const clusterTopologyNamespaceKey = "cluster.spec.topology.classNamespace" 
-const clusterResourceSetBindingClusterNameKey = "clusterresourcesetbinding.spec.clustername" +const ( + clusterTopologyNameKey = "cluster.spec.topology.class" + clusterTopologyNamespaceKey = "cluster.spec.topology.classNamespace" + clusterResourceSetBindingClusterNameKey = "clusterresourcesetbinding.spec.clustername" +) type empty struct{} @@ -523,12 +525,29 @@ func (o *objectGraph) Discovery(ctx context.Context, namespace string) error { errs := []error{} _, err = o.fetchRef(ctx, discoveryBackoff, cc.Spec.Infrastructure.TemplateRef.ToObjectReference(cc.Namespace)) errs = append(errs, err) - _, err = o.fetchRef(ctx, discoveryBackoff, cc.Spec.ControlPlane.TemplateRef.ToObjectReference(cc.Namespace)) - errs = append(errs, err) + + // syself change. + // Fetch inline control plane refs (if defined). + if cc.Spec.ControlPlane.TemplateRef.IsDefined() { + _, err = o.fetchRef(ctx, discoveryBackoff, cc.Spec.ControlPlane.TemplateRef.ToObjectReference(cc.Namespace)) + errs = append(errs, err) + } _, err = o.fetchRef(ctx, discoveryBackoff, cc.Spec.ControlPlane.MachineInfrastructure.TemplateRef.ToObjectReference(cc.Namespace)) errs = append(errs, err) + // Fetch refs from named control plane classes. 
+ for _, cpClass := range cc.Spec.ControlPlaneClasses { + if cpClass.TemplateRef.IsDefined() { + _, err = o.fetchRef(ctx, discoveryBackoff, cpClass.TemplateRef.ToObjectReference(cc.Namespace)) + errs = append(errs, err) + } + if cpClass.MachineInfrastructure.TemplateRef.IsDefined() { + _, err = o.fetchRef(ctx, discoveryBackoff, cpClass.MachineInfrastructure.TemplateRef.ToObjectReference(cc.Namespace)) + errs = append(errs, err) + } + } + for _, mdClass := range cc.Spec.Workers.MachineDeployments { _, err = o.fetchRef(ctx, discoveryBackoff, mdClass.Infrastructure.TemplateRef.ToObjectReference(cc.Namespace)) errs = append(errs, err) diff --git a/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml b/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml index dd3ca76f608e..51f968cd2294 100644 --- a/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml +++ b/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml @@ -491,6 +491,14 @@ spec: controlPlane is a reference to a local struct that holds the details for provisioning the Control Plane for the Cluster. properties: + class: + default: "" + description: |- + class denotes a type of control-plane node present in the cluster. + When used in ControlPlaneTopologyClass.Classes, this name MUST be unique + within the list and can be referenced from the Cluster topology. + syself new field. + type: string machineHealthCheck: description: |- machineHealthCheck defines a MachineHealthCheck for this ControlPlaneClass. @@ -831,6 +839,371 @@ spec: required: - ref type: object + controlPlaneClasses: + description: |- + controlPlaneClasses is a list of named control plane classes that can be referenced + from the Cluster topology. Each class defines a distinct control plane + configuration. The class name MUST be unique within this list. + When classes is defined, the Cluster topology can reference a specific + control plane class by name. + syself new field. 
+ items: + description: ControlPlaneClass defines the class for the control + plane. + properties: + class: + default: "" + description: |- + class denotes a type of control-plane node present in the cluster. + When used in ControlPlaneTopologyClass.Classes, this name MUST be unique + within the list and can be referenced from the Cluster topology. + syself new field. + type: string + machineHealthCheck: + description: |- + machineHealthCheck defines a MachineHealthCheck for this ControlPlaneClass. + This field is supported if and only if the ControlPlane provider template + referenced above is Machine based and supports setting replicas. + properties: + maxUnhealthy: + anyOf: + - type: integer + - type: string + description: |- + maxUnhealthy specifies the maximum number of unhealthy machines allowed. + Any further remediation is only allowed if at most "maxUnhealthy" machines selected by + "selector" are not healthy. + x-kubernetes-int-or-string: true + nodeStartupTimeout: + description: |- + nodeStartupTimeout allows to set the maximum time for MachineHealthCheck + to consider a Machine unhealthy if a corresponding Node isn't associated + through a `Spec.ProviderID` field. + + The duration set in this field is compared to the greatest of: + - Cluster's infrastructure ready condition timestamp (if and when available) + - Control Plane's initialized condition timestamp (if and when available) + - Machine's infrastructure ready condition timestamp (if and when available) + - Machine's metadata creation timestamp + + Defaults to 10 minutes. + If you wish to disable this feature, set the value explicitly to 0. + type: string + remediationTemplate: + description: |- + remediationTemplate is a reference to a remediation template + provided by an infrastructure provider. 
+ + This field is completely optional, when filled, the MachineHealthCheck controller + creates a new object from the template referenced and hands off remediation of the machine to + a controller that lives outside of Cluster API. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + unhealthyConditions: + description: |- + unhealthyConditions contains a list of the conditions that determine + whether a node is considered unhealthy. The conditions are combined in a + logical OR, i.e. if any of the conditions is met, the node is unhealthy. + items: + description: |- + UnhealthyCondition represents a Node condition type and value with a timeout + specified as a duration. When the named condition has been in the given + status for at least the timeout value, a node is considered unhealthy. + properties: + status: + description: status of the condition, one of True, + False, Unknown. + minLength: 1 + type: string + timeout: + description: |- + timeout is the duration that a node must be in a given status for, + after which the node is considered unhealthy. + For example, with a value of "1h", the node must match the status + for at least 1 hour before being considered unhealthy. + type: string + type: + description: type of Node condition + minLength: 1 + type: string + required: + - status + - timeout + - type + type: object + maxItems: 100 + type: array + unhealthyRange: + description: |- + unhealthyRange specifies the range of unhealthy machines allowed. + Any further remediation is only allowed if the number of machines selected by "selector" as not healthy + is within the range of "unhealthyRange". Takes precedence over maxUnhealthy. + Eg. "[3-5]" - This means that remediation will be allowed only when: + (a) there are at least 3 unhealthy machines (and) + (b) there are at most 5 unhealthy machines + maxLength: 32 + minLength: 1 + pattern: ^\[[0-9]+-[0-9]+\]$ + type: string + type: object + machineInfrastructure: + description: |- + machineInfrastructure defines the metadata and infrastructure information + for control plane machines. 
+ + This field is supported if and only if the control plane provider template + referenced above is Machine based and supports setting replicas. + properties: + ref: + description: |- + ref is a required reference to a custom resource + offered by a provider. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + required: + - ref + type: object + metadata: + description: |- + metadata is the metadata applied to the ControlPlane and the Machines of the ControlPlane + if the ControlPlaneTemplate referenced is machine based. If not, it is applied only to the + ControlPlane. + At runtime this metadata is merged with the corresponding metadata from the topology. + + This field is supported if and only if the control plane provider template + referenced is Machine based. + properties: + annotations: + additionalProperties: + type: string + description: |- + annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + labels is a map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + type: object + namingStrategy: + description: namingStrategy allows changing the naming pattern + used when creating the control plane provider object. + properties: + template: + description: |- + template defines the template to use for generating the name of the ControlPlane object. + If not defined, it will fallback to `{{ .cluster.name }}-{{ .random }}`. + If the templated string exceeds 63 characters, it will be trimmed to 58 characters and will + get concatenated with a random suffix of length 5. + The templating mechanism provides the following arguments: + * `.cluster.name`: The name of the cluster object. 
+ * `.random`: A random alphanumeric string, without vowels, of length 5. + maxLength: 1024 + minLength: 1 + type: string + type: object + nodeDeletionTimeout: + description: |- + nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. + Defaults to 10 seconds. + NOTE: This value can be overridden while defining a Cluster.Topology. + type: string + nodeDrainTimeout: + description: |- + nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + NOTE: This value can be overridden while defining a Cluster.Topology. + type: string + nodeVolumeDetachTimeout: + description: |- + nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. + NOTE: This value can be overridden while defining a Cluster.Topology. + type: string + readinessGates: + description: |- + readinessGates specifies additional conditions to include when evaluating Machine Ready condition. + + This field can be used e.g. to instruct the machine controller to include in the computation for Machine's ready + computation a condition, managed by an external controllers, reporting the status of special software/hardware installed on the Machine. + + NOTE: This field is considered only for computing v1beta2 conditions. + NOTE: If a Cluster defines a custom list of readinessGates for the control plane, + such list overrides readinessGates defined in this field. + NOTE: Specific control plane provider implementations might automatically extend the list of readinessGates; + e.g. 
the kubeadm control provider adds ReadinessGates for the APIServerPodHealthy, SchedulerPodHealthy conditions, etc. + items: + description: MachineReadinessGate contains the type of a Machine + condition to be used as a readiness gate. + properties: + conditionType: + description: |- + conditionType refers to a condition with matching type in the Machine's condition list. + If the conditions doesn't exist, it will be treated as unknown. + Note: Both Cluster API conditions or conditions added by 3rd party controllers can be used as readiness gates. + maxLength: 316 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + polarity: + description: |- + polarity of the conditionType specified in this readinessGate. + Valid values are Positive, Negative and omitted. + When omitted, the default behaviour will be Positive. + A positive polarity means that the condition should report a true status under normal conditions. + A negative polarity means that the condition should report a false status under normal conditions. + enum: + - Positive + - Negative + type: string + required: + - conditionType + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - conditionType + x-kubernetes-list-type: map + ref: + description: |- + ref is a required reference to a custom resource + offered by a provider. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + required: + - ref + type: object + maxItems: 100 + type: array + x-kubernetes-list-map-keys: + - class + x-kubernetes-list-type: map infrastructure: description: |- infrastructure is a reference to a provider-specific template that holds @@ -1011,6 +1384,22 @@ spec: Note: this will match the controlPlane and also the controlPlane machineInfrastructure (depending on the kind and apiVersion). type: boolean + controlPlaneClass: + description: |- + controlPlaneClass selects templates referenced in specific ControlPlaneClasses in + .spec.controlPlane.classes. + syself new field. 
+ properties: + names: + description: names selects templates by class + names. + items: + maxLength: 256 + minLength: 1 + type: string + maxItems: 100 + type: array + type: object infrastructureCluster: description: infrastructureCluster selects templates referenced in .spec.infrastructure. @@ -2901,6 +3290,14 @@ spec: controlPlane is a reference to a local struct that holds the details for provisioning the Control Plane for the Cluster. properties: + class: + default: "" + description: |- + class denotes a type of control-plane node present in the cluster. + When used in ControlPlaneTopologyClass.Classes, this name MUST be unique + within the list and can be referenced from the Cluster topology. + syself new field. + type: string deletion: description: deletion contains configuration options for Machine deletion. @@ -3261,6 +3658,391 @@ spec: required: - templateRef type: object + controlPlaneClasses: + description: |- + controlPlaneClasses is a list of named control plane classes that can be referenced + from the Cluster topology. Each class defines a distinct control plane + configuration. The class name MUST be unique within this list. + When classes is defined, the Cluster topology can reference a specific + control plane class by name. + syself new field. + items: + description: ControlPlaneClass defines the class for the control + plane. + properties: + class: + default: "" + description: |- + class denotes a type of control-plane node present in the cluster. + When used in ControlPlaneTopologyClass.Classes, this name MUST be unique + within the list and can be referenced from the Cluster topology. + syself new field. + type: string + deletion: + description: deletion contains configuration options for Machine + deletion. 
+ minProperties: 1 + properties: + nodeDeletionTimeoutSeconds: + description: |- + nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine + hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. + Defaults to 10 seconds. + NOTE: This value can be overridden while defining a Cluster.Topology. + format: int32 + minimum: 0 + type: integer + nodeDrainTimeoutSeconds: + description: |- + nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. + The default value is 0, meaning that the node can be drained without any time limitations. + NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` + NOTE: This value can be overridden while defining a Cluster.Topology. + format: int32 + minimum: 0 + type: integer + nodeVolumeDetachTimeoutSeconds: + description: |- + nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes + to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. + NOTE: This value can be overridden while defining a Cluster.Topology. + format: int32 + minimum: 0 + type: integer + type: object + healthCheck: + description: |- + healthCheck defines a MachineHealthCheck for this ControlPlaneClass. + This field is supported if and only if the ControlPlane provider template + referenced above is Machine based and supports setting replicas. + minProperties: 1 + properties: + checks: + description: |- + checks are the checks that are used to evaluate if a Machine is healthy. + + Independent of this configuration the MachineHealthCheck controller will always + flag Machines with `cluster.x-k8s.io/remediate-machine` annotation and + Machines with deleted Nodes as unhealthy. + + Furthermore, if checks.nodeStartupTimeoutSeconds is not set it + is defaulted to 10 minutes and evaluated accordingly. 
+ minProperties: 1 + properties: + nodeStartupTimeoutSeconds: + description: |- + nodeStartupTimeoutSeconds allows to set the maximum time for MachineHealthCheck + to consider a Machine unhealthy if a corresponding Node isn't associated + through a `Spec.ProviderID` field. + + The duration set in this field is compared to the greatest of: + - Cluster's infrastructure ready condition timestamp (if and when available) + - Control Plane's initialized condition timestamp (if and when available) + - Machine's infrastructure ready condition timestamp (if and when available) + - Machine's metadata creation timestamp + + Defaults to 10 minutes. + If you wish to disable this feature, set the value explicitly to 0. + format: int32 + minimum: 0 + type: integer + unhealthyNodeConditions: + description: |- + unhealthyNodeConditions contains a list of conditions that determine + whether a node is considered unhealthy. The conditions are combined in a + logical OR, i.e. if any of the conditions is met, the node is unhealthy. + items: + description: |- + UnhealthyNodeCondition represents a Node condition type and value with a timeout + specified as a duration. When the named condition has been in the given + status for at least the timeout value, a node is considered unhealthy. + properties: + status: + description: status of the condition, one of True, + False, Unknown. + minLength: 1 + type: string + timeoutSeconds: + description: |- + timeoutSeconds is the duration that a node must be in a given status for, + after which the node is considered unhealthy. + For example, with a value of "1h", the node must match the status + for at least 1 hour before being considered unhealthy. 
+ format: int32 + minimum: 0 + type: integer + type: + description: type of Node condition + minLength: 1 + type: string + required: + - status + - timeoutSeconds + - type + type: object + maxItems: 100 + minItems: 1 + type: array + x-kubernetes-list-type: atomic + type: object + remediation: + description: |- + remediation configures if and how remediations are triggered if a Machine is unhealthy. + + If remediation or remediation.triggerIf is not set, + remediation will always be triggered for unhealthy Machines. + + If remediation or remediation.templateRef is not set, + the OwnerRemediated condition will be set on unhealthy Machines to trigger remediation via + the owner of the Machines, for example a MachineSet or a KubeadmControlPlane. + minProperties: 1 + properties: + templateRef: + description: |- + templateRef is a reference to a remediation template + provided by an infrastructure provider. + + This field is completely optional, when filled, the MachineHealthCheck controller + creates a new object from the template referenced and hands off remediation of the machine to + a controller that lives outside of Cluster API. + properties: + apiVersion: + description: |- + apiVersion of the remediation template. + apiVersion must be fully qualified domain name followed by / and a version. + NOTE: This field must be kept in sync with the APIVersion of the remediation template. + maxLength: 317 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[a-z]([-a-z0-9]*[a-z0-9])?$ + type: string + kind: + description: |- + kind of the remediation template. + kind must consist of alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: |- + name of the remediation template. 
+ name must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - apiVersion + - kind + - name + type: object + triggerIf: + description: |- + triggerIf configures if remediations are triggered. + If this field is not set, remediations are always triggered. + minProperties: 1 + properties: + unhealthyInRange: + description: |- + unhealthyInRange specifies that remediations are only triggered if the number of + unhealthy Machines is in the configured range. + Takes precedence over unhealthyLessThanOrEqualTo. + Eg. "[3-5]" - This means that remediation will be allowed only when: + (a) there are at least 3 unhealthy Machines (and) + (b) there are at most 5 unhealthy Machines + maxLength: 32 + minLength: 1 + pattern: ^\[[0-9]+-[0-9]+\]$ + type: string + unhealthyLessThanOrEqualTo: + anyOf: + - type: integer + - type: string + description: |- + unhealthyLessThanOrEqualTo specifies that remediations are only triggered if the number of + unhealthy Machines is less than or equal to the configured value. + unhealthyInRange takes precedence if set. + x-kubernetes-int-or-string: true + type: object + type: object + type: object + machineInfrastructure: + description: |- + machineInfrastructure defines the metadata and infrastructure information + for control plane machines. + + This field is supported if and only if the control plane provider template + referenced above is Machine based and supports setting replicas. + properties: + templateRef: + description: templateRef is a required reference to the + template for a MachineInfrastructure of a ControlPlane. + properties: + apiVersion: + description: |- + apiVersion of the template. + apiVersion must be fully qualified domain name followed by / and a version. 
+ maxLength: 317 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[a-z]([-a-z0-9]*[a-z0-9])?$ + type: string + kind: + description: |- + kind of the template. + kind must consist of alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: |- + name of the template. + name must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - apiVersion + - kind + - name + type: object + required: + - templateRef + type: object + metadata: + description: |- + metadata is the metadata applied to the ControlPlane and the Machines of the ControlPlane + if the ControlPlaneTemplate referenced is machine based. If not, it is applied only to the + ControlPlane. + At runtime this metadata is merged with the corresponding metadata from the topology. + + This field is supported if and only if the control plane provider template + referenced is Machine based. + minProperties: 1 + properties: + annotations: + additionalProperties: + type: string + description: |- + annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + labels is a map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. 
+ More info: http://kubernetes.io/docs/user-guide/labels + type: object + type: object + naming: + description: naming allows changing the naming pattern used + when creating the control plane provider object. + minProperties: 1 + properties: + template: + description: |- + template defines the template to use for generating the name of the ControlPlane object. + If not defined, it will fallback to `{{ .cluster.name }}-{{ .random }}`. + If the templated string exceeds 63 characters, it will be trimmed to 58 characters and will + get concatenated with a random suffix of length 5. + The templating mechanism provides the following arguments: + * `.cluster.name`: The name of the cluster object. + * `.random`: A random alphanumeric string, without vowels, of length 5. + maxLength: 1024 + minLength: 1 + type: string + type: object + readinessGates: + description: |- + readinessGates specifies additional conditions to include when evaluating Machine Ready condition. + + This field can be used e.g. to instruct the machine controller to include in the computation for Machine's ready + computation a condition, managed by an external controllers, reporting the status of special software/hardware installed on the Machine. + + NOTE: If a Cluster defines a custom list of readinessGates for the control plane, + such list overrides readinessGates defined in this field. + NOTE: Specific control plane provider implementations might automatically extend the list of readinessGates; + e.g. the kubeadm control provider adds ReadinessGates for the APIServerPodHealthy, SchedulerPodHealthy conditions, etc. + items: + description: MachineReadinessGate contains the type of a Machine + condition to be used as a readiness gate. + properties: + conditionType: + description: |- + conditionType refers to a condition with matching type in the Machine's condition list. + If the conditions doesn't exist, it will be treated as unknown. 
+ Note: Both Cluster API conditions or conditions added by 3rd party controllers can be used as readiness gates. + maxLength: 316 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + polarity: + description: |- + polarity of the conditionType specified in this readinessGate. + Valid values are Positive, Negative and omitted. + When omitted, the default behaviour will be Positive. + A positive polarity means that the condition should report a true status under normal conditions. + A negative polarity means that the condition should report a false status under normal conditions. + enum: + - Positive + - Negative + type: string + required: + - conditionType + type: object + maxItems: 32 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - conditionType + x-kubernetes-list-type: map + templateRef: + description: templateRef contains the reference to a provider-specific + control plane template. + properties: + apiVersion: + description: |- + apiVersion of the template. + apiVersion must be fully qualified domain name followed by / and a version. + maxLength: 317 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[a-z]([-a-z0-9]*[a-z0-9])?$ + type: string + kind: + description: |- + kind of the template. + kind must consist of alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: |- + name of the template. + name must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. 
+ maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - apiVersion + - kind + - name + type: object + required: + - templateRef + type: object + maxItems: 100 + type: array + x-kubernetes-list-map-keys: + - class + x-kubernetes-list-type: map infrastructure: description: |- infrastructure is a reference to a local struct that holds the details @@ -3436,6 +4218,22 @@ spec: Note: this will match the controlPlane and also the controlPlane machineInfrastructure (depending on the kind and apiVersion). type: boolean + controlPlaneClass: + description: |- + controlPlaneClass selects templates referenced in specific ControlPlaneClasses in + .spec.controlPlane.classes. + syself new field. + properties: + names: + description: names selects templates by class + names. + items: + maxLength: 256 + minLength: 1 + type: string + maxItems: 100 + type: array + type: object infrastructureCluster: description: infrastructureCluster selects templates referenced in .spec.infrastructure. diff --git a/config/crd/bases/cluster.x-k8s.io_clusters.yaml b/config/crd/bases/cluster.x-k8s.io_clusters.yaml index 6bae359e86fa..df53e2aef5f9 100644 --- a/config/crd/bases/cluster.x-k8s.io_clusters.yaml +++ b/config/crd/bases/cluster.x-k8s.io_clusters.yaml @@ -980,6 +980,15 @@ spec: controlPlane: description: controlPlane describes the cluster control plane. properties: + class: + description: |- + class is the name of the ControlPlaneClass used to create the set of control plane nodes. + This should match one of the control plane classes defined in the ClusterClass object. + If left empty `clusterclass.Spec.ControlPlane` is used. + syself new field. + maxLength: 256 + minLength: 1 + type: string machineHealthCheck: description: |- machineHealthCheck allows to enable, disable and override @@ -2466,6 +2475,15 @@ spec: description: controlPlane describes the cluster control plane. 
minProperties: 1 properties: + class: + description: |- + class is the name of the ControlPlaneClass used to create the set of control plane nodes. + This should match one of the control plane classes defined in the ClusterClass object. + If left empty `clusterclass.Spec.ControlPlane` is used. + syself new field. + maxLength: 256 + minLength: 1 + type: string deletion: description: deletion contains configuration options for Machine deletion. diff --git a/exp/topology/desiredstate/desired_state.go b/exp/topology/desiredstate/desired_state.go index c0f35f29b0dc..6903e54fcc38 100644 --- a/exp/topology/desiredstate/desired_state.go +++ b/exp/topology/desiredstate/desired_state.go @@ -245,7 +245,11 @@ func computeInfrastructureCluster(_ context.Context, s *scope.Scope) (*unstructu // that should be referenced by the ControlPlane object. func (g *generator) computeControlPlaneInfrastructureMachineTemplate(ctx context.Context, s *scope.Scope) (*unstructured.Unstructured, error) { template := s.Blueprint.ControlPlane.InfrastructureMachineTemplate - templateClonedFromRef := s.Blueprint.ClusterClass.Spec.ControlPlane.MachineInfrastructure.TemplateRef.ToObjectReference(s.Blueprint.ClusterClass.Namespace) + + // syself change + // + // no nil check for s.BluePrint.ControlPlaneClass because we already resolved the reference in internal/controllers/topology/cluster/blueprint.go 's resolveControlPlaneClass function. + templateClonedFromRef := s.Blueprint.ControlPlaneClass.MachineInfrastructure.TemplateRef.ToObjectReference(s.Blueprint.ClusterClass.Namespace) cluster := s.Current.Cluster // Check if the current control plane object has a machineTemplate.infrastructureRef already defined. @@ -290,7 +294,9 @@ func (g *generator) computeControlPlaneInfrastructureMachineTemplate(ctx context // corresponding template defined in the blueprint. 
func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, infrastructureMachineTemplate *unstructured.Unstructured) (*unstructured.Unstructured, error) { template := s.Blueprint.ControlPlane.Template - templateClonedFromRef := s.Blueprint.ClusterClass.Spec.ControlPlane.TemplateRef.ToObjectReference(s.Blueprint.ClusterClass.Namespace) + + // syself change + templateClonedFromRef := s.Blueprint.ControlPlaneClass.TemplateRef.ToObjectReference(s.Blueprint.ClusterClass.Namespace) cluster := s.Current.Cluster currentRef := cluster.Spec.ControlPlaneRef @@ -298,7 +304,7 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf // We merge the labels and annotations from topology and ClusterClass. // We also add the cluster-name and the topology owned labels, so they are propagated down. topologyMetadata := s.Blueprint.Topology.ControlPlane.Metadata - clusterClassMetadata := s.Blueprint.ClusterClass.Spec.ControlPlane.Metadata + clusterClassMetadata := s.Blueprint.ControlPlaneClass.Metadata controlPlaneLabels := util.MergeMap(topologyMetadata.Labels, clusterClassMetadata.Labels) if controlPlaneLabels == nil { @@ -310,8 +316,8 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf controlPlaneAnnotations := util.MergeMap(topologyMetadata.Annotations, clusterClassMetadata.Annotations) nameTemplate := "{{ .cluster.name }}-{{ .random }}" - if s.Blueprint.ClusterClass.Spec.ControlPlane.Naming.Template != "" { - nameTemplate = s.Blueprint.ClusterClass.Spec.ControlPlane.Naming.Template + if s.Blueprint.ControlPlaneClass.Naming.Template != "" { + nameTemplate = s.Blueprint.ControlPlaneClass.Naming.Template } controlPlane, err := templateToObject(templateToInput{ @@ -411,14 +417,15 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf if err := contract.ControlPlane().MachineTemplate().ReadinessGates(contractVersion).Set(controlPlane, 
s.Blueprint.Topology.ControlPlane.ReadinessGates); err != nil { return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().MachineTemplate().ReadinessGates(contractVersion).Path()) } - } else if s.Blueprint.ClusterClass.Spec.ControlPlane.ReadinessGates != nil { - if err := contract.ControlPlane().MachineTemplate().ReadinessGates(contractVersion).Set(controlPlane, s.Blueprint.ClusterClass.Spec.ControlPlane.ReadinessGates); err != nil { + // syself change + } else if s.Blueprint.ControlPlaneClass.ReadinessGates != nil { + if err := contract.ControlPlane().MachineTemplate().ReadinessGates(contractVersion).Set(controlPlane, s.Blueprint.ControlPlaneClass.ReadinessGates); err != nil { return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().MachineTemplate().ReadinessGates(contractVersion).Path()) } } - // If it is required to manage the NodeDrainTimeoutSeconds for the control plane, set the corresponding field. - nodeDrainTimeout := s.Blueprint.ClusterClass.Spec.ControlPlane.Deletion.NodeDrainTimeoutSeconds + // If it is required to manage the NodeDrainTimeoutSeconds for the control plane, set the corresponding field. + nodeDrainTimeout := s.Blueprint.ControlPlaneClass.Deletion.NodeDrainTimeoutSeconds if s.Blueprint.Topology.ControlPlane.Deletion.NodeDrainTimeoutSeconds != nil { nodeDrainTimeout = s.Blueprint.Topology.ControlPlane.Deletion.NodeDrainTimeoutSeconds } @@ -434,8 +441,8 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf } } - // If it is required to manage the NodeVolumeDetachTimeoutSeconds for the control plane, set the corresponding field. - nodeVolumeDetachTimeout := s.Blueprint.ClusterClass.Spec.ControlPlane.Deletion.NodeVolumeDetachTimeoutSeconds + // If it is required to manage the NodeVolumeDetachTimeoutSeconds for the control plane, set the corresponding field.
+ nodeVolumeDetachTimeout := s.Blueprint.ControlPlaneClass.Deletion.NodeVolumeDetachTimeoutSeconds if s.Blueprint.Topology.ControlPlane.Deletion.NodeVolumeDetachTimeoutSeconds != nil { nodeVolumeDetachTimeout = s.Blueprint.Topology.ControlPlane.Deletion.NodeVolumeDetachTimeoutSeconds } @@ -451,8 +458,8 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf } } - // If it is required to manage the NodeDeletionTimeoutSeconds for the control plane, set the corresponding field. - nodeDeletionTimeout := s.Blueprint.ClusterClass.Spec.ControlPlane.Deletion.NodeDeletionTimeoutSeconds + // If it is required to manage the NodeDeletionTimeoutSeconds for the control plane, set the corresponding field. + nodeDeletionTimeout := s.Blueprint.ControlPlaneClass.Deletion.NodeDeletionTimeoutSeconds if s.Blueprint.Topology.ControlPlane.Deletion.NodeDeletionTimeoutSeconds != nil { nodeDeletionTimeout = s.Blueprint.Topology.ControlPlane.Deletion.NodeDeletionTimeoutSeconds } diff --git a/exp/topology/scope/blueprint.go b/exp/topology/scope/blueprint.go index 15c20fe3c597..81f96a356e2d 100644 --- a/exp/topology/scope/blueprint.go +++ b/exp/topology/scope/blueprint.go @@ -31,6 +31,12 @@ type ClusterBlueprint struct { // ClusterClass holds the ClusterClass object referenced from Cluster.Spec.Topology. ClusterClass *clusterv1.ClusterClass + // syself change + // ControlPlaneClass holds the resolved ControlPlaneClass from the ClusterClass. + // This is the ControlPlaneClass selected based on the Cluster topology's control plane class field. + // If the topology does not specify a class, this is the inline ControlPlaneClass from ClusterClass.Spec.ControlPlane. + ControlPlaneClass *clusterv1.ControlPlaneClass + // InfrastructureClusterTemplate holds the InfrastructureClusterTemplate referenced from ClusterClass.
InfrastructureClusterTemplate *unstructured.Unstructured @@ -93,7 +99,12 @@ type MachinePoolBlueprint struct { // HasControlPlaneInfrastructureMachine checks whether the clusterClass mandates the controlPlane has infrastructureMachines. func (b *ClusterBlueprint) HasControlPlaneInfrastructureMachine() bool { - return b.ClusterClass.Spec.ControlPlane.MachineInfrastructure.TemplateRef.IsDefined() + // syself change. + if b.ControlPlaneClass == nil { + return b.ClusterClass.Spec.ControlPlane.MachineInfrastructure.TemplateRef.IsDefined() + } + + return b.ControlPlaneClass.MachineInfrastructure.TemplateRef.IsDefined() } // IsControlPlaneMachineHealthCheckEnabled returns true if a MachineHealthCheck should be created for the control plane. @@ -102,18 +113,37 @@ func (b *ClusterBlueprint) IsControlPlaneMachineHealthCheckEnabled() bool { if !b.HasControlPlaneInfrastructureMachine() { return false } - // If no MachineHealthCheck is defined in the ClusterClass or in the Cluster Topology then return false. - if !b.ClusterClass.Spec.ControlPlane.HealthCheck.IsDefined() && !b.Topology.ControlPlane.HealthCheck.IsDefined() { - return false + + // syself change. + // If no MachineHealthCheck is defined in the resolved ControlPlaneClass or in the Cluster Topology then return false. + cpClassMHC := b.controlPlaneClassMachineHealthCheck() + if !b.Topology.ControlPlane.HealthCheck.IsDefined() { + if cpClassMHC == nil { + return false + } + if !cpClassMHC.IsDefined() { + return false + } } + // If `enable` is not set then consider it as true. A MachineHealthCheck will be created from either ClusterClass or Cluster Topology. if b.Topology.ControlPlane.HealthCheck.Enabled == nil { return true } + // If `enable` is explicitly set, use the value. return *b.Topology.ControlPlane.HealthCheck.Enabled } +// controlPlaneClassMachineHealthCheck returns the MachineHealthCheck from the resolved ControlPlaneClass. +// syself change. 
+func (b *ClusterBlueprint) controlPlaneClassMachineHealthCheck() *clusterv1.ControlPlaneClassHealthCheck { + if b.ControlPlaneClass == nil { + return &b.ClusterClass.Spec.ControlPlane.HealthCheck + } + return &b.ControlPlaneClass.HealthCheck +} + // ControlPlaneMachineHealthCheckClass returns the MachineHealthCheckClass that should be used to create the MachineHealthCheck object. func (b *ClusterBlueprint) ControlPlaneMachineHealthCheckClass() (clusterv1.MachineHealthCheckChecks, clusterv1.MachineHealthCheckRemediation) { if b.Topology.ControlPlane.HealthCheck.IsDefined() { @@ -141,9 +171,19 @@ func (b *ClusterBlueprint) ControlPlaneMachineHealthCheckClass() (clusterv1.Mach } } -// HasControlPlaneMachineHealthCheck returns true if the ControlPlaneClass has both MachineInfrastructure and a MachineHealthCheck defined. +// HasControlPlaneMachineHealthCheck returns true if the resolved ControlPlaneClass has both MachineInfrastructure and a MachineHealthCheck defined. func (b *ClusterBlueprint) HasControlPlaneMachineHealthCheck() bool { - return b.HasControlPlaneInfrastructureMachine() && b.ClusterClass.Spec.ControlPlane.HealthCheck.IsDefined() + // syself change. + if !b.HasControlPlaneInfrastructureMachine() { + return false + } + + mhc := b.controlPlaneClassMachineHealthCheck() + if mhc == nil { + return false + } + + return mhc.IsDefined() } // IsMachineDeploymentMachineHealthCheckEnabled returns true if a MachineHealthCheck should be created for the MachineDeployment. 
diff --git a/internal/api/core/v1alpha4/zz_generated.conversion.go b/internal/api/core/v1alpha4/zz_generated.conversion.go index 308061fec55c..7b8bf57e5313 100644 --- a/internal/api/core/v1alpha4/zz_generated.conversion.go +++ b/internal/api/core/v1alpha4/zz_generated.conversion.go @@ -678,6 +678,7 @@ func autoConvert_v1beta2_ClusterClassSpec_To_v1alpha4_ClusterClassSpec(in *v1bet if err := Convert_v1beta2_ControlPlaneClass_To_v1alpha4_ControlPlaneClass(&in.ControlPlane, &out.ControlPlane, s); err != nil { return err } + // WARNING: in.ControlPlaneClasses requires manual conversion: does not exist in peer-type if err := Convert_v1beta2_WorkersClass_To_v1alpha4_WorkersClass(&in.Workers, &out.Workers, s); err != nil { return err } @@ -864,6 +865,7 @@ func autoConvert_v1beta2_ControlPlaneClass_To_v1alpha4_ControlPlaneClass(in *v1b if err := Convert_v1beta2_ObjectMeta_To_v1alpha4_ObjectMeta(&in.Metadata, &out.Metadata, s); err != nil { return err } + // WARNING: in.Class requires manual conversion: does not exist in peer-type // WARNING: in.TemplateRef requires manual conversion: does not exist in peer-type // WARNING: in.MachineInfrastructure requires manual conversion: inconvertible types (sigs.k8s.io/cluster-api/api/core/v1beta2.ControlPlaneClassMachineInfrastructureTemplate vs *sigs.k8s.io/cluster-api/internal/api/core/v1alpha4.LocalObjectTemplate) // WARNING: in.HealthCheck requires manual conversion: does not exist in peer-type @@ -890,6 +892,7 @@ func autoConvert_v1beta2_ControlPlaneTopology_To_v1alpha4_ControlPlaneTopology(i if err := Convert_v1beta2_ObjectMeta_To_v1alpha4_ObjectMeta(&in.Metadata, &out.Metadata, s); err != nil { return err } + // WARNING: in.Class requires manual conversion: does not exist in peer-type out.Replicas = (*int32)(unsafe.Pointer(in.Replicas)) // WARNING: in.HealthCheck requires manual conversion: does not exist in peer-type // WARNING: in.Deletion requires manual conversion: does not exist in peer-type diff --git 
a/internal/controllers/clusterclass/clusterclass_controller.go b/internal/controllers/clusterclass/clusterclass_controller.go index 2d6d1780cc30..9d0c4de8e934 100644 --- a/internal/controllers/clusterclass/clusterclass_controller.go +++ b/internal/controllers/clusterclass/clusterclass_controller.go @@ -97,7 +97,6 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt ). WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue)). Complete(r) - if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") } @@ -219,6 +218,14 @@ func (r *Reconciler) reconcileExternalReferences(ctx context.Context, s *scope) clusterClass.Spec.ControlPlane.TemplateRef, } refs = append(refs, clusterClass.Spec.ControlPlane.MachineInfrastructure.TemplateRef) + + // Also collect refs from ControlPlaneClasses so that ownership and API version checks + // are applied to templates referenced by named control plane classes. + // syself change. 
+ for _, cpClass := range clusterClass.Spec.ControlPlaneClasses { + refs = append(refs, cpClass.TemplateRef, cpClass.MachineInfrastructure.TemplateRef) + } + for _, mdClass := range clusterClass.Spec.Workers.MachineDeployments { refs = append(refs, mdClass.Bootstrap.TemplateRef, mdClass.Infrastructure.TemplateRef) } @@ -397,7 +404,8 @@ func addNewStatusVariable(variable clusterv1.ClusterClassVariable, from string) DeprecatedV1Beta1Metadata: variable.DeprecatedV1Beta1Metadata, Schema: variable.Schema, }, - }} + }, + } } func addDefinitionToExistingStatusVariable(variable clusterv1.ClusterClassVariable, from string, existingVariable *clusterv1.ClusterClassStatusVariable) *clusterv1.ClusterClassStatusVariable { diff --git a/internal/controllers/topology/cluster/blueprint.go b/internal/controllers/topology/cluster/blueprint.go index e1f95b0e4738..1d0cc42fde78 100644 --- a/internal/controllers/topology/cluster/blueprint.go +++ b/internal/controllers/topology/cluster/blueprint.go @@ -24,6 +24,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/exp/topology/scope" + topologyselectors "sigs.k8s.io/cluster-api/internal/topology/selectors" ) // getBlueprint gets a ClusterBlueprint with the ClusterClass and the referenced templates to be used for a managed Cluster topology. @@ -44,16 +45,26 @@ func (r *Reconciler) getBlueprint(ctx context.Context, cluster *clusterv1.Cluste return nil, errors.Wrapf(err, "failed to get infrastructure cluster template for ClusterClass %s", klog.KObj(blueprint.ClusterClass)) } - // Get ClusterClass.spec.controlPlane. + // syself change + // Resolve the ControlPlaneClass to use. + // If the Cluster topology specifies a control plane class, look it up from ClusterClass.spec.controlPlane.classes. + // Otherwise, fall back to the inline ClusterClass.spec.controlPlane definition. 
+ controlPlaneClass, err := topologyselectors.ResolveControlPlaneClass(cluster, clusterClass) + if err != nil { + return nil, errors.Wrapf(err, "failed to resolve control plane class for ClusterClass %s", klog.KObj(blueprint.ClusterClass)) + } + + blueprint.ControlPlaneClass = controlPlaneClass + blueprint.ControlPlane = &scope.ControlPlaneBlueprint{} - blueprint.ControlPlane.Template, err = r.getReference(ctx, blueprint.ClusterClass.Spec.ControlPlane.TemplateRef.ToObjectReference(clusterClass.Namespace)) + blueprint.ControlPlane.Template, err = r.getReference(ctx, controlPlaneClass.TemplateRef.ToObjectReference(clusterClass.Namespace)) if err != nil { return nil, errors.Wrapf(err, "failed to get control plane template for ClusterClass %s", klog.KObj(blueprint.ClusterClass)) } // If the clusterClass mandates the controlPlane has infrastructureMachines, read it. if blueprint.HasControlPlaneInfrastructureMachine() { - blueprint.ControlPlane.InfrastructureMachineTemplate, err = r.getReference(ctx, blueprint.ClusterClass.Spec.ControlPlane.MachineInfrastructure.TemplateRef.ToObjectReference(clusterClass.Namespace)) + blueprint.ControlPlane.InfrastructureMachineTemplate, err = r.getReference(ctx, controlPlaneClass.MachineInfrastructure.TemplateRef.ToObjectReference(clusterClass.Namespace)) if err != nil { return nil, errors.Wrapf(err, "failed to get control plane's machine template for ClusterClass %s", klog.KObj(blueprint.ClusterClass)) } @@ -61,7 +72,7 @@ func (r *Reconciler) getBlueprint(ctx context.Context, cluster *clusterv1.Cluste // If the clusterClass defines a valid MachineHealthCheck (including a defined MachineInfrastructure) set the blueprint MachineHealthCheck. 
if blueprint.HasControlPlaneMachineHealthCheck() { - blueprint.ControlPlane.HealthCheck = blueprint.ClusterClass.Spec.ControlPlane.HealthCheck + blueprint.ControlPlane.HealthCheck = blueprint.ControlPlaneClass.HealthCheck } // Loop over the machine deployments classes in ClusterClass diff --git a/internal/controllers/topology/cluster/patches/engine.go b/internal/controllers/topology/cluster/patches/engine.go index 60f524e6e6e8..5c4197b0edee 100644 --- a/internal/controllers/topology/cluster/patches/engine.go +++ b/internal/controllers/topology/cluster/patches/engine.go @@ -175,8 +175,9 @@ func addVariablesForPatch(blueprint *scope.ClusterBlueprint, desired *scope.Clus } req.Variables = globalVariables + // syself change // Calculate the Control Plane variables. - controlPlaneVariables, err := variables.ControlPlane(&blueprint.Topology.ControlPlane, desired.ControlPlane.Object, desired.ControlPlane.InfrastructureMachineTemplate, patchVariableDefinitions) + controlPlaneVariables, err := variables.ControlPlane(&blueprint.Topology.ControlPlane, desired.ControlPlane.Object, desired.ControlPlane.InfrastructureMachineTemplate, blueprint.Topology.ControlPlane.Class, patchVariableDefinitions) if err != nil { return errors.Wrapf(err, "failed to calculate ControlPlane variables") } diff --git a/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go b/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go index 9fccf0a9ba19..8cd40a9ed5ea 100644 --- a/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go +++ b/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go @@ -161,6 +161,36 @@ func matchesSelector(req *runtimehooksv1.GeneratePatchesRequestItem, templateVar } } + // ControlPlaneClass selector targets templates belonging to a specific named control plane class + // from ClusterClass.spec.controlPlaneClasses. 
It reads the controlPlane.class variable set on the + // template to determine which class the template belongs to, then checks whether that class name matches + // one of the names listed in the selector (exact match, or wildcard prefix/suffix with "*"). + // This mirrors how MachineDeploymentClass selectors work for worker node templates. + if selector.MatchResources.ControlPlaneClass != nil { + if (req.HolderReference.Kind == "Cluster" && req.HolderReference.FieldPath == "spec.controlPlaneRef") || + req.HolderReference.FieldPath == strings.Join(contract.ControlPlane().MachineTemplate().InfrastructureRef().Path(), ".") { + // Read the builtin.controlPlane.class variable. + templateCPClassJSON, err := patchvariables.GetVariableValue(templateVariables, "builtin.controlPlane.class") + + // If the builtin variable could be read. + if err == nil { + // If templateCPClass matches one of the configured ControlPlaneClasses. + for _, cpClass := range selector.MatchResources.ControlPlaneClass.Names { + if cpClass == "*" || string(templateCPClassJSON.Raw) == strconv.Quote(cpClass) { + return true + } + unquoted, _ := strconv.Unquote(string(templateCPClassJSON.Raw)) + if strings.HasPrefix(cpClass, "*") && strings.HasSuffix(unquoted, strings.TrimPrefix(cpClass, "*")) { + return true + } + if strings.HasSuffix(cpClass, "*") && strings.HasPrefix(unquoted, strings.TrimSuffix(cpClass, "*")) { + return true + } + } + } + } + } + // Check if the request is for a BootstrapConfigTemplate or an InfrastructureMachineTemplate // of one of the configured MachineDeploymentClasses. 
if selector.MatchResources.MachineDeploymentClass != nil { diff --git a/internal/controllers/topology/cluster/patches/variables/variables.go b/internal/controllers/topology/cluster/patches/variables/variables.go index 68d08f6166fe..a150ac202c85 100644 --- a/internal/controllers/topology/cluster/patches/variables/variables.go +++ b/internal/controllers/topology/cluster/patches/variables/variables.go @@ -104,7 +104,7 @@ func Global(clusterTopology clusterv1.Topology, cluster *clusterv1.Cluster, patc } // ControlPlane returns variables that apply to templates belonging to the ControlPlane. -func ControlPlane(cpTopology *clusterv1.ControlPlaneTopology, cp, cpInfrastructureMachineTemplate *unstructured.Unstructured, patchVariableDefinitions map[string]bool) ([]runtimehooksv1.Variable, error) { +func ControlPlane(cpTopology *clusterv1.ControlPlaneTopology, cp, cpInfrastructureMachineTemplate *unstructured.Unstructured, controlPlaneClass string, patchVariableDefinitions map[string]bool) ([]runtimehooksv1.Variable, error) { variables := []runtimehooksv1.Variable{} // Add variables overrides for the ControlPlane. @@ -115,10 +115,12 @@ func ControlPlane(cpTopology *clusterv1.ControlPlaneTopology, cp, cpInfrastructu } } + // syself change // Construct builtin variable. 
builtin := runtimehooksv1.Builtins{ ControlPlane: &runtimehooksv1.ControlPlaneBuiltins{ - Name: cp.GetName(), + Name: cp.GetName(), + Class: controlPlaneClass, }, } diff --git a/internal/controllers/topology/cluster/patches/variables/variables_test.go b/internal/controllers/topology/cluster/patches/variables/variables_test.go index 08a01a2e0192..38207adf6411 100644 --- a/internal/controllers/topology/cluster/patches/variables/variables_test.go +++ b/internal/controllers/topology/cluster/patches/variables/variables_test.go @@ -658,7 +658,7 @@ func TestControlPlane(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got, err := ControlPlane(tt.controlPlaneTopology, tt.controlPlane, tt.controlPlaneInfrastructureMachineTemplate, tt.variableDefinitionsForPatch) + got, err := ControlPlane(tt.controlPlaneTopology, tt.controlPlane, tt.controlPlaneInfrastructureMachineTemplate, "", tt.variableDefinitionsForPatch) g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(BeComparableTo(tt.want)) }) diff --git a/internal/controllers/topology/cluster/reconcile_state.go b/internal/controllers/topology/cluster/reconcile_state.go index 5856be641704..0d7511af35ae 100644 --- a/internal/controllers/topology/cluster/reconcile_state.go +++ b/internal/controllers/topology/cluster/reconcile_state.go @@ -343,13 +343,36 @@ func (r *Reconciler) reconcileControlPlane(ctx context.Context, s *scope.Scope) } } - // Create or update the MachineInfrastructureTemplate of the control plane. + // syself change: determine if control plane class has changed. 
+ currentCPInfraMachineTemplate := s.Current.ControlPlane.InfrastructureMachineTemplate + cpInfraKindChanged := false + if s.Current.ControlPlane.InfrastructureMachineTemplate != nil && + s.Desired.ControlPlane.InfrastructureMachineTemplate != nil && + s.Current.ControlPlane.InfrastructureMachineTemplate.GetKind() != s.Desired.ControlPlane.InfrastructureMachineTemplate.GetKind() { + cpInfraKindChanged = true + log.Info( + "Control plane infrastructure kind changed", + "currentKind", s.Current.ControlPlane.InfrastructureMachineTemplate.GetKind(), + "desiredKind", s.Desired.ControlPlane.InfrastructureMachineTemplate.GetKind(), + ) + + // Setting currentCPInfraMachineTemplate as nil so that method reconcileReferencedTemplate do not + // try to patch the existing template. Otherwise patching will fail as we cannot patch the `Kind` + // of an object. + currentCPInfraMachineTemplate = nil + } + + compatibilityChecker := check.ObjectsAreCompatible + if cpInfraKindChanged { + compatibilityChecker = check.ObjectsAreInTheSameNamespace + } + createdInfrastructureTemplate, err := r.reconcileReferencedTemplate(ctx, reconcileReferencedTemplateInput{ cluster: s.Current.Cluster, ref: cpInfraRef, - current: s.Current.ControlPlane.InfrastructureMachineTemplate, + current: currentCPInfraMachineTemplate, desired: s.Desired.ControlPlane.InfrastructureMachineTemplate, - compatibilityChecker: check.ObjectsAreCompatible, + compatibilityChecker: compatibilityChecker, templateNamePrefix: topologynames.ControlPlaneInfrastructureMachineTemplateNamePrefix(s.Current.Cluster.Name), }) if err != nil { @@ -1219,10 +1242,15 @@ func (r *Reconciler) reconcileReferencedObject(ctx context.Context, in reconcile return true, nil } + // syself change + // ObjectsAreStrictlyCompatible is intentionally skipped here. + // When switching a cluster's control plane class (e.g. from hcloud VMs to bare-metal), + // the InfrastructureMachineTemplate kind changes, which would fail the strict compatibility check. 
+ // Skipping it allows rolling a control plane from one infrastructure type to another. // Check if the current and desired referenced object are compatible. - if allErrs := check.ObjectsAreStrictlyCompatible(in.current, in.desired); len(allErrs) > 0 { - return false, allErrs.ToAggregate() - } + // if allErrs := check.ObjectsAreStrictlyCompatible(in.current, in.desired); len(allErrs) > 0 { + // return false, allErrs.ToAggregate() + // } log = log.WithValues(in.current.GetKind(), klog.KObj(in.current)) ctx = ctrl.LoggerInto(ctx, log) diff --git a/internal/topology/check/compatibility.go b/internal/topology/check/compatibility.go index 80c7fa4c970c..b05f837c7f4c 100644 --- a/internal/topology/check/compatibility.go +++ b/internal/topology/check/compatibility.go @@ -177,13 +177,30 @@ func ClusterClassesAreCompatible(current, desired *clusterv1.ClusterClass) field field.NewPath("spec", "infrastructure", "templateRef"))...) // Validate control plane changes desired a compatible way. - allErrs = append(allErrs, ClusterClassTemplateAreCompatible(current.Spec.ControlPlane.TemplateRef, desired.Spec.ControlPlane.TemplateRef, - field.NewPath("spec", "controlPlane", "templateRef"))...) - if desired.Spec.ControlPlane.MachineInfrastructure.TemplateRef.IsDefined() && current.Spec.ControlPlane.MachineInfrastructure.TemplateRef.IsDefined() { - allErrs = append(allErrs, ClusterClassTemplateAreCompatible(current.Spec.ControlPlane.MachineInfrastructure.TemplateRef, desired.Spec.ControlPlane.MachineInfrastructure.TemplateRef, - field.NewPath("spec", "controlPlane", "machineInfrastructure", "templateRef"))...) + // syself change. + if current.Spec.ControlPlane.TemplateRef.IsDefined() && desired.Spec.ControlPlane.MachineInfrastructure.TemplateRef.IsDefined() { + allErrs = append(allErrs, ClusterClassTemplateAreCompatible(current.Spec.ControlPlane.TemplateRef, desired.Spec.ControlPlane.TemplateRef, + field.NewPath("spec", "controlPlane"))...) 
+ if desired.Spec.ControlPlane.MachineInfrastructure.TemplateRef.IsDefined() && current.Spec.ControlPlane.MachineInfrastructure.TemplateRef.IsDefined() { + allErrs = append(allErrs, ClusterClassTemplateAreCompatible(current.Spec.ControlPlane.MachineInfrastructure.TemplateRef, desired.Spec.ControlPlane.MachineInfrastructure.TemplateRef, + field.NewPath("spec", "controlPlane", "machineInfrastructure"))...) + } } + // Validate named control plane class changes in a compatible way. + // syself change. + for _, desiredClass := range desired.Spec.ControlPlaneClasses { + for i, currentClass := range current.Spec.ControlPlaneClasses { + if desiredClass.Class == currentClass.Class { + classPath := field.NewPath("spec", "controlPlaneClasses").Index(i) + allErrs = append(allErrs, ClusterClassTemplateAreCompatible(currentClass.TemplateRef, desiredClass.TemplateRef, classPath)...) + if desiredClass.MachineInfrastructure.TemplateRef.IsDefined() && currentClass.MachineInfrastructure.TemplateRef.IsDefined() { + allErrs = append(allErrs, ClusterClassTemplateAreCompatible(currentClass.MachineInfrastructure.TemplateRef, desiredClass.MachineInfrastructure.TemplateRef, + classPath.Child("machineInfrastructure"))...) + } + } + } + } // Validate changes to MachineDeployments. allErrs = append(allErrs, MachineDeploymentClassesAreCompatible(current, desired)...) @@ -215,6 +232,26 @@ func MachineDeploymentClassesAreCompatible(current, desired *clusterv1.ClusterCl return allErrs } +// ControlPlaneClassesAreUnique checks that no two ControlPlaneClasses in a ClusterClass share a name. +// syself change. 
+func ControlPlaneClassesAreUnique(clusterClass *clusterv1.ClusterClass) field.ErrorList { + var allErrs field.ErrorList + classes := sets.Set[string]{} + for i, class := range clusterClass.Spec.ControlPlaneClasses { + if classes.Has(class.Class) { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("spec", "controlplane", "classes").Index(i).Child("class"), + class.Class, + fmt.Sprintf("ControlPlane class must be unique. ControlPlane with class %q is defined more than once", class.Class), + ), + ) + } + classes.Insert(class.Class) + } + return allErrs +} + // MachineDeploymentClassesAreUnique checks that no two MachineDeploymentClasses in a ClusterClass share a name. func MachineDeploymentClassesAreUnique(clusterClass *clusterv1.ClusterClass) field.ErrorList { var allErrs field.ErrorList @@ -371,8 +408,33 @@ func MachinePoolTopologiesAreValidAndDefinedInClusterClass(desired *clusterv1.Cl return allErrs } -// ClusterClassTemplatesAreValid checks that each template reference in the ClusterClass is valid . -func ClusterClassTemplatesAreValid(clusterClass *clusterv1.ClusterClass) field.ErrorList { +// ControlPlaneTopologyClassIsDefinedInClusterClass checks that the control plane class referenced +// in the Cluster topology (if set) is defined in the ClusterClass. +// syself change. 
+func ControlPlaneTopologyClassIsDefinedInClusterClass(desired *clusterv1.Cluster, clusterClass *clusterv1.ClusterClass) field.ErrorList { + var allErrs field.ErrorList + cpClass := desired.Spec.Topology.ControlPlane.Class + if cpClass == "" { + return nil + } + for _, class := range clusterClass.Spec.ControlPlaneClasses { + if class.Class == cpClass { + return nil + } + } + allErrs = append(allErrs, + field.Invalid( + field.NewPath("spec", "topology", "controlPlane", "class"), + cpClass, + fmt.Sprintf("ControlPlaneClass with name %q does not exist in ClusterClass %q", + cpClass, clusterClass.Name), + ), + ) + return allErrs +} + +// ClusterClassReferencesAreValid checks that each template reference in the ClusterClass is valid. +func ClusterClassReferencesAreValid(clusterClass *clusterv1.ClusterClass) field.ErrorList { var allErrs field.ErrorList allErrs = append(allErrs, ClusterClassTemplateIsValid(clusterClass.Spec.Infrastructure.TemplateRef, field.NewPath("spec", "infrastructure"))...) @@ -381,6 +443,16 @@ func ClusterClassTemplatesAreValid(clusterClass *clusterv1.ClusterClass) field.E allErrs = append(allErrs, ClusterClassTemplateIsValid(clusterClass.Spec.ControlPlane.MachineInfrastructure.TemplateRef, field.NewPath("spec", "controlPlane", "machineInfrastructure"))...) } + // validate each named control plane class. + // syself change. + for i, cpc := range clusterClass.Spec.ControlPlaneClasses { + classPath := field.NewPath("spec", "controlPlane", "controlPlaneClasses").Index(i) + allErrs = append(allErrs, ClusterClassTemplateIsValid(cpc.TemplateRef, classPath)...) + if cpc.MachineInfrastructure.TemplateRef.IsDefined() { + allErrs = append(allErrs, ClusterClassTemplateIsValid(cpc.MachineInfrastructure.TemplateRef, classPath.Child("machineInfrastructure"))...) 
+ } + } + for i := range clusterClass.Spec.Workers.MachineDeployments { mdc := clusterClass.Spec.Workers.MachineDeployments[i] allErrs = append(allErrs, ClusterClassTemplateIsValid(mdc.Bootstrap.TemplateRef, field.NewPath("spec", "workers", "machineDeployments").Index(i).Child("template", "bootstrap"))...) diff --git a/internal/topology/check/compatibility_test.go b/internal/topology/check/compatibility_test.go index e5373b5dadbc..e8fcb03ec84a 100644 --- a/internal/topology/check/compatibility_test.go +++ b/internal/topology/check/compatibility_test.go @@ -924,6 +924,74 @@ func TestMachinePoolClassesAreCompatible(t *testing.T) { } } +// syself change. +func TestControlPlaneClassesAreUnique(t *testing.T) { + tests := []struct { + name string + clusterClass *clusterv1.ClusterClass + wantErr bool + }{ + { + name: "pass if ControlPlaneClasses are unique", + clusterClass: &clusterv1.ClusterClass{ + Spec: clusterv1.ClusterClassSpec{ + ControlPlaneClasses: []clusterv1.ControlPlaneClass{ + {Class: "aa"}, + {Class: "bb"}, + }, + }, + }, + wantErr: false, + }, + { + name: "pass if no ControlPlaneClasses are defined", + clusterClass: &clusterv1.ClusterClass{ + Spec: clusterv1.ClusterClassSpec{ + ControlPlane: clusterv1.ControlPlaneClass{}, + }, + }, + wantErr: false, + }, + { + name: "fail if ControlPlaneClasses are duplicated", + clusterClass: &clusterv1.ClusterClass{ + Spec: clusterv1.ClusterClassSpec{ + ControlPlaneClasses: []clusterv1.ControlPlaneClass{ + {Class: "aa"}, + {Class: "aa"}, + }, + }, + }, + wantErr: true, + }, + { + name: "fail if multiple ControlPlaneClasses are identical", + clusterClass: &clusterv1.ClusterClass{ + Spec: clusterv1.ClusterClassSpec{ + ControlPlaneClasses: []clusterv1.ControlPlaneClass{ + {Class: "aa"}, + {Class: "aa"}, + {Class: "aa"}, + {Class: "aa"}, + }, + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + allErrs := ControlPlaneClassesAreUnique(tt.clusterClass) + if 
tt.wantErr { + g.Expect(allErrs).ToNot(BeEmpty()) + return + } + g.Expect(allErrs).To(BeEmpty()) + }) + } +} + func TestMachineDeploymentClassesAreUnique(t *testing.T) { tests := []struct { name string @@ -1494,7 +1562,7 @@ func TestMachinePoolTopologiesAreUniqueAndDefinedInClusterClass(t *testing.T) { } } -func TestClusterClassTemplatesAreValid(t *testing.T) { +func TestClusterClassReferencesAreValid(t *testing.T) { ref := &clusterv1.ClusterClassTemplateReference{ APIVersion: "group.test.io/foo", Kind: "barTemplate", @@ -1629,7 +1697,7 @@ func TestClusterClassTemplatesAreValid(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - allErrs := ClusterClassTemplatesAreValid(tt.clusterClass) + allErrs := ClusterClassReferencesAreValid(tt.clusterClass) if tt.wantErr { g.Expect(allErrs).ToNot(BeEmpty()) return diff --git a/internal/topology/selectors/selectors.go b/internal/topology/selectors/selectors.go index 510257a9391d..0f0ab1f0fbd9 100644 --- a/internal/topology/selectors/selectors.go +++ b/internal/topology/selectors/selectors.go @@ -18,6 +18,8 @@ limitations under the License. package selectors import ( + "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" @@ -27,11 +29,32 @@ import ( func ForMachineDeploymentMHC(md *clusterv1.MachineDeployment) *metav1.LabelSelector { // The selector returned here is the minimal common selector for all MachineSets belonging to a MachineDeployment. // It does not include any labels set in ClusterClass, Cluster Topology or elsewhere. 
- return &metav1.LabelSelector{MatchLabels: map[string]string{ - clusterv1.ClusterTopologyOwnedLabel: "", - clusterv1.ClusterTopologyMachineDeploymentNameLabel: md.Spec.Selector.MatchLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel], - }, + return &metav1.LabelSelector{ + MatchLabels: map[string]string{ + clusterv1.ClusterTopologyOwnedLabel: "", + clusterv1.ClusterTopologyMachineDeploymentNameLabel: md.Spec.Selector.MatchLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel], + }, + } +} + +// ResolveControlPlaneClass returns the ControlPlaneClass to use for the given Cluster. +// If the topology specifies a control plane class name, it is looked up from +// ClusterClass.spec.controlPlaneClasses. Otherwise the inline ClusterClass.spec.controlPlane is used. +// syself change. +func ResolveControlPlaneClass(cluster *clusterv1.Cluster, clusterClass *clusterv1.ClusterClass) (*clusterv1.ControlPlaneClass, error) { + if cluster.Spec.Topology.ControlPlane.Class == "" { + return &clusterClass.Spec.ControlPlane, nil + } + for i := range clusterClass.Spec.ControlPlaneClasses { + if clusterClass.Spec.ControlPlaneClasses[i].Class == cluster.Spec.Topology.ControlPlane.Class { + return &clusterClass.Spec.ControlPlaneClasses[i], nil + } } + return nil, fmt.Errorf("control plane class %q not found in ClusterClass %s/%s", + cluster.Spec.Topology.ControlPlane.Class, + clusterClass.Namespace, + clusterClass.Name, + ) } // ForControlPlaneMHC generates a selector for control plane MHCs. 
diff --git a/internal/webhooks/cluster.go b/internal/webhooks/cluster.go index 706334dcbd53..ee9208a8f34a 100644 --- a/internal/webhooks/cluster.go +++ b/internal/webhooks/cluster.go @@ -42,6 +42,7 @@ import ( "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/topology/check" + topologyselectors "sigs.k8s.io/cluster-api/internal/topology/selectors" "sigs.k8s.io/cluster-api/internal/topology/variables" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/version" @@ -76,8 +77,10 @@ type Cluster struct { decoder admission.Decoder } -var _ webhook.CustomDefaulter = &Cluster{} -var _ webhook.CustomValidator = &Cluster{} +var ( + _ webhook.CustomDefaulter = &Cluster{} + _ webhook.CustomValidator = &Cluster{} +) var errClusterClassNotReconciled = errors.New("ClusterClass is not successfully reconciled") @@ -649,10 +652,16 @@ func validateMachineHealthChecks(cluster *clusterv1.Cluster, clusterClass *clust fldPath := field.NewPath("spec", "topology", "controlPlane", "healthCheck") + cpClass, err := topologyselectors.ResolveControlPlaneClass(cluster, clusterClass) + if err != nil { + allErrs = append(allErrs, field.InternalError(fldPath, err)) + return allErrs + } + // Validate ControlPlane MachineHealthCheck if defined. if cluster.Spec.Topology.ControlPlane.HealthCheck.IsDefined() { - // Ensure ControlPlane does not define a MachineHealthCheck if the ClusterClass does not define MachineInfrastructure. - if !clusterClass.Spec.ControlPlane.MachineInfrastructure.TemplateRef.IsDefined() { + // Ensure ControlPlane does not define a MachineHealthCheck if the ControlPlaneClass does not define MachineInfrastructure. 
+ if !cpClass.MachineInfrastructure.TemplateRef.IsDefined() { allErrs = append(allErrs, field.Forbidden( fldPath, "can be only set if spec.controlPlane.machineInfrastructure is set in ClusterClass", @@ -669,7 +678,7 @@ func validateMachineHealthChecks(cluster *clusterv1.Cluster, clusterClass *clust // Check if the machineHealthCheck is explicitly enabled in the ControlPlaneTopology. if cluster.Spec.Topology.ControlPlane.HealthCheck.Enabled != nil && *cluster.Spec.Topology.ControlPlane.HealthCheck.Enabled { // Ensure the MHC is defined in at least one of the ControlPlaneTopology of the Cluster or the ControlPlaneClass of the ClusterClass. - if !cluster.Spec.Topology.ControlPlane.HealthCheck.IsDefined() && !clusterClass.Spec.ControlPlane.HealthCheck.IsDefined() { + if !cluster.Spec.Topology.ControlPlane.HealthCheck.IsDefined() && !cpClass.HealthCheck.IsDefined() { allErrs = append(allErrs, field.Forbidden( fldPath.Child("enable"), fmt.Sprintf("cannot be set to %t as healthCheck definition is not available in the Cluster topology or the ClusterClass", *cluster.Spec.Topology.ControlPlane.HealthCheck.Enabled), @@ -721,6 +730,16 @@ func machineDeploymentClassOfName(clusterClass *clusterv1.ClusterClass, name str return nil } +// syself change. +func controlPlaneClassOfName(clusterClass *clusterv1.ClusterClass, name string) *clusterv1.ControlPlaneClass { + for _, cpClass := range clusterClass.Spec.ControlPlaneClasses { + if cpClass.Class == name { + return &cpClass + } + } + return nil +} + // validateCIDRBlocks ensures the passed CIDR is valid. 
func validateCIDRBlocks(fldPath *field.Path, cidrs []string) field.ErrorList { var allErrs field.ErrorList @@ -888,6 +907,10 @@ func ValidateClusterForClusterClass(cluster *clusterv1.Cluster, clusterClass *cl if clusterClass == nil { return field.ErrorList{field.InternalError(field.NewPath(""), errors.New("ClusterClass can not be nil"))} } + + // syself change + allErrs = append(allErrs, check.ControlPlaneTopologyClassIsDefinedInClusterClass(cluster, clusterClass)...) + allErrs = append(allErrs, check.MachineDeploymentTopologiesAreValidAndDefinedInClusterClass(cluster, clusterClass)...) allErrs = append(allErrs, check.MachinePoolTopologiesAreValidAndDefinedInClusterClass(cluster, clusterClass)...) diff --git a/internal/webhooks/clusterclass.go b/internal/webhooks/clusterclass.go index d3d7185992bd..a733735efc21 100644 --- a/internal/webhooks/clusterclass.go +++ b/internal/webhooks/clusterclass.go @@ -114,7 +114,7 @@ func (webhook *ClusterClass) validate(ctx context.Context, oldClusterClass, newC var allErrs field.ErrorList // Ensure all template references are valid. - allErrs = append(allErrs, check.ClusterClassTemplatesAreValid(newClusterClass)...) + allErrs = append(allErrs, check.ClusterClassReferencesAreValid(newClusterClass)...) // Ensure all MachineDeployment classes are unique. allErrs = append(allErrs, check.MachineDeploymentClassesAreUnique(newClusterClass)...) @@ -207,6 +207,43 @@ func validateUpdatesToMachineHealthCheckClasses(clusters []clusterv1.Cluster, ol } } + // syself change + // For each ControlPlaneClass check if the MachineHealthCheck definition is dropped. + for _, newCPClass := range newClusterClass.Spec.ControlPlaneClasses { + oldCPClass := controlPlaneClassOfName(oldClusterClass, newCPClass.Class) + if oldCPClass == nil { + // New ControlPlaneClass. Nothing to validate. + continue + } + + // If the MachineHealthCheck was dropped then check that no cluster is using it. 
+ if oldCPClass.HealthCheck.IsDefined() && !newCPClass.HealthCheck.IsDefined() { + clustersUsingMHC := []string{} + + for _, cluster := range clusters { + if cluster.Spec.Topology.ControlPlane.Class != newCPClass.Class { + continue + } + + if cluster.Spec.Topology.ControlPlane.HealthCheck.Enabled != nil && + *cluster.Spec.Topology.ControlPlane.HealthCheck.Enabled && + !cluster.Spec.Topology.ControlPlane.HealthCheck.IsDefined() { + clustersUsingMHC = append(clustersUsingMHC, cluster.Name) + } + } + + if len(clustersUsingMHC) != 0 { + allErrs = append(allErrs, field.Forbidden( + field.NewPath("spec", "controlPlaneClasses").Key(newCPClass.Class).Child("machineHealthCheck"), + fmt.Sprintf( + "MachineHealthCheck cannot be deleted because it is used by Cluster(s) %q", + strings.Join(clustersUsingMHC, ","), + ), + )) + } + } + } + // For each MachineDeploymentClass check if the MachineHealthCheck definition is dropped. for _, newMdClass := range newClusterClass.Spec.Workers.MachineDeployments { oldMdClass := machineDeploymentClassOfName(oldClusterClass, newMdClass.Class) @@ -437,12 +474,34 @@ func validateNamingStrategies(clusterClass *clusterv1.ClusterClass) field.ErrorL } } - for _, md := range clusterClass.Spec.Workers.MachineDeployments { + // syself change + // Validate naming strategies for each control plane class + for i, cp := range clusterClass.Spec.ControlPlaneClasses { + if cp.Naming.Template == "" { + continue + } + name, err := topologynames.ControlPlaneNameGenerator(cp.Naming.Template, "cluster").GenerateName() + templateFldPath := field.NewPath("spec", "controlPlaneClasses").Index(i).Child("namingStrategy", "template") + if err != nil { + allErrs = append(allErrs, + field.Invalid( + templateFldPath, + cp.Naming.Template, + fmt.Sprintf("invalid ControlPlaneClass name template: %v", err), + )) + } else { + for _, err := range validation.IsDNS1123Subdomain(name) { + allErrs = append(allErrs, field.Invalid(templateFldPath, cp.Naming.Template, err)) + } + } + } 
+ + for i, md := range clusterClass.Spec.Workers.MachineDeployments { if md.Naming.Template == "" { continue } name, err := topologynames.MachineDeploymentNameGenerator(md.Naming.Template, "cluster", "mdtopology").GenerateName() - templateFldPath := field.NewPath("spec", "workers", "machineDeployments").Key(md.Class).Child("naming", "template") + templateFldPath := field.NewPath("spec", "workers", "machineDeployments").Index(i).Child("namingStrategy", "template") if err != nil { allErrs = append(allErrs, field.Invalid( @@ -486,6 +545,15 @@ func validateClusterClassMetadata(clusterClass *clusterv1.ClusterClass) field.Er for _, m := range clusterClass.Spec.Workers.MachineDeployments { allErrs = append(allErrs, m.Metadata.Validate(field.NewPath("spec", "workers", "machineDeployments").Key(m.Class).Child("template", "metadata"))...) } + + // syself change + // Validate metadata for each control plane class + for i, cp := range clusterClass.Spec.ControlPlaneClasses { + allErrs = append(allErrs, + cp.Metadata.Validate( + field.NewPath("spec", "controlPlaneClasses").Index(i).Child("metadata"))...) + } + for _, m := range clusterClass.Spec.Workers.MachinePools { allErrs = append(allErrs, m.Metadata.Validate(field.NewPath("spec", "workers", "machinePools").Key(m.Class).Child("template", "metadata"))...) } diff --git a/internal/webhooks/patch_validation.go b/internal/webhooks/patch_validation.go index a1d447bbb714..1b1933567f2d 100644 --- a/internal/webhooks/patch_validation.go +++ b/internal/webhooks/patch_validation.go @@ -167,6 +167,7 @@ func validateSelectors(selector clusterv1.PatchSelector, class *clusterv1.Cluste // Return an error if none of the possible selectors are enabled. 
if !ptr.Deref(selector.MatchResources.InfrastructureCluster, false) && !ptr.Deref(selector.MatchResources.ControlPlane, false) && + (selector.MatchResources.ControlPlaneClass == nil || len(selector.MatchResources.ControlPlaneClass.Names) == 0) && (selector.MatchResources.MachineDeploymentClass == nil || len(selector.MatchResources.MachineDeploymentClass.Names) == 0) && (selector.MatchResources.MachinePoolClass == nil || len(selector.MatchResources.MachinePoolClass.Names) == 0) { return append(allErrs, @@ -205,6 +206,47 @@ func validateSelectors(selector clusterv1.PatchSelector, class *clusterv1.Cluste } } + // Validate selectors for control plane classes + // syself change. + if selector.MatchResources.ControlPlaneClass != nil && len(selector.MatchResources.ControlPlaneClass.Names) > 0 { + for i, name := range selector.MatchResources.ControlPlaneClass.Names { + match := false + err := validateSelectorName(name, path, "controlPlaneClass", i) + if err != nil { + allErrs = append(allErrs, err) + break + } + for _, cp := range class.Spec.ControlPlaneClasses { + var matches bool + // "*" matches every control plane class (apply patch to all classes). + // "*suffix" matches any class whose name ends with "suffix". + // "prefix*" matches any class whose name starts with "prefix". 
+ if cp.Class == name || name == "*" { + matches = true + } else if strings.HasPrefix(name, "*") && strings.HasSuffix(cp.Class, strings.TrimPrefix(name, "*")) { + matches = true + } else if strings.HasSuffix(name, "*") && strings.HasPrefix(cp.Class, strings.TrimSuffix(name, "*")) { + matches = true + } + + if matches { + if selectorMatchTemplate(selector, cp.TemplateRef) || + selectorMatchTemplate(selector, cp.MachineInfrastructure.TemplateRef) { + match = true + break + } + } + } + if !match { + allErrs = append(allErrs, field.Invalid( + path.Child("matchResources", "controlPlaneClass", "names").Index(i), + name, + "selector is enabled but matches neither the controlPlane ref nor the controlPlane machineInfrastructure ref of a ControlPlane class", + )) + } + } + } + if selector.MatchResources.MachineDeploymentClass != nil && len(selector.MatchResources.MachineDeploymentClass.Names) > 0 { for i, name := range selector.MatchResources.MachineDeploymentClass.Names { match := false