diff --git a/manifests/supervisorcluster/1.31/cns-csi.yaml b/manifests/supervisorcluster/1.31/cns-csi.yaml index ef257889bf..30d7043cc1 100644 --- a/manifests/supervisorcluster/1.31/cns-csi.yaml +++ b/manifests/supervisorcluster/1.31/cns-csi.yaml @@ -33,6 +33,9 @@ rules: - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattributesclasses"] + verbs: ["get", "list", "watch"] - apiGroups: ["storage.k8s.io"] resources: ["csinodes"] verbs: ["get", "list", "watch", "create", "delete"] @@ -46,8 +49,11 @@ rules: resources: ["volumeattachments/status"] verbs: ["patch"] - apiGroups: ["cns.vmware.com"] - resources: ["cnsvolumemetadatas", "cnsfileaccessconfigs", "cnsfileaccessconfigs/status"] + resources: ["cnsvolumemetadatas", "cnsfileaccessconfigs", "cnsfileaccessconfigs/status", "clusterstoragepolicyinfoes", "clusterstoragepolicyinfoes/status"] verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["cns.vmware.com"] + resources: ["clusterstoragepolicyinfoes"] + verbs: ["create"] - apiGroups: ["cns.vmware.com"] resources: ["cnsnodevmattachments", "cnsnodevmbatchattachments", "cnsnodevmbatchattachments/status", "cnsnodevmattachments/status"] verbs: ["get", "list", "watch", "update", "patch"] diff --git a/manifests/supervisorcluster/1.32/cns-csi.yaml b/manifests/supervisorcluster/1.32/cns-csi.yaml index cd2ce0bac9..cac5c0982e 100644 --- a/manifests/supervisorcluster/1.32/cns-csi.yaml +++ b/manifests/supervisorcluster/1.32/cns-csi.yaml @@ -33,6 +33,9 @@ rules: - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattributesclasses"] + verbs: ["get", "list", "watch"] - apiGroups: ["storage.k8s.io"] resources: ["csinodes"] verbs: ["get", "list", "watch", "create", "delete"] @@ -46,8 +49,11 @@ rules: resources: ["volumeattachments/status"] verbs: ["patch"] - 
apiGroups: ["cns.vmware.com"] - resources: ["cnsvolumemetadatas", "cnsfileaccessconfigs", "cnsfileaccessconfigs/status"] + resources: ["cnsvolumemetadatas", "cnsfileaccessconfigs", "cnsfileaccessconfigs/status", "clusterstoragepolicyinfoes", "clusterstoragepolicyinfoes/status"] verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["cns.vmware.com"] + resources: ["clusterstoragepolicyinfoes"] + verbs: ["create"] - apiGroups: ["cns.vmware.com"] resources: ["cnsnodevmattachments", "cnsnodevmbatchattachments", "cnsnodevmbatchattachments/status", "cnsnodevmattachments/status"] verbs: ["get", "list", "watch", "update", "patch"] diff --git a/manifests/supervisorcluster/1.33/cns-csi.yaml b/manifests/supervisorcluster/1.33/cns-csi.yaml index cd2ce0bac9..cac5c0982e 100644 --- a/manifests/supervisorcluster/1.33/cns-csi.yaml +++ b/manifests/supervisorcluster/1.33/cns-csi.yaml @@ -33,6 +33,9 @@ rules: - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattributesclasses"] + verbs: ["get", "list", "watch"] - apiGroups: ["storage.k8s.io"] resources: ["csinodes"] verbs: ["get", "list", "watch", "create", "delete"] @@ -46,8 +49,11 @@ rules: resources: ["volumeattachments/status"] verbs: ["patch"] - apiGroups: ["cns.vmware.com"] - resources: ["cnsvolumemetadatas", "cnsfileaccessconfigs", "cnsfileaccessconfigs/status"] + resources: ["cnsvolumemetadatas", "cnsfileaccessconfigs", "cnsfileaccessconfigs/status", "clusterstoragepolicyinfoes", "clusterstoragepolicyinfoes/status"] verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["cns.vmware.com"] + resources: ["clusterstoragepolicyinfoes"] + verbs: ["create"] - apiGroups: ["cns.vmware.com"] resources: ["cnsnodevmattachments", "cnsnodevmbatchattachments", "cnsnodevmbatchattachments/status", "cnsnodevmattachments/status"] verbs: ["get", "list", "watch", "update", "patch"] diff --git 
a/manifests/supervisorcluster/1.34/cns-csi.yaml b/manifests/supervisorcluster/1.34/cns-csi.yaml index cd2ce0bac9..cac5c0982e 100644 --- a/manifests/supervisorcluster/1.34/cns-csi.yaml +++ b/manifests/supervisorcluster/1.34/cns-csi.yaml @@ -33,6 +33,9 @@ rules: - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattributesclasses"] + verbs: ["get", "list", "watch"] - apiGroups: ["storage.k8s.io"] resources: ["csinodes"] verbs: ["get", "list", "watch", "create", "delete"] @@ -46,8 +49,11 @@ rules: resources: ["volumeattachments/status"] verbs: ["patch"] - apiGroups: ["cns.vmware.com"] - resources: ["cnsvolumemetadatas", "cnsfileaccessconfigs", "cnsfileaccessconfigs/status"] + resources: ["cnsvolumemetadatas", "cnsfileaccessconfigs", "cnsfileaccessconfigs/status", "clusterstoragepolicyinfoes", "clusterstoragepolicyinfoes/status"] verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["cns.vmware.com"] + resources: ["clusterstoragepolicyinfoes"] + verbs: ["create"] - apiGroups: ["cns.vmware.com"] resources: ["cnsnodevmattachments", "cnsnodevmbatchattachments", "cnsnodevmbatchattachments/status", "cnsnodevmattachments/status"] verbs: ["get", "list", "watch", "update", "patch"] diff --git a/pkg/apis/cnsoperator/clusterstoragepolicyinfo/v1alpha1/clusterstoragepolicyinfo_types.go b/pkg/apis/cnsoperator/clusterstoragepolicyinfo/v1alpha1/clusterstoragepolicyinfo_types.go new file mode 100644 index 0000000000..ceddcc6ddb --- /dev/null +++ b/pkg/apis/cnsoperator/clusterstoragepolicyinfo/v1alpha1/clusterstoragepolicyinfo_types.go @@ -0,0 +1,120 @@ +/* +Copyright 2026 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// VolumeCapability describes capabilities of the volume created with the given policy. +// +kubebuilder:validation:Enum=PersistentVolumeBlock;PersistentVolumeFilesystem;HighPerformanceLinkedClone +type VolumeCapability string + +const ( + // VolumeCapabilityPersistentVolumeBlock indicates block volumes are supported. + VolumeCapabilityPersistentVolumeBlock VolumeCapability = "PersistentVolumeBlock" + // VolumeCapabilityPersistentVolumeFilesystem indicates file volumes are supported. + VolumeCapabilityPersistentVolumeFilesystem VolumeCapability = "PersistentVolumeFilesystem" + // VolumeCapabilityHighPerformanceLinkedClone indicates linked-clone / fast provisioning style volumes. + VolumeCapabilityHighPerformanceLinkedClone VolumeCapability = "HighPerformanceLinkedClone" +) + +// ClusterStoragePolicyInfoSpec defines the desired state of ClusterStoragePolicyInfo. +// It is used to store the information about the storage policy that is exposed to Devops users. +// +k8s:openapi-gen=true +type ClusterStoragePolicyInfoSpec struct { + // K8sCompliantName is the Kubernetes-compliant name the VI admin chose when creating the policy. + // +required + K8sCompliantName string `json:"k8sCompliantName"` +} + +// ClusterStoragePolicyInfoStatus defines the observed state of ClusterStoragePolicyInfo. +// +k8s:openapi-gen=true +type ClusterStoragePolicyInfoStatus struct { + // StoragePolicyDeleted indicates whether the underlying storagepolicy is deleted or not on the VC. 
+ // +optional + StoragePolicyDeleted bool `json:"storagePolicyDeleted,omitempty"` + + // VolumeCapabilities describes the supported volume capabilities. + // +optional + VolumeCapabilities []VolumeCapability `json:"volumeCapabilities,omitempty"` + + // Performance describes performance characteristics (vSAN only). + // +optional + Performance *Performance `json:"performance,omitempty"` + + // Encryption describes encryption-related status for the storage policy. + // +optional + Encryption *Encryption `json:"encryption,omitempty"` + + // Error describes the error encountered while retrieving the storage policy info. + // +optional + Error string `json:"error,omitempty"` +} + +// Encryption describes encryption capabilities. +type Encryption struct { + // SupportsEncryption indicates whether the storage policy supports encryption. + // +optional + SupportsEncryption bool `json:"supportsEncryption,omitempty"` + + // EncryptionType indicates the type of encryption. + // +optional + // +kubebuilder:validation:items:Enum=vm-encryption;vsan-encryption + EncryptionType []string `json:"encryptionType,omitempty"` + + // Error describes the error encountered while retrieving the encryption info. + // +optional + Error string `json:"error,omitempty"` +} + +// Performance describes performance characteristics (vSAN only). +type Performance struct { + // IopsLimit is the IOPS limit for volumes created from this storage class (vSAN ESA only). + // +optional + IopsLimit *int64 `json:"iopsLimit"` + + // Error describes the error encountered while retrieving the performance info. 
+ // +optional + Error string `json:"error,omitempty"` +} + +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=clusterstoragepolicyinfoes,scope=Cluster,shortName=clusterspi +// +kubebuilder:object:root=true + +// ClusterStoragePolicyInfo is the Schema for the clusterstoragepolicyinfoes API. +type ClusterStoragePolicyInfo struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterStoragePolicyInfoSpec `json:"spec,omitempty"` + Status ClusterStoragePolicyInfoStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterStoragePolicyInfoList contains a list of ClusterStoragePolicyInfo. +type ClusterStoragePolicyInfoList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterStoragePolicyInfo `json:"items"` +} diff --git a/pkg/apis/cnsoperator/clusterstoragepolicyinfo/v1alpha1/doc.go b/pkg/apis/cnsoperator/clusterstoragepolicyinfo/v1alpha1/doc.go new file mode 100644 index 0000000000..569a81aceb --- /dev/null +++ b/pkg/apis/cnsoperator/clusterstoragepolicyinfo/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2026 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +// +k8s:deepcopy-gen=package +// +k8s:defaulter-gen=TypeMeta +// +groupName=cns.vmware.com + +package v1alpha1 diff --git a/pkg/apis/cnsoperator/clusterstoragepolicyinfo/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/cnsoperator/clusterstoragepolicyinfo/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..a2a925c9cb --- /dev/null +++ b/pkg/apis/cnsoperator/clusterstoragepolicyinfo/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,169 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2026 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStoragePolicyInfo) DeepCopyInto(out *ClusterStoragePolicyInfo) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStoragePolicyInfo. +func (in *ClusterStoragePolicyInfo) DeepCopy() *ClusterStoragePolicyInfo { + if in == nil { + return nil + } + out := new(ClusterStoragePolicyInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterStoragePolicyInfo) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStoragePolicyInfoList) DeepCopyInto(out *ClusterStoragePolicyInfoList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterStoragePolicyInfo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStoragePolicyInfoList. +func (in *ClusterStoragePolicyInfoList) DeepCopy() *ClusterStoragePolicyInfoList { + if in == nil { + return nil + } + out := new(ClusterStoragePolicyInfoList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterStoragePolicyInfoList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStoragePolicyInfoSpec) DeepCopyInto(out *ClusterStoragePolicyInfoSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStoragePolicyInfoSpec. +func (in *ClusterStoragePolicyInfoSpec) DeepCopy() *ClusterStoragePolicyInfoSpec { + if in == nil { + return nil + } + out := new(ClusterStoragePolicyInfoSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterStoragePolicyInfoStatus) DeepCopyInto(out *ClusterStoragePolicyInfoStatus) { + *out = *in + if in.VolumeCapabilities != nil { + in, out := &in.VolumeCapabilities, &out.VolumeCapabilities + *out = make([]VolumeCapability, len(*in)) + copy(*out, *in) + } + if in.Performance != nil { + in, out := &in.Performance, &out.Performance + *out = new(Performance) + (*in).DeepCopyInto(*out) + } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(Encryption) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStoragePolicyInfoStatus. +func (in *ClusterStoragePolicyInfoStatus) DeepCopy() *ClusterStoragePolicyInfoStatus { + if in == nil { + return nil + } + out := new(ClusterStoragePolicyInfoStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Encryption) DeepCopyInto(out *Encryption) { + *out = *in + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Encryption. +func (in *Encryption) DeepCopy() *Encryption { + if in == nil { + return nil + } + out := new(Encryption) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Performance) DeepCopyInto(out *Performance) { + *out = *in + if in.IopsLimit != nil { + in, out := &in.IopsLimit, &out.IopsLimit + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Performance. 
+func (in *Performance) DeepCopy() *Performance { + if in == nil { + return nil + } + out := new(Performance) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/apis/cnsoperator/config/cns.vmware.com_clusterstoragepolicyinfoes.yaml b/pkg/apis/cnsoperator/config/cns.vmware.com_clusterstoragepolicyinfoes.yaml new file mode 100644 index 0000000000..4c074cdb1b --- /dev/null +++ b/pkg/apis/cnsoperator/config/cns.vmware.com_clusterstoragepolicyinfoes.yaml @@ -0,0 +1,123 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: clusterstoragepolicyinfoes.cns.vmware.com +spec: + group: cns.vmware.com + names: + kind: ClusterStoragePolicyInfo + listKind: ClusterStoragePolicyInfoList + plural: clusterstoragepolicyinfoes + shortNames: + - clusterspi + singular: clusterstoragepolicyinfo + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterStoragePolicyInfo is the Schema for the clusterstoragepolicyinfoes + API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + ClusterStoragePolicyInfoSpec defines the desired state of ClusterStoragePolicyInfo. 
+ It is used to store the information about the storage policy that is exposed to Devops users. + properties: + k8sCompliantName: + description: K8sCompliantName is the Kubernetes-compliant name the + VI admin chose when creating the policy. + type: string + required: + - k8sCompliantName + type: object + status: + description: ClusterStoragePolicyInfoStatus defines the observed state + of ClusterStoragePolicyInfo. + properties: + encryption: + description: Encryption describes encryption-related status for the + storage policy. + properties: + encryptionType: + description: EncryptionType indicates the type of encryption. + items: + enum: + - vm-encryption + - vsan-encryption + type: string + type: array + error: + description: Error describes the error encountered while retrieving + the encryption info. + type: string + supportsEncryption: + description: SupportsEncryption indicates whether the storage + policy supports encryption. + type: boolean + type: object + error: + description: Error describes the error encountered while retrieving + the storage policy info. + type: string + performance: + description: Performance describes performance characteristics (vSAN + only). + properties: + error: + description: Error describes the error encountered while retrieving + the performance info. + type: string + iopsLimit: + description: IopsLimit is the IOPS limit for volumes created from + this storage class (vSAN ESA only). + format: int64 + type: integer + type: object + storagePolicyDeleted: + description: StoragePolicyDeleted indicates whether the underlying + storagepolicy is deleted or not on the VC. + type: boolean + volumeCapabilities: + description: VolumeCapabilities describes the supported volume capabilities. + items: + description: VolumeCapability describes capabilities of the volume + created with the given policy. 
+ enum: + - PersistentVolumeBlock + - PersistentVolumeFilesystem + - HighPerformanceLinkedClone + type: string + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/pkg/apis/cnsoperator/config/config.go b/pkg/apis/cnsoperator/config/config.go index 12dd335afb..cbed482358 100644 --- a/pkg/apis/cnsoperator/config/config.go +++ b/pkg/apis/cnsoperator/config/config.go @@ -41,3 +41,8 @@ const EmbedStoragePolicyQuotaCRFileName = "cns.vmware.com_storagepolicyquotas.ya var EmbedStoragePolicyUsageCRFile embed.FS const EmbedStoragePolicyUsageCRFileName = "cns.vmware.com_storagepolicyusages.yaml" + +//go:embed cns.vmware.com_clusterstoragepolicyinfoes.yaml +var EmbedClusterStoragePolicyInfoCRFile embed.FS + +const EmbedClusterStoragePolicyInfoCRFileName = "cns.vmware.com_clusterstoragepolicyinfoes.yaml" diff --git a/pkg/apis/cnsoperator/register.go b/pkg/apis/cnsoperator/register.go index cfddb142da..7d919028e7 100644 --- a/pkg/apis/cnsoperator/register.go +++ b/pkg/apis/cnsoperator/register.go @@ -25,6 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + clusterstoragepolicyinfov1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/clusterstoragepolicyinfo/v1alpha1" cnsfileaccessconfigv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsfileaccessconfig/v1alpha1" cnsnodevmattachmentv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsnodevmattachment/v1alpha1" cnsnodevmbatchattachmentv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsnodevmbatchattachment/v1alpha1" @@ -79,6 +80,8 @@ var ( CnsVirtualMachineSnapshotSingular = "vitualmachinesnapshot" // CnsVirtualMachineSnapshotPlural is plural of VirtualMachineSnapshot CnsVirtualMachineSnapshotPlural = "vitualmachinesnapshots" + // 
ClusterStoragePolicyInfoPlural is plural of ClusterStoragePolicyInfo + ClusterStoragePolicyInfoPlural = "clusterstoragepolicyinfoes" ) var ( @@ -137,6 +140,12 @@ func addKnownTypes(scheme *runtime.Scheme) error { &cnsnodevmbatchattachmentv1alpha1.CnsNodeVMBatchAttachmentList{}, ) + scheme.AddKnownTypes( + SchemeGroupVersion, + &clusterstoragepolicyinfov1alpha1.ClusterStoragePolicyInfo{}, + &clusterstoragepolicyinfov1alpha1.ClusterStoragePolicyInfoList{}, + ) + scheme.AddKnownTypes( SchemeGroupVersion, &storagepolicyv1alpha1.StoragePolicyQuota{}, diff --git a/pkg/csi/service/common/constants.go b/pkg/csi/service/common/constants.go index 4807a15e94..0852e4a63c 100644 --- a/pkg/csi/service/common/constants.go +++ b/pkg/csi/service/common/constants.go @@ -490,23 +490,27 @@ const ( // HighPVNodeDensity is an FSS for guest cluster nodes that, when enabled, // raises MAX_VOLUMES_PER_NODE from 59 to 255 in NodeGetInfo responses. HighPVNodeDensity = "high-pv-node-density" + // SupportsExposingStoragePolicyAttributes is the supervisor capability that gates exposing + // storage policy attributes to devops users. 
+ SupportsExposingStoragePolicyAttributes = "supports_exposing_storage_policy_attributes" ) var WCPFeatureStates = map[string]struct{}{ - PodVMOnStretchedSupervisor: {}, - CSIDetachOnSupervisor: {}, - WorkloadDomainIsolation: {}, - VPCCapabilitySupervisor: {}, - VolFromSnapshotOnTargetDs: {}, - SharedDiskFss: {}, - LinkedCloneSupport: {}, - WCPMobilityNonDisruptiveImport: {}, - WCPVMServiceVMSnapshots: {}, - BYOKEncryption: {}, - FCDTransactionSupport: {}, - MultipleClustersPerVsphereZone: {}, - FileVolumesWithVmService: {}, - VsanFileVolumeService: {}, + PodVMOnStretchedSupervisor: {}, + CSIDetachOnSupervisor: {}, + WorkloadDomainIsolation: {}, + VPCCapabilitySupervisor: {}, + VolFromSnapshotOnTargetDs: {}, + SharedDiskFss: {}, + LinkedCloneSupport: {}, + WCPMobilityNonDisruptiveImport: {}, + WCPVMServiceVMSnapshots: {}, + BYOKEncryption: {}, + FCDTransactionSupport: {}, + MultipleClustersPerVsphereZone: {}, + FileVolumesWithVmService: {}, + VsanFileVolumeService: {}, + SupportsExposingStoragePolicyAttributes: {}, } // WCPFeatureStatesSupportsLateEnablement contains capabilities that can be enabled later @@ -514,14 +518,15 @@ var WCPFeatureStates = map[string]struct{}{ // During FSS check if driver detects that the capabilities is disabled in the cached configmap, // it will re-fetch the configmap and update the cached configmap. 
var WCPFeatureStatesSupportsLateEnablement = map[string]struct{}{ - WorkloadDomainIsolation: {}, - LinkedCloneSupport: {}, - MultipleClustersPerVsphereZone: {}, - WCPVMServiceVMSnapshots: {}, - BYOKEncryption: {}, - SharedDiskFss: {}, - FileVolumesWithVmService: {}, - VsanFileVolumeService: {}, + WorkloadDomainIsolation: {}, + LinkedCloneSupport: {}, + MultipleClustersPerVsphereZone: {}, + WCPVMServiceVMSnapshots: {}, + BYOKEncryption: {}, + SharedDiskFss: {}, + FileVolumesWithVmService: {}, + VsanFileVolumeService: {}, + SupportsExposingStoragePolicyAttributes: {}, } // WCPFeatureAssociatedWithPVCSI contains FSS name used in PVCSI and associated WCP Capability name on a diff --git a/pkg/syncer/cnsoperator/controller/add_clusterstoragepolicyinfo.go b/pkg/syncer/cnsoperator/controller/add_clusterstoragepolicyinfo.go new file mode 100644 index 0000000000..7dbee35cbc --- /dev/null +++ b/pkg/syncer/cnsoperator/controller/add_clusterstoragepolicyinfo.go @@ -0,0 +1,25 @@ +/* +Copyright 2026 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "sigs.k8s.io/vsphere-csi-driver/v3/pkg/syncer/cnsoperator/controller/clusterstoragepolicyinfo" +) + +func init() { + AddToManagerFuncs = append(AddToManagerFuncs, clusterstoragepolicyinfo.Add) +} diff --git a/pkg/syncer/cnsoperator/controller/clusterstoragepolicyinfo/clusterstoragepolicyinfo_controller.go b/pkg/syncer/cnsoperator/controller/clusterstoragepolicyinfo/clusterstoragepolicyinfo_controller.go new file mode 100644 index 0000000000..91ed959512 --- /dev/null +++ b/pkg/syncer/cnsoperator/controller/clusterstoragepolicyinfo/clusterstoragepolicyinfo_controller.go @@ -0,0 +1,316 @@ +/* +Copyright 2026 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package clusterstoragepolicyinfo + +import ( + "context" + + cnstypes "github.com/vmware/govmomi/cns/types" + v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + apitypes "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + apis "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator" + clusterspiv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/clusterstoragepolicyinfo/v1alpha1" + volumes "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/volume" + "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/config" + "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/common" + "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/common/commonco" + "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger" + k8s "sigs.k8s.io/vsphere-csi-driver/v3/pkg/kubernetes" + "sigs.k8s.io/vsphere-csi-driver/v3/pkg/syncer/cnsoperator/util" +) + +const ( + workerThreadsEnvVar = "WORKER_THREADS_CLUSTER_STORAGE_POLICY_INFO" + defaultMaxWorkerThreads = 4 +) + +// Add registers the ClusterStoragePolicyInfo controller with the Manager (WCP / Workload only). 
+func Add(mgr manager.Manager, clusterFlavor cnstypes.CnsClusterFlavor, + configInfo *config.ConfigurationInfo, _ volumes.Manager) error { + ctx, log := logger.GetNewContextWithLogger() + if clusterFlavor != cnstypes.CnsClusterFlavorWorkload { + // TODO: update this check when we add support for VKS also. + log.Debug("Not initializing the ClusterStoragePolicyInfo Controller: unsupported cluster flavor") + return nil + } + if !commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.SupportsExposingStoragePolicyAttributes) { + log.Infof("Not initializing the ClusterStoragePolicyInfo Controller: capability %q is not activated", + common.SupportsExposingStoragePolicyAttributes) + return nil + } + + k8sclient, err := k8s.NewClient(ctx) + if err != nil { + log.Errorf("creating Kubernetes client failed. Err: %v", err) + return err + } + + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartRecordingToSink( + &typedcorev1.EventSinkImpl{ + Interface: k8sclient.CoreV1().Events(""), + }, + ) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: apis.GroupName}) + return add(mgr, newReconciler(mgr, configInfo, recorder)) +} + +func newReconciler(mgr manager.Manager, configInfo *config.ConfigurationInfo, + recorder record.EventRecorder) *ReconcileClusterStoragePolicyInfo { + return &ReconcileClusterStoragePolicyInfo{ + client: mgr.GetClient(), + scheme: mgr.GetScheme(), + configInfo: configInfo, + recorder: recorder, + } +} + +func add(mgr manager.Manager, r *ReconcileClusterStoragePolicyInfo) error { + ctx, log := logger.GetNewContextWithLogger() + maxWorkerThreads := util.GetMaxWorkerThreads(ctx, workerThreadsEnvVar, defaultMaxWorkerThreads) + scVacPredicates := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return e.Object != nil + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return false + }, + DeleteFunc: func(_ event.DeleteEvent) bool { + return false + }, + } + + blder := 
ctrl.NewControllerManagedBy(mgr).Named("clusterstoragepolicyinfo-controller"). + For(&clusterspiv1alpha1.ClusterStoragePolicyInfo{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). + Watches( + &storagev1.StorageClass{}, + handler.EnqueueRequestsFromMapFunc(r.mapStorageClassToClusterSPI), + builder.WithPredicates(scVacPredicates), + ) + + vacSupported, vacErr := volumeAttributesClassAPIAvailable(mgr) + if vacErr != nil { + log.Warnf("Could not discover VolumeAttributesClass API; skipping VAC watch. Err: %v", vacErr) + } else if !vacSupported { + log.Infof("VolumeAttributesClass API not registered on this cluster; skipping VAC watch") + } else { + log.Infof("VolumeAttributesClass API available; registering VAC watch") + blder = blder.Watches( + &storagev1.VolumeAttributesClass{}, + handler.EnqueueRequestsFromMapFunc(r.mapVolumeAttributesClassToClusterSPI), + builder.WithPredicates(scVacPredicates), + ) + } + + err := blder.WithOptions(controller.Options{MaxConcurrentReconciles: maxWorkerThreads}). + Complete(r) + if err != nil { + log.Errorf("failed to build clusterstoragepolicyinfo controller. Err: %v", err) + return err + } + + return nil +} + +// mapStorageClassToClusterSPI maps a StorageClass to a ClusterStoragePolicyInfo. +func (r *ReconcileClusterStoragePolicyInfo) mapStorageClassToClusterSPI(ctx context.Context, + obj client.Object) []reconcile.Request { + sc, ok := obj.(*storagev1.StorageClass) + if !ok { + return nil + } + if storageClassIsWaitForFirstConsumer(sc) { + ctx = logger.NewContextWithLogger(ctx) + logger.GetLogger(ctx).Debugf( + "skip ClusterStoragePolicyInfo for StorageClass %q: WaitForFirstConsumer volumeBindingMode", + sc.Name) + return nil + } + return r.ensureClusterSPIExists(ctx, sc.Name, "StorageClass", sc) +} + +// mapVolumeAttributesClassToClusterSPI maps a VolumeAttributesClass to a ClusterStoragePolicyInfo. 
+func (r *ReconcileClusterStoragePolicyInfo) mapVolumeAttributesClassToClusterSPI(ctx context.Context,
+	obj client.Object) []reconcile.Request {
+	vac, ok := obj.(*storagev1.VolumeAttributesClass)
+	if !ok {
+		return nil
+	}
+	return r.ensureClusterSPIExists(ctx, vac.Name, "VolumeAttributesClass", vac)
+}
+
+// generateOwnerReference returns an OwnerReference for the given client.Object.
+// Controller and BlockOwnerDeletion are both false: the SC/VAC is a plain
+// (non-controlling) owner and stays independently deletable.
+func (r *ReconcileClusterStoragePolicyInfo) generateOwnerReference(owner client.Object) (metav1.OwnerReference, error) {
+	gvk, err := apiutil.GVKForObject(owner, r.scheme)
+	if err != nil {
+		return metav1.OwnerReference{}, err
+	}
+	controller := false
+	block := false
+	return metav1.OwnerReference{
+		APIVersion:         gvk.GroupVersion().String(),
+		Kind:               gvk.Kind,
+		Name:               owner.GetName(),
+		UID:                owner.GetUID(),
+		Controller:         &controller,
+		BlockOwnerDeletion: &block,
+	}, nil
+}
+
+// ensureOwnerReferenceOnClusterSPI ensures ownerReference is present on the given ClusterStoragePolicyInfo.
+// It is a no-op when the reference is already merged; otherwise it issues a merge patch.
+func (r *ReconcileClusterStoragePolicyInfo) ensureOwnerReferenceOnClusterSPI(ctx context.Context,
+	clusterspi *clusterspiv1alpha1.ClusterStoragePolicyInfo, ownerRef metav1.OwnerReference, kind string) error {
+	ctx = logger.NewContextWithLogger(ctx)
+	log := logger.GetLogger(ctx)
+	merged := mergeOwnerReference(clusterspi.OwnerReferences, ownerRef)
+	if equality.Semantic.DeepEqual(clusterspi.OwnerReferences, merged) {
+		return nil
+	}
+	base := clusterspi.DeepCopy()
+	clusterspi.OwnerReferences = merged
+	if err := r.client.Patch(ctx, clusterspi, client.MergeFrom(base)); err != nil {
+		return err
+	}
+	log.Infof("Updated ClusterStoragePolicyInfo %q ownerReferences (added %s/%s)",
+		clusterspi.Name, ownerRef.Kind, ownerRef.Name)
+	return nil
+}
+
+// ensureClusterSPIExists returns a reconcile request for the ClusterStoragePolicyInfo
+// named like the StorageClass/VAC (same name by convention). Creates the CR if missing and
+// ensures an ownerReference to the triggering StorageClass or VolumeAttributesClass.
+// On any error it logs and returns nil (no request is enqueued).
+func (r *ReconcileClusterStoragePolicyInfo) ensureClusterSPIExists(ctx context.Context,
+	name, kind string, owner client.Object) []reconcile.Request {
+	ctx = logger.NewContextWithLogger(ctx)
+	log := logger.GetLogger(ctx)
+	if name == "" || owner == nil {
+		return nil
+	}
+	ownerRef, err := r.generateOwnerReference(owner)
+	if err != nil {
+		log.Errorf("ownerReference for %s %q: %v", kind, name, err)
+		return nil
+	}
+	namespacedName := apitypes.NamespacedName{Name: name}
+	clusterspi := &clusterspiv1alpha1.ClusterStoragePolicyInfo{}
+	err = r.client.Get(ctx, namespacedName, clusterspi)
+	if err == nil {
+		// CR already present: only make sure this SC/VAC is recorded as owner.
+		if err := r.ensureOwnerReferenceOnClusterSPI(ctx, clusterspi, ownerRef, kind); err != nil {
+			log.Errorf("Failed to patch ownerReferences on ClusterStoragePolicyInfo %q for %s: %v", name, kind, err)
+			return nil
+		}
+		return []reconcile.Request{{NamespacedName: namespacedName}}
+	}
+	if !apierrors.IsNotFound(err) {
+		log.Errorf("Failed to get ClusterStoragePolicyInfo %q for %s. Err %v", name, kind, err)
+		return nil
+	}
+	return r.createClusterSPIWithOwner(ctx, name, kind, ownerRef, namespacedName)
+}
+
+// createClusterSPIWithOwner creates a ClusterStoragePolicyInfo with ownerRef, or if it already
+// exists loads it and ensures ownerRef is merged into ownerReferences.
+func (r *ReconcileClusterStoragePolicyInfo) createClusterSPIWithOwner(ctx context.Context,
+	name, kind string, ownerRef metav1.OwnerReference, namespacedName apitypes.NamespacedName) []reconcile.Request {
+	log := logger.GetLogger(ctx)
+	newClusterSPI := &clusterspiv1alpha1.ClusterStoragePolicyInfo{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: apis.SchemeGroupVersion.String(),
+			Kind:       "ClusterStoragePolicyInfo",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            name,
+			OwnerReferences: []metav1.OwnerReference{ownerRef},
+		},
+		Spec: clusterspiv1alpha1.ClusterStoragePolicyInfoSpec{
+			K8sCompliantName: name,
+		},
+	}
+	if err := r.client.Create(ctx, newClusterSPI); err != nil {
+		// AlreadyExists can race with a concurrent mapper invocation; fall
+		// back to merging our ownerReference into the existing object.
+		if apierrors.IsAlreadyExists(err) {
+			clusterspi := &clusterspiv1alpha1.ClusterStoragePolicyInfo{}
+			if err := r.client.Get(ctx, namespacedName, clusterspi); err != nil {
+				log.Errorf("get ClusterStoragePolicyInfo %q after AlreadyExists: %v", name, err)
+				return nil
+			}
+			if err := r.ensureOwnerReferenceOnClusterSPI(ctx, clusterspi, ownerRef, kind); err != nil {
+				log.Errorf("Failed to patch ownerReferences on ClusterStoragePolicyInfo %q for %s: %v", name, kind, err)
+				return nil
+			}
+			return []reconcile.Request{{NamespacedName: namespacedName}}
+		}
+		log.Errorf("create ClusterStoragePolicyInfo %q from %s: %v", name, kind, err)
+		return nil
+	}
+	log.Infof("Created ClusterStoragePolicyInfo %q (from %s)", name, kind)
+	return []reconcile.Request{{NamespacedName: namespacedName}}
+}
+
+var _ reconcile.Reconciler = &ReconcileClusterStoragePolicyInfo{}
+
+// ReconcileClusterStoragePolicyInfo reconciles ClusterStoragePolicyInfo objects.
+type ReconcileClusterStoragePolicyInfo struct {
+	client     client.Client
+	scheme     *runtime.Scheme
+	configInfo *config.ConfigurationInfo
+	recorder   record.EventRecorder
+}
+
+// Reconcile syncs storage policy attributes from the vCenter.
+func (r *ReconcileClusterStoragePolicyInfo) Reconcile(ctx context.Context,
+	request reconcile.Request) (reconcile.Result, error) {
+	ctx = logger.NewContextWithLogger(ctx)
+	log := logger.GetLogger(ctx)
+
+	// request always identifies a ClusterStoragePolicyInfo. SC/VAC watches enqueue the same
+	// NamespacedName after ensureClusterSPIExists (see mapStorageClassToClusterSPI /
+	// mapVolumeAttributesClassToClusterSPI).
+	instance := &clusterspiv1alpha1.ClusterStoragePolicyInfo{}
+	err := r.client.Get(ctx, request.NamespacedName, instance)
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			// Deleted between enqueue and processing; nothing to do.
+			return reconcile.Result{}, nil
+		}
+		return reconcile.Result{}, err
+	}
+	if instance.DeletionTimestamp != nil {
+		return reconcile.Result{}, nil
+	}
+
+	log.Infof("Reconciling ClusterStoragePolicyInfo %q", request.Name)
+
+	// TODO: Add storage policy attributes sync logic (vCenter / SPBM).
+	return reconcile.Result{}, nil
+}
diff --git a/pkg/syncer/cnsoperator/controller/clusterstoragepolicyinfo/clusterstoragepolicyinfo_controller_test.go b/pkg/syncer/cnsoperator/controller/clusterstoragepolicyinfo/clusterstoragepolicyinfo_controller_test.go
new file mode 100644
index 0000000000..5a770331c0
--- /dev/null
+++ b/pkg/syncer/cnsoperator/controller/clusterstoragepolicyinfo/clusterstoragepolicyinfo_controller_test.go
@@ -0,0 +1,504 @@
+/*
+Copyright 2026 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package clusterstoragepolicyinfo + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/client/interceptor" + apis "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator" + clusterspiv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/clusterstoragepolicyinfo/v1alpha1" + "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger" +) + +func testScheme(t *testing.T) *runtime.Scheme { + t.Helper() + s := runtime.NewScheme() + require.NoError(t, apis.SchemeBuilder.AddToScheme(s)) + require.NoError(t, storagev1.AddToScheme(s)) + return s +} + +func volumeBindingPtr(m storagev1.VolumeBindingMode) *storagev1.VolumeBindingMode { + return &m +} + +// TestVolumeAttributesClassAPIAvailable exercises the discovery path used by +// volumeAttributesClassAPIAvailable (via volumeAttributesClassAPIAvailableFromRESTConfig) +// against a stub API server. 
+func TestVolumeAttributesClassAPIAvailable(t *testing.T) { + t.Run("volumeattributesclasses present", func(t *testing.T) { + srv := newDiscoveryTestServer(t, true) + t.Cleanup(srv.Close) + + cfg := &rest.Config{Host: srv.URL} + ok, err := volumeAttributesClassAPIAvailableFromRESTConfig(cfg) + require.NoError(t, err) + assert.True(t, ok) + }) + + t.Run("volumeattributesclasses absent", func(t *testing.T) { + srv := newDiscoveryTestServer(t, false) + t.Cleanup(srv.Close) + + cfg := &rest.Config{Host: srv.URL} + ok, err := volumeAttributesClassAPIAvailableFromRESTConfig(cfg) + require.NoError(t, err) + assert.False(t, ok) + }) +} + +func newDiscoveryTestServer(t *testing.T, includeVAC bool) *httptest.Server { + t.Helper() + mux := http.NewServeMux() + mux.HandleFunc("/api", func(w http.ResponseWriter, _ *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(`{"kind":"APIVersions","apiVersion":"v1","versions":["v1"]}`)) + }) + mux.HandleFunc("/apis", func(w http.ResponseWriter, _ *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(`{ + "kind": "APIGroupList", + "apiVersion": "v1", + "groups": [ + { + "name": "storage.k8s.io", + "versions": [ + {"groupVersion": "storage.k8s.io/v1", "version": "v1"} + ], + "preferredVersion": {"groupVersion": "storage.k8s.io/v1", "version": "v1"} + } + ] +}`)) + }) + mux.HandleFunc("/apis/storage.k8s.io/v1", func(w http.ResponseWriter, _ *http.Request) { + w.Header().Set("Content-Type", "application/json") + verbs := []string{"create", "delete", "get", "list", "patch", "update", "watch"} + list := &metav1.APIResourceList{ + GroupVersion: storagev1.SchemeGroupVersion.String(), + APIResources: []metav1.APIResource{ + {Name: "storageclasses", Namespaced: false, Kind: "StorageClass", Verbs: verbs}, + }, + } + if includeVAC { + list.APIResources = append(list.APIResources, metav1.APIResource{ + Name: "volumeattributesclasses", Namespaced: false, Kind: 
"VolumeAttributesClass", Verbs: verbs, + }) + } + payload, err := json.Marshal(list) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + _, _ = w.Write(payload) + }) + return httptest.NewServer(mux) +} + +func TestStorageClassIsWaitForFirstConsumer(t *testing.T) { + wffc := storagev1.VolumeBindingWaitForFirstConsumer + immediate := storagev1.VolumeBindingImmediate + cases := []struct { + name string + sc *storagev1.StorageClass + want bool + }{ + {"nil binding mode", &storagev1.StorageClass{}, false}, + {"immediate", &storagev1.StorageClass{VolumeBindingMode: volumeBindingPtr(immediate)}, false}, + {"wait for first consumer", &storagev1.StorageClass{VolumeBindingMode: volumeBindingPtr(wffc)}, true}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.want, storageClassIsWaitForFirstConsumer(tc.sc)) + }) + } +} + +func TestMergeOwnerReference(t *testing.T) { + controllerFalse := false + blockFalse := false + base := metav1.OwnerReference{ + APIVersion: "storage.k8s.io/v1", Kind: "StorageClass", Name: "sc1", + UID: types.UID("11111111-1111-1111-1111-111111111111"), + Controller: &controllerFalse, BlockOwnerDeletion: &blockFalse, + } + other := metav1.OwnerReference{ + APIVersion: "v1", Kind: "ConfigMap", Name: "cm1", + UID: types.UID("22222222-2222-2222-2222-222222222222"), + Controller: &controllerFalse, BlockOwnerDeletion: &blockFalse, + } + sameKeyNewUID := metav1.OwnerReference{ + APIVersion: base.APIVersion, Kind: base.Kind, Name: base.Name, + UID: types.UID("aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"), + Controller: &controllerFalse, BlockOwnerDeletion: &blockFalse, + } + + t.Run("append when new", func(t *testing.T) { + out := mergeOwnerReference([]metav1.OwnerReference{base}, other) + require.Len(t, out, 2) + assert.Equal(t, base, out[0]) + assert.Equal(t, other, out[1]) + }) + + t.Run("unchanged when same key and UID", func(t *testing.T) { + refs := []metav1.OwnerReference{base} + 
out := mergeOwnerReference(refs, base) + assert.Equal(t, refs, out) + }) + + t.Run("replace when same key different UID", func(t *testing.T) { + out := mergeOwnerReference([]metav1.OwnerReference{base}, sameKeyNewUID) + require.Len(t, out, 1) + assert.Equal(t, sameKeyNewUID, out[0]) + }) + + t.Run("empty adds one", func(t *testing.T) { + out := mergeOwnerReference(nil, base) + require.Len(t, out, 1) + assert.Equal(t, base, out[0]) + }) +} + +func TestOwnerReferenceFor(t *testing.T) { + t.Run("storage class", func(t *testing.T) { + scheme := testScheme(t) + r := &ReconcileClusterStoragePolicyInfo{scheme: scheme} + sc := &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{Name: "gold", UID: types.UID("bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb")}, + } + ref, err := r.generateOwnerReference(sc) + require.NoError(t, err) + assert.Equal(t, "storage.k8s.io/v1", ref.APIVersion) + assert.Equal(t, "StorageClass", ref.Kind) + assert.Equal(t, "gold", ref.Name) + assert.Equal(t, sc.UID, ref.UID) + require.NotNil(t, ref.Controller) + assert.False(t, *ref.Controller) + require.NotNil(t, ref.BlockOwnerDeletion) + assert.False(t, *ref.BlockOwnerDeletion) + }) + + t.Run("unknown type in scheme", func(t *testing.T) { + s := runtime.NewScheme() + r := &ReconcileClusterStoragePolicyInfo{scheme: s} + _, err := r.generateOwnerReference(&storagev1.StorageClass{ObjectMeta: metav1.ObjectMeta{Name: "x"}}) + require.Error(t, err) + }) +} + +func TestCreateClusterSPIWithOwner(t *testing.T) { + ctx := logger.NewContextWithLogger(context.Background()) + scheme := testScheme(t) + controllerFalse := false + blockFalse := false + scOwner := metav1.OwnerReference{ + APIVersion: "storage.k8s.io/v1", Kind: "StorageClass", Name: "gold", + UID: types.UID("11111111-1111-1111-1111-111111111111"), + Controller: &controllerFalse, BlockOwnerDeletion: &blockFalse, + } + namespacedName := types.NamespacedName{Name: "gold"} + + t.Run("creates ClusterStoragePolicyInfo", func(t *testing.T) { + cli := 
fake.NewClientBuilder().WithScheme(scheme).Build() + r := &ReconcileClusterStoragePolicyInfo{client: cli, scheme: scheme} + reqs := r.createClusterSPIWithOwner(ctx, "gold", "StorageClass", scOwner, namespacedName) + require.Len(t, reqs, 1) + assert.Equal(t, namespacedName, reqs[0].NamespacedName) + + got := &clusterspiv1alpha1.ClusterStoragePolicyInfo{} + require.NoError(t, cli.Get(ctx, client.ObjectKey{Name: "gold"}, got)) + assert.Equal(t, "gold", got.Spec.K8sCompliantName) + require.Len(t, got.OwnerReferences, 1) + assert.Equal(t, scOwner, got.OwnerReferences[0]) + }) + + t.Run("already exists merges owner", func(t *testing.T) { + existing := &clusterspiv1alpha1.ClusterStoragePolicyInfo{ + ObjectMeta: metav1.ObjectMeta{Name: "gold"}, + Spec: clusterspiv1alpha1.ClusterStoragePolicyInfoSpec{K8sCompliantName: "gold"}, + } + cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(existing).Build() + r := &ReconcileClusterStoragePolicyInfo{client: cli, scheme: scheme} + reqs := r.createClusterSPIWithOwner(ctx, "gold", "StorageClass", scOwner, namespacedName) + require.Len(t, reqs, 1) + + updated := &clusterspiv1alpha1.ClusterStoragePolicyInfo{} + require.NoError(t, cli.Get(ctx, client.ObjectKey{Name: "gold"}, updated)) + require.Len(t, updated.OwnerReferences, 1) + assert.Equal(t, scOwner, updated.OwnerReferences[0]) + }) + + t.Run("create error returns nil", func(t *testing.T) { + cli := fake.NewClientBuilder().WithScheme(scheme).WithInterceptorFuncs(interceptor.Funcs{ + Create: func(ctx context.Context, cl client.WithWatch, obj client.Object, opts ...client.CreateOption) error { + if _, ok := obj.(*clusterspiv1alpha1.ClusterStoragePolicyInfo); ok { + return fmt.Errorf("injected create failure") + } + return cl.Create(ctx, obj, opts...) 
+ }, + }).Build() + r := &ReconcileClusterStoragePolicyInfo{client: cli, scheme: scheme} + assert.Nil(t, r.createClusterSPIWithOwner(ctx, "gold", "StorageClass", scOwner, namespacedName)) + }) + + t.Run("already exists get failure returns nil", func(t *testing.T) { + existing := &clusterspiv1alpha1.ClusterStoragePolicyInfo{ + ObjectMeta: metav1.ObjectMeta{Name: "gold"}, + Spec: clusterspiv1alpha1.ClusterStoragePolicyInfoSpec{K8sCompliantName: "gold"}, + } + cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(existing).WithInterceptorFuncs(interceptor.Funcs{ + Get: func(ctx context.Context, cl client.WithWatch, key client.ObjectKey, obj client.Object, + opts ...client.GetOption) error { + if _, ok := obj.(*clusterspiv1alpha1.ClusterStoragePolicyInfo); ok { + return fmt.Errorf("injected get after AlreadyExists") + } + return cl.Get(ctx, key, obj, opts...) + }, + }).Build() + r := &ReconcileClusterStoragePolicyInfo{client: cli, scheme: scheme} + assert.Nil(t, r.createClusterSPIWithOwner(ctx, "gold", "StorageClass", scOwner, namespacedName)) + }) +} + +func TestEnsureOwnerReferenceOnClusterSPI(t *testing.T) { + ctx := context.Background() + scheme := testScheme(t) + controllerFalse := false + blockFalse := false + owner := metav1.OwnerReference{ + APIVersion: "storage.k8s.io/v1", Kind: "StorageClass", Name: "gold", + UID: types.UID("cccccccc-cccc-cccc-cccc-cccccccccccc"), + Controller: &controllerFalse, BlockOwnerDeletion: &blockFalse, + } + + t.Run("no-op when already merged", func(t *testing.T) { + cspi := &clusterspiv1alpha1.ClusterStoragePolicyInfo{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gold", + OwnerReferences: []metav1.OwnerReference{owner}, + }, + } + cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(cspi.DeepCopy()).Build() + r := &ReconcileClusterStoragePolicyInfo{client: cli, scheme: scheme} + require.NoError(t, r.ensureOwnerReferenceOnClusterSPI(ctx, cspi, owner, "StorageClass")) + + updated := 
&clusterspiv1alpha1.ClusterStoragePolicyInfo{} + require.NoError(t, cli.Get(ctx, client.ObjectKey{Name: "gold"}, updated)) + require.Len(t, updated.OwnerReferences, 1) + assert.Equal(t, owner, updated.OwnerReferences[0]) + }) + + t.Run("patch adds owner", func(t *testing.T) { + cspi := &clusterspiv1alpha1.ClusterStoragePolicyInfo{ + ObjectMeta: metav1.ObjectMeta{Name: "gold"}, + Spec: clusterspiv1alpha1.ClusterStoragePolicyInfoSpec{K8sCompliantName: "gold"}, + } + cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(cspi.DeepCopy()).Build() + r := &ReconcileClusterStoragePolicyInfo{client: cli, scheme: scheme} + require.NoError(t, r.ensureOwnerReferenceOnClusterSPI(ctx, cspi, owner, "StorageClass")) + + updated := &clusterspiv1alpha1.ClusterStoragePolicyInfo{} + require.NoError(t, cli.Get(ctx, client.ObjectKey{Name: "gold"}, updated)) + require.Len(t, updated.OwnerReferences, 1) + assert.Equal(t, owner, updated.OwnerReferences[0]) + }) + + t.Run("patch error", func(t *testing.T) { + cspi := &clusterspiv1alpha1.ClusterStoragePolicyInfo{ + ObjectMeta: metav1.ObjectMeta{Name: "gold"}, + } + cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(cspi.DeepCopy()).WithInterceptorFuncs( + interceptor.Funcs{ + Patch: func(ctx context.Context, cl client.WithWatch, obj client.Object, + patch client.Patch, opts ...client.PatchOption) error { + return fmt.Errorf("injected patch failure") + }, + }).Build() + r := &ReconcileClusterStoragePolicyInfo{client: cli, scheme: scheme} + err := r.ensureOwnerReferenceOnClusterSPI(ctx, cspi, owner, "StorageClass") + require.Error(t, err) + assert.Contains(t, err.Error(), "injected patch failure") + }) +} + +func TestMapStorageClassToClusterSPI(t *testing.T) { + ctx := context.Background() + scheme := testScheme(t) + + t.Run("wrong type returns nil", func(t *testing.T) { + cli := fake.NewClientBuilder().WithScheme(scheme).Build() + r := &ReconcileClusterStoragePolicyInfo{client: cli, scheme: scheme} + reqs := 
r.mapStorageClassToClusterSPI(ctx, &storagev1.VolumeAttributesClass{}) + assert.Nil(t, reqs) + }) + + t.Run("wait for first consumer skips", func(t *testing.T) { + cli := fake.NewClientBuilder().WithScheme(scheme).Build() + r := &ReconcileClusterStoragePolicyInfo{client: cli, scheme: scheme} + sc := &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{Name: "gold"}, + VolumeBindingMode: volumeBindingPtr(storagev1.VolumeBindingWaitForFirstConsumer), + } + assert.Nil(t, r.mapStorageClassToClusterSPI(ctx, sc)) + }) + + t.Run("creates ClusterStoragePolicyInfo and returns request", func(t *testing.T) { + cli := fake.NewClientBuilder().WithScheme(scheme).Build() + r := &ReconcileClusterStoragePolicyInfo{client: cli, scheme: scheme} + sc := &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{Name: "gold", UID: types.UID("dddddddd-dddd-dddd-dddd-dddddddddddd")}, + Provisioner: "p", + } + reqs := r.mapStorageClassToClusterSPI(ctx, sc) + require.Len(t, reqs, 1) + assert.Equal(t, types.NamespacedName{Name: "gold"}, reqs[0].NamespacedName) + + got := &clusterspiv1alpha1.ClusterStoragePolicyInfo{} + require.NoError(t, cli.Get(ctx, client.ObjectKey{Name: "gold"}, got)) + assert.Equal(t, "gold", got.Spec.K8sCompliantName) + require.NotEmpty(t, got.OwnerReferences) + }) +} + +func TestMapVolumeAttributesClassToClusterSPI(t *testing.T) { + ctx := context.Background() + scheme := testScheme(t) + + t.Run("wrong type returns nil", func(t *testing.T) { + cli := fake.NewClientBuilder().WithScheme(scheme).Build() + r := &ReconcileClusterStoragePolicyInfo{client: cli, scheme: scheme} + reqs := r.mapVolumeAttributesClassToClusterSPI(ctx, &storagev1.StorageClass{}) + assert.Nil(t, reqs) + }) + + t.Run("creates ClusterStoragePolicyInfo from VAC", func(t *testing.T) { + vac := &storagev1.VolumeAttributesClass{ + ObjectMeta: metav1.ObjectMeta{Name: "orphan-vac", UID: types.UID("eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee")}, + DriverName: "csi.test", + Parameters: map[string]string{"k": 
"v"}, + } + cli := fake.NewClientBuilder().WithScheme(scheme).Build() + r := &ReconcileClusterStoragePolicyInfo{client: cli, scheme: scheme} + reqs := r.mapVolumeAttributesClassToClusterSPI(ctx, vac) + require.Len(t, reqs, 1) + assert.Equal(t, types.NamespacedName{Name: "orphan-vac"}, reqs[0].NamespacedName) + }) + + t.Run("second VAC name creates separate CSPI", func(t *testing.T) { + vac := &storagev1.VolumeAttributesClass{ + ObjectMeta: metav1.ObjectMeta{Name: "fast", UID: types.UID("12121212-1212-1212-1212-121212121212")}, + DriverName: "csi.test", + Parameters: map[string]string{"tier": "fast"}, + } + cli := fake.NewClientBuilder().WithScheme(scheme).Build() + r := &ReconcileClusterStoragePolicyInfo{client: cli, scheme: scheme} + reqs := r.mapVolumeAttributesClassToClusterSPI(ctx, vac) + require.Len(t, reqs, 1) + assert.Equal(t, types.NamespacedName{Name: "fast"}, reqs[0].NamespacedName) + }) +} + +func TestMapVolumeAttributesClassToClusterSPI_AlreadyExists(t *testing.T) { + ctx := context.Background() + scheme := testScheme(t) + existing := &clusterspiv1alpha1.ClusterStoragePolicyInfo{ + ObjectMeta: metav1.ObjectMeta{Name: "gold"}, + Spec: clusterspiv1alpha1.ClusterStoragePolicyInfoSpec{K8sCompliantName: "gold"}, + } + vac := &storagev1.VolumeAttributesClass{ + ObjectMeta: metav1.ObjectMeta{Name: "gold", UID: types.UID("abababab-abab-abab-abab-abababababab")}, + DriverName: "csi.test", + Parameters: map[string]string{"k": "v"}, + } + cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(existing).Build() + r := &ReconcileClusterStoragePolicyInfo{client: cli, scheme: scheme} + reqs := r.mapVolumeAttributesClassToClusterSPI(ctx, vac) + require.Len(t, reqs, 1) + + updated := &clusterspiv1alpha1.ClusterStoragePolicyInfo{} + require.NoError(t, cli.Get(ctx, client.ObjectKey{Name: "gold"}, updated)) + found := false + for _, ref := range updated.OwnerReferences { + if ref.Kind == "VolumeAttributesClass" && ref.Name == "gold" && ref.UID == vac.UID { + found = 
true + break + } + } + assert.True(t, found, "expected ownerReference for VAC") +} + +func TestMapStorageClassToClusterSPI_PatchOwnerOnExisting(t *testing.T) { + ctx := context.Background() + scheme := testScheme(t) + existing := &clusterspiv1alpha1.ClusterStoragePolicyInfo{ + ObjectMeta: metav1.ObjectMeta{Name: "gold"}, + Spec: clusterspiv1alpha1.ClusterStoragePolicyInfoSpec{K8sCompliantName: "gold"}, + } + sc := &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{Name: "gold", UID: types.UID("cdcdcdcd-cdcd-cdcd-cdcd-cdcdcdcdcdcd")}, + Provisioner: "p", + } + cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(existing).Build() + r := &ReconcileClusterStoragePolicyInfo{client: cli, scheme: scheme} + reqs := r.mapStorageClassToClusterSPI(ctx, sc) + require.Len(t, reqs, 1) + + updated := &clusterspiv1alpha1.ClusterStoragePolicyInfo{} + require.NoError(t, cli.Get(ctx, client.ObjectKey{Name: "gold"}, updated)) + found := false + for _, ref := range updated.OwnerReferences { + if ref.Kind == "StorageClass" && ref.Name == "gold" && ref.UID == sc.UID { + found = true + break + } + } + assert.True(t, found, "expected ownerReference for StorageClass") +} + +func TestMapStorageClassToClusterSPI_GetCSPIError(t *testing.T) { + ctx := context.Background() + scheme := testScheme(t) + sc := &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{Name: "gold", UID: types.UID("efefefef-efef-efef-efef-efefefefefef")}, + Provisioner: "p", + } + cli := fake.NewClientBuilder().WithScheme(scheme).WithInterceptorFuncs(interceptor.Funcs{ + Get: func(ctx context.Context, cl client.WithWatch, key client.ObjectKey, obj client.Object, + opts ...client.GetOption) error { + if _, ok := obj.(*clusterspiv1alpha1.ClusterStoragePolicyInfo); ok { + return fmt.Errorf("injected get CSPI failure") + } + return cl.Get(ctx, key, obj, opts...) 
+ }, + }).Build() + r := &ReconcileClusterStoragePolicyInfo{client: cli, scheme: scheme} + assert.Nil(t, r.mapStorageClassToClusterSPI(ctx, sc)) +} diff --git a/pkg/syncer/cnsoperator/controller/clusterstoragepolicyinfo/clusterstoragepolicyinfo_helper.go b/pkg/syncer/cnsoperator/controller/clusterstoragepolicyinfo/clusterstoragepolicyinfo_helper.go new file mode 100644 index 0000000000..94b3cc63c0 --- /dev/null +++ b/pkg/syncer/cnsoperator/controller/clusterstoragepolicyinfo/clusterstoragepolicyinfo_helper.go @@ -0,0 +1,92 @@ +/* +Copyright 2026 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clusterstoragepolicyinfo + +import ( + "fmt" + + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +// ownerReferenceKey returns OwnerReference key which is concatenated from the APIVersion, Kind and Name. +func ownerReferenceKey(ref metav1.OwnerReference) string { + return ref.APIVersion + "/" + ref.Kind + "/" + ref.Name +} + +// mergeOwnerReference merges the OwnerReferences slice with the new OwnerReference. 
+func mergeOwnerReference(refs []metav1.OwnerReference, add metav1.OwnerReference) []metav1.OwnerReference {
+	key := ownerReferenceKey(add)
+	for i := range refs {
+		if ownerReferenceKey(refs[i]) == key {
+			if refs[i].UID == add.UID {
+				// Same owner already recorded: return the input unchanged.
+				return refs
+			}
+			// Same APIVersion/Kind/Name but a different UID means the owner
+			// was recreated; replace the stale entry (copy-on-write).
+			out := make([]metav1.OwnerReference, len(refs))
+			copy(out, refs)
+			out[i] = add
+			return out
+		}
+	}
+	// Not present yet: append to a copy so callers' slices are never mutated.
+	out := make([]metav1.OwnerReference, len(refs), len(refs)+1)
+	copy(out, refs)
+	return append(out, add)
+}
+
+// volumeAttributesClassAPIAvailable reports whether the apiserver exposes VolumeAttributesClass
+// (storage.k8s.io/v1). VAC is supported from K8s version 1.34 onwards.
+func volumeAttributesClassAPIAvailable(mgr manager.Manager) (bool, error) {
+	cfg := mgr.GetConfig()
+	if cfg == nil {
+		return false, fmt.Errorf("manager REST config is nil")
+	}
+	return volumeAttributesClassAPIAvailableFromRESTConfig(cfg)
+}
+
+// volumeAttributesClassAPIAvailableFromRESTConfig is the REST/discovery implementation used by
+// volumeAttributesClassAPIAvailable (also exercised directly in unit tests).
+func volumeAttributesClassAPIAvailableFromRESTConfig(cfg *rest.Config) (bool, error) {
+	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
+	if err != nil {
+		return false, err
+	}
+	// Tolerate partial discovery failures: only bail out when no resource
+	// lists at all could be retrieved.
+	_, lists, err := dc.ServerGroupsAndResources()
+	if lists == nil && err != nil {
+		return false, err
+	}
+	gv := storagev1.SchemeGroupVersion.String()
+	for _, list := range lists {
+		if list.GroupVersion != gv {
+			continue
+		}
+		for i := range list.APIResources {
+			if list.APIResources[i].Name == "volumeattributesclasses" {
+				return true, nil
+			}
+		}
+	}
+	return false, nil
+}
+
+// storageClassIsWaitForFirstConsumer indicates whether the StorageClass has a WaitForFirstConsumer volumeBindingMode.
+func storageClassIsWaitForFirstConsumer(sc *storagev1.StorageClass) bool {
+	// A nil VolumeBindingMode defaults to Immediate, so it is not WFFC.
+	return sc.VolumeBindingMode != nil &&
+		*sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer
+}
diff --git a/pkg/syncer/cnsoperator/manager/init.go b/pkg/syncer/cnsoperator/manager/init.go
index 01abd74d99..66e0e311af 100644
--- a/pkg/syncer/cnsoperator/manager/init.go
+++ b/pkg/syncer/cnsoperator/manager/init.go
@@ -120,6 +120,15 @@ func InitCnsOperator(ctx context.Context, clusterFlavor cnstypes.CnsClusterFlavo
 	// TODO: Verify leader election for CNS Operator in multi-master mode
 	// Create CRD's for WCP flavor.
 	if clusterFlavor == cnstypes.CnsClusterFlavorWorkload {
+		// Create the ClusterStoragePolicyInfo CRD only when the capability is
+		// active; the matching controller (see clusterstoragepolicyinfo.Add)
+		// is likewise only started when this FSS is enabled.
+		if cnsOperator.coCommonInterface.IsFSSEnabled(ctx, common.SupportsExposingStoragePolicyAttributes) {
+			err = k8s.CreateCustomResourceDefinitionFromManifest(ctx, cnsoperatorconfig.EmbedClusterStoragePolicyInfoCRFile,
+				cnsoperatorconfig.EmbedClusterStoragePolicyInfoCRFileName)
+			if err != nil {
+				crdName := cnsoperatorv1alpha1.ClusterStoragePolicyInfoPlural + "." + cnsoperatorv1alpha1.SchemeGroupVersion.Group
+				log.Errorf("failed to create %q CRD. Err: %+v", crdName, err)
+				return err
+			}
+		}
 		syncer.IsPodVMOnStretchSupervisorFSSEnabled = cnsOperator.coCommonInterface.IsFSSEnabled(ctx,
 			common.PodVMOnStretchedSupervisor)
 		// Create CnsNodeVmAttachment CRD
diff --git a/pkg/syncer/metadatasyncer.go b/pkg/syncer/metadatasyncer.go
index c480c06166..184af383eb 100644
--- a/pkg/syncer/metadatasyncer.go
+++ b/pkg/syncer/metadatasyncer.go
@@ -363,6 +363,10 @@ func InitMetadataSyncer(ctx context.Context, clusterFlavor cnstypes.CnsClusterFl
 			go commonco.ContainerOrchestratorUtility.HandleLateEnablementOfCapability(ctx, clusterFlavor,
 				common.VsanFileVolumeService, "", "")
 		}
+		// Capability disabled now: watch for late enablement so the feature
+		// can be initialized without a manual restart.
+		if !commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.SupportsExposingStoragePolicyAttributes) {
+			go commonco.ContainerOrchestratorUtility.HandleLateEnablementOfCapability(ctx, clusterFlavor,
+				common.SupportsExposingStoragePolicyAttributes, "", "")
+		}
 	}
 
 	if metadataSyncer.clusterFlavor == cnstypes.CnsClusterFlavorGuest {