diff --git a/packs/ceph-rbd-3.11.0/README.md b/packs/ceph-rbd-3.11.0/README.md new file mode 100644 index 00000000..58f7c0a6 --- /dev/null +++ b/packs/ceph-rbd-3.11.0/README.md @@ -0,0 +1,167 @@ +# Ceph CSI + +Ceph CSI plugins implement an interface between a CSI-enabled K8s cluster and Ceph clusters. They enable dynamic provisioning of Ceph volumes and attaching them to workloads. + +For in-depth details about the configuration and deployment of the RBD plugin, please refer to the [rbd doc](https://github.com/ceph/ceph-csi/blob/devel/docs/deploy-rbd.md). + +## Tested Kubernetes versions + +Ceph CSI drivers are currently developed and tested **exclusively** in Kubernetes environments. + +| Ceph CSI Version | Container Orchestrator Name | Version Tested | +| -----------------| --------------------------- | -------------------| +| v3.11.0 | Kubernetes | v1.26, v1.27, v1.28, v1.29| + +## Support Matrix + +### Ceph-CSI features and available versions + +Please refer to the [rbd-nbd mounter](./docs/rbd-nbd.md#support-matrix) documentation for its support details. + +| Plugin | Features | Feature Status | CSI Driver Version | CSI Spec Version | Ceph Cluster Version | Kubernetes Version | +| ------ | --------------------------------------------------------- | -------------- | ------------------ | ---------------- | -------------------- | ------------------ | +| RBD | Dynamically provision, de-provision Block mode RWO volume | GA | >= v1.0.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.14.0 | +| | Dynamically provision, de-provision Block mode RWX volume | GA | >= v1.0.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.14.0 | +| | Dynamically provision, de-provision Block mode RWOP volume| Alpha | >= v3.5.0 | >= v1.5.0 | Pacific (>=v16.2.0) | >= v1.22.0 | +| | Dynamically provision, de-provision File mode RWO volume | GA | >= v1.0.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.14.0 | +| | Dynamically provision, de-provision File mode RWOP volume | Alpha | >= v3.5.0 | >= v1.5.0 | Pacific (>=v16.2.0) | >= v1.22.0 | +| | Provision File Mode ROX volume from snapshot | Alpha | >= v3.0.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.17.0 | +| | Provision File Mode ROX volume from another volume | Alpha | >= v3.0.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.16.0 | +| | Provision Block Mode ROX volume from snapshot | Alpha | >= v3.0.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.17.0 | +| | Provision Block Mode ROX volume from another volume | Alpha | >= v3.0.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.16.0 | +| | Creating and deleting snapshot | GA | >= v1.0.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.17.0 | +| | Provision volume from snapshot | GA | >= v1.0.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.17.0 | +| | Provision volume from another volume | GA | >= v1.0.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.16.0 | +| | Expand volume | Beta | >= v2.0.0 | >= v1.1.0 | Pacific (>=v16.2.0) | >= v1.15.0 | +| | Volume/PV Metrics of File Mode Volume | GA | >= v1.2.0 | >= v1.1.0 | Pacific (>=v16.2.0) | >= v1.15.0 | +| | Volume/PV Metrics of Block Mode Volume | GA | >= v1.2.0 | >= v1.1.0 | Pacific (>=v16.2.0) | >= v1.21.0 | +| | Topology Aware Provisioning Support | Alpha | >= v2.1.0 | >= v1.1.0 | Pacific (>=v16.2.0) | >= v1.14.0 | + +`NOTE`: The `Alpha` status reflects possible non-backward compatible changes in the future, and is thus not recommended for production use. + +## Usage & Configuration
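+
+The pack exposes the chart's values under `charts.ceph-csi-rbd`. As a minimal sketch of connecting the driver to an existing Ceph cluster (the cluster ID, monitor addresses, and pool below are placeholders; substitute the values of your own cluster):
+
+```yaml
+charts:
+  ceph-csi-rbd:
+    csiConfig:
+      - clusterID: "ceph-cluster-1"   # placeholder: your Ceph cluster fsid
+        monitors:
+          - "192.168.1.10:6789"       # placeholder: your MON addresses
+          - "192.168.1.11:6789"
+    storageClass:
+      create: true
+      clusterID: "ceph-cluster-1"     # must match the csiConfig clusterID
+      pool: replicapool
+```
+
+The following table lists the configurable parameters of the ceph-rbd pack and their default values.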
+ +| Parameter | Description | Default | +| ---------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------- | +| `rbac.create` | Specifies whether RBAC resources should be created | `true` | +| `serviceAccounts.nodeplugin.create` | Specifies whether a nodeplugin ServiceAccount should be created | `true` | +| `serviceAccounts.nodeplugin.name` | The name of the nodeplugin ServiceAccount to use. If not set and create is true, a name is generated using the fullname | "" | +| `serviceAccounts.provisioner.create` | Specifies whether a provisioner ServiceAccount should be created | `true` | +| `serviceAccounts.provisioner.name` | The name of the provisioner ServiceAccount to use. If not set and create is true, a name is generated using the fullname | "" | +| `csiConfig` | Configuration for the CSI to connect to the cluster | [] | +| `csiMapping` | Configuration details of clusterID, PoolID, and FscID mapping | [] | +| `encryptionKMSConfig` | Configuration for the encryption KMS | `{}` | +| `commonLabels` | Labels to apply to all resources | `{}` | +| `logLevel` | Sets the logging level for csi containers. Supported values are 0 to 5: 0 for general useful logs, 5 for trace-level verbosity. | `5` | +| `sidecarLogLevel` | Sets the logging level for csi sidecar containers. Supported values are 0 to 5: 0 for general useful logs, 5 for trace-level verbosity. | `1` | +| `nodeplugin.name` | Specifies the nodeplugin's name | `nodeplugin` | +| `nodeplugin.updateStrategy` | Specifies the update strategy. If you are using the rbd-nbd client, set this value to OnDelete | `RollingUpdate` | +| `nodeplugin.priorityClassName` | Set a user-created priorityClassName for csi plugin pods. Default is system-node-critical, which is the highest priority | `system-node-critical` | +| `nodeplugin.imagePullSecrets` | Specifies imagePullSecrets for containers | `[]` | +| `nodeplugin.profiling.enabled` | Specifies whether profiling should be enabled | `false` | +| `nodeplugin.registrar.image.repository` | Node Registrar image repository URL | `registry.k8s.io/sig-storage/csi-node-driver-registrar` | +| `nodeplugin.registrar.image.tag` | Image tag | `v2.10.0` | +| `nodeplugin.registrar.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `nodeplugin.plugin.image.repository` | Nodeplugin image repository URL | `quay.io/cephcsi/cephcsi` | +| `nodeplugin.plugin.image.tag` | Image tag | `v3.11.0` | +| `nodeplugin.plugin.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `nodeplugin.nodeSelector` | Kubernetes `nodeSelector` to add to the Daemonset | `{}` | +| `nodeplugin.tolerations` | List of Kubernetes `tolerations` to add to the Daemonset | `[]` | +| `nodeplugin.podSecurityPolicy.enabled` | If true, create & use [Pod Security Policy resources](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). 
| `false` | +| `provisioner.name` | Specifies the name of the provisioner | `provisioner` | +| `provisioner.replicaCount` | Specifies the replica count | `3` | +| `provisioner.defaultFSType` | Specifies the default FSType | `ext4` | +| `provisioner.deployController` | Enables or disables the deployment of the controller, which generates the OMAP data if it is not present | `true` | +| `provisioner.hardMaxCloneDepth` | Hard limit for the maximum number of nested volume clones that are taken before a flatten occurs | `8` | +| `provisioner.softMaxCloneDepth` | Soft limit for the maximum number of nested volume clones that are taken before a flatten occurs | `4` | +| `provisioner.maxSnapshotsOnImage` | Maximum number of snapshots allowed on an rbd image without flattening | `450` | +| `provisioner.minSnapshotsOnImage` | Minimum number of snapshots allowed on an rbd image to trigger flattening | `250` | +| `provisioner.skipForceFlatten` | Skip image flattening if the kernel supports mapping of rbd images that have the deep-flatten feature | `false` | +| `provisioner.timeout` | gRPC timeout for waiting for creation or deletion of a volume | `60s` | +| `provisioner.clustername` | Cluster name to set on the RBD image | "" | +| `provisioner.setmetadata` | Set metadata on volume | `true` | +| `provisioner.priorityClassName` | Set a user-created priorityClassName for csi provisioner pods. Default is `system-cluster-critical`, which is lower priority than `system-node-critical` | `system-cluster-critical` | +| `provisioner.enableHostNetwork` | Specifies whether hostNetwork is enabled for the provisioner pod. | `false` | +| `provisioner.imagePullSecrets` | Specifies imagePullSecrets for containers | `[]` | +| `provisioner.profiling.enabled` | Specifies whether profiling should be enabled | `false` | +| `provisioner.provisioner.image.repository` | Specifies the csi-provisioner image repository URL | `registry.k8s.io/sig-storage/csi-provisioner` | +| `provisioner.provisioner.image.tag` | Specifies image tag | `v4.0.0` | +| `provisioner.provisioner.image.pullPolicy` | Specifies pull policy | `IfNotPresent` | +| `provisioner.provisioner.image.extraArgs` | Specifies extra arguments for the provisioner sidecar | `[]` | +| `provisioner.snapshotter.args.enableVolumeGroupSnapshots` | Enables the creation of volume group snapshots | `false` | +| `provisioner.attacher.image.repository` | Specifies the csi-attacher image repository URL | `registry.k8s.io/sig-storage/csi-attacher` | +| `provisioner.attacher.image.tag` | Specifies image tag | `v4.5.0` | +| `provisioner.attacher.image.pullPolicy` | Specifies pull policy | `IfNotPresent` | +| `provisioner.attacher.image.extraArgs` | Specifies extra arguments for the attacher sidecar | `[]` | +| `provisioner.attacher.name` | Specifies the name of the csi-attacher sidecar | `attacher` | +| `provisioner.attacher.enabled` | Specifies whether the attacher sidecar is enabled | `true` | +| `provisioner.resizer.image.repository` | Specifies the csi-resizer image repository URL | `registry.k8s.io/sig-storage/csi-resizer` | +| `provisioner.resizer.image.tag` | Specifies image tag | `v1.10.0` | +| `provisioner.resizer.image.pullPolicy` | Specifies pull policy | `IfNotPresent` | +| `provisioner.resizer.image.extraArgs` | Specifies extra arguments for the resizer sidecar | `[]` | +| `provisioner.resizer.name` | Specifies the name of the csi-resizer sidecar | `resizer` | +| `provisioner.resizer.enabled` | Specifies whether the resizer sidecar is enabled | `true` | +| `provisioner.snapshotter.image.repository` | Specifies the 
csi-snapshotter image repository URL | `registry.k8s.io/sig-storage/csi-snapshotter` | +| `provisioner.snapshotter.image.tag` | Specifies image tag | `v7.0.0` | +| `provisioner.snapshotter.image.pullPolicy` | Specifies pull policy | `IfNotPresent` | +| `provisioner.snapshotter.image.extraArgs` | Specifies extra arguments for the snapshotter sidecar | `[]` | +| `provisioner.nodeSelector` | Specifies the node selector for the provisioner deployment | `{}` | +| `provisioner.tolerations` | Specifies the tolerations for the provisioner deployment | `[]` | +| `provisioner.affinity` | Specifies the affinity for the provisioner deployment | `{}` | +| `provisioner.podSecurityPolicy.enabled` | Specifies whether podSecurityPolicy is enabled | `false` | +| `topology.enabled` | Specifies whether topology-based provisioning support should be exposed by CSI | `false` | +| `topology.domainLabels` | Defines which node labels to use as domains for CSI nodeplugins to advertise their domains | `[failure-domain/region, failure-domain/zone]` | +| `readAffinity.enabled` | Enable read affinity for RBD volumes. Recommended to set to true if running kernel 5.8 or newer. | `false` | +| `readAffinity.crushLocationLabels` | Defines which node labels to use as CRUSH location. This should correspond to the values set in the CRUSH map. For more information, click [here](https://github.com/ceph/ceph-csi/blob/v3.9.0/docs/deploy-rbd.md#read-affinity-using-crush-locations-for-rbd-volumes)| `[]` | +| `provisionerSocketFile` | The filename of the provisioner socket | `csi-provisioner.sock` | +| `pluginSocketFile` | The filename of the plugin socket | `csi.sock` | +| `kubeletDir` | kubelet working directory | `/var/lib/kubelet` | +| `cephLogDirHostPath` | Host path location for Ceph client processes logging, e.g. rbd-nbd | `/var/log/ceph` | +| `driverName` | Name of the csi-driver | `rbd.csi.ceph.com` | +| `configMapName` | Name of the configmap which contains the cluster configuration | `ceph-csi-config` | +| `externallyManagedConfigmap` | Specifies the use of an externally provided configmap | `false` | +| `cephConfConfigMapName` | Name of the configmap which contains the ceph.conf configuration | `ceph-config` | +| `kmsConfigMapName` | Name of the configmap used for the encryption kms configuration | `ceph-csi-encryption-kms-config` | +| `storageClass.create` | Specifies whether the StorageClass should be created | `false` | +| `storageClass.name` | Specifies the rbd StorageClass name | `csi-rbd-sc` | +| `storageClass.annotations` | Specifies the annotations for the rbd StorageClass | `{}` | +| `storageClass.clusterID` | String representing a Ceph cluster to provision storage from | `<cluster-ID>` | +| `storageClass.dataPool` | Specifies the erasure coded pool | `""` | +| `storageClass.pool` | Ceph pool into which the RBD image shall be created | `replicapool` | +| `storageClass.imageFeatures` | Specifies RBD image features | `layering` | +| `storageClass.tryOtherMounters` | Specifies whether to try other mounters if the current mounter fails to mount the rbd image for any reason | `false` | +| `storageClass.mkfsOptions` | Options to pass to the `mkfs` command while creating the filesystem on the RBD device | `""` | +| `storageClass.mounter` | Specifies the RBD mounter | `""` | +| `storageClass.cephLogDir` | Ceph client log location; it is the target bindmount path used inside the container | `"/var/log/ceph"` | +| `storageClass.cephLogStrategy` | Ceph client log strategy; available options are `remove`, `compress`, or `preserve` | `"remove"` | +| `storageClass.volumeNamePrefix` | Prefix to use 
for naming RBD images | `""` | +| `storageClass.encrypted` | Specifies whether the volume should be encrypted. Set it to true if you want to enable encryption | `""` | +| `storageClass.encryptionKMSID` | Specifies the encryption kms id | `""` | +| `storageClass.topologyConstrainedPools` | Adds the topology constrained pools configuration, if topology-based pools are set up and topology constrained provisioning is required | `[]` | +| `storageClass.mapOptions` | Specifies a comma-separated list of map options | `""` | +| `storageClass.unmapOptions` | Specifies a comma-separated list of unmap options | `""` | +| `storageClass.stripeUnit` | Specifies the stripe unit in bytes | `""` | +| `storageClass.stripeCount` | Specifies the number of objects to stripe over before looping | `""` | +| `storageClass.objectSize` | Specifies the object size in bytes | `""` | +| `storageClass.provisionerSecret` | Specifies the provisioner secret name; the secret must contain user and/or Ceph admin credentials | `csi-rbd-secret` | +| `storageClass.provisionerSecretNamespace` | Specifies the provisioner secret namespace | `""` | +| `storageClass.controllerExpandSecret` | Specifies the controller expand secret name | `csi-rbd-secret` | +| `storageClass.controllerExpandSecretNamespace` | Specifies the controller expand secret namespace | `""` | +| `storageClass.nodeStageSecret` | Specifies the node stage secret name | `csi-rbd-secret` | +| `storageClass.nodeStageSecretNamespace` | Specifies the node stage secret namespace | `""` | +| `storageClass.fstype` | Specifies the filesystem type of the volume | `ext4` | +| `storageClass.reclaimPolicy` | Specifies the reclaim policy of the StorageClass | `Delete` | +| `storageClass.allowVolumeExpansion` | Specifies whether volume expansion should be allowed | `true` | +| `storageClass.mountOptions` | Specifies the mount options for the StorageClass | `[]` | +| `secret.create` | Specifies whether the secret should be created | `false` | +| `secret.name` | Specifies the rbd secret name | `csi-rbd-secret` | +| `secret.userID` | Specifies the user ID of the rbd secret | `<plaintext ID>` | +| `secret.userKey` | Specifies the key that corresponds to the userID | `<Ceph auth key corresponding to ID above>` | +| `secret.encryptionPassphrase` | Specifies the encryption passphrase of the secret | `test_passphrase` | +| `selinuxMount` | Mount the host /etc/selinux inside pods to support selinux-enabled filesystems | `true` | +| `CSIDriver.fsGroupPolicy` | Specifies the fsGroupPolicy for the CSI driver object | `File` | +| `CSIDriver.seLinuxMount` | Specifies whether to enable SELinux mount support for efficient volume relabeling | `true` |
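+
+With `storageClass.create: true`, workloads can then request RBD-backed storage through a regular PersistentVolumeClaim. A minimal sketch, assuming the default StorageClass name `csi-rbd-sc` (the claim name and size are placeholders):
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: rbd-pvc                # placeholder claim name
+spec:
+  accessModes:
+    - ReadWriteOnce            # RWX is also GA for Block mode volumes (see matrix above)
+  resources:
+    requests:
+      storage: 1Gi
+  storageClassName: csi-rbd-sc
+```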
"edge-native", + "custom" + ], + "displayName": "Ceph CSI RBD", + "layer":"csi", + "name": "ceph-rbd", + "version": "3.11.0" +} \ No newline at end of file diff --git a/packs/ceph-rbd-3.11.0/values.yaml b/packs/ceph-rbd-3.11.0/values.yaml new file mode 100644 index 00000000..dbd88ad0 --- /dev/null +++ b/packs/ceph-rbd-3.11.0/values.yaml @@ -0,0 +1,578 @@ +# Default values for ceph-csi-rbd driver +# This is a YAML-formatted file +pack: + content: + images: + - image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0 + - image: quay.io/cephcsi/cephcsi:v3.11.0 + - image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.0 + - image: registry.k8s.io/sig-storage/csi-attacher:v4.5.0 + - image: registry.k8s.io/sig-storage/csi-resizer:v1.10.0 + - image: registry.k8s.io/sig-storage/csi-snapshotter:v7.0.0 + + charts: + - repo: https://ceph.github.io/csi-charts + name: ceph-csi-rbd + version: 3.11.0 + #The namespace (on the target cluster) to install this chart + #When not found, a new namespace will be created + namespace: "ceph-csi-rbd" + namespaceLabels: + "ceph-csi-rbd": "pod-security.kubernetes.io/enforce=privileged,pod-security.kubernetes.io/enforce-version=v{{ .spectro.system.kubernetes.version | substr 0 4 }}" + +charts: + ceph-csi-rbd: + fullnameOverride: ceph-csi-rbd + + rbac: + # Specifies whether RBAC resources should be created + create: true + + serviceAccounts: + nodeplugin: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname + name: + provisioner: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname + name: + + # Configuration for the CSI to connect to the cluster + # Ref: https://github.com/ceph/ceph-csi/blob/devel/examples/README.md + # Example: + # csiConfig: + # - clusterID: "<cluster-id>" + # monitors: + # - "<MONValue1>" + # - "<MONValue2>" + # rbd: + # netNamespaceFilePath: "{{ .kubeletDir }}/plugins/{{ .driverName }}/net" + # readAffinity: + # enabled: true + # crushLocationLabels: + # - topology.kubernetes.io/region + # - topology.kubernetes.io/zone + csiConfig: [] + + # Configuration details of clusterID,PoolID and FscID mapping + # csiMapping: + # - clusterIDMapping: + # clusterID on site1: clusterID on site2 + # RBDPoolIDMapping: + # - poolID on site1: poolID on site2 + # CephFSFscIDMapping: + # - CephFS FscID on site1: CephFS FscID on site2 + csiMapping: [] + + # Configuration for the encryption KMS + # Ref: https://github.com/ceph/ceph-csi/blob/devel/docs/deploy-rbd.md + # Example: + # encryptionKMSConfig: + # vault-unique-id-1: + # encryptionKMSType: vault + # vaultAddress: https://vault.example.com + # vaultAuthPath: /v1/auth/kubernetes/login + # vaultRole: csi-kubernetes + # vaultPassphraseRoot: /v1/secret + # vaultPassphrasePath: ceph-csi/ + # vaultCAVerify: "false" + encryptionKMSConfig: {} + + # Labels to apply to all resources + commonLabels: {} + + # Set logging level for csi containers. + # Supported values from 0 to 5. 0 for general useful logs, + # 5 for trace level verbosity. 
+ # logLevel is the variable for CSI driver containers's log level + logLevel: 5 + # sidecarLogLevel is the variable for Kubernetes sidecar container's log level + sidecarLogLevel: 1 + + # Set fsGroupPolicy for CSI Driver object spec + # https://kubernetes-csi.github.io/docs/support-fsgroup.html + # The following modes are supported: + # - None: Indicates that volumes will be mounted with no modifications, as the + # CSI volume driver does not support these operations. + # - File: Indicates that the CSI volume driver supports volume ownership and + # permission change via fsGroup, and Kubernetes may use fsGroup to change + # permissions and ownership of the volume to match user requested fsGroup in + # the pod's SecurityPolicy regardless of fstype or access mode. + # - ReadWriteOnceWithFSType: Indicates that volumes will be examined to + # determine if volume ownership and permissions should be modified to match + # the pod's security policy. + # Changes will only occur if the fsType is defined and the persistent volume's + # accessModes contains ReadWriteOnce. + CSIDriver: + fsGroupPolicy: "File" + seLinuxMount: true + + nodeplugin: + name: nodeplugin + # set user created priorityclassName for csi plugin pods. default is + # system-node-critical which is high priority + priorityClassName: system-node-critical + # if you are using rbd-nbd client set this value to OnDelete + updateStrategy: RollingUpdate + + httpMetrics: + # Metrics only available for cephcsi/cephcsi => 1.2.0 + # Specifies whether http metrics should be exposed + enabled: true + # The port of the container to expose the metrics + containerPort: 8080 + + service: + # Specifies whether a service should be created for the metrics + enabled: true + # The port to use for the service + servicePort: 8080 + type: ClusterIP + + # Annotations for the service + # Example: + # annotations: + # prometheus.io/scrape: "true" + # prometheus.io/port: "8080" + annotations: {} + + clusterIP: "" + + ## List of IP addresses at which the stats-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## Reference to one or more secrets to be used when pulling images + ## + imagePullSecrets: [] + # - name: "image-pull-secret" + + profiling: + # enable profiling to check for memory leaks + enabled: false + + registrar: + image: + repository: registry.k8s.io/sig-storage/csi-node-driver-registrar + tag: v2.10.0 + pullPolicy: IfNotPresent + resources: {} + + plugin: + image: + repository: quay.io/cephcsi/cephcsi + tag: v3.11.0 + pullPolicy: IfNotPresent + resources: {} + + nodeSelector: {} + + tolerations: [] + + affinity: {} + + provisioner: + name: provisioner + replicaCount: 3 + strategy: + # RollingUpdate strategy replaces old pods with new ones gradually, + # without incurring downtime. + type: RollingUpdate + rollingUpdate: + # maxUnavailable is the maximum number of pods that can be + # unavailable during the update process. + maxUnavailable: 50% + # if fstype is not specified in storageclass, ext4 is default + defaultFSType: ext4 + # deployController to enable or disable the deployment of controller which + # generates the OMAP data if its not Present. 
+ deployController: true + # Timeout for waiting for creation or deletion of a volume + timeout: 60s + # cluster name to set on the RBD image + # clustername: "k8s-cluster-1" + # Hard limit for maximum number of nested volume clones that are taken before + # a flatten occurs + hardMaxCloneDepth: 8 + # Soft limit for maximum number of nested volume clones that are taken before + # a flatten occurs + softMaxCloneDepth: 4 + # Maximum number of snapshots allowed on rbd image without flattening + maxSnapshotsOnImage: 450 + # Minimum number of snapshots allowed on rbd image to trigger flattening + minSnapshotsOnImage: 250 + # skip image flattening if kernel support mapping of rbd images + # which has the deep-flatten feature + # skipForceFlatten: false + + # set user created priorityclassName for csi provisioner pods. default is + # system-cluster-critical which is less priority than system-node-critical + priorityClassName: system-cluster-critical + + # enable hostnetwork for provisioner pod. default is false + # useful for deployments where the podNetwork has no access to ceph + enableHostNetwork: false + + httpMetrics: + # Metrics only available for cephcsi/cephcsi => 1.2.0 + # Specifies whether http metrics should be exposed + enabled: true + # The port of the container to expose the metrics + containerPort: 8080 + + service: + # Specifies whether a service should be created for the metrics + enabled: true + # The port to use for the service + servicePort: 8080 + type: ClusterIP + + # Annotations for the service + # Example: + # annotations: + # prometheus.io/scrape: "true" + # prometheus.io/port: "8080" + annotations: {} + + clusterIP: "" + + ## List of IP addresses at which the stats-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## Reference to one or more secrets to be used when pulling images + ## + imagePullSecrets: [] + # - name: "image-pull-secret" + + profiling: + # enable profiling to check for memory leaks + enabled: false + + provisioner: + image: + repository: registry.k8s.io/sig-storage/csi-provisioner + tag: v4.0.0 + pullPolicy: IfNotPresent + resources: {} + ## For further options, check + ## https://github.com/kubernetes-csi/external-provisioner#command-line-options + extraArgs: [] + + # set metadata on volume + setmetadata: true + + attacher: + name: attacher + enabled: true + image: + repository: registry.k8s.io/sig-storage/csi-attacher + tag: v4.5.0 + pullPolicy: IfNotPresent + resources: {} + ## For further options, check + ## https://github.com/kubernetes-csi/external-attacher#command-line-options + extraArgs: [] + + resizer: + name: resizer + enabled: true + image: + repository: registry.k8s.io/sig-storage/csi-resizer + tag: v1.10.0 + pullPolicy: IfNotPresent + resources: {} + ## For further options, check + ## https://github.com/kubernetes-csi/external-resizer#recommended-optional-arguments + extraArgs: [] + + snapshotter: + image: + repository: registry.k8s.io/sig-storage/csi-snapshotter + tag: v7.0.0 + pullPolicy: IfNotPresent + resources: {} + ## For further options, check + ## https://github.com/kubernetes-csi/external-snapshotter#csi-external-snapshotter-sidecar-command-line-options + extraArgs: [] + + args: + # enableVolumeGroupSnapshots enables support for volume group snapshots + enableVolumeGroupSnapshots: false + + nodeSelector: {} + + tolerations: [] + + affinity: {} + + topology: + # Specifies whether topology based 
provisioning support should + # be exposed by CSI + enabled: false + # domainLabels define which node labels to use as domains + # for CSI nodeplugins to advertise their domains + # NOTE: the value here serves as an example and needs to be + # updated with node labels that define domains of interest + domainLabels: + - failure-domain/region + - failure-domain/zone + + # readAffinity: + # Enable read affinity for RBD volumes. Recommended to + # set to true if running kernel 5.8 or newer. + # enabled: false + # Define which node labels to use as CRUSH location. + # This should correspond to the values set in the CRUSH map. + # NOTE: the value here serves as an example + # crushLocationLabels: + # - topology.kubernetes.io/region + # - topology.kubernetes.io/zone + + storageClass: + # Specifies whether the storageclass should be created + create: false + name: csi-rbd-sc + + # Annotations for the storage class + # Example: + # annotations: + # storageclass.kubernetes.io/is-default-class: "true" + annotations: {} + + # (required) String representing a Ceph cluster to provision storage from. + # Should be unique across all Ceph clusters in use for provisioning, + # cannot be greater than 36 bytes in length, and should remain immutable for + # the lifetime of the StorageClass in use. + clusterID: <cluster-ID> + + # (optional) If you want to use erasure coded pool with RBD, you need to + # create two pools. one erasure coded and one replicated. + # You need to specify the replicated pool here in the `pool` parameter, it is + # used for the metadata of the images. + # The erasure coded pool must be set as the `dataPool` parameter below. + # dataPool: <ec-data-pool> + dataPool: "" + + # (required) Ceph pool into which the RBD image shall be created + # (optional) if topologyConstrainedPools is provided + # eg: pool: replicapool + pool: replicapool + + # (optional) RBD image features, CSI creates image with image-format 2 CSI + # RBD currently supports `layering`, `journaling`, `exclusive-lock`, + # `object-map`, `fast-diff`, `deep-flatten` features. + # Refer https://docs.ceph.com/en/latest/rbd/rbd-config-ref/#image-features + # for image feature dependencies. + # imageFeatures: layering,journaling,exclusive-lock,object-map,fast-diff + imageFeatures: "layering" + + # (optional) Specifies whether to try other mounters in case if the current + # mounter fails to mount the rbd image for any reason. True means fallback + # to next mounter, default is set to false. + # Note: tryOtherMounters is currently useful to fallback from krbd to rbd-nbd + # in case if any of the specified imageFeatures is not supported by krbd + # driver on node scheduled for application pod launch, but in the future this + # should work with any mounter type. + # tryOtherMounters: false + + # (optional) Options to pass to the `mkfs` command while creating the + # filesystem on the RBD device. Check the man-page for the `mkfs` command + # for the filesystem for more details. When `mkfsOptions` is set here, the + # defaults will not be used, consider including them in this parameter. 
+ # + # The default options depend on the csi.storage.k8s.io/fstype setting: + # - ext4: "-m0 -Enodiscard,lazy_itable_init=1,lazy_journal_init=1" + # - xfs: "-K" + # + # mkfsOptions: "-m0 -Ediscard -i1024" + + # (optional) uncomment the following to use rbd-nbd as mounter + # on supported nodes + # mounter: rbd-nbd + mounter: "" + + # (optional) ceph client log location, eg: rbd-nbd + # By default host-path /var/log/ceph of node is bind-mounted into + # csi-rbdplugin pod at /var/log/ceph mount path. This is to configure + # target bindmount path used inside container for ceph clients logging. + # See docs/rbd-nbd.md for available configuration options. + # cephLogDir: /var/log/ceph + cephLogDir: "" + + # (optional) ceph client log strategy + # By default, log file belonging to a particular volume will be deleted + # on unmap, but you can choose to just compress instead of deleting it + # or even preserve the log file in text format as it is. + # Available options `remove` or `compress` or `preserve` + # cephLogStrategy: remove + cephLogStrategy: "" + + # (optional) Prefix to use for naming RBD images. + # If omitted, defaults to "csi-vol-". + # volumeNamePrefix: "foo-bar-" + volumeNamePrefix: "" + + # (optional) Instruct the plugin it has to encrypt the volume + # By default it is disabled. Valid values are "true" or "false". + # A string is expected here, i.e. "true", not true. + # encrypted: "true" + encrypted: "" + + # (optional) Use external key management system for encryption passphrases by + # specifying a unique ID matching KMS ConfigMap. The ID is only used for + # correlation to configmap entry. + encryptionKMSID: "" + + # Add topology constrained pools configuration, if topology based pools + # are setup, and topology constrained provisioning is required. + # For further information read TODO<doc> + # topologyConstrainedPools: | + # [{"poolName":"pool0", + # "dataPool":"ec-pool0" # optional, erasure-coded pool for data + # "domainSegments":[ + # {"domainLabel":"region","value":"east"}, + # {"domainLabel":"zone","value":"zone1"}]}, + # {"poolName":"pool1", + # "dataPool":"ec-pool1" # optional, erasure-coded pool for data + # "domainSegments":[ + # {"domainLabel":"region","value":"east"}, + # {"domainLabel":"zone","value":"zone2"}]}, + # {"poolName":"pool2", + # "dataPool":"ec-pool2" # optional, erasure-coded pool for data + # "domainSegments":[ + # {"domainLabel":"region","value":"west"}, + # {"domainLabel":"zone","value":"zone1"}]} + # ] + topologyConstrainedPools: [] + + # (optional) mapOptions is a comma-separated list of map options. + # For krbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options + # For nbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options + # Format: + # mapOptions: "<mounter>:op1,op2;<mounter>:op1,op2" + # An empty mounter field is treated as krbd type for compatibility. + # eg: + # mapOptions: "krbd:lock_on_read,queue_depth=1024;nbd:try-netlink" + mapOptions: "" + + # (optional) unmapOptions is a comma-separated list of unmap options. + # For krbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options + # For nbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options + # Format: + # unmapOptions: "<mounter>:op1,op2;<mounter>:op1,op2" + # An empty mounter field is treated as krbd type for compatibility. 
+ # eg: + # unmapOptions: "krbd:force;nbd:force" + unmapOptions: "" + + # (optional) stripe unit in bytes + # If set, stripeCount must also be specified + # For defaults, refer to + # https://docs.ceph.com/en/latest/man/8/rbd/#striping + stripeUnit: "" + + # (optional) number of objects to stripe over before looping + # If set, stripeUnit must also be specified + # For defaults, refer to + # https://docs.ceph.com/en/latest/man/8/rbd/#striping + stripeCount: "" + + # (optional) object size in bytes + # If set, must be a power of 2 + objectSize: "" + + # The secrets have to contain Ceph credentials with required access + # to the 'pool'. + provisionerSecret: csi-rbd-secret + # If Namespaces are left empty, the secrets are assumed to be in the + # Release namespace. + provisionerSecretNamespace: "" + controllerExpandSecret: csi-rbd-secret + controllerExpandSecretNamespace: "" + nodeStageSecret: csi-rbd-secret + nodeStageSecretNamespace: "" + # Specify the filesystem type of the volume. If not specified, + # csi-provisioner will set default as `ext4`. + fstype: ext4 + reclaimPolicy: Delete + allowVolumeExpansion: true + mountOptions: [] + # Mount Options + # Example: + # mountOptions: + # - discard + + # Mount the host /etc/selinux inside pods to support + # selinux-enabled filesystems + selinuxMount: true + + secret: + # Specifies whether the secret should be created + create: false + name: csi-rbd-secret + annotations: {} + # Key values correspond to a user name and its key, as defined in the + # ceph cluster. User ID should have required access to the 'pool' + # specified in the storage class + userID: <plaintext ID> + userKey: <Ceph auth key corresponding to userID above> + # Encryption passphrase + encryptionPassphrase: test_passphrase + + # This is a sample configmap that helps define a Ceph configuration as required + # by the CSI plugins. + # Sample ceph.conf available at + # https://github.com/ceph/ceph/blob/master/src/sample.ceph.conf Detailed + # documentation is available at + # https://docs.ceph.com/en/latest/rados/configuration/ceph-conf/ + cephconf: | + [global] + auth_cluster_required = cephx + auth_service_required = cephx + auth_client_required = cephx + + # Array of extra objects to deploy with the release + extraDeploy: [] + + ######################################################### + # Variables for 'internal' use please use with caution! # + ######################################################### + + # The filename of the provisioner socket + provisionerSocketFile: csi-provisioner.sock + # The filename of the plugin socket + pluginSocketFile: csi.sock + # kubelet working directory,can be set using `--root-dir` when starting kubelet. 
kubeletDir: /var/lib/kubelet + # Host path location for ceph client processes logging, ex: rbd-nbd + cephLogDirHostPath: /var/log/ceph + # Name of the csi-driver + driverName: rbd.csi.ceph.com + # Name of the configmap used for state + configMapName: ceph-csi-config + # Key to use in the Configmap if not config.json + # configMapKey: + # Use an externally provided configmap + externallyManagedConfigmap: false + # Name of the configmap used for ceph.conf + cephConfConfigMapName: ceph-config + # Name of the configmap used for encryption kms configuration + kmsConfigMapName: ceph-csi-encryption-kms-config \ No newline at end of file diff --git a/packs/ceph-rbd-addon-3.11.0/README.md b/packs/ceph-rbd-addon-3.11.0/README.md new file mode 100644 index 00000000..58f7c0a6 --- /dev/null +++ b/packs/ceph-rbd-addon-3.11.0/README.md @@ -0,0 +1,167 @@ +# Ceph CSI + +Ceph CSI plugins implement an interface between a CSI-enabled K8s cluster and Ceph clusters. They enable dynamic provisioning of Ceph volumes and attaching them to workloads. + +For in-depth details about the configuration and deployment of the RBD plugin, please refer to the [rbd doc](https://github.com/ceph/ceph-csi/blob/devel/docs/deploy-rbd.md). + +## Tested Kubernetes versions + +Ceph CSI drivers are currently developed and tested **exclusively** in Kubernetes environments. + +| Ceph CSI Version | Container Orchestrator Name | Version Tested | +| -----------------| --------------------------- | -------------------| +| v3.11.0 | Kubernetes | v1.26, v1.27, v1.28, v1.29| + +## Support Matrix + +### Ceph-CSI features and available versions + +Please refer to the [rbd-nbd mounter](./docs/rbd-nbd.md#support-matrix) documentation for its support details. + +| Plugin | Features | Feature Status | CSI Driver Version | CSI Spec Version | Ceph Cluster Version | Kubernetes Version | +| ------ | --------------------------------------------------------- | -------------- | ------------------ | ---------------- | -------------------- | ------------------ | +| RBD | Dynamically provision, de-provision Block mode RWO volume | GA | >= v1.0.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.14.0 | +| | Dynamically provision, de-provision Block mode RWX volume | GA | >= v1.0.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.14.0 | +| | Dynamically provision, de-provision Block mode RWOP volume| Alpha | >= v3.5.0 | >= v1.5.0 | Pacific (>=v16.2.0) | >= v1.22.0 | +| | Dynamically provision, de-provision File mode RWO volume | GA | >= v1.0.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.14.0 | +| | Dynamically provision, de-provision File mode RWOP volume | Alpha | >= v3.5.0 | >= v1.5.0 | Pacific (>=v16.2.0) | >= v1.22.0 | +| | Provision File Mode ROX volume from snapshot | Alpha | >= v3.0.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.17.0 | +| | Provision File Mode ROX volume from another volume | Alpha | >= v3.0.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.16.0 | +| | Provision Block Mode ROX volume from snapshot | Alpha | >= v3.0.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.17.0 | +| | Provision Block Mode ROX volume from another volume | Alpha | >= v3.0.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.16.0 | +| | Creating and deleting snapshot | GA | >= v1.0.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.17.0 | +| | Provision volume from snapshot | GA | >= v1.0.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.17.0 | +| | Provision volume from another volume | GA | >= v1.0.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.16.0 | +| | Expand volume | Beta | >= v2.0.0 | >= v1.1.0 | Pacific (>=v16.2.0) | >= v1.15.0 | +| | 
Volume/PV Metrics of File Mode Volume | GA | >= v1.2.0 | >= v1.1.0 | Pacific (>=v16.2.0) | >= v1.15.0 | +| | Volume/PV Metrics of Block Mode Volume | GA | >= v1.2.0 | >= v1.1.0 | Pacific (>=v16.2.0) | >= v1.21.0 | +| | Topology Aware Provisioning Support | Alpha | >= v2.1.0 | >= v1.1.0 | Pacific (>=v16.2.0) | >= v1.14.0 | + +`NOTE`: The `Alpha` status reflects possible non-backward compatible changes in the future, and is thus not recommended for production use. + +## Usage & Configuration + +The following table lists the configurable parameters of the ceph-rbd pack and their default values. + +| Parameter | Description | Default | +| ---------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------- | +| `rbac.create` | Specifies whether RBAC resources should be created | `true` | +| `serviceAccounts.nodeplugin.create` | Specifies whether a nodeplugin ServiceAccount should be created | `true` | +| `serviceAccounts.nodeplugin.name` | The name of the nodeplugin ServiceAccount to use. If not set and create is true, a name is generated using the fullname | "" | +| `serviceAccounts.provisioner.create` | Specifies whether a provisioner ServiceAccount should be created | `true` | +| `serviceAccounts.provisioner.name` | The name of the provisioner ServiceAccount to use. If not set and create is true, a name is generated using the fullname | "" | +| `csiConfig` | Configuration for the CSI to connect to the cluster | [] | +| `csiMapping` | Configuration details of clusterID, PoolID, and FscID mapping | [] | +| `encryptionKMSConfig` | Configuration for the encryption KMS | `{}` | +| `commonLabels` | Labels to apply to all resources | `{}` | +| `logLevel` | Sets the logging level for csi containers. Supported values are 0 to 5: 0 for general useful logs, 5 for trace-level verbosity. | `5` | +| `sidecarLogLevel` | Sets the logging level for csi sidecar containers. Supported values are 0 to 5: 0 for general useful logs, 5 for trace-level verbosity. | `1` | +| `nodeplugin.name` | Specifies the nodeplugin's name | `nodeplugin` | +| `nodeplugin.updateStrategy` | Specifies the update strategy. If you are using the rbd-nbd client, set this value to OnDelete | `RollingUpdate` | +| `nodeplugin.priorityClassName` | Set a user-created priorityClassName for csi plugin pods. 
Default is system-node-critical, which is the highest priority | `system-node-critical` | +| `nodeplugin.imagePullSecrets` | Specifies imagePullSecrets for containers | `[]` | +| `nodeplugin.profiling.enabled` | Specifies whether profiling should be enabled | `false` | +| `nodeplugin.registrar.image.repository` | Node Registrar image repository URL | `registry.k8s.io/sig-storage/csi-node-driver-registrar` | +| `nodeplugin.registrar.image.tag` | Image tag | `v2.10.0` | +| `nodeplugin.registrar.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `nodeplugin.plugin.image.repository` | Nodeplugin image repository URL | `quay.io/cephcsi/cephcsi` | +| `nodeplugin.plugin.image.tag` | Image tag | `v3.11.0` | +| `nodeplugin.plugin.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `nodeplugin.nodeSelector` | Kubernetes `nodeSelector` to add to the Daemonset | `{}` | +| `nodeplugin.tolerations` | List of Kubernetes `tolerations` to add to the Daemonset | `[]` | +| `nodeplugin.podSecurityPolicy.enabled` | If true, create & use [Pod Security Policy resources](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). | `false` | +| `provisioner.name` | Specifies the name of the provisioner | `provisioner` | +| `provisioner.replicaCount` | Specifies the replica count | `3` | +| `provisioner.defaultFSType` | Specifies the default FSType | `ext4` | +| `provisioner.deployController` | Enables or disables the deployment of the controller, which generates the OMAP data if it is not present | `true` | +| `provisioner.hardMaxCloneDepth` | Hard limit for the maximum number of nested volume clones that are taken before a flatten occurs | `8` | +| `provisioner.softMaxCloneDepth` | Soft limit for the maximum number of nested volume clones that are taken before a flatten occurs | `4` | +| `provisioner.maxSnapshotsOnImage` | Maximum number of snapshots allowed on an rbd image without flattening | `450` | +| `provisioner.minSnapshotsOnImage` | Minimum number of snapshots allowed on an rbd image to trigger flattening | `250` | +| `provisioner.skipForceFlatten` | Skip image flattening if the kernel supports mapping of rbd images that have the deep-flatten feature | `false` | +| `provisioner.timeout` | gRPC timeout for waiting for creation or deletion of a volume | `60s` | +| `provisioner.clustername` | Cluster name to set on the RBD image | "" | +| `provisioner.setmetadata` | Set metadata on volume | `true` | +| `provisioner.priorityClassName` | Set a user-created priorityClassName for csi provisioner pods. Default is `system-cluster-critical`, which is lower priority than `system-node-critical` | `system-cluster-critical` | +| `provisioner.enableHostNetwork` | Specifies whether hostNetwork is enabled for the provisioner pod. 
| `false` | +| `provisioner.imagePullSecrets` | Specifies imagePullSecrets for containers | `[]` | +| `provisioner.profiling.enabled` | Specifies whether profiling should be enabled | `false` | +| `provisioner.provisioner.image.repository` | Specifies the csi-provisioner image repository URL | `registry.k8s.io/sig-storage/csi-provisioner` | +| `provisioner.provisioner.image.tag` | Specifies image tag | `v4.0.0` | +| `provisioner.provisioner.image.pullPolicy` | Specifies pull policy | `IfNotPresent` | +| `provisioner.provisioner.image.extraArgs` | Specifies extra arguments for the provisioner sidecar | `[]` | +| `provisioner.snapshotter.args.enableVolumeGroupSnapshots` | Enables the creation of volume group snapshots | `false` | +| `provisioner.attacher.image.repository` | Specifies the csi-attacher image repository URL | `registry.k8s.io/sig-storage/csi-attacher` | +| `provisioner.attacher.image.tag` | Specifies image tag | `v4.5.0` | +| `provisioner.attacher.image.pullPolicy` | Specifies pull policy | `IfNotPresent` | +| `provisioner.attacher.image.extraArgs` | Specifies extra arguments for the attacher sidecar | `[]` | +| `provisioner.attacher.name` | Specifies the name of the csi-attacher sidecar | `attacher` | +| `provisioner.attacher.enabled` | Specifies whether the attacher sidecar is enabled | `true` | +| `provisioner.resizer.image.repository` | Specifies the csi-resizer image repository URL | `registry.k8s.io/sig-storage/csi-resizer` | +| `provisioner.resizer.image.tag` | Specifies image tag | `v1.10.0` | +| `provisioner.resizer.image.pullPolicy` | Specifies pull policy | `IfNotPresent` | +| `provisioner.resizer.image.extraArgs` | Specifies extra arguments for the resizer sidecar | `[]` | +| `provisioner.resizer.name` | Specifies the name of the csi-resizer sidecar | `resizer` | +| `provisioner.resizer.enabled` | Specifies whether the resizer sidecar is enabled | `true` | +| `provisioner.snapshotter.image.repository` | Specifies the csi-snapshotter image repository URL | `registry.k8s.io/sig-storage/csi-snapshotter` | +| `provisioner.snapshotter.image.tag` | Specifies image tag | `v7.0.0` | +| `provisioner.snapshotter.image.pullPolicy` | Specifies pull policy | `IfNotPresent` | +| `provisioner.snapshotter.image.extraArgs` | Specifies extra arguments for the snapshotter sidecar | `[]` | +| `provisioner.nodeSelector` | Specifies the node selector for the provisioner deployment | `{}` | +| `provisioner.tolerations` | Specifies the tolerations for the provisioner deployment | `[]` | +| `provisioner.affinity` | Specifies the affinity for the provisioner deployment | `{}` | +| `provisioner.podSecurityPolicy.enabled` | Specifies whether podSecurityPolicy is enabled | `false` | +| `topology.enabled` | Specifies whether topology-based provisioning support should be exposed by CSI | `false` | +| `topology.domainLabels` | Defines which node labels to use as domains for CSI nodeplugins to advertise their domains | `[failure-domain/region, failure-domain/zone]` | +| `readAffinity.enabled` | Enable read affinity for RBD volumes. Recommended to set to true if running kernel 5.8 or newer. | `false` | +| `readAffinity.crushLocationLabels` | Defines which node labels to use as CRUSH location. This should correspond to the values set in the CRUSH map. 
For more information, click [here](https://github.com/ceph/ceph-csi/blob/v3.9.0/docs/deploy-rbd.md#read-affinity-using-crush-locations-for-rbd-volumes)| `[]` | +| `provisionerSocketFile` | The filename of the provisioner socket | `csi-provisioner.sock` | +| `pluginSocketFile` | The filename of the plugin socket | `csi.sock` | +| `kubeletDir` | kubelet working directory | `/var/lib/kubelet` | +| `cephLogDirHostPath` | Host path location for Ceph client processes logging, e.g. rbd-nbd | `/var/log/ceph` | +| `driverName` | Name of the csi-driver | `rbd.csi.ceph.com` | +| `configMapName` | Name of the configmap which contains the cluster configuration | `ceph-csi-config` | +| `externallyManagedConfigmap` | Specifies the use of an externally provided configmap | `false` | +| `cephConfConfigMapName` | Name of the configmap which contains the ceph.conf configuration | `ceph-config` | +| `kmsConfigMapName` | Name of the configmap used for the encryption kms configuration | `ceph-csi-encryption-kms-config` | +| `storageClass.create` | Specifies whether the StorageClass should be created | `false` | +| `storageClass.name` | Specifies the rbd StorageClass name | `csi-rbd-sc` | +| `storageClass.annotations` | Specifies the annotations for the rbd StorageClass | `{}` | +| `storageClass.clusterID` | String representing a Ceph cluster to provision storage from | `<cluster-ID>` | +| `storageClass.dataPool` | Specifies the erasure coded pool | `""` | +| `storageClass.pool` | Ceph pool into which the RBD image shall be created | `replicapool` | +| `storageClass.imageFeatures` | Specifies RBD image features | `layering` | +| `storageClass.tryOtherMounters` | Specifies whether to try other mounters if the current mounter fails to mount the rbd image for any reason | `false` | +| `storageClass.mkfsOptions` | Options to pass to the `mkfs` command while creating the filesystem on the RBD device | `""` | +| `storageClass.mounter` | Specifies the RBD mounter | `""` | +| `storageClass.cephLogDir` | Ceph client log location; it is the target bindmount path used inside the container | `"/var/log/ceph"` | +| `storageClass.cephLogStrategy` | Ceph client log strategy; available options are `remove`, `compress`, or `preserve` | `"remove"` | +| `storageClass.volumeNamePrefix` | Prefix to use for naming RBD images | `""` | +| `storageClass.encrypted` | Specifies whether the volume should be encrypted. Set it to true if you want to enable encryption | `""` | +| `storageClass.encryptionKMSID` | Specifies the encryption kms id | `""` | +| `storageClass.topologyConstrainedPools` | Adds the topology constrained pools configuration, if topology-based pools are set up and topology constrained provisioning is required | `[]` | +| `storageClass.mapOptions` | Specifies a comma-separated list of map options | `""` | +| `storageClass.unmapOptions` | Specifies a comma-separated list of unmap options | `""` | +| `storageClass.stripeUnit` | Specifies the stripe unit in bytes | `""` | +| `storageClass.stripeCount` | Specifies the number of objects to stripe over before looping | `""` | +| `storageClass.objectSize` | Specifies the object size in bytes | `""` | +| `storageClass.provisionerSecret` | Specifies the provisioner secret name; the secret must contain user and/or Ceph admin credentials 
| `csi-rbd-secret` | +| `storageClass.provisionerSecretNamespace` | Specifies the provisioner secret namespace | `""` | +| `storageClass.controllerExpandSecret` | Specifies the controller expand secret name | `csi-rbd-secret` | +| `storageClass.controllerExpandSecretNamespace` | Specifies the controller expand secret namespace | `""` | +| `storageClass.nodeStageSecret` | Specifies the node stage secret name | `csi-rbd-secret` | +| `storageClass.nodeStageSecretNamespace` | Specifies the node stage secret namespace | `""` | +| `storageClass.fstype` | Specifies the filesystem type of the volume | `ext4` | +| `storageClass.reclaimPolicy` | Specifies the reclaim policy of the StorageClass | `Delete` | +| `storageClass.allowVolumeExpansion` | Specifies whether volume expansion should be allowed | `true` | +| `storageClass.mountOptions` | Specifies the mount options for the StorageClass | `[]` | +| `secret.create` | Specifies whether the secret should be created | `false` | +| `secret.name` | Specifies the rbd secret name | `csi-rbd-secret` | +| `secret.userID` | Specifies the user ID of the rbd secret | `<plaintext ID>` | +| `secret.userKey` | Specifies the key that corresponds to the userID | `<Ceph auth key corresponding to ID above>` | +| `secret.encryptionPassphrase` | Specifies the encryption passphrase of the secret | `test_passphrase` | +| `selinuxMount` | Mount the host /etc/selinux inside pods to support selinux-enabled filesystems | `true` | +| `CSIDriver.fsGroupPolicy` | Specifies the fsGroupPolicy for the CSI driver object | `File` | +| `CSIDriver.seLinuxMount` | Specifies whether to enable SELinux mount support for efficient volume relabeling | `true` |
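+
+If the pack should also manage the Ceph credentials, enable `secret.create` and supply a Ceph user and its auth key. A minimal sketch (the user ID and key below are placeholders for a real Ceph identity with access to the pool):
+
+```yaml
+charts:
+  ceph-csi-rbd:
+    secret:
+      create: true
+      name: csi-rbd-secret
+      userID: kubernetes                                    # placeholder Ceph user ID
+      userKey: AQD9o0Fd6hQRChAAt7fMaSZXduT3NWEqylNpmg==     # placeholder Ceph auth key
+```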
diff --git a/packs/ceph-rbd-addon-3.11.0/charts/ceph-rbd-3.11.0.tgz b/packs/ceph-rbd-addon-3.11.0/charts/ceph-rbd-3.11.0.tgz new file mode 100644 index 00000000..5787305c Binary files /dev/null and b/packs/ceph-rbd-addon-3.11.0/charts/ceph-rbd-3.11.0.tgz differ diff --git a/packs/ceph-rbd-addon-3.11.0/logo.png b/packs/ceph-rbd-addon-3.11.0/logo.png new file mode 100644 index 00000000..7549685d Binary files /dev/null and b/packs/ceph-rbd-addon-3.11.0/logo.png differ diff --git a/packs/ceph-rbd-addon-3.11.0/pack.json b/packs/ceph-rbd-addon-3.11.0/pack.json new file mode 100644 index 00000000..91cbc0cb --- /dev/null +++ b/packs/ceph-rbd-addon-3.11.0/pack.json @@ -0,0 +1,26 @@ +{ + "addonType": "system app", + "annotations": { + "commit_msg": "CSI driver, provisioner, snapshotter, resizer and attacher for Ceph RBD", + "source": "community", + "contributor" : "pedro@spectrocloud" + }, + "charts": [ + "charts/ceph-rbd-3.11.0.tgz" + ], + "cloudTypes": [ + "aws", + "azure", + "gcp", + "vsphere", + "maas", + "openstack", + "edge", + "edge-native", + "custom" + ], + "displayName": "Ceph CSI RBD", + "layer":"addon", + "name": "ceph-rbd-addon", + "version": "3.11.0" +} \ No newline at end of file diff --git a/packs/ceph-rbd-addon-3.11.0/values.yaml b/packs/ceph-rbd-addon-3.11.0/values.yaml new file mode 100644 index 00000000..dbd88ad0 --- /dev/null +++ b/packs/ceph-rbd-addon-3.11.0/values.yaml @@ -0,0 +1,578 @@ +# Default values for ceph-csi-rbd driver +# This is a YAML-formatted file +pack: + content: + images: + - image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0 + - image: quay.io/cephcsi/cephcsi:v3.11.0 + - image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.0 + - image: registry.k8s.io/sig-storage/csi-attacher:v4.5.0 + - image: registry.k8s.io/sig-storage/csi-resizer:v1.10.0 + - image: registry.k8s.io/sig-storage/csi-snapshotter:v7.0.0 + + charts: + - repo: https://ceph.github.io/csi-charts + name: ceph-csi-rbd + version: 3.11.0 + #The namespace (on the target cluster) to install this chart + #When not found, a new namespace will be created + namespace: "ceph-csi-rbd" + namespaceLabels: + "ceph-csi-rbd": "pod-security.kubernetes.io/enforce=privileged,pod-security.kubernetes.io/enforce-version=v{{ .spectro.system.kubernetes.version | substr 0 4 }}" + +charts: + ceph-csi-rbd: + fullnameOverride: ceph-csi-rbd + + rbac: + # Specifies whether RBAC resources should be created + create: true + + serviceAccounts: + nodeplugin: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname + name: + provisioner: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname + name: + + # Configuration for the CSI to connect to the cluster + # Ref: https://github.com/ceph/ceph-csi/blob/devel/examples/README.md + # Example: + # csiConfig: + # - clusterID: "<cluster-id>" + # monitors: + # - "<MONValue1>" + # - "<MONValue2>" + # rbd: + # netNamespaceFilePath: "{{ .kubeletDir }}/plugins/{{ .driverName }}/net" + # readAffinity: + # enabled: true + # crushLocationLabels: + # - topology.kubernetes.io/region + # - topology.kubernetes.io/zone + csiConfig: [] + + # Configuration details of clusterID,PoolID and FscID mapping + # csiMapping: + # - clusterIDMapping: + # clusterID on site1: clusterID on site2 + # RBDPoolIDMapping: + # - poolID on site1: poolID on site2 + # CephFSFscIDMapping: + # - CephFS FscID on site1: CephFS FscID on site2 + csiMapping: [] + + # Configuration for the encryption KMS + # Ref: https://github.com/ceph/ceph-csi/blob/devel/docs/deploy-rbd.md + # Example: + # encryptionKMSConfig: + # vault-unique-id-1: + # encryptionKMSType: vault + # vaultAddress: https://vault.example.com + # vaultAuthPath: /v1/auth/kubernetes/login + # vaultRole: csi-kubernetes + # vaultPassphraseRoot: /v1/secret + # vaultPassphrasePath: ceph-csi/ + # vaultCAVerify: "false" + encryptionKMSConfig: {} + + # Labels to apply to all resources + commonLabels: {} + + # Set logging level for csi containers. + # Supported values from 0 to 5. 0 for general useful logs, + # 5 for trace level verbosity. + # logLevel is the variable for CSI driver containers's log level + logLevel: 5 + # sidecarLogLevel is the variable for Kubernetes sidecar container's log level + sidecarLogLevel: 1 + + # Set fsGroupPolicy for CSI Driver object spec + # https://kubernetes-csi.github.io/docs/support-fsgroup.html + # The following modes are supported: + # - None: Indicates that volumes will be mounted with no modifications, as the + # CSI volume driver does not support these operations. + # - File: Indicates that the CSI volume driver supports volume ownership and + # permission change via fsGroup, and Kubernetes may use fsGroup to change + # permissions and ownership of the volume to match user requested fsGroup in + # the pod's SecurityPolicy regardless of fstype or access mode. + # - ReadWriteOnceWithFSType: Indicates that volumes will be examined to + # determine if volume ownership and permissions should be modified to match + # the pod's security policy. + # Changes will only occur if the fsType is defined and the persistent volume's + # accessModes contains ReadWriteOnce. 
+ CSIDriver: + fsGroupPolicy: "File" + seLinuxMount: true + + nodeplugin: + name: nodeplugin + # set user created priorityclassName for csi plugin pods. default is + # system-node-critical which is high priority + priorityClassName: system-node-critical + # if you are using rbd-nbd client set this value to OnDelete + updateStrategy: RollingUpdate + + httpMetrics: + # Metrics only available for cephcsi/cephcsi => 1.2.0 + # Specifies whether http metrics should be exposed + enabled: true + # The port of the container to expose the metrics + containerPort: 8080 + + service: + # Specifies whether a service should be created for the metrics + enabled: true + # The port to use for the service + servicePort: 8080 + type: ClusterIP + + # Annotations for the service + # Example: + # annotations: + # prometheus.io/scrape: "true" + # prometheus.io/port: "8080" + annotations: {} + + clusterIP: "" + + ## List of IP addresses at which the stats-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## Reference to one or more secrets to be used when pulling images + ## + imagePullSecrets: [] + # - name: "image-pull-secret" + + profiling: + # enable profiling to check for memory leaks + enabled: false + + registrar: + image: + repository: registry.k8s.io/sig-storage/csi-node-driver-registrar + tag: v2.10.0 + pullPolicy: IfNotPresent + resources: {} + + plugin: + image: + repository: quay.io/cephcsi/cephcsi + tag: v3.11.0 + pullPolicy: IfNotPresent + resources: {} + + nodeSelector: {} + + tolerations: [] + + affinity: {} + + provisioner: + name: provisioner + replicaCount: 3 + strategy: + # RollingUpdate strategy replaces old pods with new ones gradually, + # without incurring downtime. + type: RollingUpdate + rollingUpdate: + # maxUnavailable is the maximum number of pods that can be + # unavailable during the update process. + maxUnavailable: 50% + # if fstype is not specified in storageclass, ext4 is default + defaultFSType: ext4 + # deployController to enable or disable the deployment of controller which + # generates the OMAP data if its not Present. + deployController: true + # Timeout for waiting for creation or deletion of a volume + timeout: 60s + # cluster name to set on the RBD image + # clustername: "k8s-cluster-1" + # Hard limit for maximum number of nested volume clones that are taken before + # a flatten occurs + hardMaxCloneDepth: 8 + # Soft limit for maximum number of nested volume clones that are taken before + # a flatten occurs + softMaxCloneDepth: 4 + # Maximum number of snapshots allowed on rbd image without flattening + maxSnapshotsOnImage: 450 + # Minimum number of snapshots allowed on rbd image to trigger flattening + minSnapshotsOnImage: 250 + # skip image flattening if kernel support mapping of rbd images + # which has the deep-flatten feature + # skipForceFlatten: false + + # set user created priorityclassName for csi provisioner pods. default is + # system-cluster-critical which is less priority than system-node-critical + priorityClassName: system-cluster-critical + + # enable hostnetwork for provisioner pod. 
default is false + # useful for deployments where the podNetwork has no access to ceph + enableHostNetwork: false + + httpMetrics: + # Metrics only available for cephcsi/cephcsi => 1.2.0 + # Specifies whether http metrics should be exposed + enabled: true + # The port of the container to expose the metrics + containerPort: 8080 + + service: + # Specifies whether a service should be created for the metrics + enabled: true + # The port to use for the service + servicePort: 8080 + type: ClusterIP + + # Annotations for the service + # Example: + # annotations: + # prometheus.io/scrape: "true" + # prometheus.io/port: "8080" + annotations: {} + + clusterIP: "" + + ## List of IP addresses at which the stats-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## Reference to one or more secrets to be used when pulling images + ## + imagePullSecrets: [] + # - name: "image-pull-secret" + + profiling: + # enable profiling to check for memory leaks + enabled: false + + provisioner: + image: + repository: registry.k8s.io/sig-storage/csi-provisioner + tag: v4.0.0 + pullPolicy: IfNotPresent + resources: {} + ## For further options, check + ## https://github.com/kubernetes-csi/external-provisioner#command-line-options + extraArgs: [] + + # set metadata on volume + setmetadata: true + + attacher: + name: attacher + enabled: true + image: + repository: registry.k8s.io/sig-storage/csi-attacher + tag: v4.5.0 + pullPolicy: IfNotPresent + resources: {} + ## For further options, check + ## https://github.com/kubernetes-csi/external-attacher#command-line-options + extraArgs: [] + + resizer: + name: resizer + enabled: true + image: + repository: registry.k8s.io/sig-storage/csi-resizer + tag: v1.10.0 + pullPolicy: IfNotPresent + resources: {} + ## For further options, check + ## https://github.com/kubernetes-csi/external-resizer#recommended-optional-arguments + extraArgs: [] + + snapshotter: + image: + repository: registry.k8s.io/sig-storage/csi-snapshotter + tag: v7.0.0 + pullPolicy: IfNotPresent + resources: {} + ## For further options, check + ## https://github.com/kubernetes-csi/external-snapshotter#csi-external-snapshotter-sidecar-command-line-options + extraArgs: [] + + args: + # enableVolumeGroupSnapshots enables support for volume group snapshots + enableVolumeGroupSnapshots: false + + nodeSelector: {} + + tolerations: [] + + affinity: {} + + topology: + # Specifies whether topology based provisioning support should + # be exposed by CSI + enabled: false + # domainLabels define which node labels to use as domains + # for CSI nodeplugins to advertise their domains + # NOTE: the value here serves as an example and needs to be + # updated with node labels that define domains of interest + domainLabels: + - failure-domain/region + - failure-domain/zone + + # readAffinity: + # Enable read affinity for RBD volumes. Recommended to + # set to true if running kernel 5.8 or newer. + # enabled: false + # Define which node labels to use as CRUSH location. + # This should correspond to the values set in the CRUSH map. 
+ # NOTE: the value here serves as an example + # crushLocationLabels: + # - topology.kubernetes.io/region + # - topology.kubernetes.io/zone + + storageClass: + # Specifies whether the storageclass should be created + create: false + name: csi-rbd-sc + + # Annotations for the storage class + # Example: + # annotations: + # storageclass.kubernetes.io/is-default-class: "true" + annotations: {} + + # (required) String representing a Ceph cluster to provision storage from. + # Should be unique across all Ceph clusters in use for provisioning, + # cannot be greater than 36 bytes in length, and should remain immutable for + # the lifetime of the StorageClass in use. + clusterID: <cluster-ID> + + # (optional) If you want to use erasure coded pool with RBD, you need to + # create two pools. one erasure coded and one replicated. + # You need to specify the replicated pool here in the `pool` parameter, it is + # used for the metadata of the images. + # The erasure coded pool must be set as the `dataPool` parameter below. + # dataPool: <ec-data-pool> + dataPool: "" + + # (required) Ceph pool into which the RBD image shall be created + # (optional) if topologyConstrainedPools is provided + # eg: pool: replicapool + pool: replicapool + + # (optional) RBD image features, CSI creates image with image-format 2 CSI + # RBD currently supports `layering`, `journaling`, `exclusive-lock`, + # `object-map`, `fast-diff`, `deep-flatten` features. + # Refer https://docs.ceph.com/en/latest/rbd/rbd-config-ref/#image-features + # for image feature dependencies. + # imageFeatures: layering,journaling,exclusive-lock,object-map,fast-diff + imageFeatures: "layering" + + # (optional) Specifies whether to try other mounters in case if the current + # mounter fails to mount the rbd image for any reason. True means fallback + # to next mounter, default is set to false. + # Note: tryOtherMounters is currently useful to fallback from krbd to rbd-nbd + # in case if any of the specified imageFeatures is not supported by krbd + # driver on node scheduled for application pod launch, but in the future this + # should work with any mounter type. + # tryOtherMounters: false + + # (optional) Options to pass to the `mkfs` command while creating the + # filesystem on the RBD device. Check the man-page for the `mkfs` command + # for the filesystem for more details. When `mkfsOptions` is set here, the + # defaults will not be used, consider including them in this parameter. + # + # The default options depend on the csi.storage.k8s.io/fstype setting: + # - ext4: "-m0 -Enodiscard,lazy_itable_init=1,lazy_journal_init=1" + # - xfs: "-K" + # + # mkfsOptions: "-m0 -Ediscard -i1024" + + # (optional) uncomment the following to use rbd-nbd as mounter + # on supported nodes + # mounter: rbd-nbd + mounter: "" + + # (optional) ceph client log location, eg: rbd-nbd + # By default host-path /var/log/ceph of node is bind-mounted into + # csi-rbdplugin pod at /var/log/ceph mount path. This is to configure + # target bindmount path used inside container for ceph clients logging. + # See docs/rbd-nbd.md for available configuration options. + # cephLogDir: /var/log/ceph + cephLogDir: "" + + # (optional) ceph client log strategy + # By default, log file belonging to a particular volume will be deleted + # on unmap, but you can choose to just compress instead of deleting it + # or even preserve the log file in text format as it is. 
+ # Available options `remove` or `compress` or `preserve` + # cephLogStrategy: remove + cephLogStrategy: "" + + # (optional) Prefix to use for naming RBD images. + # If omitted, defaults to "csi-vol-". + # volumeNamePrefix: "foo-bar-" + volumeNamePrefix: "" + + # (optional) Instruct the plugin it has to encrypt the volume + # By default it is disabled. Valid values are "true" or "false". + # A string is expected here, i.e. "true", not true. + # encrypted: "true" + encrypted: "" + + # (optional) Use external key management system for encryption passphrases by + # specifying a unique ID matching KMS ConfigMap. The ID is only used for + # correlation to configmap entry. + encryptionKMSID: "" + + # Add topology constrained pools configuration, if topology based pools + # are setup, and topology constrained provisioning is required. + # For further information read TODO<doc> + # topologyConstrainedPools: | + # [{"poolName":"pool0", + # "dataPool":"ec-pool0" # optional, erasure-coded pool for data + # "domainSegments":[ + # {"domainLabel":"region","value":"east"}, + # {"domainLabel":"zone","value":"zone1"}]}, + # {"poolName":"pool1", + # "dataPool":"ec-pool1" # optional, erasure-coded pool for data + # "domainSegments":[ + # {"domainLabel":"region","value":"east"}, + # {"domainLabel":"zone","value":"zone2"}]}, + # {"poolName":"pool2", + # "dataPool":"ec-pool2" # optional, erasure-coded pool for data + # "domainSegments":[ + # {"domainLabel":"region","value":"west"}, + # {"domainLabel":"zone","value":"zone1"}]} + # ] + topologyConstrainedPools: [] + + # (optional) mapOptions is a comma-separated list of map options. + # For krbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options + # For nbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options + # Format: + # mapOptions: "<mounter>:op1,op2;<mounter>:op1,op2" + # An empty mounter field is treated as krbd type for compatibility. + # eg: + # mapOptions: "krbd:lock_on_read,queue_depth=1024;nbd:try-netlink" + mapOptions: "" + + # (optional) unmapOptions is a comma-separated list of unmap options. + # For krbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options + # For nbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options + # Format: + # unmapOptions: "<mounter>:op1,op2;<mounter>:op1,op2" + # An empty mounter field is treated as krbd type for compatibility. + # eg: + # unmapOptions: "krbd:force;nbd:force" + unmapOptions: "" + + # (optional) stripe unit in bytes + # If set, stripeCount must also be specified + # For defaults, refer to + # https://docs.ceph.com/en/latest/man/8/rbd/#striping + stripeUnit: "" + + # (optional) number of objects to stripe over before looping + # If set, stripeUnit must also be specified + # For defaults, refer to + # https://docs.ceph.com/en/latest/man/8/rbd/#striping + stripeCount: "" + + # (optional) object size in bytes + # If set, must be a power of 2 + objectSize: "" + + # The secrets have to contain Ceph credentials with required access + # to the 'pool'. + provisionerSecret: csi-rbd-secret + # If Namespaces are left empty, the secrets are assumed to be in the + # Release namespace. + provisionerSecretNamespace: "" + controllerExpandSecret: csi-rbd-secret + controllerExpandSecretNamespace: "" + nodeStageSecret: csi-rbd-secret + nodeStageSecretNamespace: "" + # Specify the filesystem type of the volume. If not specified, + # csi-provisioner will set default as `ext4`. 
+      fstype: ext4
+      reclaimPolicy: Delete
+      allowVolumeExpansion: true
+      mountOptions: []
+      # Mount Options
+      # Example:
+      # mountOptions:
+      #   - discard
+
+    # Mount the host /etc/selinux inside pods to support
+    # selinux-enabled filesystems
+    selinuxMount: true
+
+    secret:
+      # Specifies whether the secret should be created
+      create: false
+      name: csi-rbd-secret
+      annotations: {}
+      # Key values correspond to a user name and its key, as defined in the
+      # ceph cluster. User ID should have required access to the 'pool'
+      # specified in the storage class
+      userID: <plaintext ID>
+      userKey: <Ceph auth key corresponding to userID above>
+      # Encryption passphrase
+      encryptionPassphrase: test_passphrase
+
+    # This is a sample configmap that helps define a Ceph configuration as required
+    # by the CSI plugins.
+    # Sample ceph.conf available at
+    # https://github.com/ceph/ceph/blob/master/src/sample.ceph.conf Detailed
+    # documentation is available at
+    # https://docs.ceph.com/en/latest/rados/configuration/ceph-conf/
+    cephconf: |
+      [global]
+        auth_cluster_required = cephx
+        auth_service_required = cephx
+        auth_client_required = cephx
+
+    # Array of extra objects to deploy with the release
+    extraDeploy: []
+
+    #########################################################
+    # Variables for 'internal' use please use with caution! #
+    #########################################################
+
+    # The filename of the provisioner socket
+    provisionerSocketFile: csi-provisioner.sock
+    # The filename of the plugin socket
+    pluginSocketFile: csi.sock
+    # kubelet working directory, can be set using `--root-dir` when starting kubelet.
+    kubeletDir: /var/lib/kubelet
+    # Host path location for ceph client processes logging, ex: rbd-nbd
+    cephLogDirHostPath: /var/log/ceph
+    # Name of the csi-driver
+    driverName: rbd.csi.ceph.com
+    # Name of the configmap used for state
+    configMapName: ceph-csi-config
+    # Key to use in the Configmap if not config.json
+    # configMapKey:
+    # Use an externally provided configmap
+    externallyManagedConfigmap: false
+    # Name of the configmap used for ceph.conf
+    cephConfConfigMapName: ceph-config
+    # Name of the configmap used for encryption kms configuration
+    kmsConfigMapName: ceph-csi-encryption-kms-config
\ No newline at end of file
diff --git a/packs/crossplane-1.16/README.md b/packs/crossplane-1.16/README.md
new file mode 100644
index 00000000..83370a54
--- /dev/null
+++ b/packs/crossplane-1.16/README.md
@@ -0,0 +1,35 @@
+# Crossplane
+
+Crossplane is an open source Kubernetes extension that transforms your Kubernetes cluster into a universal control plane.
+
+Crossplane lets you manage anything, anywhere, all through standard Kubernetes APIs. Crossplane can even let you order a pizza directly from Kubernetes. If it has an API, Crossplane can connect to it.
+
+With Crossplane, platform teams can create new abstractions and custom APIs with the full power of Kubernetes policies, namespaces, role-based access controls, and more. Crossplane brings all your non-Kubernetes resources under one roof.
+
+Custom APIs, created by platform teams, allow security and compliance enforcement across resources or clouds, without exposing any complexity to the developers. A single API call can create multiple resources, in multiple clouds, and use Kubernetes as the control plane for everything.
+
+## Prerequisites
+
+- Kubernetes >= 1.27.0
+
+## Usage
+
+Installing a provider creates new Kubernetes resources representing the Provider's APIs. Installing a provider also creates a Provider pod that's responsible for reconciling the Provider's APIs into the Kubernetes cluster. Providers constantly watch the state of the desired managed resources and create any external resources that are missing.
+
+Install a Provider with a Crossplane `Provider` object, setting the `spec.package` value to the location of the provider package. Additional providers can be found in the [Upbound Marketplace](https://marketplace.upbound.io/).
+
+*For example*, install the [Palette Provider](https://marketplace.upbound.io/providers/crossplane-contrib/provider-palette/v0.19.2):
+
+```yaml
+apiVersion: pkg.crossplane.io/v1
+kind: Provider
+metadata:
+  name: provider-palette
+spec:
+  package: xpkg.upbound.io/crossplane-contrib/provider-palette:v0.19.2
+```
+
+## References
+
+- [Crossplane Provider Guide](https://docs.crossplane.io/latest/concepts/providers/)
+- [Crossplane Concepts](https://docs.crossplane.io/latest/concepts/)
+- [Upbound Marketplace](https://marketplace.upbound.io/)
\ No newline at end of file
diff --git a/packs/crossplane-1.16/charts/crossplane-1.16.0.tgz b/packs/crossplane-1.16/charts/crossplane-1.16.0.tgz
new file mode 100644
index 00000000..d0ff023a
Binary files /dev/null and b/packs/crossplane-1.16/charts/crossplane-1.16.0.tgz differ
diff --git a/packs/crossplane-1.16/logo.png b/packs/crossplane-1.16/logo.png
new file mode 100644
index 00000000..94280b87
Binary files /dev/null and b/packs/crossplane-1.16/logo.png differ
diff --git a/packs/crossplane-1.16/pack.json b/packs/crossplane-1.16/pack.json
new file mode 100644
index 00000000..852c0830
--- /dev/null
+++ b/packs/crossplane-1.16/pack.json
@@ -0,0 +1,18 @@
+{
+  "addonType": "system app",
+  "annotations": {
+    "source": "community",
+    "contributor" : "spectrocloud"
+  },
+  "cloudTypes": [
+    "all"
+  ],
+  "displayName": "Crossplane",
+  "charts": [
+    "charts/crossplane-1.16.0.tgz"
+  ],
+  "layer":"addon",
+  "name": "crossplane",
+  "version": "1.16.0"
+  }
+
\ No newline at end of file
diff --git a/packs/crossplane-1.16/presets.yaml b/packs/crossplane-1.16/presets.yaml
new file mode 100644
index 00000000..b35fc47b
--- /dev/null
+++ b/packs/crossplane-1.16/presets.yaml
@@ -0,0 +1,21 @@
+presets:
+  - name: "palette-provider"
+    displayName: "Enable Spectro Cloud Palette Provider"
+    group: "Provider"
+    remove: []
+    add: |
+      charts:
+        crossplane:
+          provider:
+            packages:
+              - "xpkg.upbound.io/crossplane-contrib/provider-palette:v0.19.2"
+  - name: "no-provider"
+    displayName: "Remove Preset Providers"
+    group: "Provider"
+    remove: |
+      charts:
+        crossplane:
+          provider:
+            packages:
+              - "xpkg.upbound.io/crossplane-contrib/provider-palette:v0.19.2"
+    add: []
\ No newline at end of file
diff --git a/packs/crossplane-1.16/values.yaml b/packs/crossplane-1.16/values.yaml
new file mode 100644
index 00000000..eb699d62
--- /dev/null
+++ b/packs/crossplane-1.16/values.yaml
@@ -0,0 +1,197 @@
+pack:
+  #The namespace (on the target cluster) to install this chart
+  #When not found, a new namespace will be created
+  namespace: "crossplane-system"
+  content:
+    images:
+      - image: xpkg.upbound.io/crossplane/crossplane:v1.16.0
+
+
+charts:
+  crossplane:
+    # helm-docs renders these comments into markdown. Use markdown formatting where
+    # appropriate.
+    #
+    # -- The number of Crossplane pod `replicas` to deploy.
+    replicas: 1
+
+    # -- The deployment strategy for the Crossplane and RBAC Manager pods.
+    deploymentStrategy: RollingUpdate
+
+    image:
+      # -- Repository for the Crossplane pod image.
+ repository: xpkg.upbound.io/crossplane/crossplane + # -- The Crossplane image tag. Defaults to the value of `appVersion` in `Chart.yaml`. + tag: "" + # -- The image pull policy used for Crossplane and RBAC Manager pods. + pullPolicy: IfNotPresent + + # -- Add `nodeSelectors` to the Crossplane pod deployment. + nodeSelector: {} + # -- Add `tolerations` to the Crossplane pod deployment. + tolerations: [] + # -- Add `affinities` to the Crossplane pod deployment. + affinity: {} + + # -- Enable `hostNetwork` for the Crossplane deployment. Caution: enabling `hostNetwork` grants the Crossplane Pod access to the host network namespace. Consider setting `dnsPolicy` to `ClusterFirstWithHostNet`. + hostNetwork: false + + # -- Specify the `dnsPolicy` to be used by the Crossplane pod. + dnsPolicy: "" + + # -- Add custom `labels` to the Crossplane pod deployment. + customLabels: {} + + # -- Add custom `annotations` to the Crossplane pod deployment. + customAnnotations: {} + + serviceAccount: + # -- Add custom `annotations` to the Crossplane ServiceAccount. + customAnnotations: {} + + # -- Enable [leader election](https://docs.crossplane.io/latest/concepts/pods/#leader-election) for the Crossplane pod. + leaderElection: true + # -- Add custom arguments to the Crossplane pod. + args: [] + + provider: + # -- A list of Provider packages to install. + packages: [] + + configuration: + # -- A list of Configuration packages to install. + packages: [] + + function: + # -- A list of Function packages to install + packages: [] + + # -- The imagePullSecret names to add to the Crossplane ServiceAccount. + imagePullSecrets: [] + + registryCaBundleConfig: + # -- The ConfigMap name containing a custom CA bundle to enable fetching packages from registries with unknown or untrusted certificates. + name: "" + # -- The ConfigMap key containing a custom CA bundle to enable fetching packages from registries with unknown or untrusted certificates. + key: "" + + service: + # -- Configure annotations on the service object. Only enabled when webhooks.enabled = true + customAnnotations: {} + + webhooks: + # -- Enable webhooks for Crossplane and installed Provider packages. + enabled: true + + rbacManager: + # -- Deploy the RBAC Manager pod and its required roles. + deploy: true + # -- Don't install aggregated Crossplane ClusterRoles. + skipAggregatedClusterRoles: false + # -- The number of RBAC Manager pod `replicas` to deploy. + replicas: 1 + # -- Enable [leader election](https://docs.crossplane.io/latest/concepts/pods/#leader-election) for the RBAC Manager pod. + leaderElection: true + # -- Add custom arguments to the RBAC Manager pod. + args: [] + # -- Add `nodeSelectors` to the RBAC Manager pod deployment. + nodeSelector: {} + # -- Add `tolerations` to the RBAC Manager pod deployment. + tolerations: [] + # -- Add `affinities` to the RBAC Manager pod deployment. + affinity: {} + + # -- The PriorityClass name to apply to the Crossplane and RBAC Manager pods. + priorityClassName: "" + + resourcesCrossplane: + limits: + # -- CPU resource limits for the Crossplane pod. + cpu: 500m + # -- Memory resource limits for the Crossplane pod. + memory: 1024Mi + requests: + # -- CPU resource requests for the Crossplane pod. + cpu: 100m + # -- Memory resource requests for the Crossplane pod. + memory: 256Mi + + securityContextCrossplane: + # -- The user ID used by the Crossplane pod. + runAsUser: 65532 + # -- The group ID used by the Crossplane pod. + runAsGroup: 65532 + # -- Enable `allowPrivilegeEscalation` for the Crossplane pod. 
+ allowPrivilegeEscalation: false + # -- Set the Crossplane pod root file system as read-only. + readOnlyRootFilesystem: true + + packageCache: + # -- Set to `Memory` to hold the package cache in a RAM backed file system. Useful for Crossplane development. + medium: "" + # -- The size limit for the package cache. If medium is `Memory` the `sizeLimit` can't exceed Node memory. + sizeLimit: 20Mi + # -- The name of a PersistentVolumeClaim to use as the package cache. Disables the default package cache `emptyDir` Volume. + pvc: "" + # -- The name of a ConfigMap to use as the package cache. Disables the default package cache `emptyDir` Volume. + configMap: "" + + resourcesRBACManager: + limits: + # -- CPU resource limits for the RBAC Manager pod. + cpu: 100m + # -- Memory resource limits for the RBAC Manager pod. + memory: 512Mi + requests: + # -- CPU resource requests for the RBAC Manager pod. + cpu: 100m + # -- Memory resource requests for the RBAC Manager pod. + memory: 256Mi + + securityContextRBACManager: + # -- The user ID used by the RBAC Manager pod. + runAsUser: 65532 + # -- The group ID used by the RBAC Manager pod. + runAsGroup: 65532 + # -- Enable `allowPrivilegeEscalation` for the RBAC Manager pod. + allowPrivilegeEscalation: false + # -- Set the RBAC Manager pod root file system as read-only. + readOnlyRootFilesystem: true + + metrics: + # -- Enable Prometheus path, port and scrape annotations and expose port 8080 for both the Crossplane and RBAC Manager pods. + enabled: false + + # -- Add custom environmental variables to the Crossplane pod deployment. + # Replaces any `.` in a variable name with `_`. For example, `SAMPLE.KEY=value1` becomes `SAMPLE_KEY=value1`. + extraEnvVarsCrossplane: {} + + # -- Add custom environmental variables to the RBAC Manager pod deployment. + # Replaces any `.` in a variable name with `_`. For example, `SAMPLE.KEY=value1` becomes `SAMPLE_KEY=value1`. + extraEnvVarsRBACManager: {} + + # -- Add a custom `securityContext` to the Crossplane pod. + podSecurityContextCrossplane: {} + + # -- Add a custom `securityContext` to the RBAC Manager pod. + podSecurityContextRBACManager: {} + + # -- Add custom `volumes` to the Crossplane pod. + extraVolumesCrossplane: {} + + # -- Add custom `volumeMounts` to the Crossplane pod. 
+ extraVolumeMountsCrossplane: {} + + # -- To add arbitrary Kubernetes Objects during a Helm Install + extraObjects: [] + # - apiVersion: pkg.crossplane.io/v1alpha1 + # kind: ControllerConfig + # metadata: + # name: aws-config + # annotations: + # eks.amazonaws.com/role-arn: arn:aws:iam::123456789101:role/example + # helm.sh/hook: post-install + # spec: + # podSecurityContext: + # fsGroup: 2000 + diff --git a/packs/crossplane-1.7.0/README.md b/packs/crossplane-1.7.0/README.md new file mode 100644 index 00000000..e71fe631 --- /dev/null +++ b/packs/crossplane-1.7.0/README.md @@ -0,0 +1 @@ +Crossplane Deprecated \ No newline at end of file diff --git a/packs/crossplane-1.7.0/pack.json b/packs/crossplane-1.7.0/pack.json index 250cd7d5..6aa66427 100644 --- a/packs/crossplane-1.7.0/pack.json +++ b/packs/crossplane-1.7.0/pack.json @@ -2,7 +2,8 @@ "addonType": "system app", "annotations": { "source": "community", - "contributor" : "spectrocloud" + "contributor" : "spectrocloud", + "system_state": "deprecated" }, "cloudTypes": [ "all" diff --git a/packs/crossplane-1.7.0/values.yaml b/packs/crossplane-1.7.0/values.yaml index 8c2d25ca..fec8c88b 100644 --- a/packs/crossplane-1.7.0/values.yaml +++ b/packs/crossplane-1.7.0/values.yaml @@ -2,6 +2,9 @@ pack: #The namespace (on the target cluster) to install this chart #When not found, a new namespace will be created namespace: "crossplane" + content: + images: + - image: xpkg.upbound.io/crossplane/crossplane:v1.7.0 charts: crossplane: diff --git a/packs/hello-universe-1.1.2/README.md b/packs/hello-universe-1.1.2/README.md index 0d0c548e..3b1bbf98 100644 --- a/packs/hello-universe-1.1.2/README.md +++ b/packs/hello-universe-1.1.2/README.md @@ -34,6 +34,10 @@ The following parameters are applied to the **hello-universe.yaml** manifest thr | `manifests.dbPassword` | The base64 encoded database password to connect to the API database. | `REPLACE_ME` | No | | `manifests.authToken` | The base64 encoded auth token for the API connection. | `REPLACE_ME` | No | +## Upgrade + +Upgrades from the [hello-universe-1.1.1](../hello-universe-1.1.1/README.md) pack are not supported. If you want to upgrade the pack, you must first remove it from the cluster profile. Then, you can add the upgraded version as a cluster profile layer. + ## Usage The Hello Universe pack has two presets that you can select: diff --git a/packs/hello-universe-1.1.3/README.md b/packs/hello-universe-1.1.3/README.md new file mode 100644 index 00000000..0995f9f6 --- /dev/null +++ b/packs/hello-universe-1.1.3/README.md @@ -0,0 +1,91 @@ +# Hello Universe + +[Hello Universe](https://github.com/spectrocloud/hello-universe) is a demo web application utilized to help users learn more about [Palette](https://docs.spectrocloud.com/introduction) and its features. + +You can deploy it using two preset configurations: +- A standalone front-end application. It provides a click counter that is saved locally and displays Spectro Cloud themed images. +- A three-tier application with a front-end application, API server and PostgreSQL database into a Kubernetes cluster. It provides a click counter that is saved in the deployed database and displays Spectro Cloud themed images. You can read more about this configuration on the Hello Universe [README](https://github.com/spectrocloud/hello-universe?tab=readme-ov-file#reverse-proxy-with-kubernetes). + +## Prerequisites + +- A Palette account. + +- A cluster profile where the Hello Universe pack can be integrated. + +- A Palette cluster with port `:8080` available. 
If port 8080 is not available, you can set a different port in the **values.yaml** file.
+
+- If you are using the **Enable Hello Universe API** preset, you will need the `:3000` port available on your cluster too. Check out the [Usage](#usage) section for further details.
+
+- Ensure the cluster has sufficient CPU resources available, as each replica requests and is limited to 500 milliCPU.
+
+## Parameters
+
+The following parameters are applied to the **hello-universe.yaml** manifest through the **values.yaml** file. Users do not need to take any additional actions regarding these parameters.
+
+| **Parameter** | **Description** | **Default Value** | **Required** |
+| --------------------------------- | ------------------------------------------------------------------------------ | ------------------------------------------- | ------------ |
+| `manifests.namespace` | The namespace in which the application will be deployed. | `hello-universe` | Yes |
+| `manifests.images.hellouniverse` | The [`hello-universe`](https://github.com/spectrocloud/hello-universe) application image that will be utilized to create the containers. | `ghcr.io/spectrocloud/hello-universe:1.1.3` / `ghcr.io/spectrocloud/hello-universe:1.1.3-proxy` | Yes |
+| `manifests.images.hellouniverseapi` | The [`hello-universe-api`](https://github.com/spectrocloud/hello-universe-api) application image that will be utilized to create the containers. | `ghcr.io/spectrocloud/hello-universe-api:1.0.12` | No |
+| `manifests.images.hellouniversedb` | The [`hello-universe-db`](https://github.com/spectrocloud/hello-universe-db) application image that will be utilized to create the containers. | `ghcr.io/spectrocloud/hello-universe-db:1.0.2` | No |
+| `manifests.apiEnabled` | The flag that indicates whether to deploy the UI application as standalone or together with the API server. | `false` | Yes |
+| `manifests.port` | The cluster port number on which the service will listen for incoming traffic. | `8080` | Yes |
+| `manifests.replicas` | The number of Pods to be created. | `1` | Yes |
+| `manifests.dbPassword` | The base64 encoded database password to connect to the API database. | `REPLACE_ME` | No |
+| `manifests.authToken` | The base64 encoded auth token for the API connection. | `REPLACE_ME` | No |
+| `manifests.hello-universe.ui.useTolerations` | Flag to indicate whether to use tolerations for the UI pods. | `false` | No |
+| `manifests.hello-universe.api.useTolerations` | Flag to indicate whether to use tolerations for the API pods. | `false` | No |
+| `manifests.hello-universe.postgres.useTolerations` | Flag to indicate whether to use tolerations for the Postgres pods. | `false` | No |
+| `manifests.hello-universe.ui.tolerations.effect` | The toleration effect to use for the Hello Universe UI pods. The allowed values are `PreferNoSchedule`, `NoSchedule`, and `NoExecute`. | `PreferNoSchedule` | No |
+| `manifests.hello-universe.api.tolerations.effect` | The toleration effect to use for the Hello Universe API pods. The allowed values are `PreferNoSchedule`, `NoSchedule`, and `NoExecute`. | `PreferNoSchedule` | No |
+| `manifests.hello-universe.postgres.tolerations.effect` | The toleration effect to use for the Hello Universe Postgres pods. The allowed values are `PreferNoSchedule`, `NoSchedule`, and `NoExecute`. | `PreferNoSchedule` | No |
+| `manifests.hello-universe.ui.tolerations.key` | The tolerations key to use for the Hello Universe UI pods. | `app` | No |
+| `manifests.hello-universe.api.tolerations.key` | The tolerations key to use for the Hello Universe API pods. | `app` | No |
+| `manifests.hello-universe.postgres.tolerations.key` | The tolerations key to use for the Hello Universe Postgres pods. | `app` | No |
+| `manifests.hello-universe.ui.tolerations.value` | The tolerations value to use for the Hello Universe UI pods. | `ui` | No |
+| `manifests.hello-universe.api.tolerations.value` | The tolerations value to use for the Hello Universe API pods. | `api` | No |
+| `manifests.hello-universe.postgres.tolerations.value` | The tolerations value to use for the Hello Universe Postgres pods. | `postgres` | No |
+
+## Upgrade
+
+Upgrades from the [hello-universe-1.1.1](../hello-universe-1.1.1/README.md) pack are not supported. If you want to upgrade the pack, you must first remove it from the cluster profile. Then, you can add the upgraded version as a cluster profile layer.
+
+## Usage
+
+The Hello Universe pack has two presets that you can select:
+- **Disable Hello Universe API** configures Hello Universe as a standalone frontend application. This is the default configuration of the pack.
+- **Enable Hello Universe API** configures Hello Universe as a three-tier application with a frontend, API server, and Postgres database.
+
+To utilize the Hello Universe pack, create either a [full Palette cluster profile](https://docs.spectrocloud.com/profiles/cluster-profiles/create-cluster-profiles/create-full-profile) or an [add-on Palette cluster profile](https://docs.spectrocloud.com/profiles/cluster-profiles/create-cluster-profiles/create-addon-profile/) and add the pack to your profile. You can select the preset you wish to deploy on the cluster profile creation page.
+
+If your infrastructure provider does not offer a native load balancer solution, such as VMware and MAAS, the [MetalLB](https://docs.spectrocloud.com/integrations/metallb) pack must be included in the cluster profile to help the LoadBalancer service specified in the manifest obtain an IP address.
+
+After defining the cluster profile, use it to deploy a new cluster or attach it as an add-on profile to an existing cluster.
+
+Once the cluster status displays **Running** and **Healthy**, access the Hello Universe application through the exposed service URL and the displayed port number.
+
+### Tolerations
+
+The Hello Universe pack exposes parameters for configuring pod tolerations, which allow the pods to be scheduled on nodes that carry matching [Kubernetes Taints](https://docs.spectrocloud.com/clusters/cluster-management/taints/#taints). Tolerations can only be applied to the UI, API, and Postgres pods.
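+
+For example, to let the UI pods run on nodes tainted with `app=ui:NoSchedule`, you could enable the UI toleration parameters in the pack's **values.yaml**. The following is a minimal sketch; the key, value, and effect shown are illustrative and must match the taints applied to your nodes:
+
+```yaml
+manifests:
+  hello-universe:
+    ui:
+      useTolerations: true
+      tolerations:
+        effect: NoSchedule
+        key: app
+        value: ui
+```
+
+The matching taint could be applied with, for example, `kubectl taint nodes <node-name> app=ui:NoSchedule`.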
+ +The parameters are applied using the `Equal` operator as demonstrated below: + +```yaml + tolerations: + - effect: {{ .Values.ui.tolerations.effect }} + key: {{ .Values.ui.tolerations.key }} + operator: Equal + value: {{ .Values.ui.tolerations.value }} +``` + +## References + +- [Hello Universe GitHub Repository](https://github.com/spectrocloud/hello-universe) + +- [Hello Universe API GitHub Repository](https://github.com/spectrocloud/hello-universe-api) + +- [Deploy a Custom Pack Tutorial](https://docs.spectrocloud.com/registries-and-packs/deploy-pack/) + +- [Registries and Packs](https://docs.spectrocloud.com/registries-and-packs/) + +- [Node Labels and Taints](https://docs.spectrocloud.com/clusters/cluster-management/taints/) diff --git a/packs/hello-universe-1.1.3/logo.png b/packs/hello-universe-1.1.3/logo.png new file mode 100644 index 00000000..eeae3434 Binary files /dev/null and b/packs/hello-universe-1.1.3/logo.png differ diff --git a/packs/hello-universe-1.1.3/manifests/hello-universe.yaml b/packs/hello-universe-1.1.3/manifests/hello-universe.yaml new file mode 100644 index 00000000..cfb34574 --- /dev/null +++ b/packs/hello-universe-1.1.3/manifests/hello-universe.yaml @@ -0,0 +1,329 @@ +{{ if not .Values.apiEnabled }} + +apiVersion: v1 +kind: Namespace +metadata: + name: {{ .Values.namespace }} +--- +apiVersion: v1 +kind: Service +metadata: + name: hello-universe-service + namespace: {{ .Values.namespace }} +spec: + type: LoadBalancer + selector: + app: hello-universe + ports: + - protocol: TCP + port: {{ .Values.port }} + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hello-universe-deployment + namespace: {{ .Values.namespace }} +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: hello-universe + template: + metadata: + labels: + app: hello-universe + spec: + containers: + - name: hello-universe + image: {{ .Values.images.hellouniverse }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8080 + resources: + requests: + cpu: "100m" # requesting 100 milliCPU + memory: "512Mi" # requesting 512 MiB of memory + limits: + cpu: "200m" # limiting to 200 milliCPU + memory: "1024Mi" # requesting 1024 MiB of memory + +{{ end }} + +{{ if .Values.apiEnabled }} + +apiVersion: v1 +kind: Namespace +metadata: + name: {{ .Values.namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: service-reader +rules: + - apiGroups: [""] + resources: ["services"] + verbs: ["get", "list", "watch"] +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hello-universe-role + namespace: {{ .Values.namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: service-reader-binding +subjects: + - kind: ServiceAccount + name: hello-universe-role + namespace: {{ .Values.namespace }} +roleRef: + kind: ClusterRole + name: service-reader + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: v1 +kind: Service +metadata: + name: ui + namespace: {{ .Values.namespace }} +spec: + selector: + app: ui + ports: + - protocol: TCP + name: ui + port: {{ .Values.port }} + targetPort: 8080 + - protocol: TCP + name: api + port: 3000 + targetPort: 3000 + type: LoadBalancer +--- +apiVersion: v1 +kind: Secret +metadata: + name: db-password + namespace: {{ .Values.namespace }} +type: Opaque +data: + db-password: {{ .Values.dbPassword }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: auth-token + namespace: {{ .Values.namespace }} +type: Opaque +data: + auth-token: {{ 
.Values.authToken }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: postgres + namespace: {{ .Values.namespace }} +spec: + selector: + matchLabels: + app: postgres + replicas: 1 + template: + metadata: + labels: + app: postgres + spec: + containers: + - name: postgres + image: {{ .Values.images.hellouniversedb }} + ports: + - containerPort: 5432 + name: postgres + resources: + limits: + memory: "500Mi" + cpu: "500m" + requests: + memory: "500Mi" + cpu: "500m" + {{ if .Values.postgres.useTolerations -}} + tolerations: + - effect: {{ .Values.postgres.tolerations.effect }} + key: {{ .Values.postgres.tolerations.key }} + operator: Equal + value: {{ .Values.postgres.tolerations.value }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: postgres + namespace: {{ .Values.namespace }} +spec: + selector: + app: postgres + ports: + - protocol: TCP + port: 5432 + targetPort: 5432 + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: api + namespace: {{ .Values.namespace }} +spec: + selector: + matchLabels: + app: api + replicas: 1 + template: + metadata: + labels: + app: api + spec: + serviceAccountName: hello-universe-role + containers: + - name: api + image: {{ .Values.images.hellouniverseapi }} + ports: + - containerPort: 3000 + name: api + env: + - name: db-password + valueFrom: + secretKeyRef: + name: db-password + key: db-password + - name: DB_HOST + value: "postgres.{{ .Values.namespace }}.svc.cluster.local" + - name: PORT + value: "3000" + - name: DB_USER + value: "postgres" + - name: DB_NAME + value: "counter" + - name: DB_ENCRYPTION + value: "disable" + - name: DB_INIT + value: "false" + - name: AUTHORIZATION + value: "true" + resources: + limits: + memory: "500Mi" + cpu: "500m" + requests: + memory: "500Mi" + cpu: "500m" + livenessProbe: + httpGet: + path: /api/v1/health + port: 3000 + initialDelaySeconds: 35 + periodSeconds: 3 + readinessProbe: + httpGet: + path: /api/v1/health + port: 3000 + initialDelaySeconds: 30 + periodSeconds: 3 + {{ if .Values.api.useTolerations -}} + tolerations: + - effect: {{ .Values.api.tolerations.effect }} + key: {{ .Values.api.tolerations.key }} + operator: Equal + value: {{ .Values.api.tolerations.value }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: api + namespace: {{ .Values.namespace }} +spec: + selector: + app: api + ports: + - protocol: TCP + port: 3000 + targetPort: 3000 + type: ClusterIP # If you want to expose the API service, change this value to LoadBalancer. See below for an example. + # type: Loadbalancer +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ui + namespace: {{ .Values.namespace }} +spec: + selector: + matchLabels: + app: ui + replicas: {{ .Values.replicas }} + template: + metadata: + labels: + app: ui + spec: + serviceAccountName: hello-universe-role + initContainers: + - name: init-container + image: busybox + command: + ["/bin/sh", "-c", "echo 'sleeping for 30 seconds' && sleep 30"] + containers: + - name: ui + image: {{ .Values.images.hellouniverse }} + imagePullPolicy: Always + ports: + - containerPort: 8080 + name: ui + env: + - name: TOKEN + valueFrom: + secretKeyRef: + name: auth-token + key: auth-token + - name: API_URI + # Leave empty and set QUERY_K8S_API to true to leverage a single loadbalancer. 
+ # Otherwise, set to the API service's URL and set QUERY_K8S_API to false + value: "" + - name: SVC_URI + value: "api.{{ .Values.namespace }}.svc.cluster.local:3000" + - name: API_VERSION + value: "1" + - name: QUERY_K8S_API + value: "true" + resources: + limits: + memory: "500Mi" + cpu: "500m" + requests: + memory: "500Mi" + cpu: "500m" + livenessProbe: + httpGet: + path: / + port: 8080 + initialDelaySeconds: 40 + periodSeconds: 3 + readinessProbe: + httpGet: + path: / + port: 8080 + initialDelaySeconds: 35 + periodSeconds: 3 + {{ if .Values.ui.useTolerations -}} + tolerations: + - effect: {{ .Values.ui.tolerations.effect }} + key: {{ .Values.ui.tolerations.key }} + operator: Equal + value: {{ .Values.ui.tolerations.value }} + {{- end }} +{{ end }} diff --git a/packs/hello-universe-1.1.3/pack.json b/packs/hello-universe-1.1.3/pack.json new file mode 100644 index 00000000..22c55c76 --- /dev/null +++ b/packs/hello-universe-1.1.3/pack.json @@ -0,0 +1,15 @@ +{ + "addonType": "app services", + "annotations": { + "source": "community", + "contributor": "spectrocloud" + }, + "cloudTypes": ["all"], + "displayName": "Hello Universe", + "kubeManifests": [ + "manifests/hello-universe.yaml" + ], + "layer": "addon", + "name": "hello-universe", + "version": "1.1.3" +} diff --git a/packs/hello-universe-1.1.3/presets.yaml b/packs/hello-universe-1.1.3/presets.yaml new file mode 100644 index 00000000..c70784dc --- /dev/null +++ b/packs/hello-universe-1.1.3/presets.yaml @@ -0,0 +1,50 @@ +presets: + - name: "disable-api" + displayName: "Disable Hello Universe API" + group: "Backend" + remove: [] + add: | + manifests: + hello-universe: + images: + hellouniverse: ghcr.io/spectrocloud/hello-universe:1.1.3 + apiEnabled: false + namespace: hello-universe + port: 8080 + replicas: 1 + + - name: "enable-api" + displayName: "Enable Hello Universe API" + group: "Backend" + remove: [] + add: | + manifests: + hello-universe: + images: + hellouniverse: ghcr.io/spectrocloud/hello-universe:1.1.3-proxy + hellouniverseapi: ghcr.io/spectrocloud/hello-universe-api:1.0.12 + hellouniversedb: ghcr.io/spectrocloud/hello-universe-db:1.0.2 + apiEnabled: true + namespace: hello-universe + port: 8080 + replicas: 1 + dbPassword: REPLACE_ME # Add base64 encoded password + authToken: REPLACE_ME # Add base64 encoded token + ui: + useTolerations: false + tolerations: + effect: PreferNoSchedule + key: app + value: ui + api: + useTolerations: false + tolerations: + effect: PreferNoSchedule + key: app + value: api + postgres: + useTolerations: false + tolerations: + effect: PreferNoSchedule + key: app + value: postgres diff --git a/packs/hello-universe-1.1.3/schema.yaml b/packs/hello-universe-1.1.3/schema.yaml new file mode 100644 index 00000000..a4609c29 --- /dev/null +++ b/packs/hello-universe-1.1.3/schema.yaml @@ -0,0 +1,104 @@ +manifests.hello-universe.namespace: + # The namespace in which the UI application will be deployed. + # The namespace should be specified as a string. + schema: '{{ required | format "${string}" | hints "Enter the namespace in which the application will be deployed."}}' + +manifests.hello-universe.images.hellouniverse: + # The application image. + # Ensure that the image follows the required format: registry/repository:version. + schema: '{{ required | format "${string}" | hints "Enter the UI application image. Ensure that it follows the required format: registry/repository:version." }}' + +manifests.hello-universe.images.hellouniverseapi: + # The hello-universe-api application image. 
+  # Ensure that the image follows the required format: registry/repository:version.
+  schema: '{{ format "${string}" | hints "Enter the API application image. Ensure that it follows the required format: registry/repository:version." }}'
+
+manifests.hello-universe.images.hellouniversedb:
+  # The hello-universe-db application image.
+  # Ensure that the image follows the required format: registry/repository:version.
+  schema: '{{ format "${string}" | hints "Enter the database application image. Ensure that it follows the required format: registry/repository:version." }}'
+
+manifests.hello-universe.port:
+  # The cluster port number on which the UI service will listen for incoming traffic.
+  # The port should be specified as a number.
+  schema: '{{ required | format "${number}" | hints "Enter the cluster port number on which the UI service will listen for incoming traffic."}}'
+
+manifests.hello-universe.replicas:
+  # The number of replicas of the UI application to be created.
+  # The replicas should be specified as a number.
+  schema: '{{ required | format "${number}" | hints "Enter the number of replicas of the UI application."}}'
+
+manifests.hello-universe.dbPassword:
+  # The database password to connect the hello-universe-api to the hello-universe-db.
+  # The password should be specified as a base64 encoded string.
+  schema: '{{ format "${string}" | hints "Enter the base64 encoded database password to connect to."}}'
+
+manifests.hello-universe.authToken:
+  # The auth token to connect the hello-universe-api to the hello-universe-db.
+  # The token should be specified as a base64 encoded string.
+  schema: '{{ format "${string}" | hints "Enter the base64 encoded auth token to connect to the API with."}}'
+
+manifests.hello-universe.apiEnabled:
+  # Flag to indicate whether Hello Universe should be deployed with the API server or not.
+  # The flag should be specified with true or false.
+  schema: '{{ required | format "${boolean}" | hints "Enter whether to deploy Hello Universe as a three-tier application."}}'
+
+manifests.hello-universe.ui.useTolerations:
+  # Flag to indicate whether to use tolerations for the UI pods.
+  # The flag should be specified with true or false.
+  schema: '{{ format "${boolean}" | hints "Enter whether to use tolerations for the Hello Universe UI pods."}}'
+
+manifests.hello-universe.api.useTolerations:
+  # Flag to indicate whether to use tolerations for the API pods.
+  # The flag should be specified with true or false.
+  schema: '{{ format "${boolean}" | hints "Enter whether to use tolerations for the Hello Universe API pods."}}'
+
+manifests.hello-universe.postgres.useTolerations:
+  # Flag to indicate whether to use tolerations for the Postgres pods.
+  # The flag should be specified with true or false.
+  schema: '{{ format "${boolean}" | hints "Enter whether to use tolerations for the Hello Universe Postgres pods."}}'
+
+manifests.hello-universe.ui.tolerations.effect:
+  # The toleration effect to use for the Hello Universe UI pods.
+  # The allowed values are PreferNoSchedule, NoSchedule and NoExecute.
+  schema: '{{ format "${list:[PreferNoSchedule,NoSchedule,NoExecute]}" | hints "Enter the toleration effect for the UI pods."}}'
+
+manifests.hello-universe.api.tolerations.effect:
+  # The toleration effect to use for the Hello Universe API pods.
+  # The allowed values are PreferNoSchedule, NoSchedule and NoExecute.
+  schema: '{{ format "${list:[PreferNoSchedule,NoSchedule,NoExecute]}" | hints "Enter the toleration effect for the API pods."}}'
+
+manifests.hello-universe.postgres.tolerations.effect:
+  # The toleration effect to use for the Hello Universe Postgres pods.
+  # The allowed values are PreferNoSchedule, NoSchedule and NoExecute.
+  schema: '{{ format "${list:[PreferNoSchedule,NoSchedule,NoExecute]}" | hints "Enter the toleration effect for the Postgres pods."}}'
+
+manifests.hello-universe.ui.tolerations.key:
+  # The tolerations key to use for the Hello Universe UI pods.
+  # The key should be specified as a string.
+  schema: '{{ format "${string}" | hints "Enter the toleration key for the UI pods."}}'
+
+manifests.hello-universe.api.tolerations.key:
+  # The tolerations key to use for the Hello Universe API pods.
+  # The key should be specified as a string.
+  schema: '{{ format "${string}" | hints "Enter the toleration key for the API pods."}}'
+
+manifests.hello-universe.postgres.tolerations.key:
+  # The tolerations key to use for the Hello Universe Postgres pods.
+  # The key should be specified as a string.
+  schema: '{{ format "${string}" | hints "Enter the toleration key for the Postgres pods."}}'
+
+manifests.hello-universe.ui.tolerations.value:
+  # The tolerations value to use for the Hello Universe UI pods.
+  # The value should be specified as a string.
+  schema: '{{ format "${string}" | hints "Enter the toleration value for the UI pods."}}'
+
+manifests.hello-universe.api.tolerations.value:
+  # The tolerations value to use for the Hello Universe API pods.
+  # The value should be specified as a string.
+  schema: '{{ format "${string}" | hints "Enter the toleration value for the API pods."}}'
+
+manifests.hello-universe.postgres.tolerations.value:
+  # The tolerations value to use for the Hello Universe Postgres pods.
+  # The value should be specified as a string.
+  schema: '{{ format "${string}" | hints "Enter the toleration value for the Postgres pods."}}'
diff --git a/packs/hello-universe-1.1.3/values.yaml b/packs/hello-universe-1.1.3/values.yaml
new file mode 100644
index 00000000..3ba129d6
--- /dev/null
+++ b/packs/hello-universe-1.1.3/values.yaml
@@ -0,0 +1,16 @@
+# spectrocloud.com/enabled-presets: Backend:disable-api
+
+pack:
+  content:
+    images:
+      - image: ghcr.io/spectrocloud/hello-universe:1.1.3
+
+
+manifests:
+  hello-universe:
+    images:
+      hellouniverse: ghcr.io/spectrocloud/hello-universe:1.1.3
+    apiEnabled: false
+    namespace: hello-universe
+    port: 8080
+    replicas: 1
diff --git a/packs/kyverno-1.12.2/README.md b/packs/kyverno-1.12.2/README.md
new file mode 100644
index 00000000..d6d4b494
--- /dev/null
+++ b/packs/kyverno-1.12.2/README.md
@@ -0,0 +1,45 @@
+# Kyverno
+
+Kyverno (Greek for “govern”) is a policy engine designed specifically for Kubernetes. Some of its many features include:
+
+- policies as Kubernetes resources (no new language to learn!)
+- validate, mutate, generate, or cleanup (remove) any resource
+- verify container images for software supply chain security
+- inspect image metadata
+- match resources using label selectors and wildcards
+- validate and mutate using overlays (like Kustomize!)
+- synchronize configurations across Namespaces
+- block non-conformant resources using admission controls, or report policy violations
+- self-service reports (no proprietary audit log!)
+- self-service policy exceptions
+- test policies and validate resources using the Kyverno CLI, in your CI/CD pipeline, before applying to your cluster
+- manage policies as code using familiar tools like git and kustomize
+
+Kyverno allows cluster administrators to manage environment-specific configurations independently of workload configurations and enforce configuration best practices for their clusters. Kyverno can be used to scan existing workloads for best practices, or it can be used to enforce best practices by blocking or mutating API requests.
+
+## Prerequisites
+
+- Kubernetes version >= 1.26.0
+
+## Usage
+
+To use the Kyverno pack, first create a new [add-on cluster profile](https://docs.spectrocloud.com/profiles/cluster-profiles/create-cluster-profiles/create-addon-profile/) and search for the **Kyverno** pack to add it as a layer.
+
+A Kyverno policy is a collection of rules. Each rule consists of a [`match`](https://kyverno.io/docs/writing-policies/match-exclude/) declaration, an optional [`exclude`](https://kyverno.io/docs/writing-policies/match-exclude/) declaration, and one of a [`validate`](https://kyverno.io/docs/writing-policies/validate/), [`mutate`](https://kyverno.io/docs/writing-policies/mutate/), [`generate`](https://kyverno.io/docs/writing-policies/generate/), or [`verifyImages`](https://kyverno.io/docs/writing-policies/verify-images) declaration. Each rule can contain only a single `validate`, `mutate`, `generate`, or `verifyImages` child declaration.
+
+<img src="https://kyverno.io/images/Kyverno-Policy-Structure.png" alt="Kyverno Policy" width="65%"/>
+<br/>
+<br/>
+
+Policies can be defined as cluster-wide resources (using the kind `ClusterPolicy`) or namespaced resources (using the kind `Policy`). As expected, namespaced policies will only apply to resources within the namespace in which they are defined, while cluster-wide policies are applied to matching resources across all namespaces. Otherwise, there is no difference between the two types.
+
+Additional policy types include [Policy Exceptions](https://kyverno.io/docs/writing-policies/exceptions/) and [Cleanup Policies](https://kyverno.io/docs/writing-policies/cleanup/), which are separate resources and are described further in the documentation.
+
+Learn more about [Applying Policies](https://kyverno.io/docs/applying-policies/) and [Writing Policies](https://kyverno.io/docs/writing-policies/) in the Kyverno documentation.
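+
+As a quick illustration, below is a minimal validation policy, adapted from the standard `require-labels` example in the Kyverno documentation. It audits, rather than blocks, any Pod that does not carry a `team` label; the policy name and label are illustrative:
+
+```yaml
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: require-team-label
+spec:
+  # Use Enforce instead of Audit to block non-conformant resources at admission.
+  validationFailureAction: Audit
+  rules:
+    - name: check-for-team-label
+      match:
+        any:
+          - resources:
+              kinds:
+                - Pod
+      validate:
+        message: "The label `team` is required on all Pods."
+        pattern:
+          metadata:
+            labels:
+              # "?*" requires the label to be present with a non-empty value.
+              team: "?*"
+```
+
+Once applied, policy results for existing resources appear as `PolicyReport` resources in the cluster.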
+ + +## References + +- [Kyverno Docs](https://kyverno.io/docs/introduction/) +- [Kyverno](https://kyverno.io/) +- [Kyverno Github](https://github.com/kyverno/kyverno/) \ No newline at end of file diff --git a/packs/kyverno-1.12.2/charts/kyverno-3.2.3.tgz b/packs/kyverno-1.12.2/charts/kyverno-3.2.3.tgz new file mode 100644 index 00000000..6cc9d97d Binary files /dev/null and b/packs/kyverno-1.12.2/charts/kyverno-3.2.3.tgz differ diff --git a/packs/kyverno-1.12.2/logo.png b/packs/kyverno-1.12.2/logo.png new file mode 100644 index 00000000..ecf4c004 Binary files /dev/null and b/packs/kyverno-1.12.2/logo.png differ diff --git a/packs/kyverno-1.12.2/pack.json b/packs/kyverno-1.12.2/pack.json new file mode 100644 index 00000000..3a3aed76 --- /dev/null +++ b/packs/kyverno-1.12.2/pack.json @@ -0,0 +1,18 @@ +{ + "addonType": "system app", + "annotations": { + "source": "community", + "contributor" : "spectrocloud" + }, + "cloudTypes": [ + "all" + ], + "displayName": "Kyverno", + "charts": [ + "charts/kyverno-3.2.3.tgz" + ], + "layer":"addon", + "name": "kyverno", + "version": "1.12.2" + } + \ No newline at end of file diff --git a/packs/kyverno-1.12.2/presets.yaml b/packs/kyverno-1.12.2/presets.yaml new file mode 100644 index 00000000..e69de29b diff --git a/packs/kyverno-1.12.2/schema.yaml b/packs/kyverno-1.12.2/schema.yaml new file mode 100644 index 00000000..e69de29b diff --git a/packs/kyverno-1.12.2/values.yaml b/packs/kyverno-1.12.2/values.yaml new file mode 100644 index 00000000..1e3256d6 --- /dev/null +++ b/packs/kyverno-1.12.2/values.yaml @@ -0,0 +1,2166 @@ +pack: + #The namespace (on the target cluster) to install this chart + #When not found, a new namespace will be created + namespace: "kyverno" + content: + images: + - image: ghcr.io/kyverno/background-controller:v1.12.2 + - image: ghcr.io/kyverno/kyvernopre:v1.12.2 + - image: ghcr.io/kyverno/cleanup-controller:v1.12.2 + - image: ghcr.io/kyverno/reports-controller:v1.12.2 + - image: ghcr.io/kyverno/kyverno-cli:v1.12.2 + - image: ghcr.io/kyverno/kyverno:v1.12.2 + +charts: + kyverno: + # -- Internal settings used with `helm template` to generate install manifest + # @ignored + templating: + enabled: false + debug: false + version: ~ + + global: + + image: + # -- (string) Global value that allows to set a single image registry across all deployments. + # When set, it will override any values set under `.image.registry` across the chart. + registry: ~ + + caCertificates: + # -- Global CA certificates to use with Kyverno deployments + # This value is expected to be one large string of CA certificates + # Individual controller values will override this global value + data: ~ + + # -- Global value to set single volume to be mounted for CA certificates for all deployments. + # Not used when `.Values.global.caCertificates.data` is defined + # Individual controller values will override this global value + volume: {} + # Example to use hostPath: + # hostPath: + # path: /etc/pki/tls/ca-certificates.crt + # type: File + + # -- Additional container environment variables to apply to all containers and init containers + extraEnvVars: [] + # Example setting proxy + # extraEnvVars: + # - name: HTTPS_PROXY + # value: 'https://proxy.example.com:3128' + + # -- Global node labels for pod assignment. Non-global values will override the global value. 
+ nodeSelector: {} + + # -- (string) Override the name of the chart + nameOverride: ~ + + # -- (string) Override the expanded name of the chart + fullnameOverride: ~ + + # -- (string) Override the namespace the chart deploys to + namespaceOverride: ~ + + upgrade: + # -- Upgrading from v2 to v3 is not allowed by default, set this to true once changes have been reviewed. + fromV2: false + + apiVersionOverride: + # -- (string) Override api version used to create `PodDisruptionBudget`` resources. + # When not specified the chart will check if `policy/v1/PodDisruptionBudget` is available to + # determine the api version automatically. + podDisruptionBudget: ~ + + # CRDs configuration + crds: + + # -- Whether to have Helm install the Kyverno CRDs, if the CRDs are not installed by Helm, they must be added before policies can be created + install: true + + groups: + + # -- Install CRDs in group `kyverno.io` + kyverno: + admissionreports: true + backgroundscanreports: true + cleanuppolicies: true + clusteradmissionreports: true + clusterbackgroundscanreports: true + clustercleanuppolicies: true + clusterpolicies: true + globalcontextentries: true + policies: true + policyexceptions: true + updaterequests: true + + # -- Install CRDs in group `reports.kyverno.io` + reports: + clusterephemeralreports: true + ephemeralreports: true + + # -- Install CRDs in group `wgpolicyk8s.io` + wgpolicyk8s: + clusterpolicyreports: true + policyreports: true + + # -- Additional CRDs annotations + annotations: {} + # argocd.argoproj.io/sync-options: Replace=true + # strategy.spinnaker.io/replace: 'true' + + # -- Additional CRDs labels + customLabels: {} + + migration: + + # -- Enable CRDs migration using helm post upgrade hook + enabled: true + + # -- Resources to migrate + resources: + - admissionreports.kyverno.io + - backgroundscanreports.kyverno.io + - cleanuppolicies.kyverno.io + - clusteradmissionreports.kyverno.io + - clusterbackgroundscanreports.kyverno.io + - clustercleanuppolicies.kyverno.io + - clusterpolicies.kyverno.io + - globalcontextentries.kyverno.io + - policies.kyverno.io + - policyexceptions.kyverno.io + - updaterequests.kyverno.io + + image: + # -- (string) Image registry + registry: ghcr.io + # -- (string) Image repository + repository: kyverno/kyverno-cli + # -- (string) Image tag + # Defaults to appVersion in Chart.yaml if omitted + tag: ~ + # -- (string) Image pull policy + pullPolicy: IfNotPresent + + # -- Image pull secrets + imagePullSecrets: [] + # - name: secretName + + # -- Security context for the pod + podSecurityContext: {} + + # -- Node labels for pod assignment + nodeSelector: {} + + # -- List of node taints to tolerate + tolerations: [] + + # -- Pod anti affinity constraints. + podAntiAffinity: {} + + # -- Pod affinity constraints. + podAffinity: {} + + # -- Pod labels. + podLabels: {} + + # -- Pod annotations. + podAnnotations: {} + + # -- Node affinity constraints. + nodeAffinity: {} + + # -- Security context for the hook containers + securityContext: + runAsUser: 65534 + runAsGroup: 65534 + runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + + # Configuration + config: + + # -- Create the configmap. + create: true + + # -- (string) The configmap name (required if `create` is `false`). + name: ~ + + # -- Additional annotations to add to the configmap. + annotations: {} + + # -- Enable registry mutation for container images. Enabled by default. 
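+      # Note: when enabled, Kyverno adds the `defaultRegistry` below to image
+      # references that omit a registry when processing them internally.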
+ enableDefaultRegistryMutation: true + + # -- The registry hostname used for the image mutation. + defaultRegistry: docker.io + + # -- Exclude groups + excludeGroups: + - system:nodes + + # -- Exclude usernames + excludeUsernames: [] + # - '!system:kube-scheduler' + + # -- Exclude roles + excludeRoles: [] + + # -- Exclude roles + excludeClusterRoles: [] + + # -- Generate success events. + generateSuccessEvents: false + + # -- Resource types to be skipped by the Kyverno policy engine. + # Make sure to surround each entry in quotes so that it doesn't get parsed as a nested YAML list. + # These are joined together without spaces, run through `tpl`, and the result is set in the config map. + # @default -- See [values.yaml](values.yaml) + resourceFilters: + - '[Event,*,*]' + - '[*/*,kube-system,*]' + - '[*/*,kube-public,*]' + - '[*/*,kube-node-lease,*]' + - '[Node,*,*]' + - '[Node/*,*,*]' + - '[APIService,*,*]' + - '[APIService/*,*,*]' + - '[TokenReview,*,*]' + - '[SubjectAccessReview,*,*]' + - '[SelfSubjectAccessReview,*,*]' + - '[Binding,*,*]' + - '[Pod/binding,*,*]' + - '[ReplicaSet,*,*]' + - '[ReplicaSet/*,*,*]' + - '[AdmissionReport,*,*]' + - '[AdmissionReport/*,*,*]' + - '[ClusterAdmissionReport,*,*]' + - '[ClusterAdmissionReport/*,*,*]' + - '[BackgroundScanReport,*,*]' + - '[BackgroundScanReport/*,*,*]' + - '[ClusterBackgroundScanReport,*,*]' + - '[ClusterBackgroundScanReport/*,*,*]' + # exclude resources from the chart + - '[ClusterRole,*,{{ template "kyverno.admission-controller.roleName" . }}]' + - '[ClusterRole,*,{{ template "kyverno.admission-controller.roleName" . }}:core]' + - '[ClusterRole,*,{{ template "kyverno.admission-controller.roleName" . }}:additional]' + - '[ClusterRole,*,{{ template "kyverno.background-controller.roleName" . }}]' + - '[ClusterRole,*,{{ template "kyverno.background-controller.roleName" . }}:core]' + - '[ClusterRole,*,{{ template "kyverno.background-controller.roleName" . }}:additional]' + - '[ClusterRole,*,{{ template "kyverno.cleanup-controller.roleName" . }}]' + - '[ClusterRole,*,{{ template "kyverno.cleanup-controller.roleName" . }}:core]' + - '[ClusterRole,*,{{ template "kyverno.cleanup-controller.roleName" . }}:additional]' + - '[ClusterRole,*,{{ template "kyverno.reports-controller.roleName" . }}]' + - '[ClusterRole,*,{{ template "kyverno.reports-controller.roleName" . }}:core]' + - '[ClusterRole,*,{{ template "kyverno.reports-controller.roleName" . }}:additional]' + - '[ClusterRoleBinding,*,{{ template "kyverno.admission-controller.roleName" . }}]' + - '[ClusterRoleBinding,*,{{ template "kyverno.background-controller.roleName" . }}]' + - '[ClusterRoleBinding,*,{{ template "kyverno.cleanup-controller.roleName" . }}]' + - '[ClusterRoleBinding,*,{{ template "kyverno.reports-controller.roleName" . }}]' + - '[ServiceAccount,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceAccountName" . }}]' + - '[ServiceAccount/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceAccountName" . }}]' + - '[ServiceAccount,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.serviceAccountName" . }}]' + - '[ServiceAccount/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.serviceAccountName" . }}]' + - '[ServiceAccount,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.serviceAccountName" . }}]' + - '[ServiceAccount/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.serviceAccountName" . 
}}]' + - '[ServiceAccount,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.serviceAccountName" . }}]' + - '[ServiceAccount/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.serviceAccountName" . }}]' + - '[Role,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.roleName" . }}]' + - '[Role,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.roleName" . }}]' + - '[Role,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.roleName" . }}]' + - '[Role,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.roleName" . }}]' + - '[RoleBinding,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.roleName" . }}]' + - '[RoleBinding,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.roleName" . }}]' + - '[RoleBinding,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.roleName" . }}]' + - '[RoleBinding,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.roleName" . }}]' + - '[ConfigMap,{{ include "kyverno.namespace" . }},{{ template "kyverno.config.configMapName" . }}]' + - '[ConfigMap,{{ include "kyverno.namespace" . }},{{ template "kyverno.config.metricsConfigMapName" . }}]' + - '[Deployment,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}]' + - '[Deployment/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}]' + - '[Deployment,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}]' + - '[Deployment/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}]' + - '[Deployment,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]' + - '[Deployment/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]' + - '[Deployment,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}]' + - '[Deployment/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}]' + - '[Pod,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}-*]' + - '[Pod/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}-*]' + - '[Pod,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}-*]' + - '[Pod/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}-*]' + - '[Pod,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}-*]' + - '[Pod/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}-*]' + - '[Pod,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}-*]' + - '[Pod/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}-*]' + - '[Job,{{ include "kyverno.namespace" . }},{{ template "kyverno.fullname" . }}-hook-pre-delete]' + - '[Job/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.fullname" . }}-hook-pre-delete]' + - '[NetworkPolicy,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}]' + - '[NetworkPolicy/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}]' + - '[NetworkPolicy,{{ include "kyverno.namespace" . 
}},{{ template "kyverno.background-controller.name" . }}]' + - '[NetworkPolicy/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}]' + - '[NetworkPolicy,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]' + - '[NetworkPolicy/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]' + - '[NetworkPolicy,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}]' + - '[NetworkPolicy/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}]' + - '[PodDisruptionBudget,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}]' + - '[PodDisruptionBudget/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}]' + - '[PodDisruptionBudget,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}]' + - '[PodDisruptionBudget/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}]' + - '[PodDisruptionBudget,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]' + - '[PodDisruptionBudget/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]' + - '[PodDisruptionBudget,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}]' + - '[PodDisruptionBudget/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}]' + - '[Service,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceName" . }}]' + - '[Service/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceName" . }}]' + - '[Service,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceName" . }}-metrics]' + - '[Service/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceName" . }}-metrics]' + - '[Service,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}-metrics]' + - '[Service/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}-metrics]' + - '[Service,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]' + - '[Service/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]' + - '[Service,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}-metrics]' + - '[Service/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}-metrics]' + - '[Service,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}-metrics]' + - '[Service/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}-metrics]' + - '[ServiceMonitor,{{ if .Values.admissionController.serviceMonitor.namespace }}{{ .Values.admissionController.serviceMonitor.namespace }}{{ else }}{{ template "kyverno.namespace" . }}{{ end }},{{ template "kyverno.admission-controller.name" . }}]' + - '[ServiceMonitor,{{ if .Values.admissionController.serviceMonitor.namespace }}{{ .Values.admissionController.serviceMonitor.namespace }}{{ else }}{{ template "kyverno.namespace" . }}{{ end }},{{ template "kyverno.background-controller.name" . 
}}]' + - '[ServiceMonitor,{{ if .Values.admissionController.serviceMonitor.namespace }}{{ .Values.admissionController.serviceMonitor.namespace }}{{ else }}{{ template "kyverno.namespace" . }}{{ end }},{{ template "kyverno.cleanup-controller.name" . }}]' + - '[ServiceMonitor,{{ if .Values.admissionController.serviceMonitor.namespace }}{{ .Values.admissionController.serviceMonitor.namespace }}{{ else }}{{ template "kyverno.namespace" . }}{{ end }},{{ template "kyverno.reports-controller.name" . }}]' + - '[Secret,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceName" . }}.{{ template "kyverno.namespace" . }}.svc.*]' + - '[Secret,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}.{{ template "kyverno.namespace" . }}.svc.*]' + + # -- Defines the `namespaceSelector` in the webhook configurations. + # Note that it takes a list of `namespaceSelector` and/or `objectSelector` in the JSON format, and only the first element + # will be forwarded to the webhook configurations. + # The Kyverno namespace is excluded if `excludeKyvernoNamespace` is `true` (default) + webhooks: + # Exclude namespaces + - namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: NotIn + values: + - kube-system + # Exclude objects + # - objectSelector: + # matchExpressions: + # - key: webhooks.kyverno.io/exclude + # operator: DoesNotExist + + # -- Defines annotations to set on webhook configurations. + webhookAnnotations: + # Example to disable admission enforcer on AKS: + 'admissions.enforcer/disabled': 'true' + + # -- Defines labels to set on webhook configurations. + webhookLabels: {} + # Example to adopt webhook resources in ArgoCD: + # 'argocd.argoproj.io/instance': 'kyverno' + + # -- Defines match conditions to set on webhook configurations (requires Kubernetes 1.27+). + matchConditions: [] + + # -- Exclude Kyverno namespace + # Determines if default Kyverno namespace exclusion is enabled for webhooks and resourceFilters + excludeKyvernoNamespace: true + + # -- resourceFilter namespace exclude + # Namespaces to exclude from the default resourceFilters + resourceFiltersExcludeNamespaces: [] + + # -- resourceFilters exclude list + # Items to exclude from config.resourceFilters + resourceFiltersExclude: [] + + # -- resourceFilter namespace include + # Namespaces to include to the default resourceFilters + resourceFiltersIncludeNamespaces: [] + + # -- resourceFilters include list + # Items to include to config.resourceFilters + resourceFiltersInclude: [] + + # Metrics configuration + metricsConfig: + + # -- Create the configmap. + create: true + + # -- (string) The configmap name (required if `create` is `false`). + name: ~ + + # -- Additional annotations to add to the configmap. + annotations: {} + + namespaces: + + # -- List of namespaces to capture metrics for. + include: [] + + # -- list of namespaces to NOT capture metrics for. + exclude: [] + + # -- (string) Rate at which metrics should reset so as to clean up the memory footprint of kyverno metrics, if you might be expecting high memory footprint of Kyverno's metrics. Default: 0, no refresh of metrics. 
WARNING: This flag is not working since Kyverno 1.8.0 + metricsRefreshInterval: ~ + # metricsRefreshInterval: 24h + + # -- (list) Configures the bucket boundaries for all Histogram metrics, changing this configuration requires restart of the kyverno admission controller + bucketBoundaries: [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 15, 20, 25, 30] + + # -- (map) Configures the exposure of individual metrics, by default all metrics and all labels are exported, changing this configuration requires restart of the kyverno admission controller + metricsExposure: ~ + # metricsExposure: + # kyverno_policy_execution_duration_seconds: + # disabledLabelDimensions: ["resource_kind", "resource_namespace", "resource_request_operation"] + # bucketBoundaries: [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5] + # kyverno_admission_review_duration_seconds: + # enabled: false + + # -- Image pull secrets for image verification policies, this will define the `--imagePullSecrets` argument + imagePullSecrets: {} + # regcred: + # registry: foo.example.com + # username: foobar + # password: secret + # regcred2: + # registry: bar.example.com + # username: barbaz + # password: secret2 + + # -- Existing Image pull secrets for image verification policies, this will define the `--imagePullSecrets` argument + existingImagePullSecrets: [] + # - test-registry + # - other-test-registry + + # Tests configuration + test: + + image: + # -- (string) Image registry + registry: ~ + # -- Image repository + repository: busybox + # -- Image tag + # Defaults to `latest` if omitted + tag: '1.35' + # -- (string) Image pull policy + # Defaults to image.pullPolicy if omitted + pullPolicy: ~ + + resources: + # -- Pod resource limits + limits: + cpu: 100m + memory: 256Mi + # -- Pod resource requests + requests: + cpu: 10m + memory: 64Mi + + # -- Security context for the test containers + securityContext: + runAsUser: 65534 + runAsGroup: 65534 + runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + + # -- Additional labels + customLabels: {} + + webhooksCleanup: + # -- Create a helm pre-delete hook to cleanup webhooks. + enabled: true + + image: + # -- (string) Image registry + registry: ~ + # -- Image repository + repository: bitnami/kubectl + # -- Image tag + # Defaults to `latest` if omitted + tag: '1.28.5' + # -- (string) Image pull policy + # Defaults to image.pullPolicy if omitted + pullPolicy: ~ + + # -- Image pull secrets + imagePullSecrets: [] + + # -- Security context for the pod + podSecurityContext: {} + + # -- Node labels for pod assignment + nodeSelector: {} + + # -- List of node taints to tolerate + tolerations: [] + + # -- Pod anti affinity constraints. + podAntiAffinity: {} + + # -- Pod affinity constraints. + podAffinity: {} + + # -- Pod labels. + podLabels: {} + + # -- Pod annotations. + podAnnotations: {} + + # -- Node affinity constraints. + nodeAffinity: {} + + # -- Security context for the hook containers + securityContext: + runAsUser: 65534 + runAsGroup: 65534 + runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + + policyReportsCleanup: + # -- Create a helm post-upgrade hook to cleanup the old policy reports. 
+ enabled: true + + image: + # -- (string) Image registry + registry: ~ + # -- Image repository + repository: bitnami/kubectl + # -- Image tag + # Defaults to `latest` if omitted + tag: '1.28.5' + # -- (string) Image pull policy + # Defaults to image.pullPolicy if omitted + pullPolicy: ~ + + # -- Image pull secrets + imagePullSecrets: [] + # - name: secretName + + # -- Security context for the pod + podSecurityContext: {} + + # -- Node labels for pod assignment + nodeSelector: {} + + # -- List of node taints to tolerate + tolerations: [] + + # -- Pod anti affinity constraints. + podAntiAffinity: {} + + # -- Pod affinity constraints. + podAffinity: {} + + # -- Pod labels. + podLabels: {} + + # -- Pod annotations. + podAnnotations: {} + + # -- Node affinity constraints. + nodeAffinity: {} + + # -- Security context for the hook containers + securityContext: + runAsUser: 65534 + runAsGroup: 65534 + runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + + grafana: + # -- Enable grafana dashboard creation. + enabled: false + + # -- Configmap name template. + configMapName: '{{ include "kyverno.fullname" . }}-grafana' + + # -- (string) Namespace to create the grafana dashboard configmap. + # If not set, it will be created in the same namespace where the chart is deployed. + namespace: ~ + + # -- Grafana dashboard configmap annotations. + annotations: {} + + # -- Grafana dashboard configmap labels + labels: + grafana_dashboard: "1" + + # -- create GrafanaDashboard custom resource referencing to the configMap. + # according to https://grafana-operator.github.io/grafana-operator/docs/examples/dashboard_from_configmap/readme/ + grafanaDashboard: + create: false + matchLabels: + dashboards: "grafana" + + # Features configuration + features: + admissionReports: + # -- Enables the feature + enabled: true + aggregateReports: + # -- Enables the feature + enabled: true + policyReports: + # -- Enables the feature + enabled: true + validatingAdmissionPolicyReports: + # -- Enables the feature + enabled: false + autoUpdateWebhooks: + # -- Enables the feature + enabled: true + backgroundScan: + # -- Enables the feature + enabled: true + # -- Number of background scan workers + backgroundScanWorkers: 2 + # -- Background scan interval + backgroundScanInterval: 1h + # -- Skips resource filters in background scan + skipResourceFilters: true + configMapCaching: + # -- Enables the feature + enabled: true + deferredLoading: + # -- Enables the feature + enabled: true + dumpPayload: + # -- Enables the feature + enabled: false + forceFailurePolicyIgnore: + # -- Enables the feature + enabled: false + generateValidatingAdmissionPolicy: + # -- Enables the feature + enabled: false + globalContext: + # -- Maximum allowed response size from API Calls. 
A value of 0 bypasses checks (not recommended) + maxApiCallResponseLength: 2000000 + logging: + # -- Logging format + format: text + # -- Logging verbosity + verbosity: 2 + omitEvents: + # -- Events which should not be emitted (possible values `PolicyViolation`, `PolicyApplied`, `PolicyError`, and `PolicySkipped`) + eventTypes: + - PolicyApplied + - PolicySkipped + # - PolicyViolation + # - PolicyError + policyExceptions: + # -- Enables the feature + enabled: true + # -- Restrict policy exceptions to a single namespace + namespace: '' + protectManagedResources: + # -- Enables the feature + enabled: false + registryClient: + # -- Allow insecure registry + allowInsecure: false + # -- Enable registry client helpers + credentialHelpers: + - default + - google + - amazon + - azure + - github + reports: + # -- Reports chunk size + chunkSize: 0 + ttlController: + # -- Reconciliation interval for the label based cleanup manager + reconciliationInterval: 1m + tuf: + # -- Enables the feature + enabled: false + # -- (string) Tuf root + root: ~ + # -- (string) Tuf mirror + mirror: ~ + + # Cleanup cronjobs to prevent internal resources from stacking up in the cluster + cleanupJobs: + + admissionReports: + + # -- Enable cleanup cronjob + enabled: true + + # -- Maximum number of retries before considering a Job as failed. Defaults to 3. + backoffLimit: 3 + + image: + # -- (string) Image registry + registry: ~ + # -- Image repository + repository: bitnami/kubectl + # -- Image tag + # Defaults to `latest` if omitted + tag: '1.28.5' + # -- (string) Image pull policy + # Defaults to image.pullPolicy if omitted + pullPolicy: ~ + + # -- Image pull secrets + imagePullSecrets: [] + # - name: secretName + + # -- Cronjob schedule + schedule: '*/10 * * * *' + + # -- Reports threshold, if number of reports are above this value the cronjob will start deleting them + threshold: 10000 + + # -- Cronjob history + history: + success: 1 + failure: 1 + + # -- Security context for the pod + podSecurityContext: {} + + # -- Security context for the containers + securityContext: + runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + + # -- Pod PriorityClassName + priorityClassName: "" + + # -- Job resources + resources: {} + + # -- List of node taints to tolerate + tolerations: [] + + # -- Node labels for pod assignment + nodeSelector: {} + + # -- Pod Annotations + podAnnotations: {} + + # -- Pod labels + podLabels: {} + + # -- Pod anti affinity constraints. + podAntiAffinity: {} + + # -- Pod affinity constraints. + podAffinity: {} + + # -- Node affinity constraints. + nodeAffinity: {} + + clusterAdmissionReports: + + # -- Enable cleanup cronjob + enabled: true + + # -- Maximum number of retries before considering a Job as failed. Defaults to 3. 
+ backoffLimit: 3 + + image: + # -- (string) Image registry + registry: ~ + # -- Image repository + repository: bitnami/kubectl + # -- Image tag + # Defaults to `latest` if omitted + tag: '1.28.5' + # -- (string) Image pull policy + # Defaults to image.pullPolicy if omitted + pullPolicy: ~ + + # -- Image pull secrets + imagePullSecrets: [] + # - name: secretName + + # -- Cronjob schedule + schedule: '*/10 * * * *' + + # -- Reports threshold, if number of reports are above this value the cronjob will start deleting them + threshold: 10000 + + # -- Cronjob history + history: + success: 1 + failure: 1 + + # -- Security context for the pod + podSecurityContext: {} + + # -- Security context for the containers + securityContext: + runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + + # -- Pod PriorityClassName + priorityClassName: "" + + # -- Job resources + resources: {} + + # -- List of node taints to tolerate + tolerations: [] + + # -- Node labels for pod assignment + nodeSelector: {} + + # -- Pod Annotations + podAnnotations: {} + + # -- Pod Labels + podLabels: {} + + # -- Pod anti affinity constraints. + podAntiAffinity: {} + + # -- Pod affinity constraints. + podAffinity: {} + + # -- Node affinity constraints. + nodeAffinity: {} + + # Admission controller configuration + admissionController: + + # -- Overrides features defined at the root level + featuresOverride: {} + + rbac: + # -- Create RBAC resources + create: true + + serviceAccount: + # -- The ServiceAccount name + name: + + # -- Annotations for the ServiceAccount + annotations: {} + # example.com/annotation: value + + coreClusterRole: + # -- Extra resource permissions to add in the core cluster role. + # This was introduced to avoid breaking change in the chart but should ideally be moved in `clusterRole.extraResources`. + # @default -- See [values.yaml](values.yaml) + extraResources: + - apiGroups: + - '*' + resources: + - '*' + verbs: + - get + - list + - watch + + clusterRole: + # -- Extra resource permissions to add in the cluster role + extraResources: [] + # - apiGroups: + # - '' + # resources: + # - pods + # verbs: + # - create + # - update + # - delete + + # -- Create self-signed certificates at deployment time. + # The certificates won't be automatically renewed if this is set to `true`. + createSelfSignedCert: false + + # -- (int) Desired number of pods + replicas: ~ + + # -- The number of revisions to keep + revisionHistoryLimit: 10 + + # -- Additional labels to add to each pod + podLabels: {} + # example.com/label: foo + + # -- Additional annotations to add to each pod + podAnnotations: {} + # example.com/annotation: foo + + # -- Deployment update strategy. + # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + # @default -- See [values.yaml](values.yaml) + updateStrategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 40% + type: RollingUpdate + + # -- Optional priority class + priorityClassName: '' + + # -- Change `apiPriorityAndFairness` to `true` if you want to insulate the API calls made by Kyverno admission controller activities. + # This will help ensure Kyverno stability in busy clusters. + # Ref: https://kubernetes.io/docs/concepts/cluster-administration/flow-control/ + apiPriorityAndFairness: false + + # -- Priority level configuration. 
+ # The block is directly forwarded into the priorityLevelConfiguration, so you can use whatever specification you want. + # ref: https://kubernetes.io/docs/concepts/cluster-administration/flow-control/#prioritylevelconfiguration + # @default -- See [values.yaml](values.yaml) + priorityLevelConfigurationSpec: + type: Limited + limited: + nominalConcurrencyShares: 10 + limitResponse: + queuing: + queueLengthLimit: 50 + type: Queue + + # -- Change `hostNetwork` to `true` when you want the pod to share its host's network namespace. + # Useful for situations like when you end up dealing with a custom CNI over Amazon EKS. + # Update the `dnsPolicy` accordingly as well to suit the host network mode. + hostNetwork: false + + # -- admissionController webhook server port + # in case you are using hostNetwork: true, you might want to change the port the webhookServer is listening to + webhookServer: + port: 9443 + + # -- `dnsPolicy` determines the manner in which DNS resolution happens in the cluster. + # In case of `hostNetwork: true`, usually, the `dnsPolicy` is suitable to be `ClusterFirstWithHostNet`. + # For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy. + dnsPolicy: ClusterFirst + + # -- Startup probe. + # The block is directly forwarded into the deployment, so you can use whatever startupProbes configuration you want. + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + # @default -- See [values.yaml](values.yaml) + startupProbe: + httpGet: + path: /health/liveness + port: 9443 + scheme: HTTPS + failureThreshold: 20 + initialDelaySeconds: 2 + periodSeconds: 6 + + # -- Liveness probe. + # The block is directly forwarded into the deployment, so you can use whatever livenessProbe configuration you want. + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + # @default -- See [values.yaml](values.yaml) + livenessProbe: + httpGet: + path: /health/liveness + port: 9443 + scheme: HTTPS + initialDelaySeconds: 15 + periodSeconds: 30 + timeoutSeconds: 5 + failureThreshold: 2 + successThreshold: 1 + + # -- Readiness Probe. + # The block is directly forwarded into the deployment, so you can use whatever readinessProbe configuration you want. + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + # @default -- See [values.yaml](values.yaml) + readinessProbe: + httpGet: + path: /health/readiness + port: 9443 + scheme: HTTPS + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + # -- Node labels for pod assignment + nodeSelector: {} + + # -- List of node taints to tolerate + tolerations: [] + + antiAffinity: + # -- Pod antiAffinities toggle. + # Enabled by default but can be disabled if you want to schedule pods to the same node. + enabled: true + + # -- Pod anti affinity constraints. + # @default -- See [values.yaml](values.yaml) + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - admission-controller + topologyKey: kubernetes.io/hostname + + # -- Pod affinity constraints. + podAffinity: {} + + # -- Node affinity constraints. + nodeAffinity: {} + + # -- Topology spread constraints. 
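+      # Example (hypothetical): spread admission controller replicas across zones
+      # topologySpreadConstraints:
+      #   - maxSkew: 1
+      #     topologyKey: topology.kubernetes.io/zone
+      #     whenUnsatisfiable: ScheduleAnyway
+      #     labelSelector:
+      #       matchLabels:
+      #         app.kubernetes.io/component: admission-controller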
+ topologySpreadConstraints: [] + + # -- Security context for the pod + podSecurityContext: {} + + podDisruptionBudget: + # -- Enable PodDisruptionBudget. + # Will always be enabled if replicas > 1. This non-declarative behavior should ideally be avoided, but changing it now would be breaking. + enabled: false + # -- Configures the minimum available pods for disruptions. + # Cannot be used if `maxUnavailable` is set. + minAvailable: 1 + # -- Configures the maximum unavailable pods for disruptions. + # Cannot be used if `minAvailable` is set. + maxUnavailable: + + # -- A writable volume to use for the TUF root initialization. + tufRootMountPath: /.sigstore + + # -- Volume to be mounted in pods for TUF/cosign work. + sigstoreVolume: + emptyDir: {} + + caCertificates: + # -- CA certificates to use with Kyverno deployments + # This value is expected to be one large string of CA certificates + data: ~ + # -- Volume to be mounted for CA certificates + # Not used when `.Values.admissionController.caCertificates.data` is defined + volume: {} + # Example to use hostPath: + # hostPath: + # path: /etc/pki/tls/ca-certificates.crt + # type: File + + # -- Image pull secrets + imagePullSecrets: [] + # - secretName + + initContainer: + + image: + # -- Image registry + registry: ghcr.io + # -- Image repository + repository: kyverno/kyvernopre + # -- (string) Image tag + # If missing, defaults to image.tag + tag: ~ + # -- (string) Image pull policy + # If missing, defaults to image.pullPolicy + pullPolicy: ~ + + resources: + # -- Pod resource limits + limits: + cpu: 100m + memory: 256Mi + # -- Pod resource requests + requests: + cpu: 10m + memory: 64Mi + + # -- Container security context + securityContext: + runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + + # -- Additional container args. + extraArgs: {} + + # -- Additional container environment variables. + extraEnvVars: [] + # Example setting proxy + # extraEnvVars: + # - name: HTTPS_PROXY + # value: 'https://proxy.example.com:3128' + + container: + + image: + # -- Image registry + registry: ghcr.io + # -- Image repository + repository: kyverno/kyverno + # -- (string) Image tag + # Defaults to appVersion in Chart.yaml if omitted + tag: ~ + # -- Image pull policy + pullPolicy: IfNotPresent + + resources: + # -- Pod resource limits + limits: + memory: 384Mi + # -- Pod resource requests + requests: + cpu: 100m + memory: 128Mi + + # -- Container security context + securityContext: + runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + + # -- Additional container args. + extraArgs: {} + + # -- Additional container environment variables. + extraEnvVars: [] + # Example setting proxy + # extraEnvVars: + # - name: HTTPS_PROXY + # value: 'https://proxy.example.com:3128' + + # -- Array of extra init containers + extraInitContainers: [] + # - name: init-container + # image: busybox + # command: ['sh', '-c', 'echo Hello'] + + # -- Array of extra containers to run alongside kyverno + extraContainers: [] + # - name: myapp-container + # image: busybox + # command: ['sh', '-c', 'echo Hello && sleep 3600'] + + service: + # -- Service port. + port: 443 + # -- Service type. + type: ClusterIP + # -- Service node port. + # Only used if `type` is `NodePort`. + nodePort: + # -- Service annotations. 
+ annotations: {} + + metricsService: + # -- Create service. + create: true + # -- Service port. + # Kyverno's metrics server will be exposed at this port. + port: 8000 + # -- Service type. + type: ClusterIP + # -- Service node port. + # Only used if `type` is `NodePort`. + nodePort: + # -- Service annotations. + annotations: {} + + networkPolicy: + # -- When true, use a NetworkPolicy to allow ingress to the webhook + # This is useful on clusters using Calico and/or native k8s network policies in a default-deny setup. + enabled: false + # -- A list of valid from selectors according to https://kubernetes.io/docs/concepts/services-networking/network-policies. + ingressFrom: [] + + serviceMonitor: + # -- Create a `ServiceMonitor` to collect Prometheus metrics. + enabled: false + # -- Additional labels + additionalLabels: {} + # -- (string) Override namespace + namespace: ~ + # -- Interval to scrape metrics + interval: 30s + # -- Timeout if metrics can't be retrieved in given time interval + scrapeTimeout: 25s + # -- Is TLS required for endpoint + secure: false + # -- TLS Configuration for endpoint + tlsConfig: {} + # -- RelabelConfigs to apply to samples before scraping + relabelings: [] + # -- MetricRelabelConfigs to apply to samples before ingestion. + metricRelabelings: [] + + tracing: + # -- Enable tracing + enabled: false + # -- Traces receiver address + address: + # -- Traces receiver port + port: + # -- Traces receiver credentials + creds: '' + + metering: + # -- Disable metrics export + disabled: false + # -- Otel configuration, can be `prometheus` or `grpc` + config: prometheus + # -- Prometheus endpoint port + port: 8000 + # -- Otel collector endpoint + collector: '' + # -- Otel collector credentials + creds: '' + + profiling: + # -- Enable profiling + enabled: false + # -- Profiling endpoint port + port: 6060 + # -- Service type. + serviceType: ClusterIP + # -- Service node port. + # Only used if `type` is `NodePort`. + nodePort: + + # Background controller configuration + backgroundController: + + # -- Overrides features defined at the root level + featuresOverride: {} + + # -- Enable background controller. + enabled: true + + rbac: + # -- Create RBAC resources + create: true + + serviceAccount: + # -- Service account name + name: + + # -- Annotations for the ServiceAccount + annotations: {} + # example.com/annotation: value + + coreClusterRole: + # -- Extra resource permissions to add in the core cluster role. + # This was introduced to avoid breaking change in the chart but should ideally be moved in `clusterRole.extraResources`. 
+ # @default -- See [values.yaml](values.yaml) + extraResources: + - apiGroups: + - '*' + resources: + - '*' + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + - ingressclasses + - networkpolicies + verbs: + - create + - update + - patch + - delete + - apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - update + - patch + - delete + - apiGroups: + - '' + resources: + - configmaps + - secrets + - resourcequotas + - limitranges + verbs: + - create + - update + - patch + - delete + + clusterRole: + # -- Extra resource permissions to add in the cluster role + extraResources: [] + # - apiGroups: + # - '' + # resources: + # - pods + # verbs: + # - create + # - update + # - delete + # - patch + + image: + # -- Image registry + registry: ghcr.io + # -- Image repository + repository: kyverno/background-controller + # -- Image tag + # Defaults to appVersion in Chart.yaml if omitted + tag: ~ + # -- Image pull policy + pullPolicy: IfNotPresent + + # -- Image pull secrets + imagePullSecrets: [] + # - secretName + + # -- (int) Desired number of pods + replicas: ~ + + # -- The number of revisions to keep + revisionHistoryLimit: 10 + + # -- Additional labels to add to each pod + podLabels: {} + # example.com/label: foo + + # -- Additional annotations to add to each pod + podAnnotations: {} + # example.com/annotation: foo + + # -- Deployment update strategy. + # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + # @default -- See [values.yaml](values.yaml) + updateStrategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 40% + type: RollingUpdate + + # -- Optional priority class + priorityClassName: '' + + # -- Change `hostNetwork` to `true` when you want the pod to share its host's network namespace. + # Useful for situations like when you end up dealing with a custom CNI over Amazon EKS. + # Update the `dnsPolicy` accordingly as well to suit the host network mode. + hostNetwork: false + + # -- `dnsPolicy` determines the manner in which DNS resolution happens in the cluster. + # In case of `hostNetwork: true`, usually, the `dnsPolicy` is suitable to be `ClusterFirstWithHostNet`. + # For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy. + dnsPolicy: ClusterFirst + + # -- Extra arguments passed to the container on the command line + extraArgs: {} + + # -- Additional container environment variables. + extraEnvVars: [] + # Example setting proxy + # extraEnvVars: + # - name: HTTPS_PROXY + # value: 'https://proxy.example.com:3128' + + resources: + # -- Pod resource limits + limits: + memory: 128Mi + # -- Pod resource requests + requests: + cpu: 100m + memory: 64Mi + + # -- Node labels for pod assignment + nodeSelector: {} + + # -- List of node taints to tolerate + tolerations: [] + + antiAffinity: + # -- Pod antiAffinities toggle. + # Enabled by default but can be disabled if you want to schedule pods to the same node. + enabled: true + + # -- Pod anti affinity constraints. + # @default -- See [values.yaml](values.yaml) + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - background-controller + topologyKey: kubernetes.io/hostname + + # -- Pod affinity constraints. + podAffinity: {} + + # -- Node affinity constraints. 
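+      # Example (hypothetical): run the background controller only on amd64 nodes
+      # nodeAffinity:
+      #   requiredDuringSchedulingIgnoredDuringExecution:
+      #     nodeSelectorTerms:
+      #       - matchExpressions:
+      #           - key: kubernetes.io/arch
+      #             operator: In
+      #             values:
+      #               - amd64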
+ nodeAffinity: {} + + # -- Topology spread constraints. + topologySpreadConstraints: [] + + # -- Security context for the pod + podSecurityContext: {} + + # -- Security context for the containers + securityContext: + runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + + podDisruptionBudget: + # -- Enable PodDisruptionBudget. + # Will always be enabled if replicas > 1. This non-declarative behavior should ideally be avoided, but changing it now would be breaking. + enabled: false + # -- Configures the minimum available pods for disruptions. + # Cannot be used if `maxUnavailable` is set. + minAvailable: 1 + # -- Configures the maximum unavailable pods for disruptions. + # Cannot be used if `minAvailable` is set. + maxUnavailable: + + caCertificates: + # -- CA certificates to use with Kyverno deployments + # This value is expected to be one large string of CA certificates + data: ~ + # -- Volume to be mounted for CA certificates + # Not used when `.Values.backgroundController.caCertificates.data` is defined + volume: {} + # Example to use hostPath: + # hostPath: + # path: /etc/pki/tls/ca-certificates.crt + # type: File + + metricsService: + # -- Create service. + create: true + # -- Service port. + # Metrics server will be exposed at this port. + port: 8000 + # -- Service type. + type: ClusterIP + # -- Service node port. + # Only used if `metricsService.type` is `NodePort`. + nodePort: + # -- Service annotations. + annotations: {} + + networkPolicy: + + # -- When true, use a NetworkPolicy to allow ingress to the webhook + # This is useful on clusters using Calico and/or native k8s network policies in a default-deny setup. + enabled: false + + # -- A list of valid from selectors according to https://kubernetes.io/docs/concepts/services-networking/network-policies. + ingressFrom: [] + + serviceMonitor: + # -- Create a `ServiceMonitor` to collect Prometheus metrics. + enabled: false + # -- Additional labels + additionalLabels: {} + # -- (string) Override namespace + namespace: ~ + # -- Interval to scrape metrics + interval: 30s + # -- Timeout if metrics can't be retrieved in given time interval + scrapeTimeout: 25s + # -- Is TLS required for endpoint + secure: false + # -- TLS Configuration for endpoint + tlsConfig: {} + # -- RelabelConfigs to apply to samples before scraping + relabelings: [] + # -- MetricRelabelConfigs to apply to samples before ingestion. + metricRelabelings: [] + + tracing: + # -- Enable tracing + enabled: false + # -- Traces receiver address + address: + # -- Traces receiver port + port: + # -- Traces receiver credentials + creds: '' + + metering: + # -- Disable metrics export + disabled: false + # -- Otel configuration, can be `prometheus` or `grpc` + config: prometheus + # -- Prometheus endpoint port + port: 8000 + # -- Otel collector endpoint + collector: '' + # -- Otel collector credentials + creds: '' + + profiling: + # -- Enable profiling + enabled: false + # -- Profiling endpoint port + port: 6060 + # -- Service type. + serviceType: ClusterIP + # -- Service node port. + # Only used if `type` is `NodePort`. + nodePort: + + # Cleanup controller configuration + cleanupController: + + # -- Overrides features defined at the root level + featuresOverride: {} + + # -- Enable cleanup controller. 
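+      # Note: the cleanup controller is required for CleanupPolicy/ClusterCleanupPolicy
+      # resources and TTL-based resource cleanup to work.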
+ enabled: true + + rbac: + # -- Create RBAC resources + create: true + + serviceAccount: + # -- Service account name + name: + + # -- Annotations for the ServiceAccount + annotations: {} + # example.com/annotation: value + + clusterRole: + # -- Extra resource permissions to add in the cluster role + extraResources: [] + # - apiGroups: + # - '' + # resources: + # - pods + # verbs: + # - delete + # - list + # - watch + + # -- Create self-signed certificates at deployment time. + # The certificates won't be automatically renewed if this is set to `true`. + createSelfSignedCert: false + + image: + # -- Image registry + registry: ghcr.io + # -- Image repository + repository: kyverno/cleanup-controller + # -- (string) Image tag + # Defaults to appVersion in Chart.yaml if omitted + tag: ~ + # -- Image pull policy + pullPolicy: IfNotPresent + + # -- Image pull secrets + imagePullSecrets: [] + # - secretName + + # -- (int) Desired number of pods + replicas: ~ + + # -- The number of revisions to keep + revisionHistoryLimit: 10 + + # -- Additional labels to add to each pod + podLabels: {} + # example.com/label: foo + + # -- Additional annotations to add to each pod + podAnnotations: {} + # example.com/annotation: foo + + # -- Deployment update strategy. + # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + # @default -- See [values.yaml](values.yaml) + updateStrategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 40% + type: RollingUpdate + + # -- Optional priority class + priorityClassName: '' + + # -- Change `hostNetwork` to `true` when you want the pod to share its host's network namespace. + # Useful for situations like when you end up dealing with a custom CNI over Amazon EKS. + # Update the `dnsPolicy` accordingly as well to suit the host network mode. + hostNetwork: false + + # -- cleanupController server port + # in case you are using hostNetwork: true, you might want to change the port the cleanupController is listening to + server: + port: 9443 + # -- cleanupController webhook server port + # in case you are using hostNetwork: true, you might want to change the port the webhookServer is listening to + webhookServer: + port: 9443 + + # -- `dnsPolicy` determines the manner in which DNS resolution happens in the cluster. + # In case of `hostNetwork: true`, usually, the `dnsPolicy` is suitable to be `ClusterFirstWithHostNet`. + # For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy. + dnsPolicy: ClusterFirst + + # -- Extra arguments passed to the container on the command line + extraArgs: {} + + # -- Additional container environment variables. + extraEnvVars: [] + # Example setting proxy + # extraEnvVars: + # - name: HTTPS_PROXY + # value: 'https://proxy.example.com:3128' + + resources: + # -- Pod resource limits + limits: + memory: 128Mi + # -- Pod resource requests + requests: + cpu: 100m + memory: 64Mi + + # -- Startup probe. + # The block is directly forwarded into the deployment, so you can use whatever startupProbes configuration you want. + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + # @default -- See [values.yaml](values.yaml) + startupProbe: + httpGet: + path: /health/liveness + port: 9443 + scheme: HTTPS + failureThreshold: 20 + initialDelaySeconds: 2 + periodSeconds: 6 + + # -- Liveness probe. + # The block is directly forwarded into the deployment, so you can use whatever livenessProbe configuration you want. 
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + # @default -- See [values.yaml](values.yaml) + livenessProbe: + httpGet: + path: /health/liveness + port: 9443 + scheme: HTTPS + initialDelaySeconds: 15 + periodSeconds: 30 + timeoutSeconds: 5 + failureThreshold: 2 + successThreshold: 1 + + # -- Readiness Probe. + # The block is directly forwarded into the deployment, so you can use whatever readinessProbe configuration you want. + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + # @default -- See [values.yaml](values.yaml) + readinessProbe: + httpGet: + path: /health/readiness + port: 9443 + scheme: HTTPS + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + # -- Node labels for pod assignment + nodeSelector: {} + + # -- List of node taints to tolerate + tolerations: [] + + antiAffinity: + # -- Pod antiAffinities toggle. + # Enabled by default but can be disabled if you want to schedule pods to the same node. + enabled: true + + # -- Pod anti affinity constraints. + # @default -- See [values.yaml](values.yaml) + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - cleanup-controller + topologyKey: kubernetes.io/hostname + + # -- Pod affinity constraints. + podAffinity: {} + + # -- Node affinity constraints. + nodeAffinity: {} + + # -- Topology spread constraints. + topologySpreadConstraints: [] + + # -- Security context for the pod + podSecurityContext: {} + + # -- Security context for the containers + securityContext: + runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + + podDisruptionBudget: + # -- Enable PodDisruptionBudget. + # Will always be enabled if replicas > 1. This non-declarative behavior should ideally be avoided, but changing it now would be breaking. + enabled: false + # -- Configures the minimum available pods for disruptions. + # Cannot be used if `maxUnavailable` is set. + minAvailable: 1 + # -- Configures the maximum unavailable pods for disruptions. + # Cannot be used if `minAvailable` is set. + maxUnavailable: + + service: + # -- Service port. + port: 443 + # -- Service type. + type: ClusterIP + # -- Service node port. + # Only used if `service.type` is `NodePort`. + nodePort: + # -- Service annotations. + annotations: {} + + metricsService: + # -- Create service. + create: true + # -- Service port. + # Metrics server will be exposed at this port. + port: 8000 + # -- Service type. + type: ClusterIP + # -- Service node port. + # Only used if `metricsService.type` is `NodePort`. + nodePort: + # -- Service annotations. + annotations: {} + + networkPolicy: + + # -- When true, use a NetworkPolicy to allow ingress to the webhook + # This is useful on clusters using Calico and/or native k8s network policies in a default-deny setup. + enabled: false + + # -- A list of valid from selectors according to https://kubernetes.io/docs/concepts/services-networking/network-policies. + ingressFrom: [] + + serviceMonitor: + # -- Create a `ServiceMonitor` to collect Prometheus metrics. 
+ enabled: false + # -- Additional labels + additionalLabels: {} + # -- (string) Override namespace + namespace: ~ + # -- Interval to scrape metrics + interval: 30s + # -- Timeout if metrics can't be retrieved in given time interval + scrapeTimeout: 25s + # -- Is TLS required for endpoint + secure: false + # -- TLS Configuration for endpoint + tlsConfig: {} + # -- RelabelConfigs to apply to samples before scraping + relabelings: [] + # -- MetricRelabelConfigs to apply to samples before ingestion. + metricRelabelings: [] + + tracing: + # -- Enable tracing + enabled: false + # -- Traces receiver address + address: + # -- Traces receiver port + port: + # -- Traces receiver credentials + creds: '' + + metering: + # -- Disable metrics export + disabled: false + # -- Otel configuration, can be `prometheus` or `grpc` + config: prometheus + # -- Prometheus endpoint port + port: 8000 + # -- Otel collector endpoint + collector: '' + # -- Otel collector credentials + creds: '' + + profiling: + # -- Enable profiling + enabled: false + # -- Profiling endpoint port + port: 6060 + # -- Service type. + serviceType: ClusterIP + # -- Service node port. + # Only used if `type` is `NodePort`. + nodePort: + + # Reports controller configuration + reportsController: + + # -- Overrides features defined at the root level + featuresOverride: {} + + # -- Enable reports controller. + enabled: true + + rbac: + # -- Create RBAC resources + create: true + + serviceAccount: + # -- Service account name + name: + + # -- Annotations for the ServiceAccount + annotations: {} + # example.com/annotation: value + + coreClusterRole: + # -- Extra resource permissions to add in the core cluster role. + # This was introduced to avoid breaking change in the chart but should ideally be moved in `clusterRole.extraResources`. + # @default -- See [values.yaml](values.yaml) + extraResources: + - apiGroups: + - '*' + resources: + - '*' + verbs: + - get + - list + - watch + + clusterRole: + # -- Extra resource permissions to add in the cluster role + extraResources: [] + # - apiGroups: + # - '' + # resources: + # - pods + + image: + # -- Image registry + registry: ghcr.io + # -- Image repository + repository: kyverno/reports-controller + # -- (string) Image tag + # Defaults to appVersion in Chart.yaml if omitted + tag: ~ + # -- Image pull policy + pullPolicy: IfNotPresent + + # -- Image pull secrets + imagePullSecrets: [] + # - secretName + + # -- (int) Desired number of pods + replicas: ~ + + # -- The number of revisions to keep + revisionHistoryLimit: 10 + + # -- Additional labels to add to each pod + podLabels: {} + # example.com/label: foo + + # -- Additional annotations to add to each pod + podAnnotations: {} + # example.com/annotation: foo + + # -- Deployment update strategy. + # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + # @default -- See [values.yaml](values.yaml) + updateStrategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 40% + type: RollingUpdate + + # -- Optional priority class + priorityClassName: '' + + # -- Change `apiPriorityAndFairness` to `true` if you want to insulate the API calls made by Kyverno reports controller activities. + # This will help ensure Kyverno reports stability in busy clusters. + # Ref: https://kubernetes.io/docs/concepts/cluster-administration/flow-control/ + apiPriorityAndFairness: false + + # -- Priority level configuration. + # The block is directly forwarded into the priorityLevelConfiguration, so you can use whatever specification you want. 
+ # ref: https://kubernetes.io/docs/concepts/cluster-administration/flow-control/#prioritylevelconfiguration + # @default -- See [values.yaml](values.yaml) + priorityLevelConfigurationSpec: + type: Limited + limited: + nominalConcurrencyShares: 10 + limitResponse: + queuing: + queueLengthLimit: 50 + type: Queue + + # -- Change `hostNetwork` to `true` when you want the pod to share its host's network namespace. + # Useful for situations like when you end up dealing with a custom CNI over Amazon EKS. + # Update the `dnsPolicy` accordingly as well to suit the host network mode. + hostNetwork: false + + # -- `dnsPolicy` determines the manner in which DNS resolution happens in the cluster. + # In case of `hostNetwork: true`, usually, the `dnsPolicy` is suitable to be `ClusterFirstWithHostNet`. + # For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy. + dnsPolicy: ClusterFirst + + # -- Extra arguments passed to the container on the command line + extraArgs: {} + + # -- Additional container environment variables. + extraEnvVars: [] + # Example setting proxy + # extraEnvVars: + # - name: HTTPS_PROXY + # value: 'https://proxy.example.com:3128' + + resources: + # -- Pod resource limits + limits: + memory: 128Mi + # -- Pod resource requests + requests: + cpu: 100m + memory: 64Mi + + # -- Node labels for pod assignment + nodeSelector: {} + + # -- List of node taints to tolerate + tolerations: [] + + antiAffinity: + # -- Pod antiAffinities toggle. + # Enabled by default but can be disabled if you want to schedule pods to the same node. + enabled: true + + # -- Pod anti affinity constraints. + # @default -- See [values.yaml](values.yaml) + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - reports-controller + topologyKey: kubernetes.io/hostname + + # -- Pod affinity constraints. + podAffinity: {} + + # -- Node affinity constraints. + nodeAffinity: {} + + # -- Topology spread constraints. + topologySpreadConstraints: [] + + # -- Security context for the pod + podSecurityContext: {} + + # -- Security context for the containers + securityContext: + runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + + podDisruptionBudget: + # -- Enable PodDisruptionBudget. + # Will always be enabled if replicas > 1. This non-declarative behavior should ideally be avoided, but changing it now would be breaking. + enabled: false + # -- Configures the minimum available pods for disruptions. + # Cannot be used if `maxUnavailable` is set. + minAvailable: 1 + # -- Configures the maximum unavailable pods for disruptions. + # Cannot be used if `minAvailable` is set. + maxUnavailable: + + # -- A writable volume to use for the TUF root initialization. + tufRootMountPath: /.sigstore + + # -- Volume to be mounted in pods for TUF/cosign work. 
+ sigstoreVolume: + emptyDir: {} + + caCertificates: + # -- CA certificates to use with Kyverno deployments + # This value is expected to be one large string of CA certificates + data: ~ + # -- Volume to be mounted for CA certificates + # Not used when `.Values.reportsController.caCertificates.data` is defined + volume: {} + # Example to use hostPath: + # hostPath: + # path: /etc/pki/tls/ca-certificates.crt + # type: File + + + metricsService: + # -- Create service. + create: true + # -- Service port. + # Metrics server will be exposed at this port. + port: 8000 + # -- Service type. + type: ClusterIP + # -- (string) Service node port. + # Only used if `type` is `NodePort`. + nodePort: ~ + # -- Service annotations. + annotations: {} + + networkPolicy: + + # -- When true, use a NetworkPolicy to allow ingress to the webhook + # This is useful on clusters using Calico and/or native k8s network policies in a default-deny setup. + enabled: false + + # -- A list of valid from selectors according to https://kubernetes.io/docs/concepts/services-networking/network-policies. + ingressFrom: [] + + serviceMonitor: + # -- Create a `ServiceMonitor` to collect Prometheus metrics. + enabled: false + # -- Additional labels + additionalLabels: {} + # -- (string) Override namespace + namespace: ~ + # -- Interval to scrape metrics + interval: 30s + # -- Timeout if metrics can't be retrieved in given time interval + scrapeTimeout: 25s + # -- Is TLS required for endpoint + secure: false + # -- TLS Configuration for endpoint + tlsConfig: {} + # -- RelabelConfigs to apply to samples before scraping + relabelings: [] + # -- MetricRelabelConfigs to apply to samples before ingestion. + metricRelabelings: [] + + tracing: + # -- Enable tracing + enabled: false + # -- (string) Traces receiver address + address: ~ + # -- (string) Traces receiver port + port: ~ + # -- (string) Traces receiver credentials + creds: ~ + + metering: + # -- Disable metrics export + disabled: false + # -- Otel configuration, can be `prometheus` or `grpc` + config: prometheus + # -- Prometheus endpoint port + port: 8000 + # -- (string) Otel collector endpoint + collector: ~ + # -- (string) Otel collector credentials + creds: ~ + + profiling: + # -- Enable profiling + enabled: false + # -- Profiling endpoint port + port: 6060 + # -- Service type. + serviceType: ClusterIP + # -- Service node port. + # Only used if `type` is `NodePort`. + nodePort: +