From e068e156ce32ba837d042a98a5f2419e2bacb0b5 Mon Sep 17 00:00:00 2001 From: Travis Nielsen Date: Wed, 5 Feb 2025 12:08:48 -0700 Subject: [PATCH 1/5] osd: enable osd ok-to-stop checks on single node for three osds If there are at least three OSDs on a single node, we should treat it as a potential production cluster and perform the ok-to-stop checks during reconcile. Otherwise, it may cause instability during upgrades on single-node clusters. Signed-off-by: Travis Nielsen --- deploy/examples/cluster-test.yaml | 2 ++ pkg/daemon/ceph/client/upgrade.go | 15 --------------- pkg/daemon/ceph/client/upgrade_test.go | 4 ++-- 3 files changed, 4 insertions(+), 17 deletions(-) diff --git a/deploy/examples/cluster-test.yaml b/deploy/examples/cluster-test.yaml index bde8e182e82b..f2012328ef16 100644 --- a/deploy/examples/cluster-test.yaml +++ b/deploy/examples/cluster-test.yaml @@ -20,6 +20,8 @@ spec: mon: count: 1 allowMultiplePerNode: true + # test environments can skip ok-to-stop checks during upgrades + skipUpgradeChecks: true mgr: count: 1 allowMultiplePerNode: true diff --git a/pkg/daemon/ceph/client/upgrade.go b/pkg/daemon/ceph/client/upgrade.go index 8de07b5d1b62..e214320e1294 100644 --- a/pkg/daemon/ceph/client/upgrade.go +++ b/pkg/daemon/ceph/client/upgrade.go @@ -344,21 +344,6 @@ func OSDUpdateShouldCheckOkToStop(context *clusterd.Context, clusterInfo *Cluste return false } - // aio means all in one - aio, err := allOSDsSameHost(context, clusterInfo) - if err != nil { - if errors.Is(err, errNoHostInCRUSH) { - logger.Warning("the CRUSH map has no 'host' entries so not performing ok-to-stop checks") - return false - } - logger.Warningf("failed to determine if all osds are running on the same host. will check if OSDs are ok-to-stop. if all OSDs are running on one host %s. %v", userIntervention, err) - return true - } - if aio { - logger.Warningf("all OSDs are running on the same host. not performing upgrade check. 
running in best-effort") - return false - } - return true } diff --git a/pkg/daemon/ceph/client/upgrade_test.go b/pkg/daemon/ceph/client/upgrade_test.go index 45329915cce4..2c72b0dae7e8 100644 --- a/pkg/daemon/ceph/client/upgrade_test.go +++ b/pkg/daemon/ceph/client/upgrade_test.go @@ -323,7 +323,7 @@ func TestOSDUpdateShouldCheckOkToStop(t *testing.T) { t.Run("1 node with 3 OSDs", func(t *testing.T) { lsOutput = fake.OsdLsOutput(3) treeOutput = fake.OsdTreeOutput(1, 3) - assert.False(t, OSDUpdateShouldCheckOkToStop(context, clusterInfo)) + assert.True(t, OSDUpdateShouldCheckOkToStop(context, clusterInfo)) }) t.Run("2 nodes with 1 OSD each", func(t *testing.T) { @@ -349,6 +349,6 @@ func TestOSDUpdateShouldCheckOkToStop(t *testing.T) { t.Run("0 nodes with down OSDs", func(t *testing.T) { lsOutput = fake.OsdLsOutput(3) treeOutput = fake.OsdTreeOutput(0, 1) - assert.False(t, OSDUpdateShouldCheckOkToStop(context, clusterInfo)) + assert.True(t, OSDUpdateShouldCheckOkToStop(context, clusterInfo)) }) } From 278f45b0bc4cbf84d12a3d8427ecd689901e8b7b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Feb 2025 13:00:51 +0000 Subject: [PATCH 2/5] build(deps): bump golangci/golangci-lint-action from 6.3.1 to 6.5.0 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.3.1 to 6.5.0. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/2e788936b09dd82dc280e845628a40d2ba6b204c...2226d7cb06a077cd73e56eedd38eecad18e5d837) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/golangci-lint.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/golangci-lint.yaml b/.github/workflows/golangci-lint.yaml index bd348960be33..a853988a7058 100644 --- a/.github/workflows/golangci-lint.yaml +++ b/.github/workflows/golangci-lint.yaml @@ -31,7 +31,7 @@ jobs: with: go-version: "1.23" - name: golangci-lint - uses: golangci/golangci-lint-action@2e788936b09dd82dc280e845628a40d2ba6b204c # v6.3.1 + uses: golangci/golangci-lint-action@2226d7cb06a077cd73e56eedd38eecad18e5d837 # v6.5.0 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. version: v1.62 From 45e76204bf0b91b7cc3ae66ebfcd9051ee9f5854 Mon Sep 17 00:00:00 2001 From: subhamkrai Date: Thu, 20 Feb 2025 12:23:16 +0530 Subject: [PATCH 3/5] ci: wait for pod before exec into pod keystone integration test has recently started to fail as the pod with label osc-admin-admin was not in running status and we're trying to exec into the pod.
Signed-off-by: subhamkrai --- tests/integration/ceph_base_keystone_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/ceph_base_keystone_test.go b/tests/integration/ceph_base_keystone_test.go index 7fc019fbc786..8b30d6cc07fa 100644 --- a/tests/integration/ceph_base_keystone_test.go +++ b/tests/integration/ceph_base_keystone_test.go @@ -982,6 +982,8 @@ func runSwiftE2ETest(t *testing.T, helper *clients.TestClient, k8sh *utils.K8sHe } func testInOpenStackClient(t *testing.T, sh *utils.K8sHelper, namespace string, projectname string, username string, expectNoError bool, command ...string) { + err := sh.WaitForLabeledPodsToRun("app=osc-admin-admin", namespace) + assert.NoError(t, err) commandLine := []string{"exec", "-n", namespace, "deployment/osc-" + projectname + "-" + username, "--"} From 0e395f0a6786a4f6fcde39b1648edab96fa23061 Mon Sep 17 00:00:00 2001 From: subhamkrai Date: Wed, 12 Feb 2025 02:42:18 +0530 Subject: [PATCH 4/5] manifest: update default ceph version to v19.2.1 with ceph release v19.2.1 updating it to be the default version in rook.
Signed-off-by: subhamkrai --- Documentation/CRDs/Cluster/ceph-cluster-crd.md | 12 ++++++------ Documentation/CRDs/Cluster/host-cluster.md | 6 +++--- Documentation/CRDs/Cluster/pvc-cluster.md | 6 +++--- Documentation/CRDs/Cluster/stretch-cluster.md | 2 +- Documentation/Upgrade/ceph-upgrade.md | 10 +++++----- deploy/charts/rook-ceph-cluster/values.yaml | 6 +++--- deploy/examples/cluster-external-management.yaml | 2 +- deploy/examples/cluster-on-local-pvc.yaml | 2 +- deploy/examples/cluster-on-pvc.yaml | 2 +- deploy/examples/cluster-stretched-aws.yaml | 2 +- deploy/examples/cluster-stretched.yaml | 2 +- deploy/examples/cluster.yaml | 4 ++-- deploy/examples/images.txt | 2 +- design/ceph/ceph-cluster-cleanup.md | 2 +- tests/manifests/test-cluster-on-pvc-encrypted.yaml | 2 +- 15 files changed, 31 insertions(+), 31 deletions(-) diff --git a/Documentation/CRDs/Cluster/ceph-cluster-crd.md b/Documentation/CRDs/Cluster/ceph-cluster-crd.md index 3cfd60d2dd0e..51b4e4fc1e63 100755 --- a/Documentation/CRDs/Cluster/ceph-cluster-crd.md +++ b/Documentation/CRDs/Cluster/ceph-cluster-crd.md @@ -26,7 +26,7 @@ Settings can be specified at the global level to apply to the cluster as a whole * `external`: * `enable`: if `true`, the cluster will not be managed by Rook but via an external entity. This mode is intended to connect to an existing cluster. In this case, Rook will only consume the external cluster. However, Rook will be able to deploy various daemons in Kubernetes such as object gateways, mds and nfs if an image is provided and will refuse otherwise. If this setting is enabled **all** the other options will be ignored except `cephVersion.image` and `dataDirHostPath`. See [external cluster configuration](external-cluster/external-cluster.md). If `cephVersion.image` is left blank, Rook will refuse the creation of extra CRs like object, file and nfs. * `cephVersion`: The version information for launching the ceph daemons. - * `image`: The image used for running the ceph daemons. 
For example, `quay.io/ceph/ceph:v19.2.0`. For more details read the [container images section](#ceph-container-images). + * `image`: The image used for running the ceph daemons. For example, `quay.io/ceph/ceph:v19.2.1`. For more details read the [container images section](#ceph-container-images). For the latest ceph images, see the [Ceph DockerHub](https://hub.docker.com/r/ceph/ceph/tags/). To ensure a consistent version of the image is running across all nodes in the cluster, it is recommended to use a very specific image version. Tags also exist that would give the latest version, but they are only recommended for test environments. For example, the tag `v19` will be updated each time a new Squid build is released. @@ -124,8 +124,8 @@ These are general purpose Ceph container with all necessary daemons and dependen | -------------------- | --------------------------------------------------------- | | vRELNUM | Latest release in this series (e.g., **v19** = Squid) | | vRELNUM.Y | Latest stable release in this stable series (e.g., v19.2) | -| vRELNUM.Y.Z | A specific release (e.g., v19.2.0) | -| vRELNUM.Y.Z-YYYYMMDD | A specific build (e.g., v19.2.0-20240927) | +| vRELNUM.Y.Z | A specific release (e.g., v19.2.1) | +| vRELNUM.Y.Z-YYYYMMDD | A specific build (e.g., v19.2.1-20250202) | A specific will contain a specific release of Ceph as well as security fixes from the Operating System. 
@@ -431,7 +431,7 @@ metadata: namespace: rook-ceph spec: cephVersion: - image: quay.io/ceph/ceph:v19.2.0 + image: quay.io/ceph/ceph:v19.2.1 dataDirHostPath: /var/lib/rook mon: count: 3 @@ -538,7 +538,7 @@ metadata: namespace: rook-ceph spec: cephVersion: - image: quay.io/ceph/ceph:v19.2.0 + image: quay.io/ceph/ceph:v19.2.1 dataDirHostPath: /var/lib/rook mon: count: 3 @@ -668,7 +668,7 @@ kubectl -n rook-ceph get CephCluster -o yaml deviceClasses: - name: hdd version: - image: quay.io/ceph/ceph:v19.2.0 + image: quay.io/ceph/ceph:v19.2.1 version: 16.2.6-0 conditions: - lastHeartbeatTime: "2021-03-02T21:22:11Z" diff --git a/Documentation/CRDs/Cluster/host-cluster.md b/Documentation/CRDs/Cluster/host-cluster.md index 9d26088632bf..a9cd511a2b6d 100644 --- a/Documentation/CRDs/Cluster/host-cluster.md +++ b/Documentation/CRDs/Cluster/host-cluster.md @@ -22,7 +22,7 @@ metadata: spec: cephVersion: # see the "Cluster Settings" section below for more details on which image of ceph to run - image: quay.io/ceph/ceph:v19.2.0 + image: quay.io/ceph/ceph:v19.2.1 dataDirHostPath: /var/lib/rook mon: count: 3 @@ -49,7 +49,7 @@ metadata: namespace: rook-ceph spec: cephVersion: - image: quay.io/ceph/ceph:v19.2.0 + image: quay.io/ceph/ceph:v19.2.1 dataDirHostPath: /var/lib/rook mon: count: 3 @@ -101,7 +101,7 @@ metadata: namespace: rook-ceph spec: cephVersion: - image: quay.io/ceph/ceph:v19.2.0 + image: quay.io/ceph/ceph:v19.2.1 dataDirHostPath: /var/lib/rook mon: count: 3 diff --git a/Documentation/CRDs/Cluster/pvc-cluster.md b/Documentation/CRDs/Cluster/pvc-cluster.md index 651bd17ecfa4..6485e1e8d5c1 100644 --- a/Documentation/CRDs/Cluster/pvc-cluster.md +++ b/Documentation/CRDs/Cluster/pvc-cluster.md @@ -18,7 +18,7 @@ metadata: namespace: rook-ceph spec: cephVersion: - image: quay.io/ceph/ceph:v19.2.0 + image: quay.io/ceph/ceph:v19.2.1 dataDirHostPath: /var/lib/rook mon: count: 3 @@ -72,7 +72,7 @@ spec: requests: storage: 10Gi cephVersion: - image: quay.io/ceph/ceph:v19.2.0 + image: 
quay.io/ceph/ceph:v19.2.1 allowUnsupported: false dashboard: enabled: true @@ -128,7 +128,7 @@ metadata: namespace: rook-ceph spec: cephVersion: - image: quay.io/ceph/ceph:v19.2.0 + image: quay.io/ceph/ceph:v19.2.1 dataDirHostPath: /var/lib/rook mon: count: 3 diff --git a/Documentation/CRDs/Cluster/stretch-cluster.md b/Documentation/CRDs/Cluster/stretch-cluster.md index d35fa7cc7117..bc45459cd007 100644 --- a/Documentation/CRDs/Cluster/stretch-cluster.md +++ b/Documentation/CRDs/Cluster/stretch-cluster.md @@ -34,7 +34,7 @@ spec: - name: b - name: c cephVersion: - image: quay.io/ceph/ceph:v19.2.0 + image: quay.io/ceph/ceph:v19.2.1 allowUnsupported: true # Either storageClassDeviceSets or the storage section can be specified for creating OSDs. # This example uses all devices for simplicity. diff --git a/Documentation/Upgrade/ceph-upgrade.md b/Documentation/Upgrade/ceph-upgrade.md index 6ec671f33d73..efc1c77d4876 100644 --- a/Documentation/Upgrade/ceph-upgrade.md +++ b/Documentation/Upgrade/ceph-upgrade.md @@ -39,7 +39,7 @@ Official Ceph container images can be found on [Quay](https://quay.io/repository These images are tagged in a few ways: -* The most explicit form of tags are full-ceph-version-and-build tags (e.g., `v19.2.0-20240927`). +* The most explicit form of tags are full-ceph-version-and-build tags (e.g., `v19.2.1-20250202`). These tags are recommended for production clusters, as there is no possibility for the cluster to be heterogeneous with respect to the version of Ceph running in containers. * Ceph major version tags (e.g., `v19`) are useful for development and test clusters so that the @@ -56,7 +56,7 @@ CephCluster CRD (`spec.cephVersion.image`). 
```console ROOK_CLUSTER_NAMESPACE=rook-ceph -NEW_CEPH_IMAGE='quay.io/ceph/ceph:v19.2.0-20240927' +NEW_CEPH_IMAGE='quay.io/ceph/ceph:v19.2.1-20250202' kubectl -n $ROOK_CLUSTER_NAMESPACE patch CephCluster $ROOK_CLUSTER_NAMESPACE --type=merge -p "{\"spec\": {\"cephVersion\": {\"image\": \"$NEW_CEPH_IMAGE\"}}}" ``` @@ -68,7 +68,7 @@ employed by the new Rook operator release. Employing an outdated Ceph version wi in unexpected behaviour. ```console -kubectl -n rook-ceph set image deploy/rook-ceph-tools rook-ceph-tools=quay.io/ceph/ceph:v19.2.0-20240927 +kubectl -n rook-ceph set image deploy/rook-ceph-tools rook-ceph-tools=quay.io/ceph/ceph:v19.2.1-20250202 ``` #### **3. Wait for the pod updates** @@ -86,9 +86,9 @@ Confirm the upgrade is completed when the versions are all on the desired Ceph v kubectl -n $ROOK_CLUSTER_NAMESPACE get deployment -l rook_cluster=$ROOK_CLUSTER_NAMESPACE -o jsonpath='{range .items[*]}{"ceph-version="}{.metadata.labels.ceph-version}{"\n"}{end}' | sort | uniq This cluster is not yet finished: ceph-version=v18.2.4-0 - ceph-version=v19.2.0-0 + ceph-version=v19.2.1-0 This cluster is finished: - ceph-version=v19.2.0-0 + ceph-version=v19.2.1-0 ``` #### **4. Verify cluster health** diff --git a/deploy/charts/rook-ceph-cluster/values.yaml b/deploy/charts/rook-ceph-cluster/values.yaml index 0259c08512c1..86c8dc56b2dd 100644 --- a/deploy/charts/rook-ceph-cluster/values.yaml +++ b/deploy/charts/rook-ceph-cluster/values.yaml @@ -25,7 +25,7 @@ toolbox: # -- Enable Ceph debugging pod deployment. 
See [toolbox](../Troubleshooting/ceph-toolbox.md) enabled: false # -- Toolbox image, defaults to the image used by the Ceph cluster - image: #quay.io/ceph/ceph:v19.2.0 + image: #quay.io/ceph/ceph:v19.2.1 # -- Toolbox tolerations tolerations: [] # -- Toolbox affinity @@ -94,9 +94,9 @@ cephClusterSpec: # v18 is Reef, v19 is Squid # RECOMMENDATION: In production, use a specific version tag instead of the general v18 flag, which pulls the latest release and could result in different # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/. - # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v19.2.0-20240927 + # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v19.2.1-20250202 # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities - image: quay.io/ceph/ceph:v19.2.0 + image: quay.io/ceph/ceph:v19.2.1 # Whether to allow unsupported versions of Ceph. Currently Reef and Squid are supported. # Future versions such as Tentacle (v20) would require this to be set to `true`. # Do not set to true in production. 
diff --git a/deploy/examples/cluster-external-management.yaml b/deploy/examples/cluster-external-management.yaml index 777e6419bf45..290e58456154 100644 --- a/deploy/examples/cluster-external-management.yaml +++ b/deploy/examples/cluster-external-management.yaml @@ -20,4 +20,4 @@ spec: dataDirHostPath: /var/lib/rook # providing an image is required, if you want to create other CRs (rgw, mds, nfs) cephVersion: - image: quay.io/ceph/ceph:v19.2.0 # Should match external cluster version + image: quay.io/ceph/ceph:v19.2.1 # Should match external cluster version diff --git a/deploy/examples/cluster-on-local-pvc.yaml b/deploy/examples/cluster-on-local-pvc.yaml index f38f1f21e8ad..a8e2df3de17b 100644 --- a/deploy/examples/cluster-on-local-pvc.yaml +++ b/deploy/examples/cluster-on-local-pvc.yaml @@ -174,7 +174,7 @@ spec: requests: storage: 10Gi cephVersion: - image: quay.io/ceph/ceph:v19.2.0 + image: quay.io/ceph/ceph:v19.2.1 allowUnsupported: false skipUpgradeChecks: false continueUpgradeAfterChecksEvenIfNotHealthy: false diff --git a/deploy/examples/cluster-on-pvc.yaml b/deploy/examples/cluster-on-pvc.yaml index 522277ab402b..5933351954b2 100644 --- a/deploy/examples/cluster-on-pvc.yaml +++ b/deploy/examples/cluster-on-pvc.yaml @@ -34,7 +34,7 @@ spec: requests: storage: 10Gi cephVersion: - image: quay.io/ceph/ceph:v19.2.0 + image: quay.io/ceph/ceph:v19.2.1 allowUnsupported: false skipUpgradeChecks: false continueUpgradeAfterChecksEvenIfNotHealthy: false diff --git a/deploy/examples/cluster-stretched-aws.yaml b/deploy/examples/cluster-stretched-aws.yaml index c2ac50725a5c..eb742ea00c39 100644 --- a/deploy/examples/cluster-stretched-aws.yaml +++ b/deploy/examples/cluster-stretched-aws.yaml @@ -45,7 +45,7 @@ spec: mgr: count: 2 cephVersion: - image: quay.io/ceph/ceph:v19.2.0 + image: quay.io/ceph/ceph:v19.2.1 allowUnsupported: true skipUpgradeChecks: false continueUpgradeAfterChecksEvenIfNotHealthy: false diff --git a/deploy/examples/cluster-stretched.yaml 
b/deploy/examples/cluster-stretched.yaml index d45fbac1c1ea..677a8e53d442 100644 --- a/deploy/examples/cluster-stretched.yaml +++ b/deploy/examples/cluster-stretched.yaml @@ -39,7 +39,7 @@ spec: mgr: count: 2 cephVersion: - image: quay.io/ceph/ceph:v19.2.0 + image: quay.io/ceph/ceph:v19.2.1 allowUnsupported: true skipUpgradeChecks: false continueUpgradeAfterChecksEvenIfNotHealthy: false diff --git a/deploy/examples/cluster.yaml b/deploy/examples/cluster.yaml index b82bfd8fba4a..4158051602b9 100644 --- a/deploy/examples/cluster.yaml +++ b/deploy/examples/cluster.yaml @@ -19,9 +19,9 @@ spec: # v18 is Reef, v19 is Squid # RECOMMENDATION: In production, use a specific version tag instead of the general v19 flag, which pulls the latest release and could result in different # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/. - # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v19.2.0-20240927 + # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v19.2.1-20250202 # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities - image: quay.io/ceph/ceph:v19.2.0 + image: quay.io/ceph/ceph:v19.2.1 # Whether to allow unsupported versions of Ceph. Currently Reef and Squid are supported. # Future versions such as Tentacle (v20) would require this to be set to `true`. # Do not set to true in production. 
diff --git a/deploy/examples/images.txt b/deploy/examples/images.txt index a54eecc83df6..53cf473bf27c 100644 --- a/deploy/examples/images.txt +++ b/deploy/examples/images.txt @@ -1,6 +1,6 @@ docker.io/rook/ceph:master gcr.io/k8s-staging-sig-storage/objectstorage-sidecar:v20240513-v0.1.0-35-gefb3255 - quay.io/ceph/ceph:v19.2.0 + quay.io/ceph/ceph:v19.2.1 quay.io/ceph/cosi:v0.1.2 quay.io/cephcsi/cephcsi:v3.13.0 quay.io/csiaddons/k8s-sidecar:v0.11.0 diff --git a/design/ceph/ceph-cluster-cleanup.md b/design/ceph/ceph-cluster-cleanup.md index ba534bbcf079..c50b8d6d0ccb 100644 --- a/design/ceph/ceph-cluster-cleanup.md +++ b/design/ceph/ceph-cluster-cleanup.md @@ -34,7 +34,7 @@ metadata: namespace: rook-ceph spec: cephVersion: - image: quay.io/ceph/ceph:v19.2.0 + image: quay.io/ceph/ceph:v19.2.1 dataDirHostPath: /var/lib/rook mon: count: 3 diff --git a/tests/manifests/test-cluster-on-pvc-encrypted.yaml b/tests/manifests/test-cluster-on-pvc-encrypted.yaml index 2ebdeb6d1925..941d9f2fa2bc 100644 --- a/tests/manifests/test-cluster-on-pvc-encrypted.yaml +++ b/tests/manifests/test-cluster-on-pvc-encrypted.yaml @@ -14,7 +14,7 @@ spec: requests: storage: 5Gi cephVersion: - image: quay.io/ceph/ceph:v19.2.0 + image: quay.io/ceph/ceph:v19.2.1 dashboard: enabled: false network: From 659d5746337cc757d25ed46d72f8040cf1c74160 Mon Sep 17 00:00:00 2001 From: Viraj Jadhav <45794663+VirajJadhav@users.noreply.github.com> Date: Fri, 21 Feb 2025 18:38:23 +0530 Subject: [PATCH 5/5] doc: typo for imagePullSecrets Signed-off-by: Viraj Jadhav --- .../Getting-Started/Prerequisites/authenticated-registry.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/Getting-Started/Prerequisites/authenticated-registry.md b/Documentation/Getting-Started/Prerequisites/authenticated-registry.md index c382db2b3acd..424310797089 100644 --- a/Documentation/Getting-Started/Prerequisites/authenticated-registry.md +++ 
b/Documentation/Getting-Started/Prerequisites/authenticated-registry.md @@ -3,7 +3,7 @@ title: Authenticated Container Registries --- If you want to use an image from authenticated docker registry (e.g. for image cache/mirror), you'll need to -add an `imagePullSecret` to all relevant service accounts. See the next section for the required service accounts. +add `imagePullSecrets` to all relevant service accounts. See the next section for the required service accounts. The whole process is described in the [official kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account).