diff --git a/drivers/storage/portworx/component/portworx_basic.go b/drivers/storage/portworx/component/portworx_basic.go
index ac365ffd5..5c5d7ed9b 100644
--- a/drivers/storage/portworx/component/portworx_basic.go
+++ b/drivers/storage/portworx/component/portworx_basic.go
@@ -1,6 +1,7 @@
 package component
 
 import (
+	"bytes"
 	"context"
 	"fmt"
 	"os"
@@ -19,7 +20,6 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/client-go/tools/record"
-	"k8s.io/kubernetes/pkg/apis/core"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	pxutil "github.com/libopenstorage/operator/drivers/storage/portworx/util"
@@ -46,6 +46,7 @@ const (
 
 var (
 	defaultPxSaTokenExpirationSeconds = int64(12 * 60 * 60)
+	rootCaCrtPath                     = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
 )
 
 type portworxBasic struct {
@@ -557,23 +558,23 @@ func (c *portworxBasic) createAndMaintainPxSaTokenSecret(cluster *corev1.Storage
 			return err
 		}
 	}
-	needRefresh, err := isTokenRefreshRequired(secret)
+	caCrtUpdated, err := updateCaCrtIfNeeded(secret)
 	if err != nil {
 		return err
 	}
-	if needRefresh {
-		if err := c.refreshTokenSecret(secret, cluster, ownerRef); err != nil {
-			return fmt.Errorf("failed to refresh the token secret for px container: %w", err)
+	tokenRefreshed, err := refreshTokenIfNeeded(secret, cluster)
+	if err != nil {
+		return err
+	}
+	if caCrtUpdated || tokenRefreshed {
+		if err := k8sutil.CreateOrUpdateSecret(c.k8sClient, secret, ownerRef); err != nil {
+			return err
 		}
 	}
 	return nil
 }
 
 func (c *portworxBasic) createTokenSecret(cluster *corev1.StorageCluster, ownerRef *metav1.OwnerReference) (*v1.Secret, error) {
-	rootCaCrt, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt")
-	if err != nil && !os.IsNotExist(err) {
-		return nil, fmt.Errorf("error reading k8s cluster certificate located inside the pod at /var/run/secrets/kubernetes.io/serviceaccount/ca.crt: %w", err)
-	}
 	secret := &v1.Secret{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:            pxutil.PortworxServiceAccountTokenSecretName,
@@ -582,8 +583,7 @@ func (c *portworxBasic) createTokenSecret(cluster *corev1.StorageCluster, ownerR
 		},
 		Type: v1.SecretTypeOpaque,
 		Data: map[string][]byte{
-			core.ServiceAccountRootCAKey:    rootCaCrt,
-			core.ServiceAccountNamespaceKey: []byte(cluster.Namespace),
+			v1.ServiceAccountNamespaceKey: []byte(cluster.Namespace),
 		},
 	}
 	if err := k8sutil.CreateOrUpdateSecret(c.k8sClient, secret, ownerRef); err != nil {
@@ -592,36 +592,30 @@ func (c *portworxBasic) createTokenSecret(cluster *corev1.StorageCluster, ownerR
 	return secret, nil
 }
 
-func (c *portworxBasic) refreshTokenSecret(secret *v1.Secret, cluster *corev1.StorageCluster, ownerRef *metav1.OwnerReference) error {
-	expirationSeconds, err := getPxSaTokenExpirationSeconds(cluster)
-	if err != nil {
-		return err
-	}
-	secret.Data[PxSaTokenRefreshTimeKey] = []byte(time.Now().UTC().Add(time.Duration(expirationSeconds/2) * time.Second).Format(time.RFC3339))
-	newToken, err := generatePxSaToken(cluster, expirationSeconds)
-	if err != nil {
-		return err
+func updateCaCrtIfNeeded(secret *v1.Secret) (bool, error) {
+	rootCaCrt, err := os.ReadFile(rootCaCrtPath)
+	if err != nil && !os.IsNotExist(err) {
+		return false, fmt.Errorf("error reading k8s cluster certificate located inside the pod at %s: %w", rootCaCrtPath, err)
 	}
-	secret.Data[core.ServiceAccountTokenKey] = newToken
-	err = k8sutil.CreateOrUpdateSecret(c.k8sClient, secret, ownerRef)
-	if err != nil {
-		return err
+	if len(secret.Data) == 0 || !bytes.Equal(secret.Data[v1.ServiceAccountRootCAKey], rootCaCrt) {
+		secret.Data[v1.ServiceAccountRootCAKey] = rootCaCrt
+		return true, nil
 	}
-	return nil
+	return false, nil
 }
 
-func generatePxSaToken(cluster *corev1.StorageCluster, expirationSeconds int64) ([]byte, error) {
-	tokenRequest := &authv1.TokenRequest{
-		Spec: authv1.TokenRequestSpec{
-			Audiences:         []string{"px"},
-			ExpirationSeconds: &expirationSeconds,
-		},
-	}
-	tokenResp, err := coreops.Instance().CreateToken(pxutil.PortworxServiceAccountName(cluster), cluster.Namespace, tokenRequest)
+func refreshTokenIfNeeded(secret *v1.Secret, cluster *corev1.StorageCluster) (bool, error) {
+	needRefreshToken, err := isTokenRefreshRequired(secret)
 	if err != nil {
-		return nil, fmt.Errorf("error creating token from k8s: %w", err)
+		return false, err
+	}
+	if needRefreshToken {
+		if err := refreshToken(secret, cluster); err != nil {
+			return false, fmt.Errorf("failed to refresh the token secret for px container: %w", err)
+		}
+		return true, nil
 	}
-	return []byte(tokenResp.Status.Token), nil
+	return false, nil
 }
 
 func isTokenRefreshRequired(secret *v1.Secret) (bool, error) {
@@ -638,6 +632,36 @@ func isTokenRefreshRequired(secret *v1.Secret) (bool, error) {
 	return false, nil
 }
 
+func refreshToken(secret *v1.Secret, cluster *corev1.StorageCluster) error {
+	expirationSeconds, err := getPxSaTokenExpirationSeconds(cluster)
+	if err != nil {
+		return err
+	}
+	newToken, err := generatePxSaToken(cluster, expirationSeconds)
+	if err != nil {
+		return err
+	}
+	secret.Data[v1.ServiceAccountTokenKey] = []byte(newToken.Status.Token)
+	// The ServiceAccount token expiration time we request might get overwritten by the maxExpirationSeconds defined by the k8s token RESTful server,
+	// so our token refresh mechanism has to honor this server-side limit.
+	// https://github.com/kubernetes/kubernetes/blob/79fee524e65ddc7c1448d5d2554c6f91233cf98d/pkg/registry/core/serviceaccount/storage/token.go#L208
+	secret.Data[PxSaTokenRefreshTimeKey] = []byte(time.Now().UTC().Add(time.Duration(*newToken.Spec.ExpirationSeconds/2) * time.Second).Format(time.RFC3339))
+	return nil
+}
+
+func generatePxSaToken(cluster *corev1.StorageCluster, expirationSeconds int64) (*authv1.TokenRequest, error) {
+	tokenRequest := &authv1.TokenRequest{
+		Spec: authv1.TokenRequestSpec{
+			ExpirationSeconds: &expirationSeconds,
+		},
+	}
+	tokenResp, err := coreops.Instance().CreateToken(pxutil.PortworxServiceAccountName(cluster), cluster.Namespace, tokenRequest)
+	if err != nil {
+		return nil, fmt.Errorf("error creating token from k8s: %w", err)
+	}
+	return tokenResp, nil
+}
+
 func (c *portworxBasic) createPortworxKVDBService(
 	cluster *corev1.StorageCluster,
 	ownerRef *metav1.OwnerReference,
@@ -726,6 +750,11 @@ func getPxSaTokenExpirationSeconds(cluster *corev1.StorageCluster) (int64, error
 	return defaultPxSaTokenExpirationSeconds, nil
 }
 
+// SetRootCertPath sets the path of the k8s cluster root certificate, for the purpose of testing
+func SetRootCertPath(path string) {
+	rootCaCrtPath = path
+}
+
 // RegisterPortworxBasicComponent registers the Portworx Basic component
 func RegisterPortworxBasicComponent() {
 	Register(PortworxBasicComponentName, &portworxBasic{})
diff --git a/drivers/storage/portworx/component/telemetry.go b/drivers/storage/portworx/component/telemetry.go
index 4a1908813..c3b9b7285 100644
--- a/drivers/storage/portworx/component/telemetry.go
+++ b/drivers/storage/portworx/component/telemetry.go
@@ -1,6 +1,7 @@
 package component
 
 import (
+	"context"
 	cryptoTls "crypto/tls"
 	"crypto/x509"
 	"encoding/asn1"
@@ -842,13 +843,12 @@ func (t *telemetry) createDeploymentTelemetryRegistration(
 
 	// have a valid cluster UUID? lets validate the Telemetry SSL cert
 	if cuuid := cluster.Status.ClusterUID; cuuid != "" {
-		if certBytes, err := t.getTelemetrySSLCert(deployment.Namespace); err != nil {
-			logrus.WithError(err).Errorf("failed to get telemetry SSL cert")
-		} else if err2 := t.validateTelemetrySSLCert(certBytes, cuuid); err2 == errInvalidTelemetryCert {
+		sec, err := t.validateTelemetrySSLCert(deployment.Namespace, cuuid)
+		if err == errInvalidTelemetryCert {
 			logrus.Warn("refreshing telemetry SSL cert")
-			t.refreshTelemetrySSLCert(deployment)
-		} else if err2 != nil {
-			logrus.WithError(err2).Errorf("failed to validate telemetry SSL cert")
+			t.refreshTelemetrySSLCert(sec)
+		} else if err != nil {
+			logrus.WithError(err).Errorf("failed to validate telemetry SSL cert")
 		}
 	}
 
@@ -1023,33 +1023,35 @@ func (t *telemetry) createDeploymentTelemetryCollectorV2(
 	return nil
 }
 
-// getTelemetrySSLCert returns the telemetry SSL cert
-func (t *telemetry) getTelemetrySSLCert(namespace string) ([]byte, error) {
+// validateTelemetrySSLCert validates the telemetry SSL certificate.
+// - note: cert's Pseudonym needs to match the cluster UUID
+func (t *telemetry) validateTelemetrySSLCert(namespace, cuuid string) (*v1.Secret, error) {
+	if namespace == "" || cuuid == "" {
+		return nil, fmt.Errorf("invalid namespace or cluster UUID")
+	}
+
 	var sec v1.Secret
 	logrus.Debugf("Inspecting secret %s/%s for SSL cert", namespace, pxutil.TelemetryCertName)
 	err := k8sutil.GetSecret(t.k8sClient, pxutil.TelemetryCertName, namespace, &sec)
 	if err != nil {
 		return nil, err
 	}
-	return sec.Data["cert"], nil
-}
 
-// validateTelemetrySSLCert validates the telemetry SSL certificate.
-// - note: cert's Psaudonym needs to match the cluster UUID
-func (t *telemetry) validateTelemetrySSLCert(certBytes []byte, cuuid string) error {
-	if len(certBytes) <= 0 || cuuid == "" {
-		return nil
+	certBytes := sec.Data["cert"]
+	if len(certBytes) <= 0 {
+		logrus.Warnf("SSL cert not found in secret %s/%s", namespace, pxutil.TelemetryCertName)
+		return &sec, nil
 	}
 
 	block, _ := pem.Decode(certBytes)
 	if block == nil {
-		return fmt.Errorf("failed to decode SSL certificate")
+		return &sec, fmt.Errorf("failed to decode SSL certificate")
 	}
 	cert, err := x509.ParseCertificate(block.Bytes)
 	if err != nil {
-		return err
+		return &sec, err
 	} else if cert == nil {
-		return fmt.Errorf("failed to parse SSL certificate")
+		return &sec, fmt.Errorf("failed to parse SSL certificate")
 	}
 
 	// find Pseudonym in Subject names
@@ -1059,38 +1061,43 @@ func (t *telemetry) validateTelemetrySSLCert(certBytes []byte, cuuid string) err
 			var ok bool
 			// quick sanity check!
 			if pseudonym, ok = v.Value.(string); !ok {
-				return fmt.Errorf("SSL cert Pseudonym is not a string")
+				return &sec, fmt.Errorf("SSL cert Pseudonym is not a string")
 			}
 			break
 		}
 	}
 
 	if pseudonym == "" {
 		logrus.Errorf("SSL cert Pseudonym not found")
-		return errInvalidTelemetryCert
+		return &sec, errInvalidTelemetryCert
 	}
 	if pseudonym != cuuid {
 		logrus.Errorf("SSL cert Pseudonym %s does not match cluster UUID %s", pseudonym, cuuid)
-		return errInvalidTelemetryCert
+		return &sec, errInvalidTelemetryCert
 	}
 
-	logrus.Debugf("SSL cert Pseudonym %s matches cluster UUID", pseudonym)
-	return nil
+	logrus.Tracef("SSL cert Pseudonym %s matches cluster UUID", pseudonym)
+	return &sec, nil
 }
 
 // refreshTelemetrySSLCert deletes the telemetry SSL cert secret and telemetry-registration PODs
-func (t *telemetry) refreshTelemetrySSLCert(dep *appsv1.Deployment) {
-	if dep == nil {
+func (t *telemetry) refreshTelemetrySSLCert(sec *v1.Secret) {
+	if sec == nil {
+		return
+	} else if sec.Name != pxutil.TelemetryCertName {
+		logrus.Errorf("invalid secret name %s/%s (expected %s)", sec.Namespace, sec.Name, pxutil.TelemetryCertName)
 		return
 	}
-	logrus.Warnf("refreshTelemetrySSLCert - deleting telemetry SSL cert secret %s/%s", dep.Namespace, pxutil.TelemetryCertName)
-	err := k8sutil.DeleteSecret(t.k8sClient, pxutil.TelemetryCertName, dep.Namespace, dep.OwnerReferences...)
+
+	logrus.Warnf("refreshTelemetrySSLCert - deleting telemetry SSL cert secret %s/%s", sec.Namespace, sec.Name)
+	err := t.k8sClient.Delete(context.TODO(), sec)
 	if err != nil {
-		logrus.WithError(err).Warnf("failed to delete secret %s/%s", dep.Namespace, pxutil.TelemetryCertName)
+		logrus.WithError(err).Errorf("failed to delete secret %s/%s", sec.Namespace, sec.Name)
+		return
 	}
 
 	logrus.Warnf("refreshTelemetrySSLCert - deleting POD labeled role=%s", DeploymentNameTelemetryRegistration)
-	err = k8sutil.DeletePodsByLabel(t.k8sClient, map[string]string{"role": DeploymentNameTelemetryRegistration}, dep.Namespace)
+	err = k8sutil.DeletePodsByLabel(t.k8sClient, map[string]string{"role": DeploymentNameTelemetryRegistration}, sec.Namespace)
 	if err != nil {
 		logrus.WithError(err).Warnf("failed to delete px-telemetry-registration POD")
 	}
diff --git a/drivers/storage/portworx/components_test.go b/drivers/storage/portworx/components_test.go
index b1375022f..6d4be9606 100644
--- a/drivers/storage/portworx/components_test.go
+++ b/drivers/storage/portworx/components_test.go
@@ -4,7 +4,9 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"io/fs"
 	"math/rand"
+	"os"
 	"strconv"
 	"strings"
 	"testing"
@@ -58,6 +60,8 @@ import (
 	testutil "github.com/libopenstorage/operator/pkg/util/test"
 )
 
+const fakeRootCertPath = "/tmp/ca.crt"
+
 // For some reason simpleClientset doesn't work with createToken, erroring out with serviceaccounts "" not found.
 // Thus wrap the simpleClientset with MockCoreOps.
 func setUpMockCoreOps(mockCtrl *gomock.Controller, clientset *fakek8sclient.Clientset) *mockcore.MockOps {
@@ -65,9 +69,13 @@ func setUpMockCoreOps(mockCtrl *gomock.Controller, clientset *fakek8sclient.Clie
 	coreops.SetInstance(mockCoreOps)
 	simpleClientset := coreops.New(clientset)
 
+	defaultTokenExpirationSeconds := int64(12 * 60 * 60)
 	mockCoreOps.EXPECT().
 		CreateToken(gomock.Any(), gomock.Any(), gomock.Any()).
 		Return(&authv1.TokenRequest{
+			Spec: authv1.TokenRequestSpec{
+				ExpirationSeconds: &defaultTokenExpirationSeconds,
+			},
 			Status: authv1.TokenRequestStatus{
 				Token: "xxxx",
 			},
@@ -87,6 +95,19 @@ func setUpMockCoreOps(mockCtrl *gomock.Controller, clientset *fakek8sclient.Clie
 	return mockCoreOps
 }
 
+// Set the root CA certificate path to a safe place for tests
+func setUpFakeRootCert(t *testing.T) {
+	component.SetRootCertPath(fakeRootCertPath)
+	rootCaCrtDir := "/tmp"
+	err := os.MkdirAll(rootCaCrtDir, fs.ModePerm)
+	require.NoError(t, err)
+	file, err := os.Create(fakeRootCertPath)
+	require.NoError(t, err)
+	file.Close()
+	err = os.WriteFile(fakeRootCertPath, []byte("test"), 0644)
+	require.NoError(t, err)
+}
+
 func TestOrderOfComponents(t *testing.T) {
 	reregisterComponents()
 	component.RegisterDisruptionBudgetComponent()
@@ -237,6 +258,7 @@ func TestBasicComponentsInstallWithPreTLSPx(t *testing.T) {
 func TestBasicComponentsInstall(t *testing.T) {
 	mockCtrl := gomock.NewController(t)
 	setUpMockCoreOps(mockCtrl, fakek8sclient.NewSimpleClientset())
+	setUpFakeRootCert(t)
 	logrus.SetLevel(logrus.TraceLevel)
 	reregisterComponents()
 	k8sClient := testutil.FakeK8sClient()
@@ -1244,12 +1266,6 @@ func TestServiceAccountTokenRefreshOnExpire(t *testing.T) {
 			Name:      "px-cluster",
 			Namespace: "kube-test",
 		},
-		Spec: corev1.StorageClusterSpec{
-			Stork: &corev1.StorkSpec{
-				Enabled: true,
-			},
-			Image: "portworx/image:2.10.1",
-		},
 	}
 	err = driver.PreInstall(cluster)
 	require.NoError(t, err)
@@ -1273,6 +1289,42 @@ func TestServiceAccountTokenRefreshOnExpire(t *testing.T) {
 	assert.True(t, updatedExpirationTime.Sub(fakeExpirationTime) > 6*time.Hour)
 }
 
+func TestUpdateServiceAccountTokenSecretCaCrt(t *testing.T) {
+	mockCtrl := gomock.NewController(t)
+	setUpMockCoreOps(mockCtrl, fakek8sclient.NewSimpleClientset())
+	setUpFakeRootCert(t)
+	reregisterComponents()
+	k8sClient := testutil.FakeK8sClient()
+	driver := portworx{}
+	err := driver.Init(k8sClient, runtime.NewScheme(), record.NewFakeRecorder(0))
+	require.NoError(t, err)
+
+	cluster := &corev1.StorageCluster{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "px-cluster",
+			Namespace: "kube-test",
+		},
+	}
+	err = driver.PreInstall(cluster)
+	require.NoError(t, err)
+
+	saTokenSecret := &v1.Secret{}
+	err = testutil.Get(k8sClient, saTokenSecret, pxutil.PortworxServiceAccountTokenSecretName, cluster.Namespace)
+	require.NoError(t, err)
+	oldCaCrt := saTokenSecret.Data[v1.ServiceAccountRootCAKey]
+
+	err = os.WriteFile(fakeRootCertPath, []byte("newtest"), 0644)
+	require.NoError(t, err)
+
+	err = driver.PreInstall(cluster)
+	require.NoError(t, err)
+	err = testutil.Get(k8sClient, saTokenSecret, pxutil.PortworxServiceAccountTokenSecretName, cluster.Namespace)
+	require.NoError(t, err)
+	newCaCrt := saTokenSecret.Data[v1.ServiceAccountRootCAKey]
+	require.NotEqual(t, oldCaCrt, newCaCrt)
+	require.Equal(t, "newtest", string(newCaCrt))
+}
+
 func TestDefaultStorageClassesWithStork(t *testing.T) {
 	mockCtrl := gomock.NewController(t)
 	versionClient := fakek8sclient.NewSimpleClientset()
diff --git a/pkg/controller/storagecluster/controller_test.go b/pkg/controller/storagecluster/controller_test.go
index 930bdd577..8452f4b29 100644
--- a/pkg/controller/storagecluster/controller_test.go
+++ b/pkg/controller/storagecluster/controller_test.go
@@ -3304,6 +3304,13 @@ func TestGarbageCollection(t *testing.T) {
 			},
 			Data: map[string]string{},
 		},
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "px-attachdriveset-lock",
+				Namespace: cluster.Namespace,
+			},
+			Data: map[string]string{},
+		},
 		{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "px-attach-driveset-lock",
@@ -3311,6 +3318,13 @@ func TestGarbageCollection(t *testing.T) {
 			},
 			Data: map[string]string{},
 		},
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "px-attachdriveset-lock",
+				Namespace: "kube-system",
+			},
+			Data: map[string]string{},
+		},
 		{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "px-bringup-queue-lockb",
diff --git a/pkg/controller/storagecluster/storagecluster.go b/pkg/controller/storagecluster/storagecluster.go
index 71eb7396d..2ca9a2731 100644
--- a/pkg/controller/storagecluster/storagecluster.go
+++ b/pkg/controller/storagecluster/storagecluster.go
@@ -988,7 +988,10 @@ func (c *Controller) gcNeeded(obj client.Object) bool {
 	}
 
 	if obj.GetObjectKind().GroupVersionKind().Kind == "ConfigMap" {
-		if obj.GetName() == "px-attach-driveset-lock" ||
+		// The px-attachdriveset-lock configmap name was changed in porx to px-attach-driveset-lock; the change
+		// is merged into PX 2.13.2 onwards and 3.0.0 onwards. Since the operator has to support up to n-2 px versions,
+		// it needs to clean up both px-attachdriveset-lock and px-attach-driveset-lock.
+		if obj.GetName() == "px-attach-driveset-lock" || obj.GetName() == "px-attachdriveset-lock" ||
 			strings.HasPrefix(obj.GetName(), "px-bringup-queue-lock") {
 			return true
 		}
diff --git a/pkg/util/test/util.go b/pkg/util/test/util.go
index 37d7457c4..0458ce7ab 100644
--- a/pkg/util/test/util.go
+++ b/pkg/util/test/util.go
@@ -5354,7 +5354,6 @@ func ValidatePodDisruptionBudget(cluster *corev1.StorageCluster, timeout, interv
 
 	// PodDisruptionBudget is supported for k8s version greater than or equal to 1.21 and operator version greater than or equal to 1.5.0
 	// Changing opVersion to 23.10.0 for PTX-23350 | TODO: add better logic with PTX-23407
-	// Smart and parallel upgrades is supported from px version 3.1.2 and operator version 24.2.0
 
 	if k8sVersion.GreaterThanOrEqual(minSupportedK8sVersionForPdb) && opVersion.GreaterThanOrEqual(opVer24_2_0) && pxVersion.GreaterThanOrEqual(pxVer3_1_2) {
 		t := func() (interface{}, bool, error) {
diff --git a/test/integration_test/basic_test.go b/test/integration_test/basic_test.go
index 417dc846d..f579a46e0 100644
--- a/test/integration_test/basic_test.go
+++ b/test/integration_test/basic_test.go
@@ -12,6 +12,7 @@ import (
 
 	"github.com/hashicorp/go-version"
 	"github.com/libopenstorage/operator/drivers/storage/portworx"
+	pxutil "github.com/libopenstorage/operator/drivers/storage/portworx/util"
 	corev1 "github.com/libopenstorage/operator/pkg/apis/core/v1"
 	k8sutil "github.com/libopenstorage/operator/pkg/util/k8s"
 	testutil "github.com/libopenstorage/operator/pkg/util/test"
@@ -27,6 +28,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/kubernetes/pkg/apis/core"
 )
 
 const (
@@ -139,6 +141,29 @@ var testStorageClusterBasicCases = []types.TestCase{
 		}),
 		TestFunc: BasicInstallWithNodeAffinity,
 	},
+	{
+		TestName:        "BasicInstallWithPxSaTokenRefresh",
+		TestrailCaseIDs: []string{"C299624"},
+		TestSpec: ci_utils.CreateStorageClusterTestSpecFunc(&corev1.StorageCluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "test-stc",
+				Annotations: map[string]string{
+					"portworx.io/host-pid": "true", // for running commands inside the px runc container from the oci-mon container
+				},
+			},
+			Spec: corev1.StorageClusterSpec{
+				CommonConfig: corev1.CommonConfig{
+					Env: []v1.EnvVar{
+						{
+							Name:  pxutil.EnvKeyPortworxServiceAccountTokenExpirationMinutes,
+							Value: "10",
+						},
+					},
+				},
+			},
+		}),
+		TestFunc: BasicInstallWithPxSaTokenRefresh,
+	},
 	{
 		TestName:        "BasicTelemetryRegression",
 		TestrailCaseIDs: []string{"C54888, C83063, C83064, C83160, C83161, C83076, C83077, C83078, C83082, C83162, C83163, C83164, C83165, C54892, C82916, C83083"},
@@ -440,6 +465,58 @@ func BasicInstallWithNodeAffinity(tc *types.TestCase) func(*testing.T) {
 	}
 }
 
+// 1. Deploy PX and verify that the px token stored in the k8s secret is correctly mounted inside the px runc container.
+// 2. The cluster spec sets the token expiration time to 10 min, which is the minimum allowed token expiration time, so the token should get refreshed after 5 min.
+// 3. Wait for 5 min. Verify the token is refreshed, correctly mounted inside the px runc container, and able to talk to the k8s api server.
+// 4. Delete the secret. Wait for 2 min. Verify the token is recreated, correctly mounted inside the px runc container, and able to talk to the k8s api server.
+func BasicInstallWithPxSaTokenRefresh(tc *types.TestCase) func(*testing.T) {
+	return func(t *testing.T) {
+		testSpec := tc.TestSpec(t)
+		cluster, ok := testSpec.(*corev1.StorageCluster)
+		require.True(t, ok)
+
+		verifyTokenFunc := func() string {
+			pxSaSecret, err := coreops.Instance().GetSecret(pxutil.PortworxServiceAccountTokenSecretName, cluster.Namespace)
+			require.NoError(t, err)
+			expectedToken := string(pxSaSecret.Data[core.ServiceAccountTokenKey])
+			require.Eventually(t, func() bool {
+				actualToken, stderr, err := ci_utils.RunPxCmd("runc exec portworx cat /var/run/secrets/kubernetes.io/serviceaccount/token")
+				require.Empty(t, stderr)
+				require.NoError(t, err)
+				return expectedToken == actualToken
+			}, 10*time.Minute, 15*time.Second, "the token inside px runc container is different from the token in the k8s secret")
+
+			stdout, stderr, err := ci_utils.RunPxCmd(fmt.Sprintf("runc exec portworx "+
+				"curl -s https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT/api/v1/namespaces/$(runc exec portworx cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)/secrets "+
+				"--header 'Authorization: Bearer %s' --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt | grep %s", expectedToken, pxutil.PortworxServiceAccountTokenSecretName))
+			errMsg := "px not able to communicate with k8s api server with the mounted service account token"
+			require.True(t, strings.Contains(stdout, pxutil.PortworxServiceAccountTokenSecretName),
+				fmt.Sprintf("the secret list returned from k8s api server does not contain %s. output: %s", pxutil.PortworxServiceAccountTokenSecretName, stdout))
output: %s", pxutil.PortworxServiceAccountTokenSecretName, stdout)) + require.Empty(t, stderr, fmt.Sprintf("%s: %s", errMsg, stderr)) + require.NoError(t, err, fmt.Sprintf("%s: %s", errMsg, err.Error())) + logrus.Infof("token is created and verified: %s", expectedToken) + return expectedToken + } + + cluster = ci_utils.DeployAndValidateStorageCluster(cluster, ci_utils.PxSpecImages, t) + + logrus.Infof("Verifying px container token...") + token := verifyTokenFunc() + + time.Sleep(time.Duration(5) * time.Minute) + logrus.Infof("Verifying auto-refreshed px runc container token...") + refreshedToken := verifyTokenFunc() + require.NotEqual(t, token, refreshedToken, "the token did not get refreshed") + + logrus.Infof("Verifying px runc container token gets recreated after manual deletion...") + err := coreops.Instance().DeleteSecret(pxutil.PortworxServiceAccountTokenSecretName, cluster.Namespace) + require.NoError(t, err) + time.Sleep(time.Duration(2) * time.Minute) + recreatedToken := verifyTokenFunc() + require.NotEqual(t, refreshedToken, recreatedToken, "the token did not get refreshed") + } +} + func BasicUpgradeStorageCluster(tc *types.TestCase) func(*testing.T) { return func(t *testing.T) { // Get the storage cluster to start with