
Commit

Add topology spread constraint to kvdb specifically so kvdb can be force-distributed across a specific zonal designation without the label selectors

Signed-off-by: jmcshane <jmcshane@purestorage.com>
jmcshane committed Feb 24, 2025
1 parent 303cc37 commit fc71df1
Showing 4 changed files with 114 additions and 72 deletions.
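For context, a minimal sketch of how a caller might use the field this commit adds: the constraints live directly on the kvdb spec, so the internal kvdb pods can be spread per zone without deriving label selectors from the cluster placement rules. This sketch is not part of the commit; the import paths and the topology.kubernetes.io/zone key are assumptions.

package main

import (
	corev1 "github.com/libopenstorage/operator/pkg/apis/core/v1" // import path assumed from the repo layout in this diff
	v1 "k8s.io/api/core/v1"
)

// buildCluster sketches a StorageCluster whose internal kvdb pods should be
// spread one per zone; only the fields relevant to this commit are shown.
func buildCluster() *corev1.StorageCluster {
	return &corev1.StorageCluster{
		Spec: corev1.StorageClusterSpec{
			Kvdb: &corev1.KvdbSpec{
				Internal: true,
				TopologySpreadConstraints: []v1.TopologySpreadConstraint{{
					TopologyKey: "topology.kubernetes.io/zone", // assumed topology key
					MaxSkew: 1,
					WhenUnsatisfiable: v1.DoNotSchedule,
				}},
			},
		},
	}
}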
4 changes: 4 additions & 0 deletions drivers/storage/portworx/deployment.go
@@ -242,6 +242,10 @@ func (p *portworx) GetKVDBPodSpec(
		NodeName: nodeName,
	}

	// Copy any topology spread constraints configured on the kvdb spec onto the kvdb pod spec.
	if cluster.Spec.Kvdb.TopologySpreadConstraints != nil && len(cluster.Spec.Kvdb.TopologySpreadConstraints) > 0 {
		podSpec.TopologySpreadConstraints = cluster.Spec.Kvdb.TopologySpreadConstraints
	}

	if t.cluster.Spec.Placement != nil {
		if len(t.cluster.Spec.Placement.Tolerations) > 0 {
			podSpec.Tolerations = make([]v1.Toleration, 0)
76 changes: 76 additions & 0 deletions drivers/storage/portworx/deployment_test.go
@@ -4700,6 +4700,82 @@ func TestPodSpecWithClusterIDOverwritten(t *testing.T) {
	assert.ElementsMatch(t, expectedArgs, actual.Containers[0].Args)
}

func TestPodSpecWithKvdbTopologySpreadConstraints(t *testing.T) {
	coreops.SetInstance(coreops.New(fakek8sclient.NewSimpleClientset()))
	nodeName := "testNode"

	cluster := &corev1.StorageCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name: "px-cluster",
			Namespace: "kube-system",
		},
		Spec: corev1.StorageClusterSpec{
			Image: "portworx/oci-monitor:2.0.3.4",
			Placement: &corev1.PlacementSpec{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchExpressions: []v1.NodeSelectorRequirement{
									{
										Key: "px/enabled",
										Operator: v1.NodeSelectorOpNotIn,
										Values: []string{"false"},
									},
									{
										Key: "kubernetes.io/os",
										Operator: v1.NodeSelectorOpIn,
										Values: []string{"linux"},
									},
									{
										Key: "node-role.kubernetes.io/master",
										Operator: v1.NodeSelectorOpDoesNotExist,
									},
								},
							},
						},
					},
				},
			},
			Kvdb: &corev1.KvdbSpec{
				Internal: true,
				TopologySpreadConstraints: []v1.TopologySpreadConstraint{{
					TopologyKey: "kubernetes.io/zone",
					MaxSkew: 1,
					WhenUnsatisfiable: v1.DoNotSchedule,
					LabelSelector: &metav1.LabelSelector{
						MatchLabels: map[string]string{
							"kvdb": "true",
						},
					},
				}},
			},
			SecretsProvider: stringPtr("k8s"),
			CommonConfig: corev1.CommonConfig{
				Storage: &corev1.StorageSpec{
					UseAll: boolPtr(true),
					ForceUseDisks: boolPtr(true),
				},
				Env: []v1.EnvVar{
					{
						Name: "TEST_KEY",
						Value: "TEST_VALUE",
					},
				},
				RuntimeOpts: map[string]string{
					"op1": "10",
				},
			},
		},
	}

	driver := portworx{}

	actual, err := driver.GetKVDBPodSpec(cluster, nodeName)
	require.NoError(t, err)
	assert.Len(t, actual.TopologySpreadConstraints, 1)
}

func getExpectedPodSpecFromDaemonset(t *testing.T, fileName string) *v1.PodSpec {
	json, err := os.ReadFile(fileName)
	assert.NoError(t, err)
4 changes: 4 additions & 0 deletions pkg/apis/core/v1/storagecluster.go
@@ -366,6 +366,10 @@ type KvdbSpec struct {
	// to authenticate with the kvdb. It could have the username/password
	// for basic auth, certificate information or ACL token.
	AuthSecret string `json:"authSecret,omitempty"`

	// TopologySpreadConstraints enable the kvdb cluster to be distributed
	// based on a topology spread constraint spec
	TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
}

// NetworkSpec contains network information
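Since the new field is tagged topologySpreadConstraints,omitempty, it surfaces under spec.kvdb.topologySpreadConstraints on the StorageCluster resource. Below is a small sketch, not part of the commit, of how a populated KvdbSpec serializes; the import paths and topology key are assumptions.

package main

import (
	"encoding/json"
	"fmt"

	corev1 "github.com/libopenstorage/operator/pkg/apis/core/v1" // import path assumed from the repo layout in this diff
	v1 "k8s.io/api/core/v1"
)

func main() {
	kvdb := corev1.KvdbSpec{
		Internal: true,
		TopologySpreadConstraints: []v1.TopologySpreadConstraint{{
			TopologyKey: "topology.kubernetes.io/zone", // assumed topology key
			MaxSkew: 1,
			WhenUnsatisfiable: v1.DoNotSchedule,
		}},
	}

	out, _ := json.MarshalIndent(kvdb, "", "  ")
	// Prints the kvdb spec with a "topologySpreadConstraints" array alongside the
	// existing kvdb fields, which is the shape end users set on the CR.
	fmt.Println(string(out))
}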

0 comments on commit fc71df1
