Commit
[PWX-35604] Update KubeSchedulerConfiguration to api v1 for k8s version 1.25+ (#1395)

* Update KubeSchedulerConfiguration to api v1 for k8s version 1.25+

* Addressing comments
olavangad-px committed Apr 30, 2024
1 parent 663c64e commit fc1d9b9
Showing 16 changed files with 3,057 additions and 102 deletions.
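For orientation, the sketch below illustrates the version gate this commit introduces: parse the cluster's Kubernetes version with github.com/hashicorp/go-version (the dependency added to stork.go) and choose between kubescheduler.config.k8s.io/v1 (1.25+) and v1beta3 (1.23–1.24). This is a hedged, standalone illustration, not code from the commit; the helper name pickSchedulerAPIVersion and the main function are assumptions for the example.

package main

import (
    "fmt"

    "github.com/hashicorp/go-version"
)

// pickSchedulerAPIVersion is an illustrative helper (not in the commit): it
// returns the kubescheduler.config.k8s.io API version to render for a given
// cluster version, mirroring the gate added in createStorkConfigMap.
func pickSchedulerAPIVersion(clusterVersion string) (string, error) {
    cur, err := version.NewVersion(clusterVersion)
    if err != nil {
        return "", err
    }
    minV1, err := version.NewVersion("1.25.0")
    if err != nil {
        return "", err
    }
    if cur.GreaterThanOrEqual(minV1) {
        return "kubescheduler.config.k8s.io/v1", nil
    }
    return "kubescheduler.config.k8s.io/v1beta3", nil
}

func main() {
    for _, v := range []string{"1.23.0", "1.25.0", "1.29.0"} {
        apiVersion, err := pickSchedulerAPIVersion(v)
        if err != nil {
            panic(err)
        }
        fmt.Printf("k8s %s -> %s\n", v, apiVersion)
    }
}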
171 changes: 111 additions & 60 deletions pkg/controller/storagecluster/stork.go
@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"github.com/hashicorp/go-version"
"reflect"
"sort"
"strings"
@@ -25,8 +26,10 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
schedcomp "k8s.io/component-base/config/v1alpha1"
schedconfig "k8s.io/kube-scheduler/config/v1beta3"
schedconfigapi "k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3"
schedconfig "k8s.io/kube-scheduler/config/v1"
schedconfigbeta3 "k8s.io/kube-scheduler/config/v1beta3"
schedconfigapi "k8s.io/kubernetes/pkg/scheduler/apis/config/v1"
schedconfigapibeta3 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3"
"sigs.k8s.io/yaml"
)

@@ -51,6 +54,9 @@ const (
// K8S scheduler policy decoder changed in this version.
// https://github.com/kubernetes/kubernetes/blob/release-1.21/pkg/scheduler/scheduler.go#L306
policyDecoderChangeVersion = "1.17.0"
// kubescheduler.config.k8s.io/v1 is GA'ed only in k8s 1.25
// so for 1.23 <= ver < 1.25 we should continue using kubescheduler.config.k8s.io/v1beta3
minK8sVersionForKubeSchedulerV1Configuration = "1.25.0"
)

const (
@@ -232,76 +238,121 @@ func (c *Controller) createStorkConfigMap(
clusterNamespace string,
ownerRef *metav1.OwnerReference,
) error {
policy := SchedulerPolicy{
Kind: "Policy",
APIVersion: "kubescheduler.config.k8s.io/v1",
Extenders: []SchedulerExtender{
{
URLPrefix: fmt.Sprintf(
"http://%s.%s:%d",
storkServiceName, clusterNamespace, storkServicePort,
),
FilterVerb: "filter",
PrioritizeVerb: "prioritize",
Weight: 5,
EnableHTTPS: false,
NodeCacheCapable: false,
HTTPTimeout: metav1.Duration{Duration: 5 * time.Minute}.Nanoseconds(),
},
},
// KubeSchedulerConfiguration is beta in 1.23 and GA in 1.25
k8sMinVersionForKubeSchedulerV1Configuration, err := version.NewVersion(minK8sVersionForKubeSchedulerV1Configuration)
if err != nil {
logrus.WithError(err).Errorf("Could not parse version %s", k8sMinVersionForKubeSchedulerV1Configuration)
return err
}

leaderElect := true
schedulerName := storkDeploymentName
kubeSchedulerConfiguration := schedconfig.KubeSchedulerConfiguration{
TypeMeta: metav1.TypeMeta{
Kind: "KubeSchedulerConfiguration",
APIVersion: "kubescheduler.config.k8s.io/v1beta3",
},
LeaderElection: schedcomp.LeaderElectionConfiguration{
LeaderElect: &leaderElect,
ResourceNamespace: clusterNamespace,
ResourceName: storkSchedDeploymentName,
LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
RetryPeriod: metav1.Duration{Duration: 2 * time.Second},
ResourceLock: "leases",
},
Profiles: []schedconfig.KubeSchedulerProfile{
{
SchedulerName: &schedulerName,
},
},
Extenders: []schedconfig.Extender{
{
URLPrefix: fmt.Sprintf(
"http://%s.%s:%d",
storkServiceName, clusterNamespace, storkServicePort,
),
FilterVerb: "filter",
PrioritizeVerb: "prioritize",
Weight: 5,
EnableHTTPS: false,
NodeCacheCapable: false,
HTTPTimeout: metav1.Duration{Duration: 5 * time.Minute},
},
},
leaderElectionConfiguration := schedcomp.LeaderElectionConfiguration{
LeaderElect: &leaderElect,
ResourceNamespace: clusterNamespace,
ResourceName: storkSchedDeploymentName,
LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
RetryPeriod: metav1.Duration{Duration: 2 * time.Second},
ResourceLock: "leases",
}

// Auto fill the default configuration params
schedconfigapi.SetDefaults_KubeSchedulerConfiguration(&kubeSchedulerConfiguration)

var policyConfig []byte
var dataKey string
var err error
if c.kubernetesVersion.GreaterThanOrEqual(k8sutil.MinVersionForKubeSchedulerConfiguration) {
policyConfig, err = yaml.Marshal(kubeSchedulerConfiguration)
if err != nil {
logrus.WithError(err).Errorf("Could not encode policy object")
return err
if c.kubernetesVersion.GreaterThanOrEqual(k8sMinVersionForKubeSchedulerV1Configuration) {
// enter this branch when k8s ver >= 1.25
kubeSchedulerConfigurationV1 := schedconfig.KubeSchedulerConfiguration{
TypeMeta: metav1.TypeMeta{
Kind: "KubeSchedulerConfiguration",
APIVersion: "kubescheduler.config.k8s.io/v1",
},
LeaderElection: leaderElectionConfiguration,
Profiles: []schedconfig.KubeSchedulerProfile{
{
SchedulerName: &schedulerName,
},
},
Extenders: []schedconfig.Extender{
{
URLPrefix: fmt.Sprintf(
"http://%s.%s:%d",
storkServiceName, clusterNamespace, storkServicePort,
),
FilterVerb: "filter",
PrioritizeVerb: "prioritize",
Weight: 5,
EnableHTTPS: false,
NodeCacheCapable: false,
HTTPTimeout: metav1.Duration{Duration: 5 * time.Minute},
},
},
}
// Auto fill the default configuration params
schedconfigapi.SetDefaults_KubeSchedulerConfiguration(&kubeSchedulerConfigurationV1)
policyConfig, err = yaml.Marshal(kubeSchedulerConfigurationV1)
if err != nil {
logrus.WithError(err).Errorf("Could not encode policy object")
return err
}
} else {
// enter this branch when 1.23 <= k8s ver < 1.25
kubeSchedulerConfigurationV1Beta := schedconfigbeta3.KubeSchedulerConfiguration{
TypeMeta: metav1.TypeMeta{
Kind: "KubeSchedulerConfiguration",
APIVersion: "kubescheduler.config.k8s.io/v1beta3",
},
LeaderElection: leaderElectionConfiguration,
Profiles: []schedconfigbeta3.KubeSchedulerProfile{
{
SchedulerName: &schedulerName,
},
},
Extenders: []schedconfigbeta3.Extender{
{
URLPrefix: fmt.Sprintf(
"http://%s.%s:%d",
storkServiceName, clusterNamespace, storkServicePort,
),
FilterVerb: "filter",
PrioritizeVerb: "prioritize",
Weight: 5,
EnableHTTPS: false,
NodeCacheCapable: false,
HTTPTimeout: metav1.Duration{Duration: 5 * time.Minute},
},
},
}

// Auto fill the default configuration params
schedconfigapibeta3.SetDefaults_KubeSchedulerConfiguration(&kubeSchedulerConfigurationV1Beta)
policyConfig, err = yaml.Marshal(kubeSchedulerConfigurationV1Beta)
if err != nil {
logrus.WithError(err).Errorf("Could not encode policy object")
return err
}
}
dataKey = "stork-config.yaml"
} else {
// enter this branch when k8s ver < 1.23
policy := SchedulerPolicy{
Kind: "Policy",
APIVersion: "kubescheduler.config.k8s.io/v1",
Extenders: []SchedulerExtender{
{
URLPrefix: fmt.Sprintf(
"http://%s.%s:%d",
storkServiceName, clusterNamespace, storkServicePort,
),
FilterVerb: "filter",
PrioritizeVerb: "prioritize",
Weight: 5,
EnableHTTPS: false,
NodeCacheCapable: false,
HTTPTimeout: metav1.Duration{Duration: 5 * time.Minute}.Nanoseconds(),
},
},
}
policyConfig, err = json.Marshal(policy)
if err != nil {
logrus.WithError(err).Errorf("Could not encode policy object")
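As a reading aid for the hunk above (not part of the diff), here is a minimal sketch of the 1.25+ path: build a v1 KubeSchedulerConfiguration, let the scheduler package fill in defaults, and marshal it to YAML for the stork ConfigMap. It assumes the same vendored packages this repository already carries; the placeholder extender URL and scheduler name are illustrative values, not the controller's.

package main

import (
    "fmt"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    schedconfig "k8s.io/kube-scheduler/config/v1"
    schedconfigapi "k8s.io/kubernetes/pkg/scheduler/apis/config/v1"
    "sigs.k8s.io/yaml"
)

func main() {
    schedulerName := "stork"
    cfg := schedconfig.KubeSchedulerConfiguration{
        TypeMeta: metav1.TypeMeta{
            Kind:       "KubeSchedulerConfiguration",
            APIVersion: "kubescheduler.config.k8s.io/v1",
        },
        Profiles: []schedconfig.KubeSchedulerProfile{
            {SchedulerName: &schedulerName},
        },
        Extenders: []schedconfig.Extender{
            {
                // Placeholder URL; the controller builds this from the stork
                // service name, cluster namespace, and service port.
                URLPrefix:      "http://stork-service.kube-system:8099",
                FilterVerb:     "filter",
                PrioritizeVerb: "prioritize",
                Weight:         5,
                HTTPTimeout:    metav1.Duration{Duration: 5 * time.Minute},
            },
        },
    }

    // Fill in the remaining defaults the same way the controller does.
    schedconfigapi.SetDefaults_KubeSchedulerConfiguration(&cfg)

    out, err := yaml.Marshal(cfg)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out))
}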
125 changes: 83 additions & 42 deletions pkg/controller/storagecluster/stork_test.go
@@ -23,7 +23,8 @@ import (
fakek8sclient "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/record"
schedcomp "k8s.io/component-base/config/v1alpha1"
schedconfig "k8s.io/kube-scheduler/config/v1beta3"
schedconfig "k8s.io/kube-scheduler/config/v1"
schedconfigbeta3 "k8s.io/kube-scheduler/config/v1beta3"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"

@@ -42,6 +43,7 @@ func TestStorkInstallation(t *testing.T) {
testStorkInstallation(t, "1.18.0")
testStorkInstallation(t, "1.23.0")
testStorkInstallation(t, "1.25.0")
testStorkInstallation(t, "1.29.0")
}

func testStorkInstallation(t *testing.T, k8sVersionStr string) {
@@ -108,57 +110,96 @@ func testStorkInstallation(t *testing.T, k8sVersionStr string) {
// Stork ConfigMap
leaderElect := true
schedulerName := storkDeploymentName
expectedKubeSchedulerConfiguration := schedconfig.KubeSchedulerConfiguration{
TypeMeta: metav1.TypeMeta{
Kind: "KubeSchedulerConfiguration",
APIVersion: "kubescheduler.config.k8s.io/v1beta3",
},
LeaderElection: schedcomp.LeaderElectionConfiguration{
LeaderElect: &leaderElect,
ResourceNamespace: "kube-test",
ResourceName: storkSchedDeploymentName,
LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
RetryPeriod: metav1.Duration{Duration: 2 * time.Second},
ResourceLock: "leases",
},
Profiles: []schedconfig.KubeSchedulerProfile{
{
SchedulerName: &schedulerName,
},
},
Extenders: []schedconfig.Extender{
{
URLPrefix: fmt.Sprintf(
"http://%s.%s:%d",
storkServiceName, "kube-test", storkServicePort,
),
FilterVerb: "filter",
PrioritizeVerb: "prioritize",
Weight: 5,
EnableHTTPS: false,
NodeCacheCapable: false,
HTTPTimeout: metav1.Duration{Duration: 5 * time.Minute},
},
},
leaderElectionConfiguration := schedcomp.LeaderElectionConfiguration{
LeaderElect: &leaderElect,
ResourceNamespace: "kube-test",
ResourceName: storkSchedDeploymentName,
LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
RetryPeriod: metav1.Duration{Duration: 2 * time.Second},
ResourceLock: "leases",
}

var actualKubeSchedulerConfiguration = schedconfig.KubeSchedulerConfiguration{}
storkConfigMap := &v1.ConfigMap{}
err = testutil.Get(k8sClient, storkConfigMap, storkConfigMapName, cluster.Namespace)
require.NoError(t, err)
require.Equal(t, storkConfigMapName, storkConfigMap.Name)
require.Equal(t, cluster.Namespace, storkConfigMap.Namespace)
require.Len(t, storkConfigMap.OwnerReferences, 1)
require.Equal(t, cluster.Name, storkConfigMap.OwnerReferences[0].Name)

err = yaml.Unmarshal([]byte(storkConfigMap.Data["stork-config.yaml"]), &actualKubeSchedulerConfiguration)
k8sMinVersionForKubeSchedulerV1Configuration, err := version.NewVersion(minK8sVersionForKubeSchedulerV1Configuration)
require.NoError(t, err)

require.True(t, reflect.DeepEqual(expectedKubeSchedulerConfiguration.TypeMeta, actualKubeSchedulerConfiguration.TypeMeta))
require.Equal(t, expectedKubeSchedulerConfiguration.Profiles[0].SchedulerName, actualKubeSchedulerConfiguration.Profiles[0].SchedulerName)
require.True(t, reflect.DeepEqual(expectedKubeSchedulerConfiguration.LeaderElection, actualKubeSchedulerConfiguration.LeaderElection))
require.True(t, reflect.DeepEqual(expectedKubeSchedulerConfiguration.Extenders[0], actualKubeSchedulerConfiguration.Extenders[0]))
if k8sVersion.GreaterThanOrEqual(k8sMinVersionForKubeSchedulerV1Configuration) {
expectedKubeSchedulerConfiguration := schedconfig.KubeSchedulerConfiguration{
TypeMeta: metav1.TypeMeta{
Kind: "KubeSchedulerConfiguration",
APIVersion: "kubescheduler.config.k8s.io/v1",
},
LeaderElection: leaderElectionConfiguration,
Profiles: []schedconfig.KubeSchedulerProfile{
{
SchedulerName: &schedulerName,
},
},
Extenders: []schedconfig.Extender{
{
URLPrefix: fmt.Sprintf(
"http://%s.%s:%d",
storkServiceName, "kube-test", storkServicePort,
),
FilterVerb: "filter",
PrioritizeVerb: "prioritize",
Weight: 5,
EnableHTTPS: false,
NodeCacheCapable: false,
HTTPTimeout: metav1.Duration{Duration: 5 * time.Minute},
},
},
}
var actualKubeSchedulerConfiguration = schedconfig.KubeSchedulerConfiguration{}
err = yaml.Unmarshal([]byte(storkConfigMap.Data["stork-config.yaml"]), &actualKubeSchedulerConfiguration)
require.NoError(t, err)

require.True(t, reflect.DeepEqual(expectedKubeSchedulerConfiguration.TypeMeta, actualKubeSchedulerConfiguration.TypeMeta))
require.Equal(t, expectedKubeSchedulerConfiguration.Profiles[0].SchedulerName, actualKubeSchedulerConfiguration.Profiles[0].SchedulerName)
require.True(t, reflect.DeepEqual(expectedKubeSchedulerConfiguration.LeaderElection, actualKubeSchedulerConfiguration.LeaderElection))
require.True(t, reflect.DeepEqual(expectedKubeSchedulerConfiguration.Extenders[0], actualKubeSchedulerConfiguration.Extenders[0]))
} else {
expectedKubeSchedulerConfiguration := schedconfigbeta3.KubeSchedulerConfiguration{
TypeMeta: metav1.TypeMeta{
Kind: "KubeSchedulerConfiguration",
APIVersion: "kubescheduler.config.k8s.io/v1beta3",
},
LeaderElection: leaderElectionConfiguration,
Profiles: []schedconfigbeta3.KubeSchedulerProfile{
{
SchedulerName: &schedulerName,
},
},
Extenders: []schedconfigbeta3.Extender{
{
URLPrefix: fmt.Sprintf(
"http://%s.%s:%d",
storkServiceName, "kube-test", storkServicePort,
),
FilterVerb: "filter",
PrioritizeVerb: "prioritize",
Weight: 5,
EnableHTTPS: false,
NodeCacheCapable: false,
HTTPTimeout: metav1.Duration{Duration: 5 * time.Minute},
},
},
}
var actualKubeSchedulerConfiguration = schedconfigbeta3.KubeSchedulerConfiguration{}
err = yaml.Unmarshal([]byte(storkConfigMap.Data["stork-config.yaml"]), &actualKubeSchedulerConfiguration)
require.NoError(t, err)

require.True(t, reflect.DeepEqual(expectedKubeSchedulerConfiguration.TypeMeta, actualKubeSchedulerConfiguration.TypeMeta))
require.Equal(t, expectedKubeSchedulerConfiguration.Profiles[0].SchedulerName, actualKubeSchedulerConfiguration.Profiles[0].SchedulerName)
require.True(t, reflect.DeepEqual(expectedKubeSchedulerConfiguration.LeaderElection, actualKubeSchedulerConfiguration.LeaderElection))
require.True(t, reflect.DeepEqual(expectedKubeSchedulerConfiguration.Extenders[0], actualKubeSchedulerConfiguration.Extenders[0]))
}
} else {
// Stork ConfigMap
expectedPolicy := SchedulerPolicy{
21 changes: 21 additions & 0 deletions vendor/k8s.io/kube-scheduler/config/v1/doc.go

Some generated files are not rendered by default.
