diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 66ccd46d..fe3654a9 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -28,17 +28,6 @@ rules:
   - create
   - patch
   - update
-- apiGroups:
-  - ""
-  resources:
-  - secrets
-  verbs:
-  - create
-  - get
-  - list
-  - patch
-  - update
-  - watch
 - apiGroups:
   - batch
   resources:
diff --git a/controllers/cosmosfullnode_controller.go b/controllers/cosmosfullnode_controller.go
index 005a955f..6d293d26 100644
--- a/controllers/cosmosfullnode_controller.go
+++ b/controllers/cosmosfullnode_controller.go
@@ -45,7 +45,7 @@ type CosmosFullNodeReconciler struct {
 	cacheController  *cosmos.CacheController
 	configMapControl fullnode.ConfigMapControl
-	nodeKeyControl   fullnode.NodeKeyControl
+	nodeKeyCollector *fullnode.NodeKeyCollector
 	peerCollector    *fullnode.PeerCollector
 	podControl       fullnode.PodControl
 	pvcControl       fullnode.PVCControl
@@ -69,7 +69,7 @@ func NewFullNode(
 		cacheController:  cacheController,
 		configMapControl: fullnode.NewConfigMapControl(client),
-		nodeKeyControl:   fullnode.NewNodeKeyControl(client),
+		nodeKeyCollector: fullnode.NewNodeKeyCollector(client),
 		peerCollector:    fullnode.NewPeerCollector(client),
 		podControl:       fullnode.NewPodControl(client, cacheController),
 		pvcControl:       fullnode.NewPVCControl(client),
@@ -93,7 +93,6 @@ var (
 // Generate RBAC roles to watch and update resources. IMPORTANT!!!! All resource names must be lowercase or cluster role will not work.
 //+kubebuilder:rbac:groups="",resources=pods;persistentvolumeclaims;services;serviceaccounts;configmaps,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=roles;rolebindings,verbs=get;list;watch;create;update;patch;delete;bind;escalate
-//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch
 //+kubebuilder:rbac:groups="",resources=events,verbs=create;update;patch
 
 // Reconcile is part of the main kubernetes reconciliation loop which aims to
@@ -136,21 +135,21 @@ func (r *CosmosFullNodeReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 		errs.Append(err)
 	}
 
-	// Reconcile Secrets.
-	err = r.nodeKeyControl.Reconcile(ctx, reporter, crd)
+	// Node keys are required for peers but created in config maps. Since config maps require peers, we need to resolve node keys first and pass them to both.
+	nodeKeys, err := r.nodeKeyCollector.Collect(ctx, crd)
 	if err != nil {
 		errs.Append(err)
 	}
 
 	// Find peer information that's used downstream.
-	peers, perr := r.peerCollector.Collect(ctx, crd)
+	peers, perr := r.peerCollector.Collect(ctx, crd, nodeKeys)
 	if perr != nil {
 		peers = peers.Default()
 		errs.Append(perr)
 	}
 
 	// Reconcile ConfigMaps.
-	configCksums, err := r.configMapControl.Reconcile(ctx, reporter, crd, peers)
+	configCksums, err := r.configMapControl.Reconcile(ctx, reporter, crd, peers, nodeKeys)
 	if err != nil {
 		errs.Append(err)
 	}
diff --git a/internal/fullnode/configmap_builder.go b/internal/fullnode/configmap_builder.go
index 598dd8bd..93ea2f29 100644
--- a/internal/fullnode/configmap_builder.go
+++ b/internal/fullnode/configmap_builder.go
@@ -14,16 +14,18 @@ import (
 	"github.com/strangelove-ventures/cosmos-operator/internal/diff"
 	"github.com/strangelove-ventures/cosmos-operator/internal/kube"
 	corev1 "k8s.io/api/core/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 const (
 	configOverlayFile = "config-overlay.toml"
 	appOverlayFile    = "app-overlay.toml"
+	nodeKeyFile       = "node_key.json"
 )
 
 // BuildConfigMaps creates a ConfigMap with configuration to be mounted as files into containers.
-// Currently, the config.toml (for Comet) and app.toml (for the Cosmos SDK).
-func BuildConfigMaps(crd *cosmosv1.CosmosFullNode, peers Peers) ([]diff.Resource[*corev1.ConfigMap], error) {
+// Currently, the config.toml (for Comet), app.toml (for the Cosmos SDK) and node_key.json.
+func BuildConfigMaps(crd *cosmosv1.CosmosFullNode, peers Peers, nodeKeys NodeKeys) ([]diff.Resource[*corev1.ConfigMap], error) {
 	var (
 		buf = bufPool.Get().(*bytes.Buffer)
 		cms = make([]diff.Resource[*corev1.ConfigMap], 0, crd.Spec.Replicas)
@@ -66,6 +68,16 @@ func BuildConfigMaps(crd *cosmosv1.CosmosFullNode, peers Peers) ([]diff.Resource
 		}
 		buf.Reset()
 
+		nodeKey, ok := nodeKeys[client.ObjectKey{Name: instanceName(crd, i), Namespace: crd.Namespace}]
+
+		if !ok {
+			return nil, kube.UnrecoverableError(fmt.Errorf("node key not found for %s", instanceName(crd, i)))
+		}
+
+		nodeKeyValue := string(nodeKey.MarshaledNodeKey)
+
+		data[nodeKeyFile] = nodeKeyValue
+
 		var cm corev1.ConfigMap
 		cm.Name = instanceName(crd, i)
 		cm.Namespace = crd.Namespace
diff --git a/internal/fullnode/configmap_builder_test.go b/internal/fullnode/configmap_builder_test.go
index 0158e757..d0e3f87b 100644
--- a/internal/fullnode/configmap_builder_test.go
+++ b/internal/fullnode/configmap_builder_test.go
@@ -2,7 +2,9 @@ package fullnode
 
 import (
 	"bytes"
+	"crypto/ed25519"
 	_ "embed"
+	"encoding/json"
 	"fmt"
 	"strings"
 	"testing"
@@ -11,6 +13,7 @@ import (
 	cosmosv1 "github.com/strangelove-ventures/cosmos-operator/api/v1"
 	"github.com/strangelove-ventures/cosmos-operator/internal/test"
 	"github.com/stretchr/testify/require"
+	corev1 "k8s.io/api/core/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
@@ -30,6 +33,11 @@ var (
 	wantAppOverrides string
 )
 
+var configMapRequiredEqualKeys = []string{
+	configOverlayFile,
+	appOverlayFile,
+}
+
 func TestBuildConfigMaps(t *testing.T) {
 	t.Parallel()
 
@@ -44,7 +52,10 @@ func TestBuildConfigMaps(t *testing.T) {
 		crd.Spec.ChainSpec.Network = "testnet"
 
 		//Default starting ordinal is 0
-		cms, err := BuildConfigMaps(&crd, nil)
+		nodeKeys, err := getMockNodeKeysForCRD(crd, "")
+		require.NoError(t, err)
+
+		cms, err := BuildConfigMaps(&crd, nil, nodeKeys)
 		require.NoError(t, err)
 		require.Equal(t, crd.Spec.Replicas, int32(len(cms)))
 
@@ -73,10 +84,27 @@ func TestBuildConfigMaps(t *testing.T) {
 		require.Equal(t, fmt.Sprintf("%s-%d", crd.Name, crd.Spec.Ordinals.Start+1), cm.Name)
 
 		require.NotEmpty(t, cms[0].Object().Data)
-		require.Equal(t, cms[0].Object().Data, cms[1].Object().Data)
+
+		cms0Data := cms[0].Object().Data
+		cms1Data := cms[1].Object().Data
+
+		for _, key := range configMapRequiredEqualKeys {
+			require.Equal(t, cms0Data[key], cms1Data[key])
+		}
+
+		nodeKeysFromConfigMap := NodeKeys{}
+		for _, cmDiff := range cms {
+			currCm := cmDiff.Object()
+
+			nodeKey, nErr := getNodeKeyFromConfigMap(currCm)
+
+			require.NoError(t, nErr)
+
+			nodeKeysFromConfigMap[client.ObjectKey{Name: currCm.Name, Namespace: currCm.Namespace}] = *nodeKey
+		}
 
 		crd.Spec.Type = cosmosv1.FullNode
-		cms2, err := BuildConfigMaps(&crd, nil)
+		cms2, err := BuildConfigMaps(&crd, nil, nodeKeysFromConfigMap)
 		require.NoError(t, err)
 
 		require.Equal(t, cms, cms2)
@@ -91,7 +119,10 @@ func TestBuildConfigMaps(t *testing.T) {
 		crd.Spec.ChainSpec.Network = "testnet"
 		crd.Spec.Ordinals.Start = 2
 
-		cms, err := BuildConfigMaps(&crd, nil)
+		nodeKeys, err := getMockNodeKeysForCRD(crd, "")
+		require.NoError(t, err)
+
+		cms, err := BuildConfigMaps(&crd, nil, nodeKeys)
 		require.NoError(t, err)
 		require.Equal(t, crd.Spec.Replicas, int32(len(cms)))
 
@@ -120,10 +151,27 @@ func TestBuildConfigMaps(t *testing.T) {
 		require.Equal(t, fmt.Sprintf("%s-%d", crd.Name, crd.Spec.Ordinals.Start+1), cm.Name)
 
 		require.NotEmpty(t, cms[0].Object().Data)
-		require.Equal(t, cms[0].Object().Data, cms[1].Object().Data)
+
+		cm0Data := cms[0].Object().Data
+		cm1Data := cms[1].Object().Data
+
+		for _, key := range configMapRequiredEqualKeys {
+			require.Equal(t, cm0Data[key], cm1Data[key])
+		}
+
+		nodeKeysFromConfigMap := NodeKeys{}
+		for _, cmDiff := range cms {
+			currCm := cmDiff.Object()
+
+			nodeKey, nErr := getNodeKeyFromConfigMap(currCm)
+
+			require.NoError(t, nErr)
+
+			nodeKeysFromConfigMap[client.ObjectKey{Name: currCm.Name, Namespace: currCm.Namespace}] = *nodeKey
+		}
 
 		crd.Spec.Type = cosmosv1.FullNode
-		cms2, err := BuildConfigMaps(&crd, nil)
+		cms2, err := BuildConfigMaps(&crd, nil, nodeKeysFromConfigMap)
 		require.NoError(t, err)
 
 		require.Equal(t, cms, cms2)
@@ -135,7 +183,10 @@ func TestBuildConfigMaps(t *testing.T) {
 		crd.Name = strings.Repeat("chain", 300)
 		crd.Spec.ChainSpec.Network = strings.Repeat("network", 300)
 
-		cms, err := BuildConfigMaps(&crd, nil)
+		nodeKeys, err := getMockNodeKeysForCRD(crd, "")
+		require.NoError(t, err)
+
+		cms, err := BuildConfigMaps(&crd, nil, nodeKeys)
 		require.NoError(t, err)
 		require.NotEmpty(t, cms)
 
@@ -164,10 +215,13 @@ func TestBuildConfigMaps(t *testing.T) {
 		custom.Spec.ChainSpec.Comet.MaxInboundPeers = ptr(int32(5))
 		custom.Spec.ChainSpec.Comet.MaxOutboundPeers = ptr(int32(15))
 
+		nodeKeys, err := getMockNodeKeysForCRD(*custom, "")
+		require.NoError(t, err)
+
 		peers := Peers{
 			client.ObjectKey{Namespace: namespace, Name: "osmosis-0"}: {NodeID: "should not see me", PrivateAddress: "should not see me"},
 		}
-		cms, err := BuildConfigMaps(custom, peers)
+		cms, err := BuildConfigMaps(custom, peers, nodeKeys)
 		require.NoError(t, err)
 
 		cm := cms[0].Object()
@@ -189,7 +243,10 @@ func TestBuildConfigMaps(t *testing.T) {
 		})
 
 		t.Run("defaults", func(t *testing.T) {
-			cms, err := BuildConfigMaps(&crd, nil)
+			nodeKeys, err := getMockNodeKeysForCRD(crd, "")
+			require.NoError(t, err)
+
+			cms, err := BuildConfigMaps(&crd, nil, nodeKeys)
 			require.NoError(t, err)
 
 			cm := cms[0].Object()
@@ -217,7 +274,11 @@ func TestBuildConfigMaps(t *testing.T) {
 				client.ObjectKey{Namespace: namespace, Name: "osmosis-1"}: {NodeID: "1", PrivateAddress: "1.local:26656"},
 				client.ObjectKey{Namespace: namespace, Name: "osmosis-2"}: {NodeID: "2", PrivateAddress: "2.local:26656"},
 			}
-			cms, err := BuildConfigMaps(peerCRD, peers)
+
+			nodeKeys, err := getMockNodeKeysForCRD(*peerCRD, "")
+			require.NoError(t, err)
+
+			cms, err := BuildConfigMaps(peerCRD, peers, nodeKeys)
 			require.NoError(t, err)
 			require.Len(t, cms, 3)
 
@@ -245,7 +306,11 @@ func TestBuildConfigMaps(t *testing.T) {
 	t.Run("validator sentry", func(t *testing.T) {
 		sentry := crd.DeepCopy()
 		sentry.Spec.Type = cosmosv1.Sentry
-		cms, err := BuildConfigMaps(sentry, nil)
+
+		nodeKeys, err := getMockNodeKeysForCRD(*sentry, "")
+		require.NoError(t, err)
+
+		cms, err := BuildConfigMaps(sentry, nil, nodeKeys)
 		require.NoError(t, err)
 
 		cm := cms[0].Object()
@@ -287,7 +352,11 @@ func TestBuildConfigMaps(t *testing.T) {
 		peers := Peers{
 			client.ObjectKey{Name: "osmosis-0", Namespace: namespace}: {ExternalAddress: "should not see me"},
 		}
-		cms, err := BuildConfigMaps(overrides, peers)
+
+		nodeKeys, err := getMockNodeKeysForCRD(*overrides, "")
+		require.NoError(t, err)
+
+		cms, err := BuildConfigMaps(overrides, peers, nodeKeys)
 		require.NoError(t, err)
 
 		cm := cms[0].Object()
@@ -322,7 +391,11 @@ func TestBuildConfigMaps(t *testing.T) {
 		p2pCrd := crd.DeepCopy()
 		p2pCrd.Namespace = namespace
 		p2pCrd.Spec.Replicas = 3
-		cms, err := BuildConfigMaps(p2pCrd, peers)
+
+		nodeKeys, err := getMockNodeKeysForCRD(*p2pCrd, "")
+		require.NoError(t, err)
+
+		cms, err := BuildConfigMaps(p2pCrd, peers, nodeKeys)
 		require.NoError(t, err)
 		require.Equal(t, 3, len(cms))
 
@@ -344,7 +417,11 @@ func TestBuildConfigMaps(t *testing.T) {
 		t.Run("invalid toml", func(t *testing.T) {
 			malformed := crd.DeepCopy()
 			malformed.Spec.ChainSpec.Comet.TomlOverrides = ptr(`invalid_toml = should be invalid`)
-			_, err := BuildConfigMaps(malformed, nil)
+
+			nodeKeys, err := getMockNodeKeysForCRD(*malformed, "")
+			require.NoError(t, err)
+
+			_, err = BuildConfigMaps(malformed, nil, nodeKeys)
 			require.Error(t, err)
 
 			require.Contains(t, err.Error(), "invalid toml in comet overrides")
@@ -371,7 +448,10 @@ func TestBuildConfigMaps(t *testing.T) {
 				MinRetainBlocks: ptr(uint32(271500)),
 			}
 
-			cms, err := BuildConfigMaps(custom, nil)
+			nodeKeys, err := getMockNodeKeysForCRD(*custom, "")
+			require.NoError(t, err)
+
+			cms, err := BuildConfigMaps(custom, nil, nodeKeys)
 			require.NoError(t, err)
 
 			cm := cms[0].Object()
@@ -393,7 +473,10 @@ func TestBuildConfigMaps(t *testing.T) {
 		})
 
 		t.Run("defaults", func(t *testing.T) {
-			cms, err := BuildConfigMaps(&crd, nil)
+			nodeKeys, err := getMockNodeKeysForCRD(crd, "")
+			require.NoError(t, err)
+
+			cms, err := BuildConfigMaps(&crd, nil, nodeKeys)
 			require.NoError(t, err)
 
 			cm := cms[0].Object()
@@ -422,7 +505,10 @@ func TestBuildConfigMaps(t *testing.T) {
 			enable = false
 			new-field = "test"
 			`)
-			cms, err := BuildConfigMaps(overrides, nil)
+			nodeKeys, err := getMockNodeKeysForCRD(*overrides, "")
+			require.NoError(t, err)
+
+			cms, err := BuildConfigMaps(overrides, nil, nodeKeys)
 			require.NoError(t, err)
 
 			cm := cms[0].Object()
@@ -452,7 +538,11 @@ func TestBuildConfigMaps(t *testing.T) {
 			overrides.Spec.InstanceOverrides["osmosis-1"] = cosmosv1.InstanceOverridesSpec{
 				ExternalAddress: &overrideAddr1,
 			}
-			cms, err := BuildConfigMaps(overrides, nil)
+
+			nodeKeys, err := getMockNodeKeysForCRD(*overrides, "")
+			require.NoError(t, err)
+
+			cms, err := BuildConfigMaps(overrides, nil, nodeKeys)
 			require.NoError(t, err)
 
 			var config map[string]any
@@ -471,15 +561,94 @@ func TestBuildConfigMaps(t *testing.T) {
 		t.Run("invalid toml", func(t *testing.T) {
 			malformed := crd.DeepCopy()
 			malformed.Spec.ChainSpec.App.TomlOverrides = ptr(`invalid_toml = should be invalid`)
-			_, err := BuildConfigMaps(malformed, nil)
+
+			nodeKeys, err := getMockNodeKeysForCRD(*malformed, "")
+			require.NoError(t, err)
+
+			_, err = BuildConfigMaps(malformed, nil, nodeKeys)
 			require.Error(t, err)
 
 			require.Contains(t, err.Error(), "invalid toml in app overrides")
 		})
 	})
 
+	t.Run("node_key.json", func(t *testing.T) {
+		crd := defaultCRD()
+		crd.Spec.Replicas = 3
+
+		t.Run("happy path", func(t *testing.T) {
+			custom := crd.DeepCopy()
+
+			nodeKeys, err := getMockNodeKeysForCRD(*custom, "")
+			require.NoError(t, err)
+
+			cms, err := BuildConfigMaps(custom, nil, nodeKeys)
+			require.NoError(t, err)
+
+			cm := cms[0].Object()
+
+			require.NotEmpty(t, cm.Data)
+			require.Empty(t, cm.BinaryData)
+
+			nodeKey := NodeKey{}
+
+			err = json.Unmarshal([]byte(cm.Data[nodeKeyFile]), &nodeKey)
+			require.NoError(t, err)
+			require.Equal(t, nodeKey.PrivKey.Type, "tendermint/PrivKeyEd25519")
+			require.NotEmpty(t, nodeKey.PrivKey.Value)
+		})
+
+		t.Run("with existing", func(t *testing.T) {
+			const namespace = "test-namespace"
+			var crd cosmosv1.CosmosFullNode
+			crd.Namespace = namespace
+			crd.Name = "juno"
+			crd.Spec.Replicas = 3
+
+			var existingNodeKeys NodeKeys = map[client.ObjectKey]NodeKeyRepresenter{
+				{Namespace: namespace, Name: "juno-0"}: {
+					NodeKey: NodeKey{
+						PrivKey: NodeKeyPrivKey{
+							Type:  "tendermint/PrivKeyEd25519",
+							Value: ed25519.PrivateKey{},
+						},
+					},
+					MarshaledNodeKey: []byte("existing"),
+				},
+				{Namespace: namespace, Name: "juno-1"}: {
+					NodeKey: NodeKey{
+						PrivKey: NodeKeyPrivKey{
+							Type:  "tendermint/PrivKeyEd25519",
+							Value: ed25519.PrivateKey{},
+						},
+					},
+					MarshaledNodeKey: []byte("existing"),
+				},
+				{Namespace: namespace, Name: "juno-2"}: {
+					NodeKey: NodeKey{
+						PrivKey: NodeKeyPrivKey{
+							Type:  "tendermint/PrivKeyEd25519",
+							Value: ed25519.PrivateKey{},
+						},
+					},
+					MarshaledNodeKey: []byte("existing"),
+				},
+			}
+
+			got, err := BuildConfigMaps(&crd, nil, existingNodeKeys)
+			require.NoError(t, err)
+			require.Equal(t, 3, len(got))
+
+			nodeKey := got[0].Object().Data["node_key.json"]
+			require.Equal(t, "existing", nodeKey)
+		})
+	})
+
 	test.HasTypeLabel(t, func(crd cosmosv1.CosmosFullNode) []map[string]string {
-		cms, _ := BuildConfigMaps(&crd, nil)
+		nodeKeys, err := getMockNodeKeysForCRD(crd, "")
+		require.NoError(t, err)
+
+		cms, _ := BuildConfigMaps(&crd, nil, nodeKeys)
 		labels := make([]map[string]string, 0)
 		for _, cm := range cms {
 			labels = append(labels, cm.Object().Labels)
@@ -487,3 +656,19 @@ func TestBuildConfigMaps(t *testing.T) {
 		return labels
 	})
 }
+
+func getNodeKeyFromConfigMap(cm *corev1.ConfigMap) (*NodeKeyRepresenter, error) {
+	nodeKey := NodeKey{}
+
+	nodeKeyData := []byte(cm.Data[nodeKeyFile])
+
+	err := json.Unmarshal(nodeKeyData, &nodeKey)
+	if err != nil {
+		return nil, err
+	}
+
+	return &NodeKeyRepresenter{
+		NodeKey:          nodeKey,
+		MarshaledNodeKey: nodeKeyData,
+	}, nil
+}
diff --git a/internal/fullnode/configmap_control.go b/internal/fullnode/configmap_control.go
index 19f853e2..463feabd 100644
--- a/internal/fullnode/configmap_control.go
+++ b/internal/fullnode/configmap_control.go
@@ -14,7 +14,7 @@ import (
 
 // ConfigMapControl creates or updates configmaps.
 type ConfigMapControl struct {
-	build  func(*cosmosv1.CosmosFullNode, Peers) ([]diff.Resource[*corev1.ConfigMap], error)
+	build  func(*cosmosv1.CosmosFullNode, Peers, NodeKeys) ([]diff.Resource[*corev1.ConfigMap], error)
 	client Client
 }
 
@@ -30,7 +30,7 @@ type ConfigChecksums map[client.ObjectKey]string
 
 // Reconcile creates or updates configmaps containing items that are mounted into pods as files.
 // The ConfigMap is never deleted unless the CRD itself is deleted.
-func (cmc ConfigMapControl) Reconcile(ctx context.Context, log kube.Logger, crd *cosmosv1.CosmosFullNode, peers Peers) (ConfigChecksums, kube.ReconcileError) {
+func (cmc ConfigMapControl) Reconcile(ctx context.Context, log kube.Logger, crd *cosmosv1.CosmosFullNode, peers Peers, nodeKeys NodeKeys) (ConfigChecksums, kube.ReconcileError) {
 	var cms corev1.ConfigMapList
 	if err := cmc.client.List(ctx, &cms,
 		client.InNamespace(crd.Namespace),
@@ -41,7 +41,7 @@ func (cmc ConfigMapControl) Reconcile(ctx context.Context, log kube.Logger, crd
 
 	current := ptrSlice(cms.Items)
 
-	want, err := cmc.build(crd, peers)
+	want, err := cmc.build(crd, peers, nodeKeys)
 	if err != nil {
 		return nil, kube.UnrecoverableError(err)
 	}
diff --git a/internal/fullnode/configmap_control_test.go b/internal/fullnode/configmap_control_test.go
index beb5a979..b55f72f6 100644
--- a/internal/fullnode/configmap_control_test.go
+++ b/internal/fullnode/configmap_control_test.go
@@ -35,7 +35,10 @@ func TestConfigMapControl_Reconcile(t *testing.T) {
 		crd.Namespace = namespace
 		crd.Spec.ChainSpec.Network = "testnet"
 
-		cksums, err := control.Reconcile(ctx, nopReporter, &crd, nil)
+		nodeKeys, err := getMockNodeKeysForCRD(crd, "")
+		require.NoError(t, err)
+
+		cksums, err := control.Reconcile(ctx, nopReporter, &crd, nil, nodeKeys)
 		require.NoError(t, err)
 
 		require.Len(t, mClient.GotListOpts, 2)
@@ -66,12 +69,16 @@ func TestConfigMapControl_Reconcile(t *testing.T) {
 	t.Run("build error", func(t *testing.T) {
 		var mClient mockConfigClient
 		control := NewConfigMapControl(&mClient)
-		control.build = func(crd *cosmosv1.CosmosFullNode, _ Peers) ([]diff.Resource[*corev1.ConfigMap], error) {
+		control.build = func(crd *cosmosv1.CosmosFullNode, _ Peers, _ NodeKeys) ([]diff.Resource[*corev1.ConfigMap], error) {
 			return nil, errors.New("boom")
 		}
 
 		crd := defaultCRD()
-		_, err := control.Reconcile(ctx, nopReporter, &crd, nil)
+
+		nodeKeys, nErr := getMockNodeKeysForCRD(crd, "")
+		require.NoError(t, nErr)
+
+		_, err := control.Reconcile(ctx, nopReporter, &crd, nil, nodeKeys)
 		require.Error(t, err)
 
 		require.EqualError(t, err, "boom")
diff --git a/internal/fullnode/node_key_builder.go b/internal/fullnode/node_key_builder.go
deleted file mode 100644
index c5bfa285..00000000
--- a/internal/fullnode/node_key_builder.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package fullnode
-
-import (
-	"crypto/ed25519"
-	"crypto/rand"
-	"crypto/sha256"
-	"encoding/hex"
-	"encoding/json"
-	"fmt"
-
-	cosmosv1 "github.com/strangelove-ventures/cosmos-operator/api/v1"
-	"github.com/strangelove-ventures/cosmos-operator/internal/diff"
-	"github.com/strangelove-ventures/cosmos-operator/internal/kube"
-	corev1 "k8s.io/api/core/v1"
-)
-
-const nodeKeyFile = "node_key.json"
-
-// BuildNodeKeySecrets builds the node key secrets for the given CRD.
-// If the secret already has a node key, it is reused.
-// Returns an error if a new node key cannot be serialized. (Should never happen.)
-func BuildNodeKeySecrets(existing []*corev1.Secret, crd *cosmosv1.CosmosFullNode) ([]diff.Resource[*corev1.Secret], error) {
-	secrets := make([]diff.Resource[*corev1.Secret], 0, crd.Spec.Replicas)
-	startOrdinal := crd.Spec.Ordinals.Start
-
-	for i := startOrdinal; i < startOrdinal+crd.Spec.Replicas; i++ {
-		var s corev1.Secret
-		s.Name = nodeKeySecretName(crd, i)
-		s.Namespace = crd.Namespace
-		s = *kube.FindOrDefaultCopy(existing, &s)
-
-		var secret corev1.Secret
-		secret.Name = s.Name
-		secret.Namespace = s.Namespace
-		secret.Kind = "Secret"
-		secret.APIVersion = "v1"
-		secret.Data = s.Data
-
-		secret.Labels = defaultLabels(crd,
-			kube.InstanceLabel, instanceName(crd, i),
-		)
-
-		secret.Immutable = ptr(true)
-		secret.Type = corev1.SecretTypeOpaque
-
-		// Create node key if it doesn't exist
-		if secret.Data[nodeKeyFile] == nil {
-			nk, err := randNodeKey()
-			if err != nil {
-				return nil, err
-			}
-			secret.Data = map[string][]byte{
-				nodeKeyFile: nk,
-			}
-		}
-
-		secrets = append(secrets, diff.Adapt(&secret, int(i-startOrdinal)))
-	}
-	return secrets, nil
-}
-
-type NodeKey struct {
-	PrivKey NodeKeyPrivKey `json:"priv_key"`
-}
-
-type NodeKeyPrivKey struct {
-	Type  string             `json:"type"`
-	Value ed25519.PrivateKey `json:"value"`
-}
-
-func (nk NodeKey) ID() string {
-	pub := nk.PrivKey.Value.Public()
-	hash := sha256.Sum256(pub.(ed25519.PublicKey))
-	return hex.EncodeToString(hash[:20])
-}
-
-func randNodeKey() ([]byte, error) {
-	_, pk, err := ed25519.GenerateKey(rand.Reader)
-	if err != nil {
-		return nil, fmt.Errorf("failed to generate ed25519 node key: %w", err)
-	}
-	return json.Marshal(NodeKey{
-		PrivKey: NodeKeyPrivKey{
-			Type:  "tendermint/PrivKeyEd25519",
-			Value: pk,
-		},
-	})
-}
-
-func nodeKeySecretName(crd *cosmosv1.CosmosFullNode, ordinal int32) string {
-	return kube.ToName(fmt.Sprintf("%s-node-key-%d", appName(crd), ordinal))
-}
diff --git a/internal/fullnode/node_key_builder_test.go b/internal/fullnode/node_key_builder_test.go
deleted file mode 100644
index 65019c4d..00000000
--- a/internal/fullnode/node_key_builder_test.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package fullnode
-
-import (
-	"encoding/json"
-	"fmt"
-	"testing"
-
-	"github.com/strangelove-ventures/cosmos-operator/internal/test"
-
-	cosmosv1 "github.com/strangelove-ventures/cosmos-operator/api/v1"
-	"github.com/stretchr/testify/require"
-	corev1 "k8s.io/api/core/v1"
-)
-
-func TestBuildNodeKeySecrets(t *testing.T) {
-	t.Parallel()
-
-	t.Run("happy path", func(t *testing.T) {
-		var crd cosmosv1.CosmosFullNode
-		crd.Namespace = "test-namespace"
-		crd.Name = "juno"
-		crd.Spec.Replicas = 3
-		crd.Spec.ChainSpec.Network = "mainnet"
-		crd.Spec.PodTemplate.Image = "ghcr.io/juno:v1.2.3"
-
-		secrets, err := BuildNodeKeySecrets(nil, &crd)
-		require.NoError(t, err)
-		require.Len(t, secrets, 3)
-
-		for i, s := range secrets {
-			require.Equal(t, int64(i), s.Ordinal())
-			require.NotEmpty(t, s.Revision())
-			got := s.Object()
-			require.Equal(t, crd.Namespace, got.Namespace)
-			require.Equal(t, fmt.Sprintf("juno-node-key-%d", i), got.Name)
-			require.Equal(t, "Secret", got.Kind)
-			require.Equal(t, "v1", got.APIVersion)
-
-			wantLabels := map[string]string{
-				"app.kubernetes.io/created-by": "cosmos-operator",
-				"app.kubernetes.io/component":  "CosmosFullNode",
-				"app.kubernetes.io/name":       "juno",
-				"app.kubernetes.io/instance":   fmt.Sprintf("juno-%d", i),
-				"app.kubernetes.io/version":    "v1.2.3",
-				"cosmos.strange.love/network":  "mainnet",
-				"cosmos.strange.love/type":     "FullNode",
-			}
-			require.Equal(t, wantLabels, got.Labels)
-
-			require.Empty(t, got.Annotations)
-
-			require.True(t, *got.Immutable)
-			require.Equal(t, corev1.SecretTypeOpaque, got.Type)
-
-			nodeKey := got.Data["node_key.json"]
-			require.NotEmpty(t, nodeKey)
-
-			var gotJSON map[string]map[string]string
-			err = json.Unmarshal(nodeKey, &gotJSON)
-			require.NoError(t, err)
-			require.Equal(t, gotJSON["priv_key"]["type"], "tendermint/PrivKeyEd25519")
-			require.NotEmpty(t, gotJSON["priv_key"]["value"])
-		}
-	})
-
-	t.Run("happy path with non 0 starting ordinal", func(t *testing.T) {
-		var crd cosmosv1.CosmosFullNode
-		crd.Namespace = "test-namespace"
-		crd.Name = "juno"
-		crd.Spec.Replicas = 3
-		crd.Spec.ChainSpec.Network = "mainnet"
-		crd.Spec.PodTemplate.Image = "ghcr.io/juno:v1.2.3"
-		// Start ordinal is 0 by default
-		crd.Spec.Ordinals.Start = 2
-
-		secrets, err := BuildNodeKeySecrets(nil, &crd)
-		require.NoError(t, err)
-		require.Equal(t, crd.Spec.Replicas, int32(len(secrets)))
-
-		for i, s := range secrets {
-			ordinal := crd.Spec.Ordinals.Start + int32(i)
-			require.Equal(t, crd.Spec.Ordinals.Start, crd.Spec.Ordinals.Start)
-			require.NotEmpty(t, s.Revision())
-
-			got := s.Object()
-			require.Equal(t, crd.Namespace, got.Namespace)
-			require.Equal(t, fmt.Sprintf("juno-node-key-%d", ordinal), got.Name)
-			require.Equal(t, "Secret", got.Kind)
-			require.Equal(t, "v1", got.APIVersion)
-
-			wantLabels := map[string]string{
-				"app.kubernetes.io/created-by": "cosmos-operator",
-				"app.kubernetes.io/component":  "CosmosFullNode",
-				"app.kubernetes.io/name":       "juno",
-				"app.kubernetes.io/instance":   fmt.Sprintf("%s-%d", crd.Name, ordinal),
-				"app.kubernetes.io/version":    "v1.2.3",
-				"cosmos.strange.love/network":  "mainnet",
-				"cosmos.strange.love/type":     "FullNode",
-			}
-			require.Equal(t, wantLabels, got.Labels)
-
-			require.Empty(t, got.Annotations)
-
-			require.True(t, *got.Immutable)
-			require.Equal(t, corev1.SecretTypeOpaque, got.Type)
-
-			nodeKey := got.Data["node_key.json"]
-			require.NotEmpty(t, nodeKey)
-
-			var gotJSON map[string]map[string]string
-			err = json.Unmarshal(nodeKey, &gotJSON)
-			require.NoError(t, err)
-			require.Equal(t, gotJSON["priv_key"]["type"], "tendermint/PrivKeyEd25519")
-			require.NotEmpty(t, gotJSON["priv_key"]["value"])
-		}
-	})
-
-	t.Run("with existing", func(t *testing.T) {
-		const namespace = "test-namespace"
-		var crd cosmosv1.CosmosFullNode
-		crd.Namespace = namespace
-		crd.Name = "juno"
-		crd.Spec.Replicas = 3
-
-		var existing corev1.Secret
-		existing.Name = "juno-node-key-0"
-		existing.Namespace = namespace
-		existing.Annotations = map[string]string{"foo": "bar"}
-		existing.Data = map[string][]byte{"node_key.json": []byte("existing")}
-
-		got, err := BuildNodeKeySecrets([]*corev1.Secret{&existing}, &crd)
-		require.NoError(t, err)
-		require.Equal(t, 3, len(got))
-
-		nodeKey := got[0].Object().Data["node_key.json"]
-		require.Equal(t, "existing", string(nodeKey))
-
-		require.Empty(t, got[0].Object().Annotations)
-	})
-
-	t.Run("zero replicas", func(t *testing.T) {
-		var crd cosmosv1.CosmosFullNode
-		secrets, err := BuildNodeKeySecrets(nil, &crd)
-		require.NoError(t, err)
-		require.Empty(t, secrets)
-	})
-
-	test.HasTypeLabel(t, func(crd cosmosv1.CosmosFullNode) []map[string]string {
-		secrets, _ := BuildNodeKeySecrets(nil, &crd)
-		labels := make([]map[string]string, 0)
-		for _, secret := range secrets {
-			labels = append(labels, secret.Object().Labels)
-		}
-		return labels
-	})
-}
diff --git a/internal/fullnode/node_key_collector.go b/internal/fullnode/node_key_collector.go
new file mode 100644
index 00000000..36ae9c74
--- /dev/null
+++ b/internal/fullnode/node_key_collector.go
@@ -0,0 +1,117 @@
+package fullnode
+
+import (
+	"context"
+	"crypto/ed25519"
+	"crypto/rand"
+	"crypto/sha256"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+
+	cosmosv1 "github.com/strangelove-ventures/cosmos-operator/api/v1"
+	"github.com/strangelove-ventures/cosmos-operator/internal/kube"
+	corev1 "k8s.io/api/core/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type NodeKey struct {
+	PrivKey NodeKeyPrivKey `json:"priv_key"`
+}
+
+type NodeKeyPrivKey struct {
+	Type  string             `json:"type"`
+	Value ed25519.PrivateKey `json:"value"`
+}
+
+func (nk NodeKey) ID() string {
+	pub := nk.PrivKey.Value.Public()
+	hash := sha256.Sum256(pub.(ed25519.PublicKey))
+	return hex.EncodeToString(hash[:20])
+}
+
+func randNodeKey() (*NodeKey, error) {
+	_, pk, err := ed25519.GenerateKey(rand.Reader)
+	if err != nil {
+		return nil, fmt.Errorf("failed to generate ed25519 node key: %w", err)
+	}
+	return &NodeKey{
+		PrivKey: NodeKeyPrivKey{
+			Type:  "tendermint/PrivKeyEd25519",
+			Value: pk,
+		},
+	}, nil
+}
+
+// NodeKeyRepresenter represents a NodeKey and its marshaled form. Since NodeKeys can be pulled from ConfigMaps, we store the marshaled form to avoid re-marshaling during ConfigMap creation.
+type NodeKeyRepresenter struct {
+	NodeKey          NodeKey
+	MarshaledNodeKey []byte
+}
+
+// NodeKeys maps an ObjectKey using the instance name to a NodeKeyRepresenter.
+type NodeKeys map[client.ObjectKey]NodeKeyRepresenter
+
+// NodeKeyCollector finds and collects node key information.
+type NodeKeyCollector struct {
+	client Client
+}
+
+func NewNodeKeyCollector(client Client) *NodeKeyCollector {
+	return &NodeKeyCollector{
+		client: client,
+	}
+}
+
+// Collect node key information given the crd.
+func (c NodeKeyCollector) Collect(ctx context.Context, crd *cosmosv1.CosmosFullNode) (NodeKeys, kube.ReconcileError) {
+	nodeKeys := make(NodeKeys)
+
+	var cms corev1.ConfigMapList
+	if err := c.client.List(ctx, &cms,
+		client.InNamespace(crd.Namespace),
+		client.MatchingFields{kube.ControllerOwnerField: crd.Name},
+	); err != nil {
+		return nil, kube.TransientError(fmt.Errorf("list existing configmaps: %w", err))
+	}
+
+	currentCms := ptrSlice(cms.Items)
+
+	for i := crd.Spec.Ordinals.Start; i < crd.Spec.Ordinals.Start+crd.Spec.Replicas; i++ {
+		var confMap corev1.ConfigMap
+		confMap.Name = instanceName(crd, i)
+		confMap.Namespace = crd.Namespace
+		confMap = *kube.FindOrDefaultCopy(currentCms, &confMap)
+
+		var nodeKey NodeKey
+		var marshaledNodeKey []byte
+
+		if confMap.Data[nodeKeyFile] != "" {
+			err := json.Unmarshal([]byte(confMap.Data[nodeKeyFile]), &nodeKey)
+
+			if err != nil {
+				return nil, kube.UnrecoverableError(fmt.Errorf("unmarshal node key: %w", err))
+			}
+
+			// Store the exact value of the node key in the configmap to avoid non-deterministic JSON marshaling which can cause unnecessary updates.
+			marshaledNodeKey = []byte(confMap.Data[nodeKeyFile])
+		} else {
+			generatedNodeKey, err := randNodeKey()
+			if err != nil {
+				return nil, kube.UnrecoverableError(fmt.Errorf("generate node key: %w", err))
+			}
+			// Assign to the outer nodeKey; a short variable declaration here would shadow it and leave a zero value in the map.
+			nodeKey = *generatedNodeKey
+
+			marshaledNodeKey, err = json.Marshal(nodeKey)
+			if err != nil {
+				return nil, kube.UnrecoverableError(fmt.Errorf("marshal node key: %w", err))
+			}
+		}
+
+		nodeKeys[client.ObjectKey{Name: instanceName(crd, i), Namespace: crd.Namespace}] = NodeKeyRepresenter{
+			NodeKey:          nodeKey,
+			MarshaledNodeKey: marshaledNodeKey,
+		}
+	}
+	return nodeKeys, nil
+}
diff --git a/internal/fullnode/node_key_collector_test.go b/internal/fullnode/node_key_collector_test.go
new file mode 100644
index 00000000..1ee43c22
--- /dev/null
+++ b/internal/fullnode/node_key_collector_test.go
@@ -0,0 +1,103 @@
+package fullnode
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	cosmosv1 "github.com/strangelove-ventures/cosmos-operator/api/v1"
+	"github.com/stretchr/testify/require"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func TestNodeKeyCollector_Collect(t *testing.T) {
+	t.Parallel()
+
+	ctx := context.Background()
+	const (
+		namespace = "strangelove"
+		nodeKey1  = `{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"HBX8VFQ4OdWfOwIOR7jj0af8mVHik5iGW9o1xnn4vRltk1HmwQS2LLGrMPVS2LIUO9BUqmZ1Pjt+qM8x0ibHxQ=="}}`
+		nodeKey2  = `{"priv_key": {"type": "tendermint/PrivKeyEd25519", "value": "1JJ0C2TqVfbwgrrCKQiFr1wpWWwOeiJXl4CLcuk2Uot9gnf9hEHmfITWXCQRGvtdXU6uL1v6Ri00i4aEm00DLw=="}}`
+	)
+
+	type mockConfigClient = mockClient[*corev1.ConfigMap]
+
+	t.Run("happy path - non-existent node keys in old config maps", func(t *testing.T) {
+		var mClient mockConfigClient
+		mClient.ObjectList = corev1.ConfigMapList{Items: []corev1.ConfigMap{}}
+
+		var crd cosmosv1.CosmosFullNode
+		crd.Name = "dydx"
+		crd.Namespace = namespace
+		crd.Spec.Replicas = 2
+
+		collector := NewNodeKeyCollector(&mClient)
+
+		nodeKeys, err := collector.Collect(ctx, &crd)
+
+		require.NoError(t, err)
+
+		require.Len(t, nodeKeys, 2)
+	})
+
+	t.Run("happy path - existing node keys in old config maps", func(t *testing.T) {
+		var mClient mockConfigClient
+		mClient.ObjectList = corev1.ConfigMapList{Items: []corev1.ConfigMap{
+			{
+				ObjectMeta: metav1.ObjectMeta{Name: "dydx-0", Namespace: namespace},
+				Data:       map[string]string{nodeKeyFile: nodeKey1},
+			},
+			{
+				ObjectMeta: metav1.ObjectMeta{Name: "dydx-1", Namespace: namespace},
+				Data:       map[string]string{nodeKeyFile: nodeKey2},
+			},
+		}}
+
+		var crd cosmosv1.CosmosFullNode
+		crd.Name = "dydx"
+		crd.Namespace = namespace
+		crd.Spec.Replicas = 2
+
+		collector := NewNodeKeyCollector(&mClient)
+
+		nodeKeys, err := collector.Collect(ctx, &crd)
+
+		require.NoError(t, err)
+
+		require.Len(t, nodeKeys, 2)
+
+		require.Equal(t, nodeKey1, string(nodeKeys[client.ObjectKey{Name: "dydx-0", Namespace: namespace}].MarshaledNodeKey))
+		require.Equal(t, nodeKey2, string(nodeKeys[client.ObjectKey{Name: "dydx-1", Namespace: namespace}].MarshaledNodeKey))
+	})
+}
+
+type nodeKeyMockConfigClient = mockClient[*corev1.ConfigMap]
+
+var defaultMockNodeKeyData = `{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"HBX8VFQ4OdWfOwIOR7jj0af8mVHik5iGW9o1xnn4vRltk1HmwQS2LLGrMPVS2LIUO9BUqmZ1Pjt+qM8x0ibHxQ=="}}`
+
+func getMockNodeKeysForCRD(crd cosmosv1.CosmosFullNode, mockNodeKeyData string) (NodeKeys, error) {
+	var nodeKey = mockNodeKeyData
+
+	if nodeKey == "" {
+		nodeKey = defaultMockNodeKeyData
+	}
+
+	configMapItems := []corev1.ConfigMap{}
+
+	for i := crd.Spec.Ordinals.Start; i < crd.Spec.Ordinals.Start+crd.Spec.Replicas; i++ {
+		configMapItems = append(configMapItems, corev1.ConfigMap{
+			ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("%s-%d", crd.Name, i), Namespace: crd.Namespace},
+			Data:       map[string]string{nodeKeyFile: nodeKey},
+		})
+	}
+
+	var mClient nodeKeyMockConfigClient
+	mClient.ObjectList = corev1.ConfigMapList{Items: configMapItems}
+
+	collector := NewNodeKeyCollector(&mClient)
+	ctx := context.Background()
+
+	return collector.Collect(ctx, &crd)
+}
diff --git a/internal/fullnode/node_key_control.go b/internal/fullnode/node_key_control.go
deleted file mode 100644
index 2d2eb4b6..00000000
--- a/internal/fullnode/node_key_control.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package fullnode
-
-import (
-	"context"
-	"fmt"
-
-	cosmosv1 "github.com/strangelove-ventures/cosmos-operator/api/v1"
-	"github.com/strangelove-ventures/cosmos-operator/internal/diff"
-	"github.com/strangelove-ventures/cosmos-operator/internal/kube"
-	corev1 "k8s.io/api/core/v1"
-	ctrl "sigs.k8s.io/controller-runtime"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-// NodeKeyControl reconciles node keys for a CosmosFullNode. Node keys are saved as secrets and later mounted
-// into pods.
-type NodeKeyControl struct {
-	client Client
-}
-
-func NewNodeKeyControl(client Client) NodeKeyControl {
-	return NodeKeyControl{
-		client: client,
-	}
-}
-
-// Reconcile is the control loop for node keys. The secrets are never deleted.
-func (control NodeKeyControl) Reconcile(ctx context.Context, reporter kube.Reporter, crd *cosmosv1.CosmosFullNode) kube.ReconcileError {
-	var secrets corev1.SecretList
-	if err := control.client.List(ctx, &secrets,
-		client.InNamespace(crd.Namespace),
-		client.MatchingFields{kube.ControllerOwnerField: crd.Name},
-	); err != nil {
-		return kube.TransientError(fmt.Errorf("list existing node key secrets: %w", err))
-	}
-
-	existing := ptrSlice(secrets.Items)
-	want, serr := BuildNodeKeySecrets(existing, crd)
-	if serr != nil {
-		return kube.UnrecoverableError(fmt.Errorf("build node key secrets: %w", serr))
-	}
-
-	diffed := diff.New(existing, want)
-
-	for _, secret := range diffed.Creates() {
-		reporter.Info("Creating node key secret", "secret", secret.Name)
-		if err := ctrl.SetControllerReference(crd, secret, control.client.Scheme()); err != nil {
-			return kube.TransientError(fmt.Errorf("set controller reference on node key secret %q: %w", secret.Name, err))
-		}
-		if err := control.client.Create(ctx, secret); kube.IgnoreAlreadyExists(err) != nil {
-			return kube.TransientError(fmt.Errorf("create node key secret %q: %w", secret.Name, err))
-		}
-	}
-
-	for _, secret := range diffed.Updates() {
-		reporter.Info("Updating node key secret", "secret", secret.Name)
-		if err := control.client.Update(ctx, secret); err != nil {
-			return kube.TransientError(fmt.Errorf("update node key secret %q: %w", secret.Name, err))
-		}
-	}
-
-	return nil
-}
diff --git a/internal/fullnode/node_key_control_test.go b/internal/fullnode/node_key_control_test.go
deleted file mode 100644
index 25d08328..00000000
--- a/internal/fullnode/node_key_control_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package fullnode
-
-import (
-	"context"
-	"testing"
-
-	"github.com/stretchr/testify/require"
-	corev1 "k8s.io/api/core/v1"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-func TestNodeKeyControl_Reconcile(t *testing.T) {
-	t.Parallel()
-
-	type mockNodeKeyClient = mockClient[*corev1.Secret]
-	const namespace = "default"
-	ctx := context.Background()
-
-	var mClient mockNodeKeyClient
-	var existing corev1.Secret
-	existing.Name = "juno-node-key-0"
-	existing.Namespace = namespace
-	mClient.ObjectList = corev1.SecretList{Items: []corev1.Secret{existing}}
-
-	crd := defaultCRD()
-	crd.Namespace = namespace
-	crd.Spec.Replicas = 3
-	crd.Name = "juno"
-	crd.Spec.ChainSpec.Network = "testnet"
-
-	control := NewNodeKeyControl(&mClient)
-	err := control.Reconcile(ctx, nopReporter, &crd)
-	require.NoError(t, err)
-
-	require.Len(t, mClient.GotListOpts, 2)
-	var listOpt client.ListOptions
-	for _, opt := range mClient.GotListOpts {
-		opt.ApplyToList(&listOpt)
-	}
-	require.Equal(t, namespace, listOpt.Namespace)
-	require.Zero(t, listOpt.Limit)
-	require.Equal(t, ".metadata.controller=juno", listOpt.FieldSelector.String())
-
-	require.Equal(t, 1, mClient.UpdateCount)
-	require.Equal(t, 2, mClient.CreateCount)
-
-	require.NotEmpty(t, mClient.LastCreateObject.OwnerReferences)
-	require.Equal(t, crd.Name, mClient.LastCreateObject.OwnerReferences[0].Name)
-	require.Equal(t, "CosmosFullNode", mClient.LastCreateObject.OwnerReferences[0].Kind)
-	require.True(t, *mClient.LastCreateObject.OwnerReferences[0].Controller)
-}
diff --git a/internal/fullnode/peer_collector.go b/internal/fullnode/peer_collector.go
index 57e32ee2..789a586b 100644
--- a/internal/fullnode/peer_collector.go
+++ b/internal/fullnode/peer_collector.go
@@ -2,7 +2,6 @@ package fullnode
 
 import (
 	"context"
-	"encoding/json"
 	"fmt"
 	"net"
 	"sort"
@@ -103,7 +102,7 @@ func NewPeerCollector(client Getter) *PeerCollector {
 }
 
 // Collect peer information given the crd.
-func (c PeerCollector) Collect(ctx context.Context, crd *cosmosv1.CosmosFullNode) (Peers, kube.ReconcileError) {
+func (c PeerCollector) Collect(ctx context.Context, crd *cosmosv1.CosmosFullNode, nodeKeys NodeKeys) (Peers, kube.ReconcileError) {
 	peers := make(Peers)
 	startOrdinal := crd.Spec.Ordinals.Start
 
@@ -113,22 +112,16 @@ func (c PeerCollector) Collect(ctx context.Context, crd *cosmosv1.CosmosFullNode
 	}
 
 	for i := startOrdinal; i < startOrdinal+crd.Spec.Replicas; i++ {
-		secretName := nodeKeySecretName(crd, i)
-		var secret corev1.Secret
-		// Hoping the caching layer kubebuilder prevents API errors or rate limits. Simplifies logic to use a Get here
-		// vs. manually filtering through a List.
-		if err := c.client.Get(ctx, client.ObjectKey{Name: secretName, Namespace: crd.Namespace}, &secret); err != nil {
-			return nil, kube.TransientError(fmt.Errorf("get secret %s: %w", secretName, err))
-		}
+		nodeKey, ok := nodeKeys[c.objectKey(crd, i)]
 
-		var nodeKey NodeKey
-		if err := json.Unmarshal(secret.Data[nodeKeyFile], &nodeKey); err != nil {
-			return nil, kube.UnrecoverableError(err)
+		if !ok {
+			return nil, kube.UnrecoverableError(fmt.Errorf("node key not found for %s", c.objectKey(crd, i)))
 		}
+
 		svcName := p2pServiceName(crd, i)
 		peers[c.objectKey(crd, i)] = Peer{
-			NodeID:         nodeKey.ID(),
-			PrivateAddress: fmt.Sprintf("%s.%s.svc.%s:%d", svcName, secret.Namespace, clusterDomain, p2pPort),
+			NodeID:         nodeKey.NodeKey.ID(),
+			PrivateAddress: fmt.Sprintf("%s.%s.svc.%s:%d", svcName, crd.Namespace, clusterDomain, p2pPort),
 		}
 		if err := c.addExternalAddress(ctx, peers, crd, i); err != nil {
 			return nil, kube.TransientError(err)
 		}
diff --git a/internal/fullnode/peer_collector_test.go b/internal/fullnode/peer_collector_test.go
index 73b19dee..20a1a03a 100644
--- a/internal/fullnode/peer_collector_test.go
+++ b/internal/fullnode/peer_collector_test.go
@@ -34,7 +34,6 @@ func TestPeerCollector_Collect(t *testing.T) {
 	ctx := context.Background()
 	const (
 		namespace = "strangelove"
-		nodeKey   = `{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"HBX8VFQ4OdWfOwIOR7jj0af8mVHik5iGW9o1xnn4vRltk1HmwQS2LLGrMPVS2LIUO9BUqmZ1Pjt+qM8x0ibHxQ=="}}`
 	)
 
 	t.Run("happy path - private addresses", func(t *testing.T) {
@@ -42,10 +41,9 @@ func TestPeerCollector_Collect(t *testing.T) {
 		crd.Name = "dydx"
 		crd.Namespace = namespace
 		crd.Spec.Replicas = 2
-		res, err := BuildNodeKeySecrets(nil, &crd)
+
+		nodeKeys, err := getMockNodeKeysForCRD(crd, "")
 		require.NoError(t, err)
-		secret := res[0].Object()
-		secret.Data[nodeKeyFile] = []byte(nodeKey)
 
 		var (
 			getCount int
@@ -55,8 +53,6 @@ func TestPeerCollector_Collect(t *testing.T) {
 			objKeys = append(objKeys, key)
 			getCount++
 			switch ref := obj.(type) {
-			case *corev1.Secret:
-				*ref = *secret
 			case *corev1.Service:
 				*ref = corev1.Service{}
 			}
@@ -64,16 +60,14 @@ func TestPeerCollector_Collect(t *testing.T) {
 		})
 
 		collector := NewPeerCollector(getter)
-		peers, err := collector.Collect(ctx, &crd)
+		peers, err := collector.Collect(ctx, &crd, nodeKeys)
 		require.NoError(t, err)
 		require.Len(t, peers, 2)
 
-		require.Equal(t, 4, getCount) // 2 secrets + 2 services
+		require.Equal(t, 2, getCount) // 2 services
 
 		wantKeys := []client.ObjectKey{
-			{Name: "dydx-node-key-0", Namespace: namespace},
 			{Name: "dydx-p2p-0", Namespace: namespace},
-			{Name: "dydx-node-key-1", Namespace: namespace},
 			{Name: "dydx-p2p-1", Namespace: namespace},
 		}
 		require.Equal(t, wantKeys, objKeys)
@@ -98,15 +92,12 @@ func TestPeerCollector_Collect(t *testing.T) {
 		crd.Name = "dydx"
 		crd.Namespace = namespace
 		crd.Spec.Replicas = 3
-		res, err := BuildNodeKeySecrets(nil, &crd)
+
+		nodeKeys, err := getMockNodeKeysForCRD(crd, "")
 		require.NoError(t, err)
-		secret := res[0].Object()
-		secret.Data[nodeKeyFile] = []byte(nodeKey)
 
 		getter := mockGetter(func(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
 			switch ref := obj.(type) {
-			case *corev1.Secret:
-				*ref = *secret
 			case *corev1.Service:
 				var svc corev1.Service
 				switch key.Name {
@@ -125,7 +116,7 @@ func TestPeerCollector_Collect(t *testing.T) {
 		})
 
 		collector := NewPeerCollector(getter)
-		peers, err := collector.Collect(ctx, &crd)
+		peers, err := collector.Collect(ctx, &crd, nodeKeys)
 		require.NoError(t, err)
 		require.Len(t, peers, 3)
 
@@ -156,11 +147,8 @@ func TestPeerCollector_Collect(t *testing.T) {
 		crd.Spec.Replicas = 2
 		crd.Spec.Ordinals.Start = 2
 
-		res, err := BuildNodeKeySecrets(nil, &crd)
+		nodeKeys, err := getMockNodeKeysForCRD(crd, "")
 		require.NoError(t, err)
-		require.Equal(t, crd.Spec.Replicas, int32(len(res)))
-		secret := res[0].Object()
-		secret.Data[nodeKeyFile] = []byte(nodeKey)
 
 		var (
 			getCount int
@@ -170,8 +158,6 @@ func TestPeerCollector_Collect(t *testing.T) {
 			objKeys = append(objKeys, key)
 			getCount++
 			switch ref := obj.(type) {
-			case *corev1.Secret:
-				*ref = *secret
 			case *corev1.Service:
 				*ref = corev1.Service{}
 			}
@@ -179,16 +165,14 @@ func TestPeerCollector_Collect(t *testing.T) {
 		})
 
 		collector := NewPeerCollector(getter)
-		peers, err := collector.Collect(ctx, &crd)
+		peers, err := collector.Collect(ctx, &crd, nodeKeys)
 		require.NoError(t, err)
 		require.Len(t, peers, 2)
 
-		require.Equal(t, 4, getCount) // 2 secrets + 2 services
+		require.Equal(t, 2, getCount) // 2 services
 
 		wantKeys := []client.ObjectKey{
-			{Name: fmt.Sprintf("dydx-node-key-%d", crd.Spec.Ordinals.Start), Namespace: namespace},
 			{Name: fmt.Sprintf("dydx-p2p-%d", crd.Spec.Ordinals.Start), Namespace: namespace},
-			{Name: fmt.Sprintf("dydx-node-key-%d", crd.Spec.Ordinals.Start+1), Namespace: namespace},
 			{Name: fmt.Sprintf("dydx-p2p-%d", crd.Spec.Ordinals.Start+1), Namespace: namespace},
 		}
 		require.Equal(t, wantKeys, objKeys)
@@ -215,16 +199,11 @@ func TestPeerCollector_Collect(t *testing.T) {
 		crd.Spec.Replicas = 3
 		crd.Spec.Ordinals.Start = 0
 
-		res, err := BuildNodeKeySecrets(nil, &crd)
+		nodeKeys, err := getMockNodeKeysForCRD(crd, "")
 		require.NoError(t, err)
-		require.Equal(t, crd.Spec.Replicas, int32(len(res)))
-		secret := res[0].Object()
-		secret.Data[nodeKeyFile] = []byte(nodeKey)
 
 		getter := mockGetter(func(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
 			switch ref := obj.(type) {
-			case *corev1.Secret:
-				*ref = *secret
 			case *corev1.Service:
 				var svc corev1.Service
 				switch key.Name {
@@ -243,7 +222,7 @@ func TestPeerCollector_Collect(t *testing.T) {
 		})
 
 		collector := NewPeerCollector(getter)
-		peers, err := collector.Collect(ctx, &crd)
+		peers, err := collector.Collect(ctx, &crd, nodeKeys)
 		require.NoError(t, err)
 		require.Len(t, peers, 3)
 
@@ -273,49 +252,32 @@ func TestPeerCollector_Collect(t *testing.T) {
 		var crd cosmosv1.CosmosFullNode
 		crd.Spec.Replicas = 0
 
+		nodeKeys, err := getMockNodeKeysForCRD(crd, "")
+		require.NoError(t, err)
+
 		collector := NewPeerCollector(panicGetter)
-		peers, err := collector.Collect(ctx, &crd)
+		peers, err := collector.Collect(ctx, &crd, nodeKeys)
 		require.NoError(t, err)
 		require.Len(t, peers, 0)
 	})
 
 	t.Run("get error", func(t *testing.T) {
-		getter := mockGetter(func(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
-			return errors.New("boom")
-		})
-
-		collector := NewPeerCollector(getter)
 		var crd cosmosv1.CosmosFullNode
 		crd.Name = "dydx"
 		crd.Spec.Replicas = 1
-		_, err := collector.Collect(ctx, &crd)
-		require.Error(t, err)
-		require.EqualError(t, err, "get secret dydx-node-key-0: boom")
-		require.True(t, err.IsTransient())
-	})
+		nodeKeys, nErr := getMockNodeKeysForCRD(crd, "")
+		require.NoError(t, nErr)
 
-	t.Run("invalid node key", func(t *testing.T) {
 		getter := mockGetter(func(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
-			switch ref := obj.(type) {
-			case *corev1.Secret:
-				var secret corev1.Secret
-				secret.Data = map[string][]byte{nodeKeyFile: []byte("invalid")}
-				*ref = secret
-			case *corev1.Service:
-				panic("should not be called")
-			}
-			return nil
+			return errors.New("boom")
 		})
 
-		var crd cosmosv1.CosmosFullNode
-		crd.Name = "dydx"
-		crd.Spec.Replicas = 1
 		collector := NewPeerCollector(getter)
-		_, err := collector.Collect(ctx, &crd)
+		_, err := collector.Collect(ctx, &crd, nodeKeys)
 		require.Error(t, err)
-		require.Contains(t, err.Error(), "invalid character")
-		require.False(t, err.IsTransient())
+		require.EqualError(t, err, "get server dydx-p2p-0: boom")
+		require.True(t, err.IsTransient())
 	})
 }
diff --git a/internal/fullnode/pod_builder.go b/internal/fullnode/pod_builder.go
index 92ba5c2a..a2fdd409 100644
--- a/internal/fullnode/pod_builder.go
+++ b/internal/fullnode/pod_builder.go
@@ -223,9 +223,9 @@ func (b PodBuilder) Build() (*corev1.Pod, error) {
 const (
 	volChainHome = "vol-chain-home" // Stores live chain data and config files.
 	volTmp       = "vol-tmp"        // Stores temporary config files for manipulation later.
-	volConfig    = "vol-config"     // Items from ConfigMap.
+	volConfig    = "vol-config"     // Overlay items from ConfigMap.
 	volSystemTmp = "vol-system-tmp" // Necessary for statesync or else you may see the error: ERR State sync failed err="failed to create chunk queue: unable to create temp dir for state sync chunks: stat /tmp: no such file or directory" module=statesync
-	volNodeKey   = "vol-node-key"   // Secret containing the node key.
+	volNodeKey   = "vol-node-key"   // Config map containing the node key.
 )
 
 // WithOrdinal updates adds name and other metadata to the pod using "ordinal" which is the pod's
@@ -276,8 +276,8 @@ func (b PodBuilder) WithOrdinal(ordinal int32) PodBuilder {
 		{
 			Name: volNodeKey,
 			VolumeSource: corev1.VolumeSource{
-				Secret: &corev1.SecretVolumeSource{
-					SecretName: nodeKeySecretName(b.crd, ordinal),
+				ConfigMap: &corev1.ConfigMapVolumeSource{
+					LocalObjectReference: corev1.LocalObjectReference{Name: instanceName(b.crd, ordinal)},
 					Items: []corev1.KeyToPath{
 						{Key: nodeKeyFile, Path: nodeKeyFile},
 					},
diff --git a/internal/fullnode/pod_builder_test.go b/internal/fullnode/pod_builder_test.go
index bea97424..f9b5fe00 100644
--- a/internal/fullnode/pod_builder_test.go
+++ b/internal/fullnode/pod_builder_test.go
@@ -363,8 +363,8 @@ func TestPodBuilder(t *testing.T) {
 
 		// Node key
 		require.Equal(t, "vol-node-key", vols[4].Name)
-		require.Equal(t, "osmosis-node-key-5", vols[4].Secret.SecretName)
-		require.Equal(t, []corev1.KeyToPath{{Key: "node_key.json", Path: "node_key.json"}}, vols[4].Secret.Items)
+		require.Equal(t, "osmosis-5", vols[4].ConfigMap.Name)
+		require.Equal(t, []corev1.KeyToPath{{Key: "node_key.json", Path: "node_key.json"}}, vols[4].ConfigMap.Items)
 
 		require.Equal(t, len(pod.Spec.Containers), 2)