diff --git a/docs/docs/reference/configuration.md b/docs/docs/reference/configuration.md
index bdd1bd9c..d3ef96c9 100644
--- a/docs/docs/reference/configuration.md
+++ b/docs/docs/reference/configuration.md
@@ -460,6 +460,23 @@ extensionServices:
:negative_squared_cross_mark: |
+
+`volumes` |
+[][Volume](#volume) |
+Machine volume configs specification.*Show example*
+```yaml
+volumes:
+ - name: EPHEMERAL
+ provisioning:
+ diskSelector:
+ match: disk.transport == "nvme"
+ maxSize: 50GiB
+```
+ |
+`nil` |
+:negative_squared_cross_mark: |
+
+
`nodeLabels` |
map[string]string |
@@ -910,6 +927,44 @@ environment:
+## Volume
+
+`Volume` defines machine volume configuration for a node.
+
+
+
+Field | Type | Description | Default Value | Required |
+
+
+
+`name` |
+`string` |
+Name of the volume config.*Show example*
+```yaml
+name: EPHEMERAL
+```
+ |
+`nil` |
+:white_check_mark: |
+
+
+
+`provisioning` |
+[ProvisioningSpec](#provisioningspec) |
+Provisioning spec of the volume config.*Show example*
+```yaml
+provisioning:
+ diskSelector:
+ match: disk.transport == "nvme"
+ maxSize: 50GiB
+```
+ |
+`nil` |
+:white_check_mark: |
+
+
+
+
## NetworkRule
`NetworkRule` defines the firewall rules to match.
@@ -1005,3 +1060,7 @@ ingress:
## ConfigFile
`ConfigFile` is type of upstream Talos `extensions.ConfigFile`
+
+## ProvisioningSpec
+
+`ProvisioningSpec` is type of upstream Talos `block.ProvisioningSpec`
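+
+A rough sketch of the fields it accepts, assuming the upstream Talos `diskSelector`, `minSize`, `maxSize` and `grow` keys of `block.ProvisioningSpec`:
+
+```yaml
+provisioning:
+  diskSelector:
+    match: disk.transport == "nvme"
+  minSize: 10GiB
+  maxSize: 50GiB
+  grow: false
+```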
diff --git a/example/talconfig.yaml b/example/talconfig.yaml
index 29702360..1a1129b5 100644
--- a/example/talconfig.yaml
+++ b/example/talconfig.yaml
@@ -32,6 +32,12 @@ nodes:
mountPath: /usr/local/etc/nut/upsmon.conf
environment:
- UPS_NAME=ups
+ volumes:
+ - name: EPHEMERAL
+ provisioning:
+ diskSelector:
+ match: disk.transport == "nvme"
+ maxSize: 50GiB
ingressFirewall:
defaultAction: block
rules:
@@ -74,8 +80,9 @@ nodes:
installDiskSelector:
size: 4GB
model: WDC*
- name: /sys/block/sda/device/name
- busPath: /pci0000:00/0000:00:17.0/ata1/host0/target0:0:0/0:0:0:0
+ # TODO: broken since Talos 1.9 and I need to investigate
+ # name: /sys/block/sda/device/name
+ # busPath: /pci0000:00/0000:00:17.0/ata1/host0/target0:0:0/0:0:0:0
nodeLabels:
rack: rack1a
zone: us-east-1a
diff --git a/pkg/config/config.go b/pkg/config/config.go
index a3b3f61f..59df7f07 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -2,6 +2,7 @@ package config
import (
"github.com/siderolabs/image-factory/pkg/schematic"
+ "github.com/siderolabs/talos/pkg/machinery/config/types/block"
"github.com/siderolabs/talos/pkg/machinery/config/types/network"
"github.com/siderolabs/talos/pkg/machinery/config/types/runtime/extensions"
"github.com/siderolabs/talos/pkg/machinery/config/types/v1alpha1"
@@ -59,6 +60,7 @@ type NodeConfigs struct {
MachineSpec MachineSpec `yaml:"machineSpec,omitempty" jsonschema:"description=Machine hardware specification"`
IngressFirewall *IngressFirewall `yaml:"ingressFirewall,omitempty" jsonschema:"description=Machine firewall specification"`
ExtensionServices []*ExtensionService `yaml:"extensionServices,omitempty" jsonschema:"description=Machine extension services specification"`
+ Volumes []*Volume `yaml:"volumes,omitempty" jsonschema:"description=Machine volume configs specification"`
}
type ImageFactory struct {
@@ -92,3 +94,8 @@ type ExtensionService struct {
ConfigFiles extensions.ConfigFileList `yaml:"configFiles,omitempty" jsonschema:"description=The config files for the extension service"`
Environment []string `yaml:"environment,omitempty" jsonschema:"description=The environment for the extension service"`
}
+
+type Volume struct {
+ Name string `yaml:"name" jsonschema:"description=Name of the volume config"`
+ Provisioning block.ProvisioningSpec `yaml:"provisioning" jsonschema:"description=Provisioning spec of the volume config"`
+}
diff --git a/pkg/generate/config.go b/pkg/generate/config.go
index 13c31ee8..ab9e1f3f 100644
--- a/pkg/generate/config.go
+++ b/pkg/generate/config.go
@@ -81,6 +81,15 @@ func GenerateConfig(c *config.TalhelperConfig, dryRun bool, outDir, secretFile,
cfg = append(cfg, ext...)
}
+ if len(node.Volumes) > 0 {
+ slog.Debug(fmt.Sprintf("generating volume config for %s", node.Hostname))
+ vc, err := talos.GenerateVolumeConfigBytes(node.Volumes, mode)
+ if err != nil {
+ return err
+ }
+ cfg = append(cfg, vc...)
+ }
+
if len(node.ExtraManifests) > 0 {
slog.Debug(fmt.Sprintf("generating extra manifests for %s", node.Hostname))
content, err := combineExtraManifests(node.ExtraManifests)
diff --git a/pkg/talos/volumeconfig.go b/pkg/talos/volumeconfig.go
new file mode 100644
index 00000000..7b8d4e07
--- /dev/null
+++ b/pkg/talos/volumeconfig.go
@@ -0,0 +1,60 @@
+package talos
+
+import (
+ "fmt"
+ "slices"
+
+ "github.com/budimanjojo/talhelper/v3/pkg/config"
+ "github.com/siderolabs/talos/pkg/machinery/config/types/block"
+)
+
+func GenerateVolumeConfigBytes(cfgs []*config.Volume, mode string) ([]byte, error) {
+ var result [][]byte
+
+ vcs, err := GenerateVolumeConfig(cfgs, mode)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, vc := range vcs {
+ vcByte, err := marshalYaml(vc)
+ if err != nil {
+ return nil, err
+ }
+
+ result = append(result, vcByte)
+ }
+
+ return CombineYamlBytes(result), nil
+}
+
+func GenerateVolumeConfig(cfgs []*config.Volume, mode string) ([]*block.VolumeConfigV1Alpha1, error) {
+ var (
+ // volume config names must be unique, duplicates are rejected below
+ names []string
+ result []*block.VolumeConfigV1Alpha1
+ )
+
+ m, err := parseMode(mode)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, v := range cfgs {
+ if slices.Contains(names, v.Name) {
+ return nil, fmt.Errorf("duplicate volume config name found: %s", v.Name)
+ }
+ names = append(names, v.Name)
+ vc := block.NewVolumeConfigV1Alpha1()
+ vc.MetaName = v.Name
+ vc.ProvisioningSpec = v.Provisioning
+
+ if _, err := vc.Validate(m); err != nil {
+ return nil, err
+ }
+
+ result = append(result, vc)
+ }
+
+ return result, nil
+}
diff --git a/pkg/talos/volumeconfig_test.go b/pkg/talos/volumeconfig_test.go
new file mode 100644
index 00000000..6b149e84
--- /dev/null
+++ b/pkg/talos/volumeconfig_test.go
@@ -0,0 +1,54 @@
+package talos
+
+import (
+ "testing"
+
+ "github.com/budimanjojo/talhelper/v3/pkg/config"
+ "github.com/siderolabs/talos/pkg/machinery/cel"
+ "github.com/siderolabs/talos/pkg/machinery/cel/celenv"
+ "github.com/siderolabs/talos/pkg/machinery/config/types/block"
+ "gopkg.in/yaml.v3"
+)
+
+func TestGenerateNodeVolumeConfig(t *testing.T) {
+ data := []byte(`nodes:
+ - hostname: node1
+ volumes:
+ - name: EPHEMERAL
+ provisioning:
+ diskSelector:
+ match: disk.transport == "nvme"
+ maxSize: 50GiB
+ - name: IMAGECACHE
+ provisioning:
+ diskSelector:
+ match: disk.size > 120u * GB && disk.size < 1u * TB`)
+ var m config.TalhelperConfig
+ if err := yaml.Unmarshal(data, &m); err != nil {
+ t.Fatal(err)
+ }
+
+ expectedVolume1Name := "EPHEMERAL"
+ expectedVolume1Provisioning := block.ProvisioningSpec{
+ DiskSelectorSpec: block.DiskSelector{
+ Match: cel.MustExpression(cel.ParseBooleanExpression(`disk.transport == "nvme"`, celenv.DiskLocator())),
+ },
+ ProvisioningMaxSize: block.MustByteSize("50GiB"),
+ }
+ expectedVolume2Name := "IMAGECACHE"
+ expectedVolume2Provisioning := block.ProvisioningSpec{
+ DiskSelectorSpec: block.DiskSelector{
+ Match: cel.MustExpression(cel.ParseBooleanExpression(`disk.size > 120u * GB && disk.size < 1u * TB`, celenv.DiskLocator())),
+ },
+ }
+
+ result, err := GenerateVolumeConfig(m.Nodes[0].Volumes, "metal")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ compare(result[0].Name(), expectedVolume1Name, t)
+ compare(result[0].ProvisioningSpec, expectedVolume1Provisioning, t)
+ compare(result[1].Name(), expectedVolume2Name, t)
+ compare(result[1].ProvisioningSpec, expectedVolume2Provisioning, t)
+}