Skip to content

Instantly share code, notes, and snippets.

@Archisman-Mridha
Created February 6, 2026 15:19
Show Gist options
  • Select an option

  • Save Archisman-Mridha/dd0e9e743dbd34ac29ee2b7528b09060 to your computer and use it in GitHub Desktop.

Select an option

Save Archisman-Mridha/dd0e9e743dbd34ac29ee2b7528b09060 to your computer and use it in GitHub Desktop.
package main
import (
"errors"
"log/slog"
"slices"
)
// Disk type identifiers. Presumably these match the type strings reported by
// the hardware inventory and stored in Disk.Type — TODO confirm the source.
const (
	DiskTypeHDD  = "hdd"
	DiskTypeSSD  = "ssd"
	DiskTypeNVMe = "nvme"
)
type (
	// Disk describes one physical disk on the node : its identity, how much
	// of its capacity is still unallocated, and its per-use-case priority
	// scores used by StoragePlanner.Plan for disk selection.
	Disk struct {
		// Name is the device name; WWN is the disk's World Wide Name.
		// Type is presumably one of the DiskType* constants — TODO confirm.
		Name,
		WWN,
		Type string

		// Size is the total capacity and Unallocated the capacity not yet
		// reserved by any plan phase, both in GB.
		Size,
		Unallocated uint

		HasHighSpeedNIC bool

		PriorityScores PriorityScores
	}

	// PriorityScores ranks a disk for each installation target. Plan sorts
	// ascending on these scores, so a lower score means the disk is picked
	// earlier (i.e. has higher priority) for that target.
	PriorityScores struct {
		OSInstallation,
		ZPoolInstallation,
		CEPHInstallation int
	}
)
type (
	// StoragePlan is the result of StoragePlanner.Plan : how the node's disks
	// are divided between the OS, the ZFS pool, and Rook CEPH.
	StoragePlan struct {
		OS    OSStoragePlan
		ZPool ZPoolStoragePlan
		CEPH  CEPHStoragePlan
	}

	// OSStoragePlan describes where and how the OS will be installed.
	OSStoragePlan struct {
		// Size is the per-disk allocation reserved for the OS, in GB.
		Size uint

		/*
			WWNs of the 2 (cheapest) disks, across which the OS will be installed, with RAID 1 enabled.

			OS installation is managed by ClusterAPI Provider Hetzner (CAPH). For each of those 2 disks,
			it'll create 3 partitions :

				(1) Boot partition (/boot), of size 512MB.
				(2) UEFI partition (/boot/efi), of size ~1GB.
				(3) Partition where the OS gets installed.
		*/
		DiskWWNs []string
	}

	/*
		ZPoolStoragePlan lists the disks across which the single ZFS Pool runs.

		We'll always have a single ZFS Pool in RAID-Z1 mode, with the following components :

			(1) 100GB ZVolume for storing container images.
			(2) 50GB ZVolume for storing pod ephemeral volumes.
			(3) 50GB ZVolume for storing pod logs.
			(4) Optionally, additional storage from the ZPool allocated to OpenEBS ZFS LocalPV
			    provisioner, which will be used for fast node-local storage requirements (Redis cache
			    for e.g.). Let that amount of storage be x. By default, x = 50GB.
	*/
	ZPoolStoragePlan struct {
		// Disks holds disk names (TODO : rather use disk IDs — see Plan).
		Disks []string
	}

	// CEPHStoragePlan lists the disks (whole or partitioned) handed to Rook CEPH.
	CEPHStoragePlan struct {
		Disks []struct {
			Name string

			// Whether the disk has been partitioned or not. If not, that means, we're allocating the
			// whole disk to CEPH.
			Partitioned bool
		}
	}

	// StoragePlanner computes a StoragePlan for a set of disks.
	//
	// NOTE(review): the disks field is never read — Plan receives the disks as
	// a parameter instead. Confirm which is the intended source of truth.
	StoragePlanner struct {
		disks []*Disk
	}
)
func (s *StoragePlanner) Plan(disks []*Disk) (*StoragePlan, error) {
plan := &StoragePlan{}
/*
Determine the (at max 2) disks we'll use for OS installation.
This is the priority policy we'll use for disk selection :
HDD > SSD > NVMe > SSD with high speed NIC > NVMe with high speed NIC
*/
{
// Sort the disks based on the priority policy.
slices.SortFunc(disks, func(a *Disk, b *Disk) int {
return a.PriorityScores.OSInstallation - b.PriorityScores.OSInstallation
})
// Find disks and allocate storage from them.
var perDiskAllocation uint = 20
targetDisks := []*Disk{}
for _, targetDisk := range targetDisks {
if (len(targetDisks) < 2) && (targetDisk.Unallocated > perDiskAllocation) {
targetDisk.Unallocated -= perDiskAllocation
targetDisks = append(targetDisks, targetDisk)
}
}
if len(targetDisks) != 2 {
return nil, errors.New("couldn't find 2 disks suitable for OS installation")
}
plan.OS = OSStoragePlan{
Size: perDiskAllocation,
DiskWWNs: []string{},
}
for _, targetDisk := range targetDisks {
plan.OS.DiskWWNs = append(plan.OS.DiskWWNs, targetDisk.WWN)
}
}
/*
Determine the (at max 2) disks across which we'll run the ZPool.
This is the priority policy we'll use for disk selection :
NVMe > SSD > HDD > SSD with high speed NIC > NVMe with high speed NIC
*/
{
// Sort the disks based on the priority policy.
slices.SortFunc(disks, func(a *Disk, b *Disk) int {
return a.PriorityScores.ZPoolInstallation - b.PriorityScores.ZPoolInstallation
})
// Find disks and allocate storage from them.
var perDiskAllocation uint = 250
targetDisks := []*Disk{}
for _, targetDisk := range targetDisks {
if (len(targetDisks) < 2) && (targetDisk.Unallocated > perDiskAllocation) {
targetDisk.Unallocated -= perDiskAllocation
targetDisks = append(targetDisks, targetDisk)
}
}
if len(targetDisks) != 2 {
return nil, errors.New("couldn't find 2 disks suitable for running ZPool")
}
plan.ZPool = ZPoolStoragePlan{
Disks: []string{},
}
for _, targetDisk := range targetDisks {
// TODO : Rather use disk ID.
plan.ZPool.Disks = append(plan.ZPool.Disks, targetDisk.Name)
}
}
// All of the remaining storage will get allocated to Rook CEPH.
plan.CEPH = CEPHStoragePlan{
Disks: []struct {
Name string
Partitioned bool
}{},
}
for _, disk := range disks {
// We need atleast 500GB of space.
if disk.Unallocated < 500 {
slog.Warn("Leaving out < 500GB space", slog.String("disk", disk.Name))
continue
}
plan.CEPH.Disks = append(plan.CEPH.Disks, struct {
Name string
Partitioned bool
}{disk.Name, (disk.Unallocated < disk.Size)})
}
return plan, nil
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment