Mirror of https://github.com/oddlama/nix-config.git

feat: allow multiple zfs dataset definitions for container

Author: oddlama
Date: 2023-12-18 00:32:20 +01:00
parent b4c7fbd0e8
commit 8f28273b65
GPG key ID: 14EFE510775FE39A (no known key found for this signature in database)
6 changed files with 141 additions and 132 deletions
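
The guest option `zfs` changes from a single boolean-gated dataset to an attribute set of datasets, keyed by the mountpoint inside the guest. As an illustration, a host could now declare per-guest datasets like this (a minimal sketch based on the option definitions in this commit; the guest name "mycontainer" and the pool "rpool" are placeholders):

  guests.mycontainer = {
    backend = "container";
    # The attribute name doubles as the default mountpoint inside the guest.
    zfs."/state" = {
      pool = "rpool";
      dataset = "local/guests/mycontainer";
      # hostMountpoint defaults to "/guests/mycontainer/state"
    };
    zfs."/persist" = {
      pool = "rpool";
      dataset = "safe/guests/mycontainer";
    };
  };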

[file 1 of 6]

@@ -58,22 +58,26 @@
   #};
   guests = let
-    mkGuest = mainModule: {
+    mkGuest = guestName: {
       autostart = true;
-      zfs = {
-        enable = true;
+      zfs."/state" = {
+        pool = "rpool";
+        dataset = "local/guests/${guestName}";
+      };
+      zfs."/persist" = {
+        pool = "rpool";
+        dataset = "safe/guests/${guestName}";
       };
       modules = [
         ../../modules
         ./guests/common.nix
-        ({config, ...}: {node.secretsDir = ./secrets + "/${config.node.guestName}";})
-        mainModule
+        ./guests/${guestName}.nix
+        {node.secretsDir = ./secrets/${guestName};}
       ];
     };
-    mkMicrovm = system: mainModule:
-      mkGuest mainModule
+    mkMicrovm = system: guestName:
+      mkGuest guestName
       // {
         backend = "microvm";
         microvm = {
@@ -82,23 +86,26 @@
         };
       };
-    mkContainer = mainModule:
-      mkGuest mainModule
-      // {
-        backend = "container";
-        container.macvlan = "lan";
-      };
-  in
-    lib.mkIf (!minimal) {
-      adguardhome = mkContainer ./guests/adguardhome.nix;
-      forgejo = mkContainer ./guests/forgejo.nix;
-      grafana = mkContainer ./guests/grafana.nix;
-      influxdb = mkContainer ./guests/influxdb.nix;
-      kanidm = mkContainer ./guests/kanidm.nix;
-      loki = mkContainer ./guests/loki.nix;
-      paperless = mkContainer ./guests/paperless.nix;
-      vaultwarden = mkContainer ./guests/vaultwarden.nix;
+    mkContainer = guestName: {
+      ${guestName} =
+        mkGuest guestName
+        // {
+          backend = "container";
+          container.macvlan = "lan";
+        };
+    };
+  in
+    lib.mkIf (!minimal) (
+      {}
+      // mkContainer "adguardhome"
+      // mkContainer "forgejo"
+      // mkContainer "grafana"
+      // mkContainer "influxdb"
+      // mkContainer "kanidm"
+      // mkContainer "loki"
+      // mkContainer "paperless"
+      // mkContainer "vaultwarden"
+    );
     #ddclient = defineVm;
     #samba+wsdd = defineVm;
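
Note that mkContainer now returns a one-element attribute set keyed by the guest name, so the guest list is assembled by merging. A sketch of what one call evaluates to (guest name "adguardhome" taken from the diff above):

  mkContainer "adguardhome"
  # => {
  #   adguardhome = mkGuest "adguardhome" // {
  #     backend = "container";
  #     container.macvlan = "lan";
  #   };
  # }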

[file 2 of 6]

@@ -26,7 +26,7 @@
     datasets =
       defaultZfsDatasets
       // {
-        "safe/vms" = unmountable;
+        "safe/guests" = unmountable;
       };
   };
 };

[file 3 of 6]

@@ -1,7 +1,6 @@
-guestName: guestCfg: {lib, ...}: let
+_guestName: guestCfg: {lib, ...}: let
   inherit (lib) mkForce;
 in {
-  node.guestName = guestName;
   node.name = guestCfg.nodeName;
   node.type = guestCfg.backend;

[file 4 of 6]

@@ -6,25 +6,25 @@ guestName: guestCfg: {
   nodes,
   pkgs,
   ...
-}: {
+}: let
+  inherit
+    (lib)
+    mapAttrs'
+    flip
+    nameValuePair
+    ;
+in {
   autoStart = guestCfg.autostart;
   macvlans = ["${guestCfg.container.macvlan}:${guestCfg.networking.mainLinkName}"];
   ephemeral = true;
   privateNetwork = true;
-  # We bind-mount stuff from the host into /guest first, and later bind
-  # mount them into the correct path inside the guest, so we have a
-  # fileSystems entry that impermanence can depend upon.
-  bindMounts = {
-    "/guest/state" = {
-      hostPath = "/state/guests/${guestName}";
-      isReadOnly = false;
-    };
-    # Mount persistent data from the host
-    "/guest/persist" = lib.mkIf guestCfg.zfs.enable {
-      hostPath = guestCfg.zfs.mountpoint;
-      isReadOnly = false;
-    };
-  };
+  bindMounts = flip mapAttrs' guestCfg.zfs (
+    _: zfsCfg:
+      nameValuePair zfsCfg.guestMountpoint {
+        hostPath = zfsCfg.hostMountpoint;
+        isReadOnly = false;
+      }
+  );
   nixosConfiguration = inputs.nixpkgs.lib.nixosSystem {
     specialArgs = {
       inherit lib nodes inputs minimal;
@@ -46,18 +46,17 @@ guestName: guestCfg: {
       nixpkgs.config = pkgs.config;
-      # Bind the /guest/* paths from above so impermancence doesn't complain.
-      fileSystems."/state" = {
-        fsType = "none";
-        neededForBoot = true;
-        device = "/guest/state";
-        options = ["bind"];
-      };
-      fileSystems."/persist" = lib.mkIf guestCfg.zfs.enable {
-        fsType = "none";
-        neededForBoot = true;
-        device = "/guest/persist";
-        options = ["bind"];
-      };
+      # We bind-mount stuff from the host to itself, which is perfectly defined
+      # and not recursive. This allows us to have a fileSystems entry for each
+      # bindMount which other stuff can depend upon (impermanence adds dependencies
+      # to the state fs).
+      fileSystems = flip mapAttrs' guestCfg.zfs (_: zfsCfg:
+        nameValuePair zfsCfg.guestMountpoint {
+          neededForBoot = true;
+          fsType = "none";
+          device = zfsCfg.guestMountpoint;
+          options = ["bind"];
+        });
     }
     (import ./common-guest-config.nix guestName guestCfg)
   ]
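
The effect of the rewritten bindMounts/fileSystems pair: each dataset is bind-mounted from the host into the container, then bind-mounted onto itself so that impermanence has a fileSystems entry to depend on. A rough sketch of the result for one dataset (mountpoints are the defaults from the module below; the guest name "mycontainer" is hypothetical):

  bindMounts."/persist" = {
    hostPath = "/guests/mycontainer/persist";
    isReadOnly = false;
  };
  fileSystems."/persist" = {
    neededForBoot = true;
    fsType = "none";
    device = "/persist"; # bound onto itself inside the guest
    options = ["bind"];
  };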

[file 5 of 6]

@@ -14,8 +14,9 @@
     escapeShellArg
     makeBinPath
     mapAttrsToList
+    mkMerge
     mergeToplevelConfigs
-    mkEnableOption
+    flip
     mkIf
     mkOption
     types
@@ -26,58 +27,57 @@
     # Configuration required on the host for a specific guest
     defineGuest = guestName: guestCfg: {
       # Add the required datasets to the disko configuration of the machine
-      disko.devices.zpool = mkIf guestCfg.zfs.enable {
-        ${guestCfg.zfs.pool}.datasets.${guestCfg.zfs.dataset} =
-          disko.zfs.filesystem guestCfg.zfs.mountpoint;
-      };
+      disko.devices.zpool = mkMerge (flip map (attrValues guestCfg.zfs) (zfsCfg: {
+        ${zfsCfg.pool}.datasets.${zfsCfg.dataset} =
+          disko.zfs.filesystem zfsCfg.hostMountpoint;
+      }));
-      # Ensure that the zfs dataset exists before it is mounted.
-      systemd.services = let
-        fsMountUnit = "${utils.escapeSystemdPath guestCfg.zfs.mountpoint}.mount";
-      in
-        mkIf guestCfg.zfs.enable {
-          # Ensure that the zfs dataset exists before it is mounted.
-          "zfs-ensure-${utils.escapeSystemdPath guestCfg.zfs.mountpoint}" = {
-            wantedBy = [fsMountUnit];
-            before = [fsMountUnit];
-            after = [
-              "zfs-import-${utils.escapeSystemdPath guestCfg.zfs.pool}.service"
-              "zfs-mount.target"
-            ];
-            unitConfig.DefaultDependencies = "no";
-            serviceConfig.Type = "oneshot";
-            script = let
-              poolDataset = "${guestCfg.zfs.pool}/${guestCfg.zfs.dataset}";
-              diskoDataset = config.disko.devices.zpool.${guestCfg.zfs.pool}.datasets.${guestCfg.zfs.dataset};
-            in ''
-              export PATH=${makeBinPath [pkgs.zfs]}":$PATH"
-              if ! zfs list -H -o type ${escapeShellArg poolDataset} &>/dev/null ; then
-                ${diskoDataset._create}
-              fi
-            '';
-          };
-          # Ensure that the zfs dataset has the correct permissions when mounted
-          "zfs-chown-${utils.escapeSystemdPath guestCfg.zfs.mountpoint}" = {
-            after = [fsMountUnit];
-            unitConfig.DefaultDependencies = "no";
-            serviceConfig.Type = "oneshot";
-            script = ''
-              chmod 700 ${escapeShellArg guestCfg.zfs.mountpoint}
-            '';
-          };
-          "microvm@${guestName}" = mkIf (guestCfg.backend == "microvm") {
-            requires = [fsMountUnit "zfs-chown-${utils.escapeSystemdPath guestCfg.zfs.mountpoint}.service"];
-            after = [fsMountUnit "zfs-chown-${utils.escapeSystemdPath guestCfg.zfs.mountpoint}.service"];
-          };
-          "container@${guestName}" = mkIf (guestCfg.backend == "container") {
-            requires = [fsMountUnit "zfs-chown-${utils.escapeSystemdPath guestCfg.zfs.mountpoint}.service"];
-            after = [fsMountUnit "zfs-chown-${utils.escapeSystemdPath guestCfg.zfs.mountpoint}.service"];
-          };
-        };
+      systemd.services = mkMerge (flip map (attrValues guestCfg.zfs) (zfsCfg: let
+        fsMountUnit = "${utils.escapeSystemdPath zfsCfg.hostMountpoint}.mount";
+      in {
+        # Ensure that the zfs dataset exists before it is mounted.
+        "zfs-ensure-${utils.escapeSystemdPath zfsCfg.hostMountpoint}" = {
+          wantedBy = [fsMountUnit];
+          before = [fsMountUnit];
+          after = [
+            "zfs-import-${utils.escapeSystemdPath zfsCfg.pool}.service"
+            "zfs-mount.target"
+          ];
+          unitConfig.DefaultDependencies = "no";
+          serviceConfig.Type = "oneshot";
+          script = let
+            poolDataset = "${zfsCfg.pool}/${zfsCfg.dataset}";
+            diskoDataset = config.disko.devices.zpool.${zfsCfg.pool}.datasets.${zfsCfg.dataset};
+          in ''
+            export PATH=${makeBinPath [pkgs.zfs]}":$PATH"
+            if ! zfs list -H -o type ${escapeShellArg poolDataset} &>/dev/null ; then
+              ${diskoDataset._create}
+            fi
+          '';
+        };
+        # Ensure that the zfs dataset has the correct permissions when mounted
+        "zfs-chown-${utils.escapeSystemdPath zfsCfg.hostMountpoint}" = {
+          after = [fsMountUnit];
+          unitConfig.DefaultDependencies = "no";
+          serviceConfig.Type = "oneshot";
+          script = ''
+            chmod 700 ${escapeShellArg zfsCfg.hostMountpoint}
+          '';
+        };
+        "microvm@${guestName}" = mkIf (guestCfg.backend == "microvm") {
+          requires = [fsMountUnit "zfs-chown-${utils.escapeSystemdPath zfsCfg.hostMountpoint}.service"];
+          after = [fsMountUnit "zfs-chown-${utils.escapeSystemdPath zfsCfg.hostMountpoint}.service"];
+        };
+        "container@${guestName}" = mkIf (guestCfg.backend == "container") {
+          requires = [fsMountUnit "zfs-chown-${utils.escapeSystemdPath zfsCfg.hostMountpoint}.service"];
+          after = [fsMountUnit "zfs-chown-${utils.escapeSystemdPath zfsCfg.hostMountpoint}.service"];
+        };
+      }));
       microvm.vms.${guestName} =
         mkIf (guestCfg.backend == "microvm") (import ./microvm.nix guestName guestCfg attrs);
@@ -97,17 +97,10 @@ in {
       }
     ];
-    options.node = {
-      type = mkOption {
-        type = types.enum ["host" "microvm" "container"];
-        description = "The type of this machine.";
-        default = "host";
-      };
-      guestName = mkOption {
-        type = types.str;
-        description = "The base name of this machine without the parent's name. Only defined if this is a guest.";
-      };
-    };
+    options.node.type = mkOption {
+      type = types.enum ["host" "microvm" "container"];
+      description = "The type of this machine.";
+      default = "host";
+    };
     options.containers = mkOption {
@@ -126,11 +119,11 @@ in {
     options.guests = mkOption {
       default = {};
       description = "Defines the actual vms and handles the necessary base setup for them.";
-      type = types.attrsOf (types.submodule ({name, ...}: {
+      type = types.attrsOf (types.submodule (submod: {
         options = {
           nodeName = mkOption {
             type = types.str;
-            default = "${nodeName}-${name}";
+            default = "${nodeName}-${submod.config._module.args.name}";
             description = ''
               The name of the resulting node. By default this will be a compound name
               of the host's name and the vm's name to avoid name clashes. Can be
@@ -175,25 +168,37 @@ in {
           };
         };
-        zfs = {
-          enable = mkEnableOption "persistent data on separate zfs dataset";
-          pool = mkOption {
-            type = types.str;
-            description = "The host's zfs pool on which the dataset resides";
-          };
-          dataset = mkOption {
-            type = types.str;
-            default = "safe/guests/${name}";
-            description = "The host's dataset that should be used for this vm's state (will automatically be created, parent dataset must exist)";
-          };
-          mountpoint = mkOption {
-            type = types.str;
-            default = "/guests/${name}";
-            description = "The host's mountpoint for the vm's dataset (will be shared via virtiofs as /persist in the vm)";
-          };
+        zfs = mkOption {
+          description = "zfs datasets to mount into the guest";
+          default = {};
+          type = types.attrsOf (types.submodule (zfsSubmod: {
+            options = {
+              pool = mkOption {
+                type = types.str;
+                description = "The host's zfs pool on which the dataset resides";
+              };
+              dataset = mkOption {
+                type = types.str;
+                example = "safe/guests/mycontainer";
+                description = "The host's dataset that should be used for this mountpoint (will automatically be created, including parent datasets)";
+              };
+              hostMountpoint = mkOption {
+                type = types.path;
+                default = "/guests/${submod.config._module.args.name}${zfsSubmod.config._module.args.name}";
+                example = "/guests/mycontainer/persist";
+                description = "The host's mountpoint for the guest's dataset";
+              };
+              guestMountpoint = mkOption {
+                type = types.path;
+                default = zfsSubmod.config._module.args.name;
+                example = "/persist";
+                description = "The mountpoint inside the guest.";
+              };
+            };
+          }));
         };
         autostart = mkOption {
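
With these defaults the two attribute names do all the work: for a guest "mycontainer" (hypothetical) declaring zfs."/persist", hostMountpoint defaults to "/guests/mycontainer/persist" and guestMountpoint to "/persist", both derived via _module.args.name. The host module above then generates one zfs-ensure-* and one zfs-chown-* unit per dataset, named after the escaped host mountpoint, roughly:

  systemd.services."zfs-ensure-guests-mycontainer-persist" = { ... };
  systemd.services."zfs-chown-guests-mycontainer-persist" = { ... };
  systemd.services."container@mycontainer".requires =
    ["guests-mycontainer-persist.mount" "zfs-chown-guests-mycontainer-persist.service"];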

[file 6 of 6]

@@ -11,7 +11,6 @@
     concatStringsSep
     duplicates
     mapAttrsToList
-    mkIf
     mkOption
     types
     ;