mirror of https://github.com/oddlama/nix-config.git

feat: properly ensure vm zfs dataset exists

commit f3ed1248af (parent a0d22b8be1)
Author: oddlama
Date: 2023-05-21 22:57:40 +02:00
GPG key ID: 14EFE510775FE39A (no known key found for this signature in database)
9 changed files with 74 additions and 25 deletions


@@ -37,7 +37,17 @@
};
in {
test = defineVm 11;
#hi = defineVm 12;
hi = defineVm 12;
};
microvm.vms.hi.config = {
imports = [
../common/core
../../users/root
];
home-manager.users.root.home.minimal = true;
rekey.hostPubkey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBXXjI6uB26xOF0DPy/QyLladoGIKfAtofyqPgIkCH/g";
};
microvm.vms.test.config = {


@@ -44,16 +44,14 @@
fileSystems."/persist".neededForBoot = true;
# After importing the rpool, rollback the root system to be empty.
boot.initrd.systemd.services = {
impermanence-root = {
wantedBy = ["initrd.target"];
after = ["zfs-import-rpool.service"];
before = ["sysroot.mount"];
unitConfig.DefaultDependencies = "no";
serviceConfig = {
Type = "oneshot";
ExecStart = "${pkgs.zfs}/bin/zfs rollback -r rpool/local/root@blank";
};
boot.initrd.systemd.services.impermanence-root = {
wantedBy = ["initrd.target"];
after = ["zfs-import-rpool.service"];
before = ["sysroot.mount"];
unitConfig.DefaultDependencies = "no";
serviceConfig = {
Type = "oneshot";
ExecStart = "${pkgs.zfs}/bin/zfs rollback -r rpool/local/root@blank";
};
};
}
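
Aside: the rewrite above only flattens the attribute path; nested attribute sets and dotted attribute paths define the same value. A minimal standalone sketch (not part of this commit) that can be checked with nix-instantiate --eval --strict:

let
  # Nested form, as in the removed lines.
  nested = {
    boot.initrd.systemd.services = {
      impermanence-root = {wantedBy = ["initrd.target"];};
    };
  };
  # Dotted form, as in the added lines.
  flat = {
    boot.initrd.systemd.services.impermanence-root = {wantedBy = ["initrd.target"];};
  };
in
  nested == flat # evaluates to true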


@@ -7,6 +7,7 @@
nodeName,
nodePath,
pkgs,
utils,
...
}: let
inherit
@@ -16,6 +17,7 @@
escapeShellArg
filterAttrs
foldl'
makeBinPath
mapAttrsToList
mdDoc
mkDefault
@@ -42,21 +44,49 @@
extraLib.disko.zfs.filesystem vmCfg.zfs.mountpoint;
};
# TODO not cool, this might change or require more creation options.
# TODO better to only add disko and a mount point requirement.
# TODO the user can do the rest if required.
# TODO needed for boot false
# When installing a microvm, make sure that its persistent zfs dataset exists
# TODO make this an activation function before mounting stuff.
systemd.services."install-microvm-${vmName}".preStart = let
# Ensure that the zfs dataset exists before it is mounted.
systemd.services = let
fsMountUnit = "${utils.escapeSystemdPath vmCfg.zfs.mountpoint}.mount";
poolDataset = "${vmCfg.zfs.pool}/${vmCfg.zfs.dataset}";
in
mkIf vmCfg.zfs.enable ''
diskoDataset = config.disko.devices.zpool.${vmCfg.zfs.pool}.datasets.${vmCfg.zfs.dataset};
createDatasetScript = pkgs.writeShellScript "create-microvm-${vmName}-zfs-dataset" ''
export PATH=${makeBinPath (diskoDataset._pkgs pkgs)}":$PATH"
if ! ${pkgs.zfs}/bin/zfs list -H -o type ${escapeShellArg poolDataset} &>/dev/null ; then
${config.disko.devices.zpool.${vmCfg.zfs.pool}.datasets.${vmCfg.zfs.dataset}._create {zpool = vmCfg.zfs.pool;}}
${diskoDataset._create {zpool = vmCfg.zfs.pool;}}
fi
chmod 700 ${escapeShellArg vmCfg.zfs.mountpoint}
'';
in
mkIf vmCfg.zfs.enable {
# Ensure that the zfs dataset exists before it is mounted.
"zfs-ensure-${utils.escapeSystemdPath vmCfg.zfs.mountpoint}" = let
fsMountUnit = "${utils.escapeSystemdPath vmCfg.zfs.mountpoint}.mount";
poolDataset = "${vmCfg.zfs.pool}/${vmCfg.zfs.dataset}";
diskoDataset = config.disko.devices.zpool.${vmCfg.zfs.pool}.datasets.${vmCfg.zfs.dataset};
createDatasetScript = pkgs.writeShellScript "create-microvm-${vmName}-zfs-dataset" ''
export PATH=${makeBinPath [pkgs.zfs]}":$PATH"
if ! zfs list -H -o type ${escapeShellArg poolDataset} &>/dev/null ; then
${diskoDataset._create {zpool = vmCfg.zfs.pool;}}
fi
chmod 700 ${escapeShellArg vmCfg.zfs.mountpoint}
'';
in
mkIf vmCfg.zfs.enable {
wantedBy = [fsMountUnit];
before = [fsMountUnit];
after = ["zfs-import-${utils.escapeSystemdPath vmCfg.zfs.pool}.service"];
unitConfig.DefaultDependencies = "no";
serviceConfig = {
Type = "oneshot";
ExecStart = "${createDatasetScript}";
};
};
"microvm@${vmName}" = {
requires = [fsMountUnit];
after = [fsMountUnit];
};
};
microvm.vms.${vmName} = let
# Loads configuration from a subfolder of this nodes configuration, if it exists.
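
Aside: a rough sketch (not part of this commit) of what the generic definitions above expand to, assuming a hypothetical VM named "test" with zfs.pool = "rpool", zfs.dataset = "safe/vms/test" and zfs.mountpoint = "/persist/vms/test", so that utils.escapeSystemdPath yields "persist-vms-test":

{
  # Creates the dataset with the disko-generated script before the mount unit runs,
  # ordered after the pool import.
  systemd.services."zfs-ensure-persist-vms-test" = {
    wantedBy = ["persist-vms-test.mount"];
    before = ["persist-vms-test.mount"];
    after = ["zfs-import-rpool.service"];
    unitConfig.DefaultDependencies = "no";
    serviceConfig = {
      Type = "oneshot";
      # Placeholder path; the real value is the store path of the
      # create-microvm-test-zfs-dataset script written above.
      ExecStart = "/nix/store/<hash>-create-microvm-test-zfs-dataset";
    };
  };
  # The microvm itself is only started once its persistent dataset is mounted.
  systemd.services."microvm@test" = {
    requires = ["persist-vms-test.mount"];
    after = ["persist-vms-test.mount"];
  };
}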


@@ -12,7 +12,7 @@ in
[[ -d .git ]] && [[ -f flake.nix ]] || { echo "error: Please execute this from the project's root folder (the folder with flake.nix)" >&2; exit 1; }
for f in $(find . -type f -name '*.nix.age'); do
echo "Formatting $f ..."
decrypted=$(${../rage-decrypt-and-cache.sh} --print-out-path "$f" ${concatStringsSep " " self.secrets.masterIdentities}) \
decrypted=$(${../rage-decrypt-and-cache.sh} --print-out-path "$f" ${concatStringsSep " " self.secretsConfig.masterIdentities}) \
|| { echo "error: Failed to decrypt!" >&2; exit 1; }
formatted=$(${pkgs.alejandra}/bin/alejandra --quiet < "$decrypted") \
|| { echo "error: Failed to format $decrypted!" >&2; exit 1; }


@@ -119,7 +119,7 @@ in rec {
};
};
rageMasterIdentityArgs = concatMapStrings (x: ''-i ${escapeShellArg x} '') self.secrets.masterIdentities;
rageMasterIdentityArgs = concatMapStrings (x: ''-i ${escapeShellArg x} '') self.secretsConfig.masterIdentities;
rageExtraEncryptionPubkeys =
concatMapStrings (
x:
@@ -127,7 +127,7 @@
then ''-R ${escapeShellArg x} ''
else ''-r ${escapeShellArg x} ''
)
self.secrets.extraEncryptionPubkeys;
self.secretsConfig.extraEncryptionPubkeys;
# The arguments required to de-/encrypt a secret in this repository
rageDecryptArgs = "${rageMasterIdentityArgs}";
rageEncryptArgs = "${rageMasterIdentityArgs} ${rageExtraEncryptionPubkeys}";

Binary file not shown.


@@ -0,0 +1 @@
vTtaQGwBCg3t7JVaKg8U1k1Lv41XMdDhiTc4K7mi9Ss=


@@ -0,0 +1,10 @@
age-encryption.org/v1
-> X25519 +rh+OOkCRYCr2yQyj3XaxJZiZeoeyyPDHXUiQ3SMqAQ
rs6MQlD8/ccPU/HtdWuOIeX1RWsihBlxZ0MuustxxsQ
-> piv-p256 xqSe8Q AwxXPO3A1XMHGKE8HMtwnXJ8pgyjp2uS/q/GKmCkf+BR
/54hKpxBptCRfFUt5OdhTyjInf3556nC5vBy43uSgNU
-> I-grease "w0 ./zzhbg ,4iOy/r3
3ojmDBEzftsdy7oMF8zYU/7Yc92xQku7QIJkXDtO7LgGZGjsng0B+ZiwbRJGxWiL
AZioiI0KllFnam8rMtHk9w
--- VFUOXs7a5xhlh0wlOVe04hgpB/FCSPhAblqmeuLftac